author		Linus Torvalds <torvalds@linux-foundation.org>		2019-05-06 23:15:06 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>		2019-05-06 23:15:06 -0400
commit		81ff5d2cba4f86cd850b9ee4a530cd221ee45aa3 (patch)
tree		532847c0823dc864e3aa9da6cde863e48157eafa
parent		7aefd944f038c7469571adb37769cb6f3924ecfa (diff)
parent		e59f755ceb6d6f39f90899d2a4e39c3e05837e12 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 "API:
   - Add support for AEAD in simd
   - Add fuzz testing to testmgr
   - Add panic_on_fail module parameter to testmgr
   - Use per-CPU struct instead of multiple variables in scompress
   - Change verify API for akcipher

  Algorithms:
   - Convert x86 AEAD algorithms over to simd
   - Forbid 2-key 3DES in FIPS mode
   - Add EC-RDSA (GOST 34.10) algorithm

  Drivers:
   - Set output IV with ctr-aes in crypto4xx
   - Set output IV in rockchip
   - Fix potential length overflow with hashing in sun4i-ss
   - Fix computation error with ctr in vmx
   - Add SM4 protected keys support in ccree
   - Remove long-broken mxc-scc driver
   - Add rfc4106(gcm(aes)) cipher support in cavium/nitrox"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (179 commits)
  crypto: ccree - use a proper le32 type for le32 val
  crypto: ccree - remove set but not used variable 'du_size'
  crypto: ccree - Make cc_sec_disable static
  crypto: ccree - fix spelling mistake "protedcted" -> "protected"
  crypto: caam/qi2 - generate hash keys in-place
  crypto: caam/qi2 - fix DMA mapping of stack memory
  crypto: caam/qi2 - fix zero-length buffer DMA mapping
  crypto: stm32/cryp - update to return iv_out
  crypto: stm32/cryp - remove request mutex protection
  crypto: stm32/cryp - add weak key check for DES
  crypto: atmel - remove set but not used variable 'alg_name'
  crypto: picoxcell - Use dev_get_drvdata()
  crypto: crypto4xx - get rid of redundant using_sd variable
  crypto: crypto4xx - use sync skcipher for fallback
  crypto: crypto4xx - fix cfb and ofb "overran dst buffer" issues
  crypto: crypto4xx - fix ctr-aes missing output IV
  crypto: ecrdsa - select ASN1 and OID_REGISTRY for EC-RDSA
  crypto: ux500 - use ccflags-y instead of CFLAGS_<basename>.o
  crypto: ccree - handle tee fips error during power management resume
  crypto: ccree - add function to handle cryptocell tee fips error
  ...
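A note on the recurring change in the per-architecture glue diffs below: the series replaces may_use_simd() with the new crypto_simd_usable() helper from <crypto/internal/simd.h>, falling back to the generic C implementation whenever the SIMD register file cannot be used (e.g. in hard interrupt context). A minimal sketch of that pattern follows; my_sha256_update and my_block_fn are hypothetical names for illustration, while the helpers they call are the real ones that appear in the arm64 diffs in this merge:

#include <asm/neon.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>	/* crypto_simd_usable() */
#include <crypto/sha.h>
#include <crypto/sha256_base.h>

/* Hypothetical NEON block function with the sha256_block_fn signature. */
static void my_block_fn(struct sha256_state *sst, u8 const *src, int blocks);

static int my_sha256_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	/* SIMD unavailable (e.g. hard IRQ context): take the scalar path. */
	if (!crypto_simd_usable())
		return crypto_sha256_update(desc, data, len);

	kernel_neon_begin();	/* claim the NEON unit for this CPU */
	sha256_base_do_update(desc, data, len, my_block_fn);
	kernel_neon_end();
	return 0;
}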
-rw-r--r--  Documentation/crypto/api-samples.rst | 1
-rw-r--r--  arch/arm/crypto/aes-neonbs-glue.c | 2
-rw-r--r--  arch/arm/crypto/chacha-neon-glue.c | 5
-rw-r--r--  arch/arm/crypto/crc32-ce-glue.c | 5
-rw-r--r--  arch/arm/crypto/crct10dif-ce-glue.c | 3
-rw-r--r--  arch/arm/crypto/ghash-ce-glue.c | 10
-rw-r--r--  arch/arm/crypto/nhpoly1305-neon-glue.c | 3
-rw-r--r--  arch/arm/crypto/sha1-ce-glue.c | 5
-rw-r--r--  arch/arm/crypto/sha1_neon_glue.c | 5
-rw-r--r--  arch/arm/crypto/sha2-ce-glue.c | 5
-rw-r--r--  arch/arm/crypto/sha256_neon_glue.c | 5
-rw-r--r--  arch/arm/crypto/sha512-neon-glue.c | 5
-rw-r--r--  arch/arm64/crypto/aes-ce-ccm-glue.c | 7
-rw-r--r--  arch/arm64/crypto/aes-ce-glue.c | 5
-rw-r--r--  arch/arm64/crypto/aes-glue.c | 6
-rw-r--r--  arch/arm64/crypto/aes-neonbs-glue.c | 4
-rw-r--r--  arch/arm64/crypto/chacha-neon-glue.c | 5
-rw-r--r--  arch/arm64/crypto/crct10dif-ce-glue.c | 5
-rw-r--r--  arch/arm64/crypto/ghash-ce-glue.c | 17
-rw-r--r--  arch/arm64/crypto/nhpoly1305-neon-glue.c | 3
-rw-r--r--  arch/arm64/crypto/sha1-ce-glue.c | 7
-rw-r--r--  arch/arm64/crypto/sha2-ce-glue.c | 7
-rw-r--r--  arch/arm64/crypto/sha256-glue.c | 5
-rw-r--r--  arch/arm64/crypto/sha3-ce-glue.c | 5
-rw-r--r--  arch/arm64/crypto/sha512-ce-glue.c | 7
-rw-r--r--  arch/arm64/crypto/sm3-ce-glue.c | 7
-rw-r--r--  arch/arm64/crypto/sm4-ce-glue.c | 5
-rw-r--r--  arch/powerpc/crypto/crc32c-vpmsum_glue.c | 4
-rw-r--r--  arch/powerpc/crypto/crct10dif-vpmsum_glue.c | 4
-rw-r--r--  arch/powerpc/include/asm/Kbuild | 1
-rw-r--r--  arch/s390/crypto/des_s390.c | 21
-rw-r--r--  arch/sparc/crypto/des_glue.c | 11
-rw-r--r--  arch/x86/crypto/aegis128-aesni-glue.c | 157
-rw-r--r--  arch/x86/crypto/aegis128l-aesni-glue.c | 157
-rw-r--r--  arch/x86/crypto/aegis256-aesni-glue.c | 157
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c | 212
-rw-r--r--  arch/x86/crypto/chacha_glue.c | 6
-rw-r--r--  arch/x86/crypto/crc32-pclmul_glue.c | 5
-rw-r--r--  arch/x86/crypto/crc32c-intel_glue.c | 7
-rw-r--r--  arch/x86/crypto/crct10dif-pclmul_glue.c | 20
-rw-r--r--  arch/x86/crypto/ghash-clmulni-intel_glue.c | 11
-rw-r--r--  arch/x86/crypto/morus1280-avx2-glue.c | 12
-rw-r--r--  arch/x86/crypto/morus1280-sse2-glue.c | 12
-rw-r--r--  arch/x86/crypto/morus1280_glue.c | 85
-rw-r--r--  arch/x86/crypto/morus640-sse2-glue.c | 12
-rw-r--r--  arch/x86/crypto/morus640_glue.c | 85
-rw-r--r--  arch/x86/crypto/nhpoly1305-avx2-glue.c | 5
-rw-r--r--  arch/x86/crypto/nhpoly1305-sse2-glue.c | 5
-rw-r--r--  arch/x86/crypto/poly1305_glue.c | 4
-rw-r--r--  arch/x86/crypto/sha1_ssse3_glue.c | 7
-rw-r--r--  arch/x86/crypto/sha256_ssse3_glue.c | 7
-rw-r--r--  arch/x86/crypto/sha512_ssse3_glue.c | 10
-rw-r--r--  arch/x86/power/hibernate.c | 1
-rw-r--r--  crypto/842.c | 2
-rw-r--r--  crypto/Kconfig | 85
-rw-r--r--  crypto/Makefile | 10
-rw-r--r--  crypto/adiantum.c | 3
-rw-r--r--  crypto/aegis128.c | 2
-rw-r--r--  crypto/aegis128l.c | 2
-rw-r--r--  crypto/aegis256.c | 2
-rw-r--r--  crypto/aes_generic.c | 10
-rw-r--r--  crypto/akcipher.c | 14
-rw-r--r--  crypto/algboss.c | 8
-rw-r--r--  crypto/ansi_cprng.c | 2
-rw-r--r--  crypto/anubis.c | 2
-rw-r--r--  crypto/arc4.c | 2
-rw-r--r--  crypto/asymmetric_keys/asym_tpm.c | 43
-rw-r--r--  crypto/asymmetric_keys/pkcs7_verify.c | 1
-rw-r--r--  crypto/asymmetric_keys/public_key.c | 105
-rw-r--r--  crypto/asymmetric_keys/verify_pefile.c | 1
-rw-r--r--  crypto/asymmetric_keys/x509.asn1 | 2
-rw-r--r--  crypto/asymmetric_keys/x509_cert_parser.c | 57
-rw-r--r--  crypto/asymmetric_keys/x509_public_key.c | 1
-rw-r--r--  crypto/authenc.c | 2
-rw-r--r--  crypto/authencesn.c | 2
-rw-r--r--  crypto/blowfish_generic.c | 2
-rw-r--r--  crypto/camellia_generic.c | 2
-rw-r--r--  crypto/cast5_generic.c | 2
-rw-r--r--  crypto/cast6_generic.c | 2
-rw-r--r--  crypto/cbc.c | 2
-rw-r--r--  crypto/ccm.c | 46
-rw-r--r--  crypto/cfb.c | 2
-rw-r--r--  crypto/chacha20poly1305.c | 6
-rw-r--r--  crypto/chacha_generic.c | 12
-rw-r--r--  crypto/cmac.c | 2
-rw-r--r--  crypto/crc32_generic.c | 2
-rw-r--r--  crypto/crc32c_generic.c | 2
-rw-r--r--  crypto/crct10dif_generic.c | 13
-rw-r--r--  crypto/cryptd.c | 252
-rw-r--r--  crypto/crypto_null.c | 2
-rw-r--r--  crypto/ctr.c | 2
-rw-r--r--  crypto/cts.c | 20
-rw-r--r--  crypto/deflate.c | 2
-rw-r--r--  crypto/des_generic.c | 13
-rw-r--r--  crypto/dh.c | 2
-rw-r--r--  crypto/drbg.c | 3
-rw-r--r--  crypto/ecb.c | 2
-rw-r--r--  crypto/ecc.c | 417
-rw-r--r--  crypto/ecc.h | 153
-rw-r--r--  crypto/ecc_curve_defs.h | 15
-rw-r--r--  crypto/ecdh.c | 2
-rw-r--r--  crypto/echainiv.c | 2
-rw-r--r--  crypto/ecrdsa.c | 296
-rw-r--r--  crypto/ecrdsa_defs.h | 225
-rw-r--r--  crypto/ecrdsa_params.asn1 | 4
-rw-r--r--  crypto/ecrdsa_pub_key.asn1 | 1
-rw-r--r--  crypto/fcrypt.c | 2
-rw-r--r--  crypto/fips.c | 2
-rw-r--r--  crypto/gcm.c | 36
-rw-r--r--  crypto/ghash-generic.c | 2
-rw-r--r--  crypto/hmac.c | 13
-rw-r--r--  crypto/jitterentropy-kcapi.c | 2
-rw-r--r--  crypto/keywrap.c | 2
-rw-r--r--  crypto/khazad.c | 2
-rw-r--r--  crypto/lrw.c | 6
-rw-r--r--  crypto/lz4.c | 2
-rw-r--r--  crypto/lz4hc.c | 2
-rw-r--r--  crypto/lzo-rle.c | 2
-rw-r--r--  crypto/lzo.c | 2
-rw-r--r--  crypto/md4.c | 2
-rw-r--r--  crypto/md5.c | 2
-rw-r--r--  crypto/michael_mic.c | 2
-rw-r--r--  crypto/morus1280.c | 2
-rw-r--r--  crypto/morus640.c | 2
-rw-r--r--  crypto/nhpoly1305.c | 2
-rw-r--r--  crypto/ofb.c | 2
-rw-r--r--  crypto/pcbc.c | 2
-rw-r--r--  crypto/pcrypt.c | 2
-rw-r--r--  crypto/poly1305_generic.c | 2
-rw-r--r--  crypto/rmd128.c | 2
-rw-r--r--  crypto/rmd160.c | 2
-rw-r--r--  crypto/rmd256.c | 2
-rw-r--r--  crypto/rmd320.c | 2
-rw-r--r--  crypto/rsa-pkcs1pad.c | 33
-rw-r--r--  crypto/rsa.c | 111
-rw-r--r--  crypto/salsa20_generic.c | 13
-rw-r--r--  crypto/scompress.c | 129
-rw-r--r--  crypto/seed.c | 2
-rw-r--r--  crypto/seqiv.c | 2
-rw-r--r--  crypto/serpent_generic.c | 2
-rw-r--r--  crypto/sha1_generic.c | 2
-rw-r--r--  crypto/sha256_generic.c | 2
-rw-r--r--  crypto/sha3_generic.c | 2
-rw-r--r--  crypto/sha512_generic.c | 2
-rw-r--r--  crypto/shash.c | 7
-rw-r--r--  crypto/simd.c | 273
-rw-r--r--  crypto/skcipher.c | 9
-rw-r--r--  crypto/sm3_generic.c | 2
-rw-r--r--  crypto/sm4_generic.c | 2
-rw-r--r--  crypto/streebog_generic.c | 27
-rw-r--r--  crypto/tcrypt.c | 2
-rw-r--r--  crypto/tea.c | 2
-rw-r--r--  crypto/testmgr.c | 1242
-rw-r--r--  crypto/testmgr.h | 181
-rw-r--r--  crypto/tgr192.c | 2
-rw-r--r--  crypto/twofish_generic.c | 2
-rw-r--r--  crypto/vmac.c | 2
-rw-r--r--  crypto/wp512.c | 2
-rw-r--r--  crypto/xcbc.c | 2
-rw-r--r--  crypto/xts.c | 2
-rw-r--r--  crypto/zstd.c | 2
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 1
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 2
-rw-r--r--  drivers/char/hw_random/omap-rng.c | 1
-rw-r--r--  drivers/char/hw_random/stm32-rng.c | 9
-rw-r--r--  drivers/crypto/Kconfig | 9
-rw-r--r--  drivers/crypto/Makefile | 1
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c | 24
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 48
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.h | 3
-rw-r--r--  drivers/crypto/atmel-tdes.c | 106
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c | 2
-rw-r--r--  drivers/crypto/bcm/cipher.c | 22
-rw-r--r--  drivers/crypto/bcm/spu.c | 3
-rw-r--r--  drivers/crypto/bcm/util.c | 1
-rw-r--r--  drivers/crypto/caam/caamalg.c | 75
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 66
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c | 243
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.h | 2
-rw-r--r--  drivers/crypto/caam/caampkc.c | 2
-rw-r--r--  drivers/crypto/caam/ctrl.c | 20
-rw-r--r--  drivers/crypto/caam/error.c | 2
-rw-r--r--  drivers/crypto/caam/intern.h | 4
-rw-r--r--  drivers/crypto/caam/jr.c | 33
-rw-r--r--  drivers/crypto/caam/qi.c | 4
-rw-r--r--  drivers/crypto/caam/regs.h | 11
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_algs.c | 30
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_main.c | 2
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_mbox.c | 17
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_reqmanager.c | 6
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_aead.c | 337
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_hal.c | 65
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_req.h | 46
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_skcipher.c | 8
-rw-r--r--  drivers/crypto/cavium/zip/zip_crypto.c | 8
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-des3.c | 21
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-rsa.c | 8
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 2
-rw-r--r--  drivers/crypto/ccp/psp-dev.c | 69
-rw-r--r--  drivers/crypto/ccree/Makefile | 1
-rw-r--r--  drivers/crypto/ccree/cc_aead.c | 118
-rw-r--r--  drivers/crypto/ccree/cc_aead.h | 3
-rw-r--r--  drivers/crypto/ccree/cc_buffer_mgr.c | 341
-rw-r--r--  drivers/crypto/ccree/cc_buffer_mgr.h | 2
-rw-r--r--  drivers/crypto/ccree/cc_cipher.c | 585
-rw-r--r--  drivers/crypto/ccree/cc_cipher.h | 3
-rw-r--r--  drivers/crypto/ccree/cc_crypto_ctx.h | 10
-rw-r--r--  drivers/crypto/ccree/cc_debugfs.c | 44
-rw-r--r--  drivers/crypto/ccree/cc_debugfs.h | 2
-rw-r--r--  drivers/crypto/ccree/cc_driver.c | 120
-rw-r--r--  drivers/crypto/ccree/cc_driver.h | 36
-rw-r--r--  drivers/crypto/ccree/cc_fips.c | 29
-rw-r--r--  drivers/crypto/ccree/cc_fips.h | 4
-rw-r--r--  drivers/crypto/ccree/cc_hash.c | 64
-rw-r--r--  drivers/crypto/ccree/cc_hash.h | 2
-rw-r--r--  drivers/crypto/ccree/cc_host_regs.h | 123
-rw-r--r--  drivers/crypto/ccree/cc_hw_queue_defs.h | 35
-rw-r--r--  drivers/crypto/ccree/cc_ivgen.c | 11
-rw-r--r--  drivers/crypto/ccree/cc_ivgen.h | 2
-rw-r--r--  drivers/crypto/ccree/cc_kernel_regs.h | 2
-rw-r--r--  drivers/crypto/ccree/cc_lli_defs.h | 4
-rw-r--r--  drivers/crypto/ccree/cc_pm.c | 11
-rw-r--r--  drivers/crypto/ccree/cc_pm.h | 2
-rw-r--r--  drivers/crypto/ccree/cc_request_mgr.c | 116
-rw-r--r--  drivers/crypto/ccree/cc_request_mgr.h | 2
-rw-r--r--  drivers/crypto/ccree/cc_sram_mgr.c | 7
-rw-r--r--  drivers/crypto/ccree/cc_sram_mgr.h | 2
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 2
-rw-r--r--  drivers/crypto/hifn_795x.c | 31
-rw-r--r--  drivers/crypto/hisilicon/sec/sec_algs.c | 12
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 11
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 64
-rw-r--r--  drivers/crypto/marvell/cipher.c | 11
-rw-r--r--  drivers/crypto/marvell/hash.c | 3
-rw-r--r--  drivers/crypto/mediatek/mtk-sha.c | 3
-rw-r--r--  drivers/crypto/mxc-scc.c | 767
-rw-r--r--  drivers/crypto/mxs-dcp.c | 14
-rw-r--r--  drivers/crypto/n2_core.c | 15
-rw-r--r--  drivers/crypto/nx/nx-842-pseries.c | 6
-rw-r--r--  drivers/crypto/nx/nx-842.c | 3
-rw-r--r--  drivers/crypto/nx/nx-aes-xcbc.c | 12
-rw-r--r--  drivers/crypto/nx/nx-sha256.c | 6
-rw-r--r--  drivers/crypto/nx/nx-sha512.c | 6
-rw-r--r--  drivers/crypto/omap-des.c | 29
-rw-r--r--  drivers/crypto/omap-sham.c | 2
-rw-r--r--  drivers/crypto/padlock-sha.c | 5
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 35
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 1
-rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c | 2
-rw-r--r--  drivers/crypto/qce/ablkcipher.c | 22
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c | 61
-rw-r--r--  drivers/crypto/s5p-sss.c | 1
-rw-r--r--  drivers/crypto/sahara.c | 6
-rw-r--r--  drivers/crypto/stm32/Kconfig | 1
-rw-r--r--  drivers/crypto/stm32/stm32-cryp.c | 74
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c | 4
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | 78
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-core.c | 19
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-hash.c | 5
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss.h | 2
-rw-r--r--  drivers/crypto/talitos.c | 108
-rw-r--r--  drivers/crypto/ux500/cryp/Makefile | 6
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 86
-rw-r--r--  drivers/crypto/vmx/aes.c | 14
-rw-r--r--  drivers/crypto/vmx/aes_cbc.c | 14
-rw-r--r--  drivers/crypto/vmx/aes_ctr.c | 10
-rw-r--r--  drivers/crypto/vmx/aes_xts.c | 14
-rw-r--r--  drivers/crypto/vmx/aesp8-ppc.pl | 4
-rw-r--r--  drivers/crypto/vmx/ghash.c | 10
-rw-r--r--  drivers/crypto/vmx/vmx.c | 4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.h | 1
-rw-r--r--  drivers/md/dm-crypt.c | 3
-rw-r--r--  drivers/md/dm-integrity.c | 2
-rw-r--r--  drivers/net/ppp/ppp_mppe.c | 1
-rw-r--r--  drivers/net/wireless/intersil/orinoco/mic.c | 1
-rw-r--r--  drivers/nfc/s3fwrn5/firmware.c | 1
-rw-r--r--  drivers/staging/ks7010/ks_hostif.c | 1
-rw-r--r--  drivers/staging/rtl8192e/rtllib_crypt_tkip.c | 1
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.c | 1
-rw-r--r--  drivers/thunderbolt/domain.c | 1
-rw-r--r--  fs/cifs/misc.c | 1
-rw-r--r--  fs/crypto/keyinfo.c | 1
-rw-r--r--  fs/ecryptfs/crypto.c | 1
-rw-r--r--  fs/ecryptfs/keystore.c | 1
-rw-r--r--  fs/ext4/ext4.h | 1
-rw-r--r--  fs/f2fs/f2fs.h | 1
-rw-r--r--  fs/nfsd/nfs4recover.c | 1
-rw-r--r--  fs/ubifs/auth.c | 6
-rw-r--r--  fs/ubifs/replay.c | 2
-rw-r--r--  include/crypto/aes.h | 8
-rw-r--r--  include/crypto/akcipher.h | 54
-rw-r--r--  include/crypto/cryptd.h | 18
-rw-r--r--  include/crypto/des.h | 43
-rw-r--r--  include/crypto/hash.h | 10
-rw-r--r--  include/crypto/internal/simd.h | 44
-rw-r--r--  include/crypto/morus1280_glue.h | 79
-rw-r--r--  include/crypto/morus640_glue.h | 79
-rw-r--r--  include/crypto/public_key.h | 4
-rw-r--r--  include/crypto/streebog.h | 5
-rw-r--r--  include/linux/jbd2.h | 1
-rw-r--r--  include/linux/oid_registry.h | 18
-rw-r--r--  include/linux/psp-sev.h | 3
-rw-r--r--  include/uapi/linux/psp-sev.h | 18
-rw-r--r--  kernel/kexec_file.c | 1
-rw-r--r--  lib/crc-t10dif.c | 1
-rw-r--r--  lib/digsig.c | 1
-rw-r--r--  lib/libcrc32c.c | 1
-rw-r--r--  net/bluetooth/amp.c | 1
-rw-r--r--  net/bluetooth/smp.c | 1
-rw-r--r--  net/sctp/auth.c | 1
-rw-r--r--  net/sctp/sm_make_chunk.c | 2
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c | 2
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_mech.c | 1
-rw-r--r--  net/wireless/lib80211_crypt_tkip.c | 1
-rw-r--r--  security/apparmor/crypto.c | 2
-rw-r--r--  security/integrity/digsig_asymmetric.c | 11
-rw-r--r--  security/integrity/evm/evm_crypto.c | 1
-rw-r--r--  security/integrity/ima/ima_crypto.c | 4
-rw-r--r--  security/keys/dh.c | 1
-rw-r--r--  security/keys/encrypted-keys/encrypted.c | 1
-rw-r--r--  security/keys/trusted.c | 1
322 files changed, 5973 insertions, 4248 deletions
diff --git a/Documentation/crypto/api-samples.rst b/Documentation/crypto/api-samples.rst
index 0f6ca8b7261e..f14afaaf2f32 100644
--- a/Documentation/crypto/api-samples.rst
+++ b/Documentation/crypto/api-samples.rst
@@ -133,7 +133,6 @@ Code Example For Use of Operational State Memory With SHASH
 	if (!sdesc)
 		return ERR_PTR(-ENOMEM);
 	sdesc->shash.tfm = alg;
-	sdesc->shash.flags = 0x0;
 	return sdesc;
 }
 
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index 07e31941dc67..617c2c99ebfb 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -278,6 +278,8 @@ static int __xts_crypt(struct skcipher_request *req,
 	int err;
 
 	err = skcipher_walk_virt(&walk, req, true);
+	if (err)
+		return err;
 
 	crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);
 
diff --git a/arch/arm/crypto/chacha-neon-glue.c b/arch/arm/crypto/chacha-neon-glue.c
index 9d6fda81986d..48a89537b828 100644
--- a/arch/arm/crypto/chacha-neon-glue.c
+++ b/arch/arm/crypto/chacha-neon-glue.c
@@ -21,6 +21,7 @@
 
 #include <crypto/algapi.h>
 #include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -93,7 +94,7 @@ static int chacha_neon(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
 		return crypto_chacha_crypt(req);
 
 	return chacha_neon_stream_xor(req, ctx, req->iv);
@@ -107,7 +108,7 @@ static int xchacha_neon(struct skcipher_request *req)
 	u32 state[16];
 	u8 real_iv[16];
 
-	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
 		return crypto_xchacha_crypt(req);
 
 	crypto_chacha_init(state, ctx, req->iv);
diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c
index cd9e93b46c2d..e712c2a7d387 100644
--- a/arch/arm/crypto/crc32-ce-glue.c
+++ b/arch/arm/crypto/crc32-ce-glue.c
@@ -16,6 +16,7 @@
 #include <linux/string.h>
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 
 #include <asm/hwcap.h>
 #include <asm/neon.h>
@@ -113,7 +114,7 @@ static int crc32_pmull_update(struct shash_desc *desc, const u8 *data,
 	u32 *crc = shash_desc_ctx(desc);
 	unsigned int l;
 
-	if (may_use_simd()) {
+	if (crypto_simd_usable()) {
 		if ((u32)data % SCALE_F) {
 			l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));
 
@@ -147,7 +148,7 @@ static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data,
 	u32 *crc = shash_desc_ctx(desc);
 	unsigned int l;
 
-	if (may_use_simd()) {
+	if (crypto_simd_usable()) {
 		if ((u32)data % SCALE_F) {
 			l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));
 
diff --git a/arch/arm/crypto/crct10dif-ce-glue.c b/arch/arm/crypto/crct10dif-ce-glue.c
index 3d6b800b8396..3b24f2872592 100644
--- a/arch/arm/crypto/crct10dif-ce-glue.c
+++ b/arch/arm/crypto/crct10dif-ce-glue.c
@@ -15,6 +15,7 @@
 #include <linux/string.h>
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 
 #include <asm/neon.h>
 #include <asm/simd.h>
@@ -36,7 +37,7 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
 {
 	u16 *crc = shash_desc_ctx(desc);
 
-	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
 		kernel_neon_begin();
 		*crc = crc_t10dif_pmull(*crc, data, length);
 		kernel_neon_end();
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index b7d30b6cf49c..39d1ccec1aab 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -14,6 +14,7 @@
 #include <asm/unaligned.h>
 #include <crypto/cryptd.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/gf128mul.h>
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
@@ -185,7 +186,6 @@ static int ghash_async_init(struct ahash_request *req)
 	struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
 
 	desc->tfm = child;
-	desc->flags = req->base.flags;
 	return crypto_shash_init(desc);
 }
 
@@ -196,7 +196,7 @@ static int ghash_async_update(struct ahash_request *req)
 	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!may_use_simd() ||
+	if (!crypto_simd_usable() ||
 	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -214,7 +214,7 @@ static int ghash_async_final(struct ahash_request *req)
 	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!may_use_simd() ||
+	if (!crypto_simd_usable() ||
 	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -232,7 +232,7 @@ static int ghash_async_digest(struct ahash_request *req)
 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
 	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!may_use_simd() ||
+	if (!crypto_simd_usable() ||
 	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -242,7 +242,6 @@ static int ghash_async_digest(struct ahash_request *req)
 		struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
 
 		desc->tfm = child;
-		desc->flags = req->base.flags;
 		return shash_ahash_digest(req, desc);
 	}
 }
@@ -255,7 +254,6 @@ static int ghash_async_import(struct ahash_request *req, const void *in)
 	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
 
 	desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm);
-	desc->flags = req->base.flags;
 
 	return crypto_shash_import(desc, in);
 }
diff --git a/arch/arm/crypto/nhpoly1305-neon-glue.c b/arch/arm/crypto/nhpoly1305-neon-glue.c
index 49aae87cb2bc..ae5aefc44a4d 100644
--- a/arch/arm/crypto/nhpoly1305-neon-glue.c
+++ b/arch/arm/crypto/nhpoly1305-neon-glue.c
@@ -9,6 +9,7 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/nhpoly1305.h>
 #include <linux/module.h>
 
@@ -25,7 +26,7 @@ static void _nh_neon(const u32 *key, const u8 *message, size_t message_len,
 static int nhpoly1305_neon_update(struct shash_desc *desc,
 				  const u8 *src, unsigned int srclen)
 {
-	if (srclen < 64 || !may_use_simd())
+	if (srclen < 64 || !crypto_simd_usable())
 		return crypto_nhpoly1305_update(desc, src, srclen);
 
 	do {
diff --git a/arch/arm/crypto/sha1-ce-glue.c b/arch/arm/crypto/sha1-ce-glue.c
index b732522e20f8..4c6c6900853c 100644
--- a/arch/arm/crypto/sha1-ce-glue.c
+++ b/arch/arm/crypto/sha1-ce-glue.c
@@ -9,6 +9,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha1_base.h>
 #include <linux/cpufeature.h>
@@ -33,7 +34,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha1_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd() ||
+	if (!crypto_simd_usable() ||
 	    (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
 		return sha1_update_arm(desc, data, len);
 
@@ -47,7 +48,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
 static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
 			 unsigned int len, u8 *out)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return sha1_finup_arm(desc, data, len, out);
 
 	kernel_neon_begin();
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
index d15e0ea2c95e..d6c95c213d42 100644
--- a/arch/arm/crypto/sha1_neon_glue.c
+++ b/arch/arm/crypto/sha1_neon_glue.c
@@ -19,6 +19,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
@@ -39,7 +40,7 @@ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha1_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd() ||
+	if (!crypto_simd_usable() ||
 	    (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
 		return sha1_update_arm(desc, data, len);
 
@@ -54,7 +55,7 @@ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
 static int sha1_neon_finup(struct shash_desc *desc, const u8 *data,
 			   unsigned int len, u8 *out)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return sha1_finup_arm(desc, data, len, out);
 
 	kernel_neon_begin();
diff --git a/arch/arm/crypto/sha2-ce-glue.c b/arch/arm/crypto/sha2-ce-glue.c
index 1211a5c129fc..a47a9d4b663e 100644
--- a/arch/arm/crypto/sha2-ce-glue.c
+++ b/arch/arm/crypto/sha2-ce-glue.c
@@ -9,6 +9,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha256_base.h>
 #include <linux/cpufeature.h>
@@ -34,7 +35,7 @@ static int sha2_ce_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd() ||
+	if (!crypto_simd_usable() ||
 	    (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
 		return crypto_sha256_arm_update(desc, data, len);
 
@@ -49,7 +50,7 @@ static int sha2_ce_update(struct shash_desc *desc, const u8 *data,
 static int sha2_ce_finup(struct shash_desc *desc, const u8 *data,
 			 unsigned int len, u8 *out)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha256_arm_finup(desc, data, len, out);
 
 	kernel_neon_begin();
diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c
index 1d82c6cd31a4..f3f6b1624fc3 100644
--- a/arch/arm/crypto/sha256_neon_glue.c
+++ b/arch/arm/crypto/sha256_neon_glue.c
@@ -15,6 +15,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <linux/cryptohash.h>
 #include <linux/types.h>
 #include <linux/string.h>
@@ -34,7 +35,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd() ||
+	if (!crypto_simd_usable() ||
 	    (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
 		return crypto_sha256_arm_update(desc, data, len);
 
@@ -49,7 +50,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
 static int sha256_finup(struct shash_desc *desc, const u8 *data,
 			unsigned int len, u8 *out)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha256_arm_finup(desc, data, len, out);
 
 	kernel_neon_begin();
diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c
index 8a5642b41fd6..d33ab59c26c0 100644
--- a/arch/arm/crypto/sha512-neon-glue.c
+++ b/arch/arm/crypto/sha512-neon-glue.c
@@ -9,6 +9,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha512_base.h>
 #include <linux/crypto.h>
@@ -30,7 +31,7 @@ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha512_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd() ||
+	if (!crypto_simd_usable() ||
 	    (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
 		return sha512_arm_update(desc, data, len);
 
@@ -45,7 +46,7 @@ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
 static int sha512_neon_finup(struct shash_desc *desc, const u8 *data,
 			     unsigned int len, u8 *out)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return sha512_arm_finup(desc, data, len, out);
 
 	kernel_neon_begin();
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 036ea77f83bc..cb89c80800b5 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -14,6 +14,7 @@
 #include <crypto/aes.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/module.h>
 
@@ -109,7 +110,7 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
 static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
 			   u32 abytes, u32 *macp)
 {
-	if (may_use_simd()) {
+	if (crypto_simd_usable()) {
 		kernel_neon_begin();
 		ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
 				     num_rounds(key));
@@ -255,7 +256,7 @@ static int ccm_encrypt(struct aead_request *req)
 
 	err = skcipher_walk_aead_encrypt(&walk, req, false);
 
-	if (may_use_simd()) {
+	if (crypto_simd_usable()) {
 		while (walk.nbytes) {
 			u32 tail = walk.nbytes % AES_BLOCK_SIZE;
 
@@ -313,7 +314,7 @@ static int ccm_decrypt(struct aead_request *req)
 
 	err = skcipher_walk_aead_decrypt(&walk, req, false);
 
-	if (may_use_simd()) {
+	if (crypto_simd_usable()) {
 		while (walk.nbytes) {
 			u32 tail = walk.nbytes % AES_BLOCK_SIZE;
 
diff --git a/arch/arm64/crypto/aes-ce-glue.c b/arch/arm64/crypto/aes-ce-glue.c
index e6b3227bbf57..3213843fcb46 100644
--- a/arch/arm64/crypto/aes-ce-glue.c
+++ b/arch/arm64/crypto/aes-ce-glue.c
@@ -12,6 +12,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/aes.h>
+#include <crypto/internal/simd.h>
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
 #include <linux/module.h>
@@ -52,7 +53,7 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 {
 	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		__aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
 		return;
 	}
@@ -66,7 +67,7 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 {
 	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		__aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
 		return;
 	}
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 1e676625ef33..f0ceb545bd1e 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -405,7 +405,7 @@ static int ctr_encrypt_sync(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return aes_ctr_encrypt_fallback(ctx, req);
 
 	return ctr_encrypt(req);
@@ -642,7 +642,7 @@ static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
 {
 	int rounds = 6 + ctx->key_length / 4;
 
-	if (may_use_simd()) {
+	if (crypto_simd_usable()) {
 		kernel_neon_begin();
 		aes_mac_update(in, ctx->key_enc, rounds, blocks, dg, enc_before,
 			       enc_after);
@@ -707,7 +707,7 @@ static int cbcmac_final(struct shash_desc *desc, u8 *out)
 	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
 	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
 
-	mac_do_update(&tctx->key, NULL, 0, ctx->dg, 1, 0);
+	mac_do_update(&tctx->key, NULL, 0, ctx->dg, (ctx->len != 0), 0);
 
 	memcpy(out, ctx->dg, AES_BLOCK_SIZE);
 
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index bf1b321ff4c1..02b65d9eb947 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -288,7 +288,7 @@ static int ctr_encrypt_sync(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return aes_ctr_encrypt_fallback(&ctx->fallback, req);
 
 	return ctr_encrypt(req);
@@ -304,6 +304,8 @@ static int __xts_crypt(struct skcipher_request *req,
 	int err;
 
 	err = skcipher_walk_virt(&walk, req, false);
+	if (err)
+		return err;
 
 	kernel_neon_begin();
 	neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, ctx->key.rounds, 1);
diff --git a/arch/arm64/crypto/chacha-neon-glue.c b/arch/arm64/crypto/chacha-neon-glue.c
index cb054f51c917..82029cda2e77 100644
--- a/arch/arm64/crypto/chacha-neon-glue.c
+++ b/arch/arm64/crypto/chacha-neon-glue.c
@@ -21,6 +21,7 @@
 
 #include <crypto/algapi.h>
 #include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -90,7 +91,7 @@ static int chacha_neon(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
 		return crypto_chacha_crypt(req);
 
 	return chacha_neon_stream_xor(req, ctx, req->iv);
@@ -104,7 +105,7 @@ static int xchacha_neon(struct skcipher_request *req)
 	u32 state[16];
 	u8 real_iv[16];
 
-	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
 		return crypto_xchacha_crypt(req);
 
 	crypto_chacha_init(state, ctx, req->iv);
diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c
index e81d5bd555c0..2e0a7d2eee24 100644
--- a/arch/arm64/crypto/crct10dif-ce-glue.c
+++ b/arch/arm64/crypto/crct10dif-ce-glue.c
@@ -16,6 +16,7 @@
 #include <linux/string.h>
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 
 #include <asm/neon.h>
 #include <asm/simd.h>
@@ -38,7 +39,7 @@ static int crct10dif_update_pmull_p8(struct shash_desc *desc, const u8 *data,
 {
 	u16 *crc = shash_desc_ctx(desc);
 
-	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
 		kernel_neon_begin();
 		*crc = crc_t10dif_pmull_p8(*crc, data, length);
 		kernel_neon_end();
@@ -54,7 +55,7 @@ static int crct10dif_update_pmull_p64(struct shash_desc *desc, const u8 *data,
 {
 	u16 *crc = shash_desc_ctx(desc);
 
-	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
 		kernel_neon_begin();
 		*crc = crc_t10dif_pmull_p64(*crc, data, length);
 		kernel_neon_end();
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 4e69bb78ea89..b39ed99b06fb 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -17,6 +17,7 @@
 #include <crypto/gf128mul.h>
 #include <crypto/internal/aead.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
 #include <linux/cpufeature.h>
@@ -89,7 +90,7 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src,
 			   struct ghash_key const *k,
 			   const char *head))
 {
-	if (likely(may_use_simd())) {
+	if (likely(crypto_simd_usable())) {
 		kernel_neon_begin();
 		simd_update(blocks, dg, src, key, head);
 		kernel_neon_end();
@@ -441,7 +442,7 @@ static int gcm_encrypt(struct aead_request *req)
 
 	err = skcipher_walk_aead_encrypt(&walk, req, false);
 
-	if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+	if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
 		u32 const *rk = NULL;
 
 		kernel_neon_begin();
@@ -473,9 +474,11 @@ static int gcm_encrypt(struct aead_request *req)
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
 		while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
-			int blocks = walk.nbytes / AES_BLOCK_SIZE;
+			const int blocks =
+				walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
 			u8 *dst = walk.dst.virt.addr;
 			u8 *src = walk.src.virt.addr;
+			int remaining = blocks;
 
 			do {
 				__aes_arm64_encrypt(ctx->aes_key.key_enc,
@@ -485,9 +488,9 @@ static int gcm_encrypt(struct aead_request *req)
 
 				dst += AES_BLOCK_SIZE;
 				src += AES_BLOCK_SIZE;
-			} while (--blocks > 0);
+			} while (--remaining > 0);
 
-			ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg,
+			ghash_do_update(blocks, dg,
 					walk.dst.virt.addr, &ctx->ghash_key,
 					NULL, pmull_ghash_update_p64);
 
@@ -563,7 +566,7 @@ static int gcm_decrypt(struct aead_request *req)
 
 	err = skcipher_walk_aead_decrypt(&walk, req, false);
 
-	if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+	if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
 		u32 const *rk = NULL;
 
 		kernel_neon_begin();
@@ -609,7 +612,7 @@ static int gcm_decrypt(struct aead_request *req)
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
 		while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
-			int blocks = walk.nbytes / AES_BLOCK_SIZE;
+			int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
 			u8 *dst = walk.dst.virt.addr;
 			u8 *src = walk.src.virt.addr;
 
diff --git a/arch/arm64/crypto/nhpoly1305-neon-glue.c b/arch/arm64/crypto/nhpoly1305-neon-glue.c
index 38a589044b6c..895d3727c1fb 100644
--- a/arch/arm64/crypto/nhpoly1305-neon-glue.c
+++ b/arch/arm64/crypto/nhpoly1305-neon-glue.c
@@ -9,6 +9,7 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/nhpoly1305.h>
 #include <linux/module.h>
 
@@ -25,7 +26,7 @@ static void _nh_neon(const u32 *key, const u8 *message, size_t message_len,
 static int nhpoly1305_neon_update(struct shash_desc *desc,
 				  const u8 *src, unsigned int srclen)
 {
-	if (srclen < 64 || !may_use_simd())
+	if (srclen < 64 || !crypto_simd_usable())
 		return crypto_nhpoly1305_update(desc, src, srclen);
 
 	do {
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index 17fac2889f56..eaa7a8258f1c 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -12,6 +12,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha1_base.h>
 #include <linux/cpufeature.h>
@@ -38,7 +39,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha1_ce_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha1_update(desc, data, len);
 
 	sctx->finalize = 0;
@@ -56,7 +57,7 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
 	struct sha1_ce_state *sctx = shash_desc_ctx(desc);
 	bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha1_finup(desc, data, len, out);
 
 	/*
@@ -78,7 +79,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
 {
 	struct sha1_ce_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha1_finup(desc, NULL, 0, out);
 
 	sctx->finalize = 0;
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 261f5195cab7..a725997e55f2 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -12,6 +12,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha256_base.h>
 #include <linux/cpufeature.h>
@@ -42,7 +43,7 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
 				(sha256_block_fn *)sha256_block_data_order);
 
@@ -61,7 +62,7 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 	bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
 
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
 				(sha256_block_fn *)sha256_block_data_order);
@@ -90,7 +91,7 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
 {
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		sha256_base_do_finalize(desc,
 				(sha256_block_fn *)sha256_block_data_order);
 		return sha256_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index 0cccdb9cc2c0..e62298740e31 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -14,6 +14,7 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha256_base.h>
 #include <linux/cryptohash.h>
@@ -89,7 +90,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 {
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
 				(sha256_block_fn *)sha256_block_data_order);
 
@@ -119,7 +120,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
 			     unsigned int len, u8 *out)
 {
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
 				(sha256_block_fn *)sha256_block_data_order);
diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
index a336feac0f59..9a4bbfc45f40 100644
--- a/arch/arm64/crypto/sha3-ce-glue.c
+++ b/arch/arm64/crypto/sha3-ce-glue.c
@@ -14,6 +14,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha3.h>
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
@@ -32,7 +33,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
 	struct sha3_state *sctx = shash_desc_ctx(desc);
 	unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha3_update(desc, data, len);
 
 	if ((sctx->partial + len) >= sctx->rsiz) {
@@ -76,7 +77,7 @@ static int sha3_final(struct shash_desc *desc, u8 *out)
 	__le64 *digest = (__le64 *)out;
 	int i;
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha3_final(desc, out);
 
 	sctx->buf[sctx->partial++] = 0x06;
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index f2c5f28c622a..2369540040aa 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -13,6 +13,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha512_base.h>
 #include <linux/cpufeature.h>
@@ -31,7 +32,7 @@ asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
 static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
 			    unsigned int len)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return sha512_base_do_update(desc, data, len,
 				(sha512_block_fn *)sha512_block_data_order);
 
@@ -46,7 +47,7 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 			   unsigned int len, u8 *out)
 {
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		if (len)
 			sha512_base_do_update(desc, data, len,
 				(sha512_block_fn *)sha512_block_data_order);
@@ -65,7 +66,7 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 
 static int sha512_ce_final(struct shash_desc *desc, u8 *out)
 {
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		sha512_base_do_finalize(desc,
 				(sha512_block_fn *)sha512_block_data_order);
 		return sha512_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sm3-ce-glue.c b/arch/arm64/crypto/sm3-ce-glue.c
index 88938a20d9b2..5d15533799a2 100644
--- a/arch/arm64/crypto/sm3-ce-glue.c
+++ b/arch/arm64/crypto/sm3-ce-glue.c
@@ -12,6 +12,7 @@
12#include <asm/simd.h> 12#include <asm/simd.h>
13#include <asm/unaligned.h> 13#include <asm/unaligned.h>
14#include <crypto/internal/hash.h> 14#include <crypto/internal/hash.h>
15#include <crypto/internal/simd.h>
15#include <crypto/sm3.h> 16#include <crypto/sm3.h>
16#include <crypto/sm3_base.h> 17#include <crypto/sm3_base.h>
17#include <linux/cpufeature.h> 18#include <linux/cpufeature.h>
@@ -28,7 +29,7 @@ asmlinkage void sm3_ce_transform(struct sm3_state *sst, u8 const *src,
28static int sm3_ce_update(struct shash_desc *desc, const u8 *data, 29static int sm3_ce_update(struct shash_desc *desc, const u8 *data,
29 unsigned int len) 30 unsigned int len)
30{ 31{
31 if (!may_use_simd()) 32 if (!crypto_simd_usable())
32 return crypto_sm3_update(desc, data, len); 33 return crypto_sm3_update(desc, data, len);
33 34
34 kernel_neon_begin(); 35 kernel_neon_begin();
@@ -40,7 +41,7 @@ static int sm3_ce_update(struct shash_desc *desc, const u8 *data,
40 41
41static int sm3_ce_final(struct shash_desc *desc, u8 *out) 42static int sm3_ce_final(struct shash_desc *desc, u8 *out)
42{ 43{
43 if (!may_use_simd()) 44 if (!crypto_simd_usable())
44 return crypto_sm3_finup(desc, NULL, 0, out); 45 return crypto_sm3_finup(desc, NULL, 0, out);
45 46
46 kernel_neon_begin(); 47 kernel_neon_begin();
@@ -53,7 +54,7 @@ static int sm3_ce_final(struct shash_desc *desc, u8 *out)
53static int sm3_ce_finup(struct shash_desc *desc, const u8 *data, 54static int sm3_ce_finup(struct shash_desc *desc, const u8 *data,
54 unsigned int len, u8 *out) 55 unsigned int len, u8 *out)
55{ 56{
56 if (!may_use_simd()) 57 if (!crypto_simd_usable())
57 return crypto_sm3_finup(desc, data, len, out); 58 return crypto_sm3_finup(desc, data, len, out);
58 59
59 kernel_neon_begin(); 60 kernel_neon_begin();
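
On arm64 the scalar fallback pairs with kernel_neon_begin()/kernel_neon_end() bracketing around the crypto-extensions transform. A hedged sketch of the finup shape these drivers follow, assuming the sm3_base helpers and the sm3_ce_transform block function seen in the file above:

	static int sm3_ce_finup_sketch(struct shash_desc *desc, const u8 *data,
				       unsigned int len, u8 *out)
	{
		if (!crypto_simd_usable())
			/* NEON is off-limits (IRQ context, or test override) */
			return crypto_sm3_finup(desc, data, len, out);

		kernel_neon_begin();		/* claim the NEON unit */
		if (len)
			sm3_base_do_update(desc, data, len, sm3_ce_transform);
		sm3_base_do_finalize(desc, sm3_ce_transform);
		kernel_neon_end();		/* release it again */

		return sm3_base_finish(desc, out);
	}
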
diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c
index 0c4fc223f225..2754c875d39c 100644
--- a/arch/arm64/crypto/sm4-ce-glue.c
+++ b/arch/arm64/crypto/sm4-ce-glue.c
@@ -3,6 +3,7 @@
3#include <asm/neon.h> 3#include <asm/neon.h>
4#include <asm/simd.h> 4#include <asm/simd.h>
5#include <crypto/sm4.h> 5#include <crypto/sm4.h>
6#include <crypto/internal/simd.h>
6#include <linux/module.h> 7#include <linux/module.h>
7#include <linux/cpufeature.h> 8#include <linux/cpufeature.h>
8#include <linux/crypto.h> 9#include <linux/crypto.h>
@@ -20,7 +21,7 @@ static void sm4_ce_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
20{ 21{
21 const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm); 22 const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
22 23
23 if (!may_use_simd()) { 24 if (!crypto_simd_usable()) {
24 crypto_sm4_encrypt(tfm, out, in); 25 crypto_sm4_encrypt(tfm, out, in);
25 } else { 26 } else {
26 kernel_neon_begin(); 27 kernel_neon_begin();
@@ -33,7 +34,7 @@ static void sm4_ce_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
33{ 34{
34 const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm); 35 const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
35 36
36 if (!may_use_simd()) { 37 if (!crypto_simd_usable()) {
37 crypto_sm4_decrypt(tfm, out, in); 38 crypto_sm4_decrypt(tfm, out, in);
38 } else { 39 } else {
39 kernel_neon_begin(); 40 kernel_neon_begin();
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index fd1d6c83f0c0..c4fa242dd652 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -1,10 +1,12 @@
1#include <linux/crc32.h> 1#include <linux/crc32.h>
2#include <crypto/internal/hash.h> 2#include <crypto/internal/hash.h>
3#include <crypto/internal/simd.h>
3#include <linux/init.h> 4#include <linux/init.h>
4#include <linux/module.h> 5#include <linux/module.h>
5#include <linux/string.h> 6#include <linux/string.h>
6#include <linux/kernel.h> 7#include <linux/kernel.h>
7#include <linux/cpufeature.h> 8#include <linux/cpufeature.h>
9#include <asm/simd.h>
8#include <asm/switch_to.h> 10#include <asm/switch_to.h>
9 11
10#define CHKSUM_BLOCK_SIZE 1 12#define CHKSUM_BLOCK_SIZE 1
@@ -22,7 +24,7 @@ static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
22 unsigned int prealign; 24 unsigned int prealign;
23 unsigned int tail; 25 unsigned int tail;
24 26
25 if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || in_interrupt()) 27 if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || !crypto_simd_usable())
26 return __crc32c_le(crc, p, len); 28 return __crc32c_le(crc, p, len);
27 29
28 if ((unsigned long)p & VMX_ALIGN_MASK) { 30 if ((unsigned long)p & VMX_ALIGN_MASK) {
diff --git a/arch/powerpc/crypto/crct10dif-vpmsum_glue.c b/arch/powerpc/crypto/crct10dif-vpmsum_glue.c
index 02ea277863d1..e27ff16573b5 100644
--- a/arch/powerpc/crypto/crct10dif-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crct10dif-vpmsum_glue.c
@@ -12,11 +12,13 @@
12 12
13#include <linux/crc-t10dif.h> 13#include <linux/crc-t10dif.h>
14#include <crypto/internal/hash.h> 14#include <crypto/internal/hash.h>
15#include <crypto/internal/simd.h>
15#include <linux/init.h> 16#include <linux/init.h>
16#include <linux/module.h> 17#include <linux/module.h>
17#include <linux/string.h> 18#include <linux/string.h>
18#include <linux/kernel.h> 19#include <linux/kernel.h>
19#include <linux/cpufeature.h> 20#include <linux/cpufeature.h>
21#include <asm/simd.h>
20#include <asm/switch_to.h> 22#include <asm/switch_to.h>
21 23
22#define VMX_ALIGN 16 24#define VMX_ALIGN 16
@@ -32,7 +34,7 @@ static u16 crct10dif_vpmsum(u16 crci, unsigned char const *p, size_t len)
32 unsigned int tail; 34 unsigned int tail;
33 u32 crc = crci; 35 u32 crc = crci;
34 36
35 if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || in_interrupt()) 37 if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || !crypto_simd_usable())
36 return crc_t10dif_generic(crc, p, len); 38 return crc_t10dif_generic(crc, p, len);
37 39
38 if ((unsigned long)p & VMX_ALIGN_MASK) { 40 if ((unsigned long)p & VMX_ALIGN_MASK) {
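
Both vpmsum drivers above gate the vector path twice: a size breakpoint, below which the scalar CRC wins because it avoids the vector state save/restore, and now crypto_simd_usable() in place of the bare in_interrupt() test. A sketch of the policy with an illustrative breakpoint value (the real constant lives in the driver):

	#define VMX_ALIGN		16
	#define VECTOR_BREAKPOINT	512	/* illustrative; see the driver */

	static bool use_vpmsum_sketch(size_t len)
	{
		/* short buffers cannot amortize the vector state switch, and
		 * the VMX unit may only be touched where crypto_simd_usable()
		 * holds -- which subsumes the old in_interrupt() check */
		return len >= VECTOR_BREAKPOINT + VMX_ALIGN &&
		       crypto_simd_usable();
	}
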
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 36bda391e549..b9f6e72bf4e5 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -10,3 +10,4 @@ generic-y += mcs_spinlock.h
10generic-y += preempt.h 10generic-y += preempt.h
11generic-y += vtime.h 11generic-y += vtime.h
12generic-y += msi.h 12generic-y += msi.h
13generic-y += simd.h
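
Opting powerpc into the asm-generic <asm/simd.h> is what gives the two files above a may_use_simd() for crypto_simd_usable() to build on. The generic fallback is essentially the old open-coded check (a sketch of asm-generic's definition):

	/* include/asm-generic/simd.h, in essence: without arch-specific
	 * FPU/vector state tracking, SIMD is usable anywhere that is
	 * not interrupt context. */
	static __must_check inline bool may_use_simd(void)
	{
		return !in_interrupt();
	}
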
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 0d15383d0ff1..1f9ab24dc048 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -224,24 +224,11 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
224 unsigned int key_len) 224 unsigned int key_len)
225{ 225{
226 struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm); 226 struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
227 int err;
227 228
228 if (!(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) && 229 err = __des3_verify_key(&tfm->crt_flags, key);
229 crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2], 230 if (unlikely(err))
230 DES_KEY_SIZE)) && 231 return err;
231 (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
232 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
233 return -EINVAL;
234 }
235
236 /* in fips mode, ensure k1 != k2 and k2 != k3 and k1 != k3 */
237 if (fips_enabled &&
238 !(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
239 crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
240 DES_KEY_SIZE) &&
241 crypto_memneq(key, &key[DES_KEY_SIZE * 2], DES_KEY_SIZE))) {
242 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
243 return -EINVAL;
244 }
245 232
246 memcpy(ctx->key, key, key_len); 233 memcpy(ctx->key, key, key_len);
247 return 0; 234 return 0;
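
This and the sparc conversion below funnel the duplicated 3DES checks into __des3_verify_key(), which also implements the "forbid 2-key 3DES in FIPS mode" item from the pull. A hedged sketch of its semantics (the real helper lives in the des header and differs in detail): K1 == K2 or K2 == K3 collapses 3DES toward single DES and is rejected whenever weak keys are forbidden; FIPS mode additionally rejects K1 == K3, i.e. 2-key 3DES:

	static int des3_verify_key_sketch(u32 *flags, const u8 *key)
	{
		u32 K[6];
		int err = -EINVAL;

		memcpy(K, key, sizeof(K));	/* key may be unaligned */

		if ((!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
		     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
		    (fips_enabled ||
		     (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)))
			goto weak;

		if (fips_enabled && !((K[0] ^ K[4]) | (K[1] ^ K[5])))
			goto weak;	/* 2-key 3DES forbidden in FIPS */

		err = 0;
	out:
		memzero_explicit(K, sizeof(K));	/* wipe the key copy */
		return err;
	weak:
		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		goto out;
	}
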
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index 4884315daff4..453a4cf5492a 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -201,18 +201,15 @@ static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key,
201 unsigned int keylen) 201 unsigned int keylen)
202{ 202{
203 struct des3_ede_sparc64_ctx *dctx = crypto_tfm_ctx(tfm); 203 struct des3_ede_sparc64_ctx *dctx = crypto_tfm_ctx(tfm);
204 const u32 *K = (const u32 *)key;
205 u32 *flags = &tfm->crt_flags; 204 u32 *flags = &tfm->crt_flags;
206 u64 k1[DES_EXPKEY_WORDS / 2]; 205 u64 k1[DES_EXPKEY_WORDS / 2];
207 u64 k2[DES_EXPKEY_WORDS / 2]; 206 u64 k2[DES_EXPKEY_WORDS / 2];
208 u64 k3[DES_EXPKEY_WORDS / 2]; 207 u64 k3[DES_EXPKEY_WORDS / 2];
208 int err;
209 209
210 if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || 210 err = __des3_verify_key(flags, key);
211 !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && 211 if (unlikely(err))
212 (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { 212 return err;
213 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
214 return -EINVAL;
215 }
216 213
217 des_sparc64_key_expand((const u32 *)key, k1); 214 des_sparc64_key_expand((const u32 *)key, k1);
218 key += DES_KEY_SIZE; 215 key += DES_KEY_SIZE;
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index 3ea71b871813..bdeee1b830be 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -11,8 +11,8 @@
11 * any later version. 11 * any later version.
12 */ 12 */
13 13
14#include <crypto/cryptd.h>
15#include <crypto/internal/aead.h> 14#include <crypto/internal/aead.h>
15#include <crypto/internal/simd.h>
16#include <crypto/internal/skcipher.h> 16#include <crypto/internal/skcipher.h>
17#include <crypto/scatterwalk.h> 17#include <crypto/scatterwalk.h>
18#include <linux/module.h> 18#include <linux/module.h>
@@ -242,131 +242,35 @@ static void crypto_aegis128_aesni_exit_tfm(struct crypto_aead *aead)
242{ 242{
243} 243}
244 244
245static int cryptd_aegis128_aesni_setkey(struct crypto_aead *aead, 245static struct aead_alg crypto_aegis128_aesni_alg = {
246 const u8 *key, unsigned int keylen) 246 .setkey = crypto_aegis128_aesni_setkey,
247{ 247 .setauthsize = crypto_aegis128_aesni_setauthsize,
248 struct cryptd_aead **ctx = crypto_aead_ctx(aead); 248 .encrypt = crypto_aegis128_aesni_encrypt,
249 struct cryptd_aead *cryptd_tfm = *ctx; 249 .decrypt = crypto_aegis128_aesni_decrypt,
250 250 .init = crypto_aegis128_aesni_init_tfm,
251 return crypto_aead_setkey(&cryptd_tfm->base, key, keylen); 251 .exit = crypto_aegis128_aesni_exit_tfm,
252} 252
253 253 .ivsize = AEGIS128_NONCE_SIZE,
254static int cryptd_aegis128_aesni_setauthsize(struct crypto_aead *aead, 254 .maxauthsize = AEGIS128_MAX_AUTH_SIZE,
255 unsigned int authsize) 255 .chunksize = AEGIS128_BLOCK_SIZE,
256{ 256
257 struct cryptd_aead **ctx = crypto_aead_ctx(aead); 257 .base = {
258 struct cryptd_aead *cryptd_tfm = *ctx; 258 .cra_flags = CRYPTO_ALG_INTERNAL,
259 259 .cra_blocksize = 1,
260 return crypto_aead_setauthsize(&cryptd_tfm->base, authsize); 260 .cra_ctxsize = sizeof(struct aegis_ctx) +
261} 261 __alignof__(struct aegis_ctx),
262 262 .cra_alignmask = 0,
263static int cryptd_aegis128_aesni_encrypt(struct aead_request *req) 263 .cra_priority = 400,
264{ 264
265 struct crypto_aead *aead = crypto_aead_reqtfm(req); 265 .cra_name = "__aegis128",
266 struct cryptd_aead **ctx = crypto_aead_ctx(aead); 266 .cra_driver_name = "__aegis128-aesni",
267 struct cryptd_aead *cryptd_tfm = *ctx; 267
268 268 .cra_module = THIS_MODULE,
269 aead = &cryptd_tfm->base;
270 if (irq_fpu_usable() && (!in_atomic() ||
271 !cryptd_aead_queued(cryptd_tfm)))
272 aead = cryptd_aead_child(cryptd_tfm);
273
274 aead_request_set_tfm(req, aead);
275
276 return crypto_aead_encrypt(req);
277}
278
279static int cryptd_aegis128_aesni_decrypt(struct aead_request *req)
280{
281 struct crypto_aead *aead = crypto_aead_reqtfm(req);
282 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
283 struct cryptd_aead *cryptd_tfm = *ctx;
284
285 aead = &cryptd_tfm->base;
286 if (irq_fpu_usable() && (!in_atomic() ||
287 !cryptd_aead_queued(cryptd_tfm)))
288 aead = cryptd_aead_child(cryptd_tfm);
289
290 aead_request_set_tfm(req, aead);
291
292 return crypto_aead_decrypt(req);
293}
294
295static int cryptd_aegis128_aesni_init_tfm(struct crypto_aead *aead)
296{
297 struct cryptd_aead *cryptd_tfm;
298 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
299
300 cryptd_tfm = cryptd_alloc_aead("__aegis128-aesni", CRYPTO_ALG_INTERNAL,
301 CRYPTO_ALG_INTERNAL);
302 if (IS_ERR(cryptd_tfm))
303 return PTR_ERR(cryptd_tfm);
304
305 *ctx = cryptd_tfm;
306 crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
307 return 0;
308}
309
310static void cryptd_aegis128_aesni_exit_tfm(struct crypto_aead *aead)
311{
312 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
313
314 cryptd_free_aead(*ctx);
315}
316
317static struct aead_alg crypto_aegis128_aesni_alg[] = {
318 {
319 .setkey = crypto_aegis128_aesni_setkey,
320 .setauthsize = crypto_aegis128_aesni_setauthsize,
321 .encrypt = crypto_aegis128_aesni_encrypt,
322 .decrypt = crypto_aegis128_aesni_decrypt,
323 .init = crypto_aegis128_aesni_init_tfm,
324 .exit = crypto_aegis128_aesni_exit_tfm,
325
326 .ivsize = AEGIS128_NONCE_SIZE,
327 .maxauthsize = AEGIS128_MAX_AUTH_SIZE,
328 .chunksize = AEGIS128_BLOCK_SIZE,
329
330 .base = {
331 .cra_flags = CRYPTO_ALG_INTERNAL,
332 .cra_blocksize = 1,
333 .cra_ctxsize = sizeof(struct aegis_ctx) +
334 __alignof__(struct aegis_ctx),
335 .cra_alignmask = 0,
336
337 .cra_name = "__aegis128",
338 .cra_driver_name = "__aegis128-aesni",
339
340 .cra_module = THIS_MODULE,
341 }
342 }, {
343 .setkey = cryptd_aegis128_aesni_setkey,
344 .setauthsize = cryptd_aegis128_aesni_setauthsize,
345 .encrypt = cryptd_aegis128_aesni_encrypt,
346 .decrypt = cryptd_aegis128_aesni_decrypt,
347 .init = cryptd_aegis128_aesni_init_tfm,
348 .exit = cryptd_aegis128_aesni_exit_tfm,
349
350 .ivsize = AEGIS128_NONCE_SIZE,
351 .maxauthsize = AEGIS128_MAX_AUTH_SIZE,
352 .chunksize = AEGIS128_BLOCK_SIZE,
353
354 .base = {
355 .cra_flags = CRYPTO_ALG_ASYNC,
356 .cra_blocksize = 1,
357 .cra_ctxsize = sizeof(struct cryptd_aead *),
358 .cra_alignmask = 0,
359
360 .cra_priority = 400,
361
362 .cra_name = "aegis128",
363 .cra_driver_name = "aegis128-aesni",
364
365 .cra_module = THIS_MODULE,
366 }
367 } 269 }
368}; 270};
369 271
272static struct simd_aead_alg *simd_alg;
273
370static int __init crypto_aegis128_aesni_module_init(void) 274static int __init crypto_aegis128_aesni_module_init(void)
371{ 275{
372 if (!boot_cpu_has(X86_FEATURE_XMM2) || 276 if (!boot_cpu_has(X86_FEATURE_XMM2) ||
@@ -374,14 +278,13 @@ static int __init crypto_aegis128_aesni_module_init(void)
374 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) 278 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
375 return -ENODEV; 279 return -ENODEV;
376 280
377 return crypto_register_aeads(crypto_aegis128_aesni_alg, 281 return simd_register_aeads_compat(&crypto_aegis128_aesni_alg, 1,
378 ARRAY_SIZE(crypto_aegis128_aesni_alg)); 282 &simd_alg);
379} 283}
380 284
381static void __exit crypto_aegis128_aesni_module_exit(void) 285static void __exit crypto_aegis128_aesni_module_exit(void)
382{ 286{
383 crypto_unregister_aeads(crypto_aegis128_aesni_alg, 287 simd_unregister_aeads(&crypto_aegis128_aesni_alg, 1, &simd_alg);
384 ARRAY_SIZE(crypto_aegis128_aesni_alg));
385} 288}
386 289
387module_init(crypto_aegis128_aesni_module_init); 290module_init(crypto_aegis128_aesni_module_init);
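
simd_register_aeads_compat() generates the async front end that each of these aegis (and, below, morus) files used to hand-roll: it wraps the CRYPTO_ALG_INTERNAL "__" algorithm in a cryptd-backed "aegis128"/"aegis128-aesni" wrapper. A sketch of the encrypt hook such a wrapper installs, assuming the shape of the new AEAD support in crypto/simd.c (field names are illustrative):

	static int simd_aead_encrypt_sketch(struct aead_request *req)
	{
		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
		struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
		struct aead_request *subreq = aead_request_ctx(req);
		struct crypto_aead *child;

		*subreq = *req;

		if (!crypto_simd_usable() ||
		    (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
			child = &ctx->cryptd_tfm->base;	/* defer to cryptd */
		else
			child = cryptd_aead_child(ctx->cryptd_tfm); /* inline */

		aead_request_set_tfm(subreq, child);
		return crypto_aead_encrypt(subreq);
	}
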
diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
index 1b1b39c66c5e..80d917f7e467 100644
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
+++ b/arch/x86/crypto/aegis128l-aesni-glue.c
@@ -11,8 +11,8 @@
11 * any later version. 11 * any later version.
12 */ 12 */
13 13
14#include <crypto/cryptd.h>
15#include <crypto/internal/aead.h> 14#include <crypto/internal/aead.h>
15#include <crypto/internal/simd.h>
16#include <crypto/internal/skcipher.h> 16#include <crypto/internal/skcipher.h>
17#include <crypto/scatterwalk.h> 17#include <crypto/scatterwalk.h>
18#include <linux/module.h> 18#include <linux/module.h>
@@ -242,131 +242,35 @@ static void crypto_aegis128l_aesni_exit_tfm(struct crypto_aead *aead)
242{ 242{
243} 243}
244 244
245static int cryptd_aegis128l_aesni_setkey(struct crypto_aead *aead, 245static struct aead_alg crypto_aegis128l_aesni_alg = {
246 const u8 *key, unsigned int keylen) 246 .setkey = crypto_aegis128l_aesni_setkey,
247{ 247 .setauthsize = crypto_aegis128l_aesni_setauthsize,
248 struct cryptd_aead **ctx = crypto_aead_ctx(aead); 248 .encrypt = crypto_aegis128l_aesni_encrypt,
249 struct cryptd_aead *cryptd_tfm = *ctx; 249 .decrypt = crypto_aegis128l_aesni_decrypt,
250 250 .init = crypto_aegis128l_aesni_init_tfm,
251 return crypto_aead_setkey(&cryptd_tfm->base, key, keylen); 251 .exit = crypto_aegis128l_aesni_exit_tfm,
252} 252
253 253 .ivsize = AEGIS128L_NONCE_SIZE,
254static int cryptd_aegis128l_aesni_setauthsize(struct crypto_aead *aead, 254 .maxauthsize = AEGIS128L_MAX_AUTH_SIZE,
255 unsigned int authsize) 255 .chunksize = AEGIS128L_BLOCK_SIZE,
256{ 256
257 struct cryptd_aead **ctx = crypto_aead_ctx(aead); 257 .base = {
258 struct cryptd_aead *cryptd_tfm = *ctx; 258 .cra_flags = CRYPTO_ALG_INTERNAL,
259 259 .cra_blocksize = 1,
260 return crypto_aead_setauthsize(&cryptd_tfm->base, authsize); 260 .cra_ctxsize = sizeof(struct aegis_ctx) +
261} 261 __alignof__(struct aegis_ctx),
262 262 .cra_alignmask = 0,
263static int cryptd_aegis128l_aesni_encrypt(struct aead_request *req) 263 .cra_priority = 400,
264{ 264
265 struct crypto_aead *aead = crypto_aead_reqtfm(req); 265 .cra_name = "__aegis128l",
266 struct cryptd_aead **ctx = crypto_aead_ctx(aead); 266 .cra_driver_name = "__aegis128l-aesni",
267 struct cryptd_aead *cryptd_tfm = *ctx; 267
268 268 .cra_module = THIS_MODULE,
269 aead = &cryptd_tfm->base;
270 if (irq_fpu_usable() && (!in_atomic() ||
271 !cryptd_aead_queued(cryptd_tfm)))
272 aead = cryptd_aead_child(cryptd_tfm);
273
274 aead_request_set_tfm(req, aead);
275
276 return crypto_aead_encrypt(req);
277}
278
279static int cryptd_aegis128l_aesni_decrypt(struct aead_request *req)
280{
281 struct crypto_aead *aead = crypto_aead_reqtfm(req);
282 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
283 struct cryptd_aead *cryptd_tfm = *ctx;
284
285 aead = &cryptd_tfm->base;
286 if (irq_fpu_usable() && (!in_atomic() ||
287 !cryptd_aead_queued(cryptd_tfm)))
288 aead = cryptd_aead_child(cryptd_tfm);
289
290 aead_request_set_tfm(req, aead);
291
292 return crypto_aead_decrypt(req);
293}
294
295static int cryptd_aegis128l_aesni_init_tfm(struct crypto_aead *aead)
296{
297 struct cryptd_aead *cryptd_tfm;
298 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
299
300 cryptd_tfm = cryptd_alloc_aead("__aegis128l-aesni", CRYPTO_ALG_INTERNAL,
301 CRYPTO_ALG_INTERNAL);
302 if (IS_ERR(cryptd_tfm))
303 return PTR_ERR(cryptd_tfm);
304
305 *ctx = cryptd_tfm;
306 crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
307 return 0;
308}
309
310static void cryptd_aegis128l_aesni_exit_tfm(struct crypto_aead *aead)
311{
312 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
313
314 cryptd_free_aead(*ctx);
315}
316
317static struct aead_alg crypto_aegis128l_aesni_alg[] = {
318 {
319 .setkey = crypto_aegis128l_aesni_setkey,
320 .setauthsize = crypto_aegis128l_aesni_setauthsize,
321 .encrypt = crypto_aegis128l_aesni_encrypt,
322 .decrypt = crypto_aegis128l_aesni_decrypt,
323 .init = crypto_aegis128l_aesni_init_tfm,
324 .exit = crypto_aegis128l_aesni_exit_tfm,
325
326 .ivsize = AEGIS128L_NONCE_SIZE,
327 .maxauthsize = AEGIS128L_MAX_AUTH_SIZE,
328 .chunksize = AEGIS128L_BLOCK_SIZE,
329
330 .base = {
331 .cra_flags = CRYPTO_ALG_INTERNAL,
332 .cra_blocksize = 1,
333 .cra_ctxsize = sizeof(struct aegis_ctx) +
334 __alignof__(struct aegis_ctx),
335 .cra_alignmask = 0,
336
337 .cra_name = "__aegis128l",
338 .cra_driver_name = "__aegis128l-aesni",
339
340 .cra_module = THIS_MODULE,
341 }
342 }, {
343 .setkey = cryptd_aegis128l_aesni_setkey,
344 .setauthsize = cryptd_aegis128l_aesni_setauthsize,
345 .encrypt = cryptd_aegis128l_aesni_encrypt,
346 .decrypt = cryptd_aegis128l_aesni_decrypt,
347 .init = cryptd_aegis128l_aesni_init_tfm,
348 .exit = cryptd_aegis128l_aesni_exit_tfm,
349
350 .ivsize = AEGIS128L_NONCE_SIZE,
351 .maxauthsize = AEGIS128L_MAX_AUTH_SIZE,
352 .chunksize = AEGIS128L_BLOCK_SIZE,
353
354 .base = {
355 .cra_flags = CRYPTO_ALG_ASYNC,
356 .cra_blocksize = 1,
357 .cra_ctxsize = sizeof(struct cryptd_aead *),
358 .cra_alignmask = 0,
359
360 .cra_priority = 400,
361
362 .cra_name = "aegis128l",
363 .cra_driver_name = "aegis128l-aesni",
364
365 .cra_module = THIS_MODULE,
366 }
367 } 269 }
368}; 270};
369 271
272static struct simd_aead_alg *simd_alg;
273
370static int __init crypto_aegis128l_aesni_module_init(void) 274static int __init crypto_aegis128l_aesni_module_init(void)
371{ 275{
372 if (!boot_cpu_has(X86_FEATURE_XMM2) || 276 if (!boot_cpu_has(X86_FEATURE_XMM2) ||
@@ -374,14 +278,13 @@ static int __init crypto_aegis128l_aesni_module_init(void)
374 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) 278 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
375 return -ENODEV; 279 return -ENODEV;
376 280
377 return crypto_register_aeads(crypto_aegis128l_aesni_alg, 281 return simd_register_aeads_compat(&crypto_aegis128l_aesni_alg, 1,
378 ARRAY_SIZE(crypto_aegis128l_aesni_alg)); 282 &simd_alg);
379} 283}
380 284
381static void __exit crypto_aegis128l_aesni_module_exit(void) 285static void __exit crypto_aegis128l_aesni_module_exit(void)
382{ 286{
383 crypto_unregister_aeads(crypto_aegis128l_aesni_alg, 287 simd_unregister_aeads(&crypto_aegis128l_aesni_alg, 1, &simd_alg);
384 ARRAY_SIZE(crypto_aegis128l_aesni_alg));
385} 288}
386 289
387module_init(crypto_aegis128l_aesni_module_init); 290module_init(crypto_aegis128l_aesni_module_init);
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
index 6227ca3220a0..716eecb66bd5 100644
--- a/arch/x86/crypto/aegis256-aesni-glue.c
+++ b/arch/x86/crypto/aegis256-aesni-glue.c
@@ -11,8 +11,8 @@
11 * any later version. 11 * any later version.
12 */ 12 */
13 13
14#include <crypto/cryptd.h>
15#include <crypto/internal/aead.h> 14#include <crypto/internal/aead.h>
15#include <crypto/internal/simd.h>
16#include <crypto/internal/skcipher.h> 16#include <crypto/internal/skcipher.h>
17#include <crypto/scatterwalk.h> 17#include <crypto/scatterwalk.h>
18#include <linux/module.h> 18#include <linux/module.h>
@@ -242,131 +242,35 @@ static void crypto_aegis256_aesni_exit_tfm(struct crypto_aead *aead)
242{ 242{
243} 243}
244 244
245static int cryptd_aegis256_aesni_setkey(struct crypto_aead *aead, 245static struct aead_alg crypto_aegis256_aesni_alg = {
246 const u8 *key, unsigned int keylen) 246 .setkey = crypto_aegis256_aesni_setkey,
247{ 247 .setauthsize = crypto_aegis256_aesni_setauthsize,
248 struct cryptd_aead **ctx = crypto_aead_ctx(aead); 248 .encrypt = crypto_aegis256_aesni_encrypt,
249 struct cryptd_aead *cryptd_tfm = *ctx; 249 .decrypt = crypto_aegis256_aesni_decrypt,
250 250 .init = crypto_aegis256_aesni_init_tfm,
251 return crypto_aead_setkey(&cryptd_tfm->base, key, keylen); 251 .exit = crypto_aegis256_aesni_exit_tfm,
252} 252
253 253 .ivsize = AEGIS256_NONCE_SIZE,
254static int cryptd_aegis256_aesni_setauthsize(struct crypto_aead *aead, 254 .maxauthsize = AEGIS256_MAX_AUTH_SIZE,
255 unsigned int authsize) 255 .chunksize = AEGIS256_BLOCK_SIZE,
256{ 256
257 struct cryptd_aead **ctx = crypto_aead_ctx(aead); 257 .base = {
258 struct cryptd_aead *cryptd_tfm = *ctx; 258 .cra_flags = CRYPTO_ALG_INTERNAL,
259 259 .cra_blocksize = 1,
260 return crypto_aead_setauthsize(&cryptd_tfm->base, authsize); 260 .cra_ctxsize = sizeof(struct aegis_ctx) +
261} 261 __alignof__(struct aegis_ctx),
262 262 .cra_alignmask = 0,
263static int cryptd_aegis256_aesni_encrypt(struct aead_request *req) 263 .cra_priority = 400,
264{ 264
265 struct crypto_aead *aead = crypto_aead_reqtfm(req); 265 .cra_name = "__aegis256",
266 struct cryptd_aead **ctx = crypto_aead_ctx(aead); 266 .cra_driver_name = "__aegis256-aesni",
267 struct cryptd_aead *cryptd_tfm = *ctx; 267
268 268 .cra_module = THIS_MODULE,
269 aead = &cryptd_tfm->base;
270 if (irq_fpu_usable() && (!in_atomic() ||
271 !cryptd_aead_queued(cryptd_tfm)))
272 aead = cryptd_aead_child(cryptd_tfm);
273
274 aead_request_set_tfm(req, aead);
275
276 return crypto_aead_encrypt(req);
277}
278
279static int cryptd_aegis256_aesni_decrypt(struct aead_request *req)
280{
281 struct crypto_aead *aead = crypto_aead_reqtfm(req);
282 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
283 struct cryptd_aead *cryptd_tfm = *ctx;
284
285 aead = &cryptd_tfm->base;
286 if (irq_fpu_usable() && (!in_atomic() ||
287 !cryptd_aead_queued(cryptd_tfm)))
288 aead = cryptd_aead_child(cryptd_tfm);
289
290 aead_request_set_tfm(req, aead);
291
292 return crypto_aead_decrypt(req);
293}
294
295static int cryptd_aegis256_aesni_init_tfm(struct crypto_aead *aead)
296{
297 struct cryptd_aead *cryptd_tfm;
298 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
299
300 cryptd_tfm = cryptd_alloc_aead("__aegis256-aesni", CRYPTO_ALG_INTERNAL,
301 CRYPTO_ALG_INTERNAL);
302 if (IS_ERR(cryptd_tfm))
303 return PTR_ERR(cryptd_tfm);
304
305 *ctx = cryptd_tfm;
306 crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
307 return 0;
308}
309
310static void cryptd_aegis256_aesni_exit_tfm(struct crypto_aead *aead)
311{
312 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
313
314 cryptd_free_aead(*ctx);
315}
316
317static struct aead_alg crypto_aegis256_aesni_alg[] = {
318 {
319 .setkey = crypto_aegis256_aesni_setkey,
320 .setauthsize = crypto_aegis256_aesni_setauthsize,
321 .encrypt = crypto_aegis256_aesni_encrypt,
322 .decrypt = crypto_aegis256_aesni_decrypt,
323 .init = crypto_aegis256_aesni_init_tfm,
324 .exit = crypto_aegis256_aesni_exit_tfm,
325
326 .ivsize = AEGIS256_NONCE_SIZE,
327 .maxauthsize = AEGIS256_MAX_AUTH_SIZE,
328 .chunksize = AEGIS256_BLOCK_SIZE,
329
330 .base = {
331 .cra_flags = CRYPTO_ALG_INTERNAL,
332 .cra_blocksize = 1,
333 .cra_ctxsize = sizeof(struct aegis_ctx) +
334 __alignof__(struct aegis_ctx),
335 .cra_alignmask = 0,
336
337 .cra_name = "__aegis256",
338 .cra_driver_name = "__aegis256-aesni",
339
340 .cra_module = THIS_MODULE,
341 }
342 }, {
343 .setkey = cryptd_aegis256_aesni_setkey,
344 .setauthsize = cryptd_aegis256_aesni_setauthsize,
345 .encrypt = cryptd_aegis256_aesni_encrypt,
346 .decrypt = cryptd_aegis256_aesni_decrypt,
347 .init = cryptd_aegis256_aesni_init_tfm,
348 .exit = cryptd_aegis256_aesni_exit_tfm,
349
350 .ivsize = AEGIS256_NONCE_SIZE,
351 .maxauthsize = AEGIS256_MAX_AUTH_SIZE,
352 .chunksize = AEGIS256_BLOCK_SIZE,
353
354 .base = {
355 .cra_flags = CRYPTO_ALG_ASYNC,
356 .cra_blocksize = 1,
357 .cra_ctxsize = sizeof(struct cryptd_aead *),
358 .cra_alignmask = 0,
359
360 .cra_priority = 400,
361
362 .cra_name = "aegis256",
363 .cra_driver_name = "aegis256-aesni",
364
365 .cra_module = THIS_MODULE,
366 }
367 } 269 }
368}; 270};
369 271
272static struct simd_aead_alg *simd_alg;
273
370static int __init crypto_aegis256_aesni_module_init(void) 274static int __init crypto_aegis256_aesni_module_init(void)
371{ 275{
372 if (!boot_cpu_has(X86_FEATURE_XMM2) || 276 if (!boot_cpu_has(X86_FEATURE_XMM2) ||
@@ -374,14 +278,13 @@ static int __init crypto_aegis256_aesni_module_init(void)
374 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) 278 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
375 return -ENODEV; 279 return -ENODEV;
376 280
377 return crypto_register_aeads(crypto_aegis256_aesni_alg, 281 return simd_register_aeads_compat(&crypto_aegis256_aesni_alg, 1,
378 ARRAY_SIZE(crypto_aegis256_aesni_alg)); 282 &simd_alg);
379} 283}
380 284
381static void __exit crypto_aegis256_aesni_module_exit(void) 285static void __exit crypto_aegis256_aesni_module_exit(void)
382{ 286{
383 crypto_unregister_aeads(crypto_aegis256_aesni_alg, 287 simd_unregister_aeads(&crypto_aegis256_aesni_alg, 1, &simd_alg);
384 ARRAY_SIZE(crypto_aegis256_aesni_alg));
385} 288}
386 289
387module_init(crypto_aegis256_aesni_module_init); 290module_init(crypto_aegis256_aesni_module_init);
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 1e3d2102033a..21c246799aa5 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -25,14 +25,13 @@
25#include <linux/err.h> 25#include <linux/err.h>
26#include <crypto/algapi.h> 26#include <crypto/algapi.h>
27#include <crypto/aes.h> 27#include <crypto/aes.h>
28#include <crypto/cryptd.h>
29#include <crypto/ctr.h> 28#include <crypto/ctr.h>
30#include <crypto/b128ops.h> 29#include <crypto/b128ops.h>
31#include <crypto/gcm.h> 30#include <crypto/gcm.h>
32#include <crypto/xts.h> 31#include <crypto/xts.h>
33#include <asm/cpu_device_id.h> 32#include <asm/cpu_device_id.h>
34#include <asm/fpu/api.h>
35#include <asm/crypto/aes.h> 33#include <asm/crypto/aes.h>
34#include <asm/simd.h>
36#include <crypto/scatterwalk.h> 35#include <crypto/scatterwalk.h>
37#include <crypto/internal/aead.h> 36#include <crypto/internal/aead.h>
38#include <crypto/internal/simd.h> 37#include <crypto/internal/simd.h>
@@ -333,7 +332,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
333 return -EINVAL; 332 return -EINVAL;
334 } 333 }
335 334
336 if (!irq_fpu_usable()) 335 if (!crypto_simd_usable())
337 err = crypto_aes_expand_key(ctx, in_key, key_len); 336 err = crypto_aes_expand_key(ctx, in_key, key_len);
338 else { 337 else {
339 kernel_fpu_begin(); 338 kernel_fpu_begin();
@@ -354,7 +353,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
354{ 353{
355 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); 354 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
356 355
357 if (!irq_fpu_usable()) 356 if (!crypto_simd_usable())
358 crypto_aes_encrypt_x86(ctx, dst, src); 357 crypto_aes_encrypt_x86(ctx, dst, src);
359 else { 358 else {
360 kernel_fpu_begin(); 359 kernel_fpu_begin();
@@ -367,7 +366,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
367{ 366{
368 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); 367 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
369 368
370 if (!irq_fpu_usable()) 369 if (!crypto_simd_usable())
371 crypto_aes_decrypt_x86(ctx, dst, src); 370 crypto_aes_decrypt_x86(ctx, dst, src);
372 else { 371 else {
373 kernel_fpu_begin(); 372 kernel_fpu_begin();
@@ -643,29 +642,6 @@ static int xts_decrypt(struct skcipher_request *req)
643 aes_ctx(ctx->raw_crypt_ctx)); 642 aes_ctx(ctx->raw_crypt_ctx));
644} 643}
645 644
646static int rfc4106_init(struct crypto_aead *aead)
647{
648 struct cryptd_aead *cryptd_tfm;
649 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
650
651 cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
652 CRYPTO_ALG_INTERNAL,
653 CRYPTO_ALG_INTERNAL);
654 if (IS_ERR(cryptd_tfm))
655 return PTR_ERR(cryptd_tfm);
656
657 *ctx = cryptd_tfm;
658 crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
659 return 0;
660}
661
662static void rfc4106_exit(struct crypto_aead *aead)
663{
664 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
665
666 cryptd_free_aead(*ctx);
667}
668
669static int 645static int
670rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len) 646rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
671{ 647{
@@ -710,15 +686,8 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
710 rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); 686 rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
711} 687}
712 688
713static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key, 689/* This is the Integrity Check Value (aka the authentication tag) length and can
714 unsigned int key_len) 690 * be 8, 12 or 16 bytes long. */
715{
716 struct cryptd_aead **ctx = crypto_aead_ctx(parent);
717 struct cryptd_aead *cryptd_tfm = *ctx;
718
719 return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
720}
721
722static int common_rfc4106_set_authsize(struct crypto_aead *aead, 691static int common_rfc4106_set_authsize(struct crypto_aead *aead,
723 unsigned int authsize) 692 unsigned int authsize)
724{ 693{
@@ -734,17 +703,6 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead,
734 return 0; 703 return 0;
735} 704}
736 705
737/* This is the Integrity Check Value (aka the authentication tag length and can
738 * be 8, 12 or 16 bytes long. */
739static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent,
740 unsigned int authsize)
741{
742 struct cryptd_aead **ctx = crypto_aead_ctx(parent);
743 struct cryptd_aead *cryptd_tfm = *ctx;
744
745 return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
746}
747
748static int generic_gcmaes_set_authsize(struct crypto_aead *tfm, 706static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
749 unsigned int authsize) 707 unsigned int authsize)
750{ 708{
@@ -964,38 +922,6 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
964 return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv, 922 return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
965 aes_ctx); 923 aes_ctx);
966} 924}
967
968static int gcmaes_wrapper_encrypt(struct aead_request *req)
969{
970 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
971 struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
972 struct cryptd_aead *cryptd_tfm = *ctx;
973
974 tfm = &cryptd_tfm->base;
975 if (irq_fpu_usable() && (!in_atomic() ||
976 !cryptd_aead_queued(cryptd_tfm)))
977 tfm = cryptd_aead_child(cryptd_tfm);
978
979 aead_request_set_tfm(req, tfm);
980
981 return crypto_aead_encrypt(req);
982}
983
984static int gcmaes_wrapper_decrypt(struct aead_request *req)
985{
986 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
987 struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
988 struct cryptd_aead *cryptd_tfm = *ctx;
989
990 tfm = &cryptd_tfm->base;
991 if (irq_fpu_usable() && (!in_atomic() ||
992 !cryptd_aead_queued(cryptd_tfm)))
993 tfm = cryptd_aead_child(cryptd_tfm);
994
995 aead_request_set_tfm(req, tfm);
996
997 return crypto_aead_decrypt(req);
998}
999#endif 925#endif
1000 926
1001static struct crypto_alg aesni_algs[] = { { 927static struct crypto_alg aesni_algs[] = { {
@@ -1148,31 +1074,7 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
1148 aes_ctx); 1074 aes_ctx);
1149} 1075}
1150 1076
1151static int generic_gcmaes_init(struct crypto_aead *aead) 1077static struct aead_alg aesni_aeads[] = { {
1152{
1153 struct cryptd_aead *cryptd_tfm;
1154 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
1155
1156 cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni",
1157 CRYPTO_ALG_INTERNAL,
1158 CRYPTO_ALG_INTERNAL);
1159 if (IS_ERR(cryptd_tfm))
1160 return PTR_ERR(cryptd_tfm);
1161
1162 *ctx = cryptd_tfm;
1163 crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
1164
1165 return 0;
1166}
1167
1168static void generic_gcmaes_exit(struct crypto_aead *aead)
1169{
1170 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
1171
1172 cryptd_free_aead(*ctx);
1173}
1174
1175static struct aead_alg aesni_aead_algs[] = { {
1176 .setkey = common_rfc4106_set_key, 1078 .setkey = common_rfc4106_set_key,
1177 .setauthsize = common_rfc4106_set_authsize, 1079 .setauthsize = common_rfc4106_set_authsize,
1178 .encrypt = helper_rfc4106_encrypt, 1080 .encrypt = helper_rfc4106_encrypt,
@@ -1180,8 +1082,9 @@ static struct aead_alg aesni_aead_algs[] = { {
1180 .ivsize = GCM_RFC4106_IV_SIZE, 1082 .ivsize = GCM_RFC4106_IV_SIZE,
1181 .maxauthsize = 16, 1083 .maxauthsize = 16,
1182 .base = { 1084 .base = {
1183 .cra_name = "__gcm-aes-aesni", 1085 .cra_name = "__rfc4106(gcm(aes))",
1184 .cra_driver_name = "__driver-gcm-aes-aesni", 1086 .cra_driver_name = "__rfc4106-gcm-aesni",
1087 .cra_priority = 400,
1185 .cra_flags = CRYPTO_ALG_INTERNAL, 1088 .cra_flags = CRYPTO_ALG_INTERNAL,
1186 .cra_blocksize = 1, 1089 .cra_blocksize = 1,
1187 .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx), 1090 .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx),
@@ -1189,24 +1092,6 @@ static struct aead_alg aesni_aead_algs[] = { {
1189 .cra_module = THIS_MODULE, 1092 .cra_module = THIS_MODULE,
1190 }, 1093 },
1191}, { 1094}, {
1192 .init = rfc4106_init,
1193 .exit = rfc4106_exit,
1194 .setkey = gcmaes_wrapper_set_key,
1195 .setauthsize = gcmaes_wrapper_set_authsize,
1196 .encrypt = gcmaes_wrapper_encrypt,
1197 .decrypt = gcmaes_wrapper_decrypt,
1198 .ivsize = GCM_RFC4106_IV_SIZE,
1199 .maxauthsize = 16,
1200 .base = {
1201 .cra_name = "rfc4106(gcm(aes))",
1202 .cra_driver_name = "rfc4106-gcm-aesni",
1203 .cra_priority = 400,
1204 .cra_flags = CRYPTO_ALG_ASYNC,
1205 .cra_blocksize = 1,
1206 .cra_ctxsize = sizeof(struct cryptd_aead *),
1207 .cra_module = THIS_MODULE,
1208 },
1209}, {
1210 .setkey = generic_gcmaes_set_key, 1095 .setkey = generic_gcmaes_set_key,
1211 .setauthsize = generic_gcmaes_set_authsize, 1096 .setauthsize = generic_gcmaes_set_authsize,
1212 .encrypt = generic_gcmaes_encrypt, 1097 .encrypt = generic_gcmaes_encrypt,
@@ -1214,38 +1099,21 @@ static struct aead_alg aesni_aead_algs[] = { {
1214 .ivsize = GCM_AES_IV_SIZE, 1099 .ivsize = GCM_AES_IV_SIZE,
1215 .maxauthsize = 16, 1100 .maxauthsize = 16,
1216 .base = { 1101 .base = {
1217 .cra_name = "__generic-gcm-aes-aesni", 1102 .cra_name = "__gcm(aes)",
1218 .cra_driver_name = "__driver-generic-gcm-aes-aesni", 1103 .cra_driver_name = "__generic-gcm-aesni",
1219 .cra_priority = 0, 1104 .cra_priority = 400,
1220 .cra_flags = CRYPTO_ALG_INTERNAL, 1105 .cra_flags = CRYPTO_ALG_INTERNAL,
1221 .cra_blocksize = 1, 1106 .cra_blocksize = 1,
1222 .cra_ctxsize = sizeof(struct generic_gcmaes_ctx), 1107 .cra_ctxsize = sizeof(struct generic_gcmaes_ctx),
1223 .cra_alignmask = AESNI_ALIGN - 1, 1108 .cra_alignmask = AESNI_ALIGN - 1,
1224 .cra_module = THIS_MODULE, 1109 .cra_module = THIS_MODULE,
1225 }, 1110 },
1226}, {
1227 .init = generic_gcmaes_init,
1228 .exit = generic_gcmaes_exit,
1229 .setkey = gcmaes_wrapper_set_key,
1230 .setauthsize = gcmaes_wrapper_set_authsize,
1231 .encrypt = gcmaes_wrapper_encrypt,
1232 .decrypt = gcmaes_wrapper_decrypt,
1233 .ivsize = GCM_AES_IV_SIZE,
1234 .maxauthsize = 16,
1235 .base = {
1236 .cra_name = "gcm(aes)",
1237 .cra_driver_name = "generic-gcm-aesni",
1238 .cra_priority = 400,
1239 .cra_flags = CRYPTO_ALG_ASYNC,
1240 .cra_blocksize = 1,
1241 .cra_ctxsize = sizeof(struct cryptd_aead *),
1242 .cra_module = THIS_MODULE,
1243 },
1244} }; 1111} };
1245#else 1112#else
1246static struct aead_alg aesni_aead_algs[0]; 1113static struct aead_alg aesni_aeads[0];
1247#endif 1114#endif
1248 1115
1116static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];
1249 1117
1250static const struct x86_cpu_id aesni_cpu_id[] = { 1118static const struct x86_cpu_id aesni_cpu_id[] = {
1251 X86_FEATURE_MATCH(X86_FEATURE_AES), 1119 X86_FEATURE_MATCH(X86_FEATURE_AES),
@@ -1253,23 +1121,9 @@ static const struct x86_cpu_id aesni_cpu_id[] = {
1253}; 1121};
1254MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id); 1122MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
1255 1123
1256static void aesni_free_simds(void)
1257{
1258 int i;
1259
1260 for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
1261 aesni_simd_skciphers[i]; i++)
1262 simd_skcipher_free(aesni_simd_skciphers[i]);
1263}
1264
1265static int __init aesni_init(void) 1124static int __init aesni_init(void)
1266{ 1125{
1267 struct simd_skcipher_alg *simd;
1268 const char *basename;
1269 const char *algname;
1270 const char *drvname;
1271 int err; 1126 int err;
1272 int i;
1273 1127
1274 if (!x86_match_cpu(aesni_cpu_id)) 1128 if (!x86_match_cpu(aesni_cpu_id))
1275 return -ENODEV; 1129 return -ENODEV;
@@ -1304,36 +1158,22 @@ static int __init aesni_init(void)
1304 if (err) 1158 if (err)
1305 return err; 1159 return err;
1306 1160
1307 err = crypto_register_skciphers(aesni_skciphers, 1161 err = simd_register_skciphers_compat(aesni_skciphers,
1308 ARRAY_SIZE(aesni_skciphers)); 1162 ARRAY_SIZE(aesni_skciphers),
1163 aesni_simd_skciphers);
1309 if (err) 1164 if (err)
1310 goto unregister_algs; 1165 goto unregister_algs;
1311 1166
1312 err = crypto_register_aeads(aesni_aead_algs, 1167 err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
1313 ARRAY_SIZE(aesni_aead_algs)); 1168 aesni_simd_aeads);
1314 if (err) 1169 if (err)
1315 goto unregister_skciphers; 1170 goto unregister_skciphers;
1316 1171
1317 for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) {
1318 algname = aesni_skciphers[i].base.cra_name + 2;
1319 drvname = aesni_skciphers[i].base.cra_driver_name + 2;
1320 basename = aesni_skciphers[i].base.cra_driver_name;
1321 simd = simd_skcipher_create_compat(algname, drvname, basename);
1322 err = PTR_ERR(simd);
1323 if (IS_ERR(simd))
1324 goto unregister_simds;
1325
1326 aesni_simd_skciphers[i] = simd;
1327 }
1328
1329 return 0; 1172 return 0;
1330 1173
1331unregister_simds:
1332 aesni_free_simds();
1333 crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
1334unregister_skciphers: 1174unregister_skciphers:
1335 crypto_unregister_skciphers(aesni_skciphers, 1175 simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
1336 ARRAY_SIZE(aesni_skciphers)); 1176 aesni_simd_skciphers);
1337unregister_algs: 1177unregister_algs:
1338 crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); 1178 crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1339 return err; 1179 return err;
@@ -1341,10 +1181,10 @@ unregister_algs:
1341 1181
1342static void __exit aesni_exit(void) 1182static void __exit aesni_exit(void)
1343{ 1183{
1344 aesni_free_simds(); 1184 simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
1345 crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs)); 1185 aesni_simd_aeads);
1346 crypto_unregister_skciphers(aesni_skciphers, 1186 simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
1347 ARRAY_SIZE(aesni_skciphers)); 1187 aesni_simd_skciphers);
1348 crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); 1188 crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
1349} 1189}
1350 1190
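
Pulled together, the aesni init path after this conversion registers each table once and lets the compat helpers create the simd wrappers, so the manual simd_skcipher_create_compat() loop and its extra unwind label disappear. A consolidated sketch of the new flow, using the names from the file above:

	static int __init aesni_init_sketch(void)
	{
		int err;

		err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
		if (err)
			return err;

		/* registers the internal skciphers and their wrappers */
		err = simd_register_skciphers_compat(aesni_skciphers,
						     ARRAY_SIZE(aesni_skciphers),
						     aesni_simd_skciphers);
		if (err)
			goto unregister_algs;

		/* likewise for the rfc4106/gcm AEADs */
		err = simd_register_aeads_compat(aesni_aeads,
						 ARRAY_SIZE(aesni_aeads),
						 aesni_simd_aeads);
		if (err)
			goto unregister_skciphers;

		return 0;

	unregister_skciphers:
		simd_unregister_skciphers(aesni_skciphers,
					  ARRAY_SIZE(aesni_skciphers),
					  aesni_simd_skciphers);
	unregister_algs:
		crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
		return err;
	}
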
diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
index 45c1c4143176..4967ad620775 100644
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -12,10 +12,10 @@
12 12
13#include <crypto/algapi.h> 13#include <crypto/algapi.h>
14#include <crypto/chacha.h> 14#include <crypto/chacha.h>
15#include <crypto/internal/simd.h>
15#include <crypto/internal/skcipher.h> 16#include <crypto/internal/skcipher.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <asm/fpu/api.h>
19#include <asm/simd.h> 19#include <asm/simd.h>
20 20
21#define CHACHA_STATE_ALIGN 16 21#define CHACHA_STATE_ALIGN 16
@@ -170,7 +170,7 @@ static int chacha_simd(struct skcipher_request *req)
170 struct skcipher_walk walk; 170 struct skcipher_walk walk;
171 int err; 171 int err;
172 172
173 if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable()) 173 if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
174 return crypto_chacha_crypt(req); 174 return crypto_chacha_crypt(req);
175 175
176 err = skcipher_walk_virt(&walk, req, true); 176 err = skcipher_walk_virt(&walk, req, true);
@@ -193,7 +193,7 @@ static int xchacha_simd(struct skcipher_request *req)
193 u8 real_iv[16]; 193 u8 real_iv[16];
194 int err; 194 int err;
195 195
196 if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable()) 196 if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
197 return crypto_xchacha_crypt(req); 197 return crypto_xchacha_crypt(req);
198 198
199 err = skcipher_walk_virt(&walk, req, true); 199 err = skcipher_walk_virt(&walk, req, true);
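
The chacha dispatch combines a size heuristic with the usability check: a request of at most one block is not worth an FPU save/restore round-trip. A sketch of the converted entry point, assuming the driver's internal stream-xor helper:

	static int chacha_simd_sketch(struct skcipher_request *req)
	{
		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
		struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
		struct skcipher_walk walk;
		int err;

		if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
			return crypto_chacha_crypt(req);   /* generic C code */

		err = skcipher_walk_virt(&walk, req, true);
		if (err)
			return err;

		kernel_fpu_begin();
		err = chacha_simd_stream_xor(&walk, ctx, req->iv);
		kernel_fpu_end();

		return err;
	}
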
diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
index c8d9cdacbf10..cb4ab6645106 100644
--- a/arch/x86/crypto/crc32-pclmul_glue.c
+++ b/arch/x86/crypto/crc32-pclmul_glue.c
@@ -32,10 +32,11 @@
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/crc32.h> 33#include <linux/crc32.h>
34#include <crypto/internal/hash.h> 34#include <crypto/internal/hash.h>
35#include <crypto/internal/simd.h>
35 36
36#include <asm/cpufeatures.h> 37#include <asm/cpufeatures.h>
37#include <asm/cpu_device_id.h> 38#include <asm/cpu_device_id.h>
38#include <asm/fpu/api.h> 39#include <asm/simd.h>
39 40
40#define CHKSUM_BLOCK_SIZE 1 41#define CHKSUM_BLOCK_SIZE 1
41#define CHKSUM_DIGEST_SIZE 4 42#define CHKSUM_DIGEST_SIZE 4
@@ -54,7 +55,7 @@ static u32 __attribute__((pure))
54 unsigned int iremainder; 55 unsigned int iremainder;
55 unsigned int prealign; 56 unsigned int prealign;
56 57
57 if (len < PCLMUL_MIN_LEN + SCALE_F_MASK || !irq_fpu_usable()) 58 if (len < PCLMUL_MIN_LEN + SCALE_F_MASK || !crypto_simd_usable())
58 return crc32_le(crc, p, len); 59 return crc32_le(crc, p, len);
59 60
60 if ((long)p & SCALE_F_MASK) { 61 if ((long)p & SCALE_F_MASK) {
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index 5773e1161072..a58fe217c856 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -29,10 +29,11 @@
29#include <linux/string.h> 29#include <linux/string.h>
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <crypto/internal/hash.h> 31#include <crypto/internal/hash.h>
32#include <crypto/internal/simd.h>
32 33
33#include <asm/cpufeatures.h> 34#include <asm/cpufeatures.h>
34#include <asm/cpu_device_id.h> 35#include <asm/cpu_device_id.h>
35#include <asm/fpu/internal.h> 36#include <asm/simd.h>
36 37
37#define CHKSUM_BLOCK_SIZE 1 38#define CHKSUM_BLOCK_SIZE 1
38#define CHKSUM_DIGEST_SIZE 4 39#define CHKSUM_DIGEST_SIZE 4
@@ -177,7 +178,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
177 * use faster PCL version if datasize is large enough to 178 * use faster PCL version if datasize is large enough to
178 * overcome kernel fpu state save/restore overhead 179 * overcome kernel fpu state save/restore overhead
179 */ 180 */
180 if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) { 181 if (len >= CRC32C_PCL_BREAKEVEN && crypto_simd_usable()) {
181 kernel_fpu_begin(); 182 kernel_fpu_begin();
182 *crcp = crc_pcl(data, len, *crcp); 183 *crcp = crc_pcl(data, len, *crcp);
183 kernel_fpu_end(); 184 kernel_fpu_end();
@@ -189,7 +190,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
189static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len, 190static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
190 u8 *out) 191 u8 *out)
191{ 192{
192 if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) { 193 if (len >= CRC32C_PCL_BREAKEVEN && crypto_simd_usable()) {
193 kernel_fpu_begin(); 194 kernel_fpu_begin();
194 *(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp)); 195 *(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
195 kernel_fpu_end(); 196 kernel_fpu_end();
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
index 0e785c0b2354..3c81e15b0873 100644
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
@@ -26,12 +26,13 @@
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/crc-t10dif.h> 27#include <linux/crc-t10dif.h>
28#include <crypto/internal/hash.h> 28#include <crypto/internal/hash.h>
29#include <crypto/internal/simd.h>
29#include <linux/init.h> 30#include <linux/init.h>
30#include <linux/string.h> 31#include <linux/string.h>
31#include <linux/kernel.h> 32#include <linux/kernel.h>
32#include <asm/fpu/api.h>
33#include <asm/cpufeatures.h> 33#include <asm/cpufeatures.h>
34#include <asm/cpu_device_id.h> 34#include <asm/cpu_device_id.h>
35#include <asm/simd.h>
35 36
36asmlinkage u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len); 37asmlinkage u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len);
37 38
@@ -53,7 +54,7 @@ static int chksum_update(struct shash_desc *desc, const u8 *data,
53{ 54{
54 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); 55 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
55 56
56 if (length >= 16 && irq_fpu_usable()) { 57 if (length >= 16 && crypto_simd_usable()) {
57 kernel_fpu_begin(); 58 kernel_fpu_begin();
58 ctx->crc = crc_t10dif_pcl(ctx->crc, data, length); 59 ctx->crc = crc_t10dif_pcl(ctx->crc, data, length);
59 kernel_fpu_end(); 60 kernel_fpu_end();
@@ -70,15 +71,14 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
70 return 0; 71 return 0;
71} 72}
72 73
73static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len, 74static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
74 u8 *out)
75{ 75{
76 if (len >= 16 && irq_fpu_usable()) { 76 if (len >= 16 && crypto_simd_usable()) {
77 kernel_fpu_begin(); 77 kernel_fpu_begin();
78 *(__u16 *)out = crc_t10dif_pcl(*crcp, data, len); 78 *(__u16 *)out = crc_t10dif_pcl(crc, data, len);
79 kernel_fpu_end(); 79 kernel_fpu_end();
80 } else 80 } else
81 *(__u16 *)out = crc_t10dif_generic(*crcp, data, len); 81 *(__u16 *)out = crc_t10dif_generic(crc, data, len);
82 return 0; 82 return 0;
83} 83}
84 84
@@ -87,15 +87,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
87{ 87{
88 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); 88 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
89 89
90 return __chksum_finup(&ctx->crc, data, len, out); 90 return __chksum_finup(ctx->crc, data, len, out);
91} 91}
92 92
93static int chksum_digest(struct shash_desc *desc, const u8 *data, 93static int chksum_digest(struct shash_desc *desc, const u8 *data,
94 unsigned int length, u8 *out) 94 unsigned int length, u8 *out)
95{ 95{
96 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); 96 return __chksum_finup(0, data, length, out);
97
98 return __chksum_finup(&ctx->crc, data, length, out);
99} 97}
100 98
101static struct shash_alg alg = { 99static struct shash_alg alg = {
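
Read as plain source rather than a diff, the cleanup above passes the running CRC by value, which lets chksum_digest() drop its descriptor access entirely (consolidated from the right-hand side of the hunks):

	static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len,
				  u8 *out)
	{
		if (len >= 16 && crypto_simd_usable()) {
			kernel_fpu_begin();
			*(__u16 *)out = crc_t10dif_pcl(crc, data, len);
			kernel_fpu_end();
		} else {
			*(__u16 *)out = crc_t10dif_generic(crc, data, len);
		}
		return 0;
	}

	static int chksum_digest(struct shash_desc *desc, const u8 *data,
				 unsigned int length, u8 *out)
	{
		return __chksum_finup(0, data, length, out); /* CRC starts at 0 */
	}
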
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 3582ae885ee1..e3f3e6fd9d65 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -19,8 +19,9 @@
19#include <crypto/cryptd.h> 19#include <crypto/cryptd.h>
20#include <crypto/gf128mul.h> 20#include <crypto/gf128mul.h>
21#include <crypto/internal/hash.h> 21#include <crypto/internal/hash.h>
22#include <asm/fpu/api.h> 22#include <crypto/internal/simd.h>
23#include <asm/cpu_device_id.h> 23#include <asm/cpu_device_id.h>
24#include <asm/simd.h>
24 25
25#define GHASH_BLOCK_SIZE 16 26#define GHASH_BLOCK_SIZE 16
26#define GHASH_DIGEST_SIZE 16 27#define GHASH_DIGEST_SIZE 16
@@ -171,7 +172,6 @@ static int ghash_async_init(struct ahash_request *req)
171 struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm); 172 struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
172 173
173 desc->tfm = child; 174 desc->tfm = child;
174 desc->flags = req->base.flags;
175 return crypto_shash_init(desc); 175 return crypto_shash_init(desc);
176} 176}
177 177
@@ -182,7 +182,7 @@ static int ghash_async_update(struct ahash_request *req)
182 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); 182 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
183 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; 183 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
184 184
185 if (!irq_fpu_usable() || 185 if (!crypto_simd_usable() ||
186 (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { 186 (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
187 memcpy(cryptd_req, req, sizeof(*req)); 187 memcpy(cryptd_req, req, sizeof(*req));
188 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); 188 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -200,7 +200,7 @@ static int ghash_async_final(struct ahash_request *req)
200 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); 200 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
201 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; 201 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
202 202
203 if (!irq_fpu_usable() || 203 if (!crypto_simd_usable() ||
204 (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { 204 (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
205 memcpy(cryptd_req, req, sizeof(*req)); 205 memcpy(cryptd_req, req, sizeof(*req));
206 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); 206 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -241,7 +241,7 @@ static int ghash_async_digest(struct ahash_request *req)
241 struct ahash_request *cryptd_req = ahash_request_ctx(req); 241 struct ahash_request *cryptd_req = ahash_request_ctx(req);
242 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; 242 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
243 243
244 if (!irq_fpu_usable() || 244 if (!crypto_simd_usable() ||
245 (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { 245 (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
246 memcpy(cryptd_req, req, sizeof(*req)); 246 memcpy(cryptd_req, req, sizeof(*req));
247 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); 247 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -251,7 +251,6 @@ static int ghash_async_digest(struct ahash_request *req)
251 struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm); 251 struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
252 252
253 desc->tfm = child; 253 desc->tfm = child;
254 desc->flags = req->base.flags;
255 return shash_ahash_digest(req, desc); 254 return shash_ahash_digest(req, desc);
256 } 255 }
257} 256}
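
ghash keeps its explicit cryptd wrappers here rather than converting to the simd helpers, so only the dispatch predicate changes; the desc->flags assignments go away because the shash descriptor flags field is being removed treewide. The rule shared by the update/final/digest paths above, as a small sketch:

	static bool ghash_may_run_inline(struct cryptd_ahash *cryptd_tfm)
	{
		/* run the clmul child synchronously only when SIMD is usable
		 * and we would not jump ahead of requests already queued on
		 * cryptd's workqueue */
		return crypto_simd_usable() &&
		       !(in_atomic() && cryptd_ahash_queued(cryptd_tfm));
	}
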
diff --git a/arch/x86/crypto/morus1280-avx2-glue.c b/arch/x86/crypto/morus1280-avx2-glue.c
index 6634907d6ccd..679627a2a824 100644
--- a/arch/x86/crypto/morus1280-avx2-glue.c
+++ b/arch/x86/crypto/morus1280-avx2-glue.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <crypto/internal/aead.h> 14#include <crypto/internal/aead.h>
15#include <crypto/internal/simd.h>
15#include <crypto/morus1280_glue.h> 16#include <crypto/morus1280_glue.h>
16#include <linux/module.h> 17#include <linux/module.h>
17#include <asm/fpu/api.h> 18#include <asm/fpu/api.h>
@@ -35,7 +36,9 @@ asmlinkage void crypto_morus1280_avx2_dec_tail(void *state, const void *src,
35asmlinkage void crypto_morus1280_avx2_final(void *state, void *tag_xor, 36asmlinkage void crypto_morus1280_avx2_final(void *state, void *tag_xor,
36 u64 assoclen, u64 cryptlen); 37 u64 assoclen, u64 cryptlen);
37 38
38MORUS1280_DECLARE_ALGS(avx2, "morus1280-avx2", 400); 39MORUS1280_DECLARE_ALG(avx2, "morus1280-avx2", 400);
40
41static struct simd_aead_alg *simd_alg;
39 42
40static int __init crypto_morus1280_avx2_module_init(void) 43static int __init crypto_morus1280_avx2_module_init(void)
41{ 44{
@@ -44,14 +47,13 @@ static int __init crypto_morus1280_avx2_module_init(void)
44 !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) 47 !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
45 return -ENODEV; 48 return -ENODEV;
46 49
47 return crypto_register_aeads(crypto_morus1280_avx2_algs, 50 return simd_register_aeads_compat(&crypto_morus1280_avx2_alg, 1,
48 ARRAY_SIZE(crypto_morus1280_avx2_algs)); 51 &simd_alg);
49} 52}
50 53
51static void __exit crypto_morus1280_avx2_module_exit(void) 54static void __exit crypto_morus1280_avx2_module_exit(void)
52{ 55{
53 crypto_unregister_aeads(crypto_morus1280_avx2_algs, 56 simd_unregister_aeads(&crypto_morus1280_avx2_alg, 1, &simd_alg);
54 ARRAY_SIZE(crypto_morus1280_avx2_algs));
55} 57}
56 58
57module_init(crypto_morus1280_avx2_module_init); 59module_init(crypto_morus1280_avx2_module_init);
diff --git a/arch/x86/crypto/morus1280-sse2-glue.c b/arch/x86/crypto/morus1280-sse2-glue.c
index f40244eaf14d..c35c0638d0bb 100644
--- a/arch/x86/crypto/morus1280-sse2-glue.c
+++ b/arch/x86/crypto/morus1280-sse2-glue.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <crypto/internal/aead.h> 14#include <crypto/internal/aead.h>
15#include <crypto/internal/simd.h>
15#include <crypto/morus1280_glue.h> 16#include <crypto/morus1280_glue.h>
16#include <linux/module.h> 17#include <linux/module.h>
17#include <asm/fpu/api.h> 18#include <asm/fpu/api.h>
@@ -35,7 +36,9 @@ asmlinkage void crypto_morus1280_sse2_dec_tail(void *state, const void *src,
35asmlinkage void crypto_morus1280_sse2_final(void *state, void *tag_xor, 36asmlinkage void crypto_morus1280_sse2_final(void *state, void *tag_xor,
36 u64 assoclen, u64 cryptlen); 37 u64 assoclen, u64 cryptlen);
37 38
38MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350); 39MORUS1280_DECLARE_ALG(sse2, "morus1280-sse2", 350);
40
41static struct simd_aead_alg *simd_alg;
39 42
40static int __init crypto_morus1280_sse2_module_init(void) 43static int __init crypto_morus1280_sse2_module_init(void)
41{ 44{
@@ -43,14 +46,13 @@ static int __init crypto_morus1280_sse2_module_init(void)
43 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) 46 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
44 return -ENODEV; 47 return -ENODEV;
45 48
46 return crypto_register_aeads(crypto_morus1280_sse2_algs, 49 return simd_register_aeads_compat(&crypto_morus1280_sse2_alg, 1,
47 ARRAY_SIZE(crypto_morus1280_sse2_algs)); 50 &simd_alg);
48} 51}
49 52
50static void __exit crypto_morus1280_sse2_module_exit(void) 53static void __exit crypto_morus1280_sse2_module_exit(void)
51{ 54{
52 crypto_unregister_aeads(crypto_morus1280_sse2_algs, 55 simd_unregister_aeads(&crypto_morus1280_sse2_alg, 1, &simd_alg);
53 ARRAY_SIZE(crypto_morus1280_sse2_algs));
54} 56}
55 57
56module_init(crypto_morus1280_sse2_module_init); 58module_init(crypto_morus1280_sse2_module_init);
diff --git a/arch/x86/crypto/morus1280_glue.c b/arch/x86/crypto/morus1280_glue.c
index 7e600f8bcdad..30fc1bd98ec3 100644
--- a/arch/x86/crypto/morus1280_glue.c
+++ b/arch/x86/crypto/morus1280_glue.c
@@ -11,7 +11,6 @@
11 * any later version. 11 * any later version.
12 */ 12 */
13 13
14#include <crypto/cryptd.h>
15#include <crypto/internal/aead.h> 14#include <crypto/internal/aead.h>
16#include <crypto/internal/skcipher.h> 15#include <crypto/internal/skcipher.h>
17#include <crypto/morus1280_glue.h> 16#include <crypto/morus1280_glue.h>
@@ -205,90 +204,6 @@ void crypto_morus1280_glue_init_ops(struct crypto_aead *aead,
205} 204}
206EXPORT_SYMBOL_GPL(crypto_morus1280_glue_init_ops); 205EXPORT_SYMBOL_GPL(crypto_morus1280_glue_init_ops);
207 206
208int cryptd_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
209 unsigned int keylen)
210{
211 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
212 struct cryptd_aead *cryptd_tfm = *ctx;
213
214 return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
215}
216EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setkey);
217
218int cryptd_morus1280_glue_setauthsize(struct crypto_aead *aead,
219 unsigned int authsize)
220{
221 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
222 struct cryptd_aead *cryptd_tfm = *ctx;
223
224 return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
225}
226EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setauthsize);
227
228int cryptd_morus1280_glue_encrypt(struct aead_request *req)
229{
230 struct crypto_aead *aead = crypto_aead_reqtfm(req);
231 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
232 struct cryptd_aead *cryptd_tfm = *ctx;
233
234 aead = &cryptd_tfm->base;
235 if (irq_fpu_usable() && (!in_atomic() ||
236 !cryptd_aead_queued(cryptd_tfm)))
237 aead = cryptd_aead_child(cryptd_tfm);
238
239 aead_request_set_tfm(req, aead);
240
241 return crypto_aead_encrypt(req);
242}
243EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_encrypt);
244
245int cryptd_morus1280_glue_decrypt(struct aead_request *req)
246{
247 struct crypto_aead *aead = crypto_aead_reqtfm(req);
248 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
249 struct cryptd_aead *cryptd_tfm = *ctx;
250
251 aead = &cryptd_tfm->base;
252 if (irq_fpu_usable() && (!in_atomic() ||
253 !cryptd_aead_queued(cryptd_tfm)))
254 aead = cryptd_aead_child(cryptd_tfm);
255
256 aead_request_set_tfm(req, aead);
257
258 return crypto_aead_decrypt(req);
259}
260EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_decrypt);
261
262int cryptd_morus1280_glue_init_tfm(struct crypto_aead *aead)
263{
264 struct cryptd_aead *cryptd_tfm;
265 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
266 const char *name = crypto_aead_alg(aead)->base.cra_driver_name;
267 char internal_name[CRYPTO_MAX_ALG_NAME];
268
269 if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name)
270 >= CRYPTO_MAX_ALG_NAME)
271 return -ENAMETOOLONG;
272
273 cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL,
274 CRYPTO_ALG_INTERNAL);
275 if (IS_ERR(cryptd_tfm))
276 return PTR_ERR(cryptd_tfm);
277
278 *ctx = cryptd_tfm;
279 crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
280 return 0;
281}
282EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_init_tfm);
283
284void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead)
285{
286 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
287
288 cryptd_free_aead(*ctx);
289}
290EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_exit_tfm);
291
292MODULE_LICENSE("GPL"); 207MODULE_LICENSE("GPL");
293MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>"); 208MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
294MODULE_DESCRIPTION("MORUS-1280 AEAD mode -- glue for x86 optimizations"); 209MODULE_DESCRIPTION("MORUS-1280 AEAD mode -- glue for x86 optimizations");
diff --git a/arch/x86/crypto/morus640-sse2-glue.c b/arch/x86/crypto/morus640-sse2-glue.c
index 9afaf8f8565a..32da56b3bdad 100644
--- a/arch/x86/crypto/morus640-sse2-glue.c
+++ b/arch/x86/crypto/morus640-sse2-glue.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <crypto/internal/aead.h> 14#include <crypto/internal/aead.h>
15#include <crypto/internal/simd.h>
15#include <crypto/morus640_glue.h> 16#include <crypto/morus640_glue.h>
16#include <linux/module.h> 17#include <linux/module.h>
17#include <asm/fpu/api.h> 18#include <asm/fpu/api.h>
@@ -35,7 +36,9 @@ asmlinkage void crypto_morus640_sse2_dec_tail(void *state, const void *src,
35asmlinkage void crypto_morus640_sse2_final(void *state, void *tag_xor, 36asmlinkage void crypto_morus640_sse2_final(void *state, void *tag_xor,
36 u64 assoclen, u64 cryptlen); 37 u64 assoclen, u64 cryptlen);
37 38
38MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400); 39MORUS640_DECLARE_ALG(sse2, "morus640-sse2", 400);
40
41static struct simd_aead_alg *simd_alg;
39 42
40static int __init crypto_morus640_sse2_module_init(void) 43static int __init crypto_morus640_sse2_module_init(void)
41{ 44{
@@ -43,14 +46,13 @@ static int __init crypto_morus640_sse2_module_init(void)
43 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) 46 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
44 return -ENODEV; 47 return -ENODEV;
45 48
46 return crypto_register_aeads(crypto_morus640_sse2_algs, 49 return simd_register_aeads_compat(&crypto_morus640_sse2_alg, 1,
47 ARRAY_SIZE(crypto_morus640_sse2_algs)); 50 &simd_alg);
48} 51}
49 52
50static void __exit crypto_morus640_sse2_module_exit(void) 53static void __exit crypto_morus640_sse2_module_exit(void)
51{ 54{
52 crypto_unregister_aeads(crypto_morus640_sse2_algs, 55 simd_unregister_aeads(&crypto_morus640_sse2_alg, 1, &simd_alg);
53 ARRAY_SIZE(crypto_morus640_sse2_algs));
54} 56}
55 57
56module_init(crypto_morus640_sse2_module_init); 58module_init(crypto_morus640_sse2_module_init);
diff --git a/arch/x86/crypto/morus640_glue.c b/arch/x86/crypto/morus640_glue.c
index cb3a81732016..1dea33d84426 100644
--- a/arch/x86/crypto/morus640_glue.c
+++ b/arch/x86/crypto/morus640_glue.c
@@ -11,7 +11,6 @@
11 * any later version. 11 * any later version.
12 */ 12 */
13 13
14#include <crypto/cryptd.h>
15#include <crypto/internal/aead.h> 14#include <crypto/internal/aead.h>
16#include <crypto/internal/skcipher.h> 15#include <crypto/internal/skcipher.h>
17#include <crypto/morus640_glue.h> 16#include <crypto/morus640_glue.h>
@@ -200,90 +199,6 @@ void crypto_morus640_glue_init_ops(struct crypto_aead *aead,
200} 199}
201EXPORT_SYMBOL_GPL(crypto_morus640_glue_init_ops); 200EXPORT_SYMBOL_GPL(crypto_morus640_glue_init_ops);
202 201
203int cryptd_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key,
204 unsigned int keylen)
205{
206 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
207 struct cryptd_aead *cryptd_tfm = *ctx;
208
209 return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
210}
211EXPORT_SYMBOL_GPL(cryptd_morus640_glue_setkey);
212
213int cryptd_morus640_glue_setauthsize(struct crypto_aead *aead,
214 unsigned int authsize)
215{
216 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
217 struct cryptd_aead *cryptd_tfm = *ctx;
218
219 return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
220}
221EXPORT_SYMBOL_GPL(cryptd_morus640_glue_setauthsize);
222
223int cryptd_morus640_glue_encrypt(struct aead_request *req)
224{
225 struct crypto_aead *aead = crypto_aead_reqtfm(req);
226 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
227 struct cryptd_aead *cryptd_tfm = *ctx;
228
229 aead = &cryptd_tfm->base;
230 if (irq_fpu_usable() && (!in_atomic() ||
231 !cryptd_aead_queued(cryptd_tfm)))
232 aead = cryptd_aead_child(cryptd_tfm);
233
234 aead_request_set_tfm(req, aead);
235
236 return crypto_aead_encrypt(req);
237}
238EXPORT_SYMBOL_GPL(cryptd_morus640_glue_encrypt);
239
240int cryptd_morus640_glue_decrypt(struct aead_request *req)
241{
242 struct crypto_aead *aead = crypto_aead_reqtfm(req);
243 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
244 struct cryptd_aead *cryptd_tfm = *ctx;
245
246 aead = &cryptd_tfm->base;
247 if (irq_fpu_usable() && (!in_atomic() ||
248 !cryptd_aead_queued(cryptd_tfm)))
249 aead = cryptd_aead_child(cryptd_tfm);
250
251 aead_request_set_tfm(req, aead);
252
253 return crypto_aead_decrypt(req);
254}
255EXPORT_SYMBOL_GPL(cryptd_morus640_glue_decrypt);
256
257int cryptd_morus640_glue_init_tfm(struct crypto_aead *aead)
258{
259 struct cryptd_aead *cryptd_tfm;
260 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
261 const char *name = crypto_aead_alg(aead)->base.cra_driver_name;
262 char internal_name[CRYPTO_MAX_ALG_NAME];
263
264 if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name)
265 >= CRYPTO_MAX_ALG_NAME)
266 return -ENAMETOOLONG;
267
268 cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL,
269 CRYPTO_ALG_INTERNAL);
270 if (IS_ERR(cryptd_tfm))
271 return PTR_ERR(cryptd_tfm);
272
273 *ctx = cryptd_tfm;
274 crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
275 return 0;
276}
277EXPORT_SYMBOL_GPL(cryptd_morus640_glue_init_tfm);
278
279void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead)
280{
281 struct cryptd_aead **ctx = crypto_aead_ctx(aead);
282
283 cryptd_free_aead(*ctx);
284}
285EXPORT_SYMBOL_GPL(cryptd_morus640_glue_exit_tfm);
286
287MODULE_LICENSE("GPL"); 202MODULE_LICENSE("GPL");
288MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>"); 203MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
289MODULE_DESCRIPTION("MORUS-640 AEAD mode -- glue for x86 optimizations"); 204MODULE_DESCRIPTION("MORUS-640 AEAD mode -- glue for x86 optimizations");
diff --git a/arch/x86/crypto/nhpoly1305-avx2-glue.c b/arch/x86/crypto/nhpoly1305-avx2-glue.c
index 20d815ea4b6a..f7567cbd35b6 100644
--- a/arch/x86/crypto/nhpoly1305-avx2-glue.c
+++ b/arch/x86/crypto/nhpoly1305-avx2-glue.c
@@ -7,9 +7,10 @@
7 */ 7 */
8 8
9#include <crypto/internal/hash.h> 9#include <crypto/internal/hash.h>
10#include <crypto/internal/simd.h>
10#include <crypto/nhpoly1305.h> 11#include <crypto/nhpoly1305.h>
11#include <linux/module.h> 12#include <linux/module.h>
12#include <asm/fpu/api.h> 13#include <asm/simd.h>
13 14
14asmlinkage void nh_avx2(const u32 *key, const u8 *message, size_t message_len, 15asmlinkage void nh_avx2(const u32 *key, const u8 *message, size_t message_len,
15 u8 hash[NH_HASH_BYTES]); 16 u8 hash[NH_HASH_BYTES]);
@@ -24,7 +25,7 @@ static void _nh_avx2(const u32 *key, const u8 *message, size_t message_len,
24static int nhpoly1305_avx2_update(struct shash_desc *desc, 25static int nhpoly1305_avx2_update(struct shash_desc *desc,
25 const u8 *src, unsigned int srclen) 26 const u8 *src, unsigned int srclen)
26{ 27{
27 if (srclen < 64 || !irq_fpu_usable()) 28 if (srclen < 64 || !crypto_simd_usable())
28 return crypto_nhpoly1305_update(desc, src, srclen); 29 return crypto_nhpoly1305_update(desc, src, srclen);
29 30
30 do { 31 do {
diff --git a/arch/x86/crypto/nhpoly1305-sse2-glue.c b/arch/x86/crypto/nhpoly1305-sse2-glue.c
index ed68d164ce14..a661ede3b5cf 100644
--- a/arch/x86/crypto/nhpoly1305-sse2-glue.c
+++ b/arch/x86/crypto/nhpoly1305-sse2-glue.c
@@ -7,9 +7,10 @@
7 */ 7 */
8 8
9#include <crypto/internal/hash.h> 9#include <crypto/internal/hash.h>
10#include <crypto/internal/simd.h>
10#include <crypto/nhpoly1305.h> 11#include <crypto/nhpoly1305.h>
11#include <linux/module.h> 12#include <linux/module.h>
12#include <asm/fpu/api.h> 13#include <asm/simd.h>
13 14
14asmlinkage void nh_sse2(const u32 *key, const u8 *message, size_t message_len, 15asmlinkage void nh_sse2(const u32 *key, const u8 *message, size_t message_len,
15 u8 hash[NH_HASH_BYTES]); 16 u8 hash[NH_HASH_BYTES]);
@@ -24,7 +25,7 @@ static void _nh_sse2(const u32 *key, const u8 *message, size_t message_len,
24static int nhpoly1305_sse2_update(struct shash_desc *desc, 25static int nhpoly1305_sse2_update(struct shash_desc *desc,
25 const u8 *src, unsigned int srclen) 26 const u8 *src, unsigned int srclen)
26{ 27{
27 if (srclen < 64 || !irq_fpu_usable()) 28 if (srclen < 64 || !crypto_simd_usable())
28 return crypto_nhpoly1305_update(desc, src, srclen); 29 return crypto_nhpoly1305_update(desc, src, srclen);
29 30
30 do { 31 do {
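
Both nhpoly1305 glue files (and the poly1305, sha1, sha256 and sha512 ones below) now gate their SIMD paths on crypto_simd_usable() rather than calling irq_fpu_usable()/may_use_simd() directly; runtime behavior is unchanged, but the new helper lets the testmgr fuzzing added in this series force the generic fallback path. A hedged sketch of the pattern on x86, with the example_update_generic()/example_update_simd() helpers standing in for the real fallback and assembly routines:

#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <asm/simd.h>
#include <asm/fpu/api.h>

/* Hypothetical scalar fallback and SIMD kernel, defined elsewhere. */
int example_update_generic(struct shash_desc *desc, const u8 *src,
			   unsigned int srclen);
void example_update_simd(struct shash_desc *desc, const u8 *src,
			 unsigned int srclen);

static int example_update(struct shash_desc *desc, const u8 *src,
			  unsigned int srclen)
{
	/* Small updates are not worth a kernel_fpu_begin()/end() round
	 * trip, and crypto_simd_usable() is false in hard IRQ context (and
	 * when the self-tests are simulating a no-SIMD environment). */
	if (srclen < 64 || !crypto_simd_usable())
		return example_update_generic(desc, src, srclen);

	kernel_fpu_begin();
	example_update_simd(desc, src, srclen);
	kernel_fpu_end();
	return 0;
}
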
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index 88cc01506c84..6eb65b237b3c 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -11,11 +11,11 @@
11 11
12#include <crypto/algapi.h> 12#include <crypto/algapi.h>
13#include <crypto/internal/hash.h> 13#include <crypto/internal/hash.h>
14#include <crypto/internal/simd.h>
14#include <crypto/poly1305.h> 15#include <crypto/poly1305.h>
15#include <linux/crypto.h> 16#include <linux/crypto.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <asm/fpu/api.h>
19#include <asm/simd.h> 19#include <asm/simd.h>
20 20
21struct poly1305_simd_desc_ctx { 21struct poly1305_simd_desc_ctx {
@@ -126,7 +126,7 @@ static int poly1305_simd_update(struct shash_desc *desc,
126 unsigned int bytes; 126 unsigned int bytes;
127 127
128 /* kernel_fpu_begin/end is costly, use fallback for small updates */ 128 /* kernel_fpu_begin/end is costly, use fallback for small updates */
129 if (srclen <= 288 || !may_use_simd()) 129 if (srclen <= 288 || !crypto_simd_usable())
130 return crypto_poly1305_update(desc, src, srclen); 130 return crypto_poly1305_update(desc, src, srclen);
131 131
132 kernel_fpu_begin(); 132 kernel_fpu_begin();
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index 7391c7de72c7..42f177afc33a 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -22,6 +22,7 @@
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 23
24#include <crypto/internal/hash.h> 24#include <crypto/internal/hash.h>
25#include <crypto/internal/simd.h>
25#include <linux/init.h> 26#include <linux/init.h>
26#include <linux/module.h> 27#include <linux/module.h>
27#include <linux/mm.h> 28#include <linux/mm.h>
@@ -29,7 +30,7 @@
29#include <linux/types.h> 30#include <linux/types.h>
30#include <crypto/sha.h> 31#include <crypto/sha.h>
31#include <crypto/sha1_base.h> 32#include <crypto/sha1_base.h>
32#include <asm/fpu/api.h> 33#include <asm/simd.h>
33 34
34typedef void (sha1_transform_fn)(u32 *digest, const char *data, 35typedef void (sha1_transform_fn)(u32 *digest, const char *data,
35 unsigned int rounds); 36 unsigned int rounds);
@@ -39,7 +40,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
39{ 40{
40 struct sha1_state *sctx = shash_desc_ctx(desc); 41 struct sha1_state *sctx = shash_desc_ctx(desc);
41 42
42 if (!irq_fpu_usable() || 43 if (!crypto_simd_usable() ||
43 (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE) 44 (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
44 return crypto_sha1_update(desc, data, len); 45 return crypto_sha1_update(desc, data, len);
45 46
@@ -57,7 +58,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
57static int sha1_finup(struct shash_desc *desc, const u8 *data, 58static int sha1_finup(struct shash_desc *desc, const u8 *data,
58 unsigned int len, u8 *out, sha1_transform_fn *sha1_xform) 59 unsigned int len, u8 *out, sha1_transform_fn *sha1_xform)
59{ 60{
60 if (!irq_fpu_usable()) 61 if (!crypto_simd_usable())
61 return crypto_sha1_finup(desc, data, len, out); 62 return crypto_sha1_finup(desc, data, len, out);
62 63
63 kernel_fpu_begin(); 64 kernel_fpu_begin();
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 773a873d2b28..73867da3cbee 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -30,6 +30,7 @@
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 31
32#include <crypto/internal/hash.h> 32#include <crypto/internal/hash.h>
33#include <crypto/internal/simd.h>
33#include <linux/init.h> 34#include <linux/init.h>
34#include <linux/module.h> 35#include <linux/module.h>
35#include <linux/mm.h> 36#include <linux/mm.h>
@@ -37,8 +38,8 @@
37#include <linux/types.h> 38#include <linux/types.h>
38#include <crypto/sha.h> 39#include <crypto/sha.h>
39#include <crypto/sha256_base.h> 40#include <crypto/sha256_base.h>
40#include <asm/fpu/api.h>
41#include <linux/string.h> 41#include <linux/string.h>
42#include <asm/simd.h>
42 43
43asmlinkage void sha256_transform_ssse3(u32 *digest, const char *data, 44asmlinkage void sha256_transform_ssse3(u32 *digest, const char *data,
44 u64 rounds); 45 u64 rounds);
@@ -49,7 +50,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
49{ 50{
50 struct sha256_state *sctx = shash_desc_ctx(desc); 51 struct sha256_state *sctx = shash_desc_ctx(desc);
51 52
52 if (!irq_fpu_usable() || 53 if (!crypto_simd_usable() ||
53 (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE) 54 (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
54 return crypto_sha256_update(desc, data, len); 55 return crypto_sha256_update(desc, data, len);
55 56
@@ -67,7 +68,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
67static int sha256_finup(struct shash_desc *desc, const u8 *data, 68static int sha256_finup(struct shash_desc *desc, const u8 *data,
68 unsigned int len, u8 *out, sha256_transform_fn *sha256_xform) 69 unsigned int len, u8 *out, sha256_transform_fn *sha256_xform)
69{ 70{
70 if (!irq_fpu_usable()) 71 if (!crypto_simd_usable())
71 return crypto_sha256_finup(desc, data, len, out); 72 return crypto_sha256_finup(desc, data, len, out);
72 73
73 kernel_fpu_begin(); 74 kernel_fpu_begin();
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index f1b811b60ba6..458356a3f124 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -28,16 +28,16 @@
28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29 29
30#include <crypto/internal/hash.h> 30#include <crypto/internal/hash.h>
31#include <crypto/internal/simd.h>
31#include <linux/init.h> 32#include <linux/init.h>
32#include <linux/module.h> 33#include <linux/module.h>
33#include <linux/mm.h> 34#include <linux/mm.h>
34#include <linux/cryptohash.h> 35#include <linux/cryptohash.h>
36#include <linux/string.h>
35#include <linux/types.h> 37#include <linux/types.h>
36#include <crypto/sha.h> 38#include <crypto/sha.h>
37#include <crypto/sha512_base.h> 39#include <crypto/sha512_base.h>
38#include <asm/fpu/api.h> 40#include <asm/simd.h>
39
40#include <linux/string.h>
41 41
42asmlinkage void sha512_transform_ssse3(u64 *digest, const char *data, 42asmlinkage void sha512_transform_ssse3(u64 *digest, const char *data,
43 u64 rounds); 43 u64 rounds);
@@ -49,7 +49,7 @@ static int sha512_update(struct shash_desc *desc, const u8 *data,
49{ 49{
50 struct sha512_state *sctx = shash_desc_ctx(desc); 50 struct sha512_state *sctx = shash_desc_ctx(desc);
51 51
52 if (!irq_fpu_usable() || 52 if (!crypto_simd_usable() ||
53 (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE) 53 (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
54 return crypto_sha512_update(desc, data, len); 54 return crypto_sha512_update(desc, data, len);
55 55
@@ -67,7 +67,7 @@ static int sha512_update(struct shash_desc *desc, const u8 *data,
67static int sha512_finup(struct shash_desc *desc, const u8 *data, 67static int sha512_finup(struct shash_desc *desc, const u8 *data,
68 unsigned int len, u8 *out, sha512_transform_fn *sha512_xform) 68 unsigned int len, u8 *out, sha512_transform_fn *sha512_xform)
69{ 69{
70 if (!irq_fpu_usable()) 70 if (!crypto_simd_usable())
71 return crypto_sha512_finup(desc, data, len, out); 71 return crypto_sha512_finup(desc, data, len, out);
72 72
73 kernel_fpu_begin(); 73 kernel_fpu_begin();
diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
index bcddf09b5aa3..4845b8c7be7f 100644
--- a/arch/x86/power/hibernate.c
+++ b/arch/x86/power/hibernate.c
@@ -90,7 +90,6 @@ static int get_e820_md5(struct e820_table *table, void *buf)
90 } 90 }
91 91
92 desc->tfm = tfm; 92 desc->tfm = tfm;
93 desc->flags = 0;
94 93
95 size = offsetof(struct e820_table, entries) + 94 size = offsetof(struct e820_table, entries) +
96 sizeof(struct e820_entry) * table->nr_entries; 95 sizeof(struct e820_entry) * table->nr_entries;
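
The "desc->flags = 0;" deletions here and in several crypto/asymmetric_keys files below follow from an shash API cleanup in this series: struct shash_desc no longer carries a flags field, so setting desc->tfm is all the initialization a caller needs. A minimal one-shot digest sketch after the change (md5, as in get_e820_md5(); error handling abbreviated, names hypothetical):

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/types.h>

static int example_md5(const void *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;	/* no desc->flags to set anymore */
		ret = crypto_shash_digest(desc, data, len, out);
	}

	crypto_free_shash(tfm);
	return ret;
}
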
diff --git a/crypto/842.c b/crypto/842.c
index bc26dc942821..5f98393b65d1 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -144,7 +144,7 @@ static int __init crypto842_mod_init(void)
144 144
145 return ret; 145 return ret;
146} 146}
147module_init(crypto842_mod_init); 147subsys_initcall(crypto842_mod_init);
148 148
149static void __exit crypto842_mod_exit(void) 149static void __exit crypto842_mod_exit(void)
150{ 150{
diff --git a/crypto/Kconfig b/crypto/Kconfig
index bbab6bf33519..3d056e7da65f 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -27,8 +27,8 @@ config CRYPTO_FIPS
27 depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS 27 depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS
28 depends on (MODULE_SIG || !MODULES) 28 depends on (MODULE_SIG || !MODULES)
29 help 29 help
30 This options enables the fips boot option which is 30 This option enables the fips boot option which is
31 required if you want to system to operate in a FIPS 200 31 required if you want the system to operate in a FIPS 200
32 certification. You should say no unless you know what 32 certification. You should say no unless you know what
33 this is. 33 this is.
34 34
@@ -113,29 +113,6 @@ config CRYPTO_ACOMP
113 select CRYPTO_ALGAPI 113 select CRYPTO_ALGAPI
114 select CRYPTO_ACOMP2 114 select CRYPTO_ACOMP2
115 115
116config CRYPTO_RSA
117 tristate "RSA algorithm"
118 select CRYPTO_AKCIPHER
119 select CRYPTO_MANAGER
120 select MPILIB
121 select ASN1
122 help
123 Generic implementation of the RSA public key algorithm.
124
125config CRYPTO_DH
126 tristate "Diffie-Hellman algorithm"
127 select CRYPTO_KPP
128 select MPILIB
129 help
130 Generic implementation of the Diffie-Hellman algorithm.
131
132config CRYPTO_ECDH
133 tristate "ECDH algorithm"
134 select CRYPTO_KPP
135 select CRYPTO_RNG_DEFAULT
136 help
137 Generic implementation of the ECDH algorithm
138
139config CRYPTO_MANAGER 116config CRYPTO_MANAGER
140 tristate "Cryptographic algorithm manager" 117 tristate "Cryptographic algorithm manager"
141 select CRYPTO_MANAGER2 118 select CRYPTO_MANAGER2
@@ -253,6 +230,48 @@ config CRYPTO_GLUE_HELPER_X86
253config CRYPTO_ENGINE 230config CRYPTO_ENGINE
254 tristate 231 tristate
255 232
233comment "Public-key cryptography"
234
235config CRYPTO_RSA
236 tristate "RSA algorithm"
237 select CRYPTO_AKCIPHER
238 select CRYPTO_MANAGER
239 select MPILIB
240 select ASN1
241 help
242 Generic implementation of the RSA public key algorithm.
243
244config CRYPTO_DH
245 tristate "Diffie-Hellman algorithm"
246 select CRYPTO_KPP
247 select MPILIB
248 help
249 Generic implementation of the Diffie-Hellman algorithm.
250
251config CRYPTO_ECC
252 tristate
253
254config CRYPTO_ECDH
255 tristate "ECDH algorithm"
256 select CRYPTO_ECC
257 select CRYPTO_KPP
258 select CRYPTO_RNG_DEFAULT
259 help
260 Generic implementation of the ECDH algorithm
261
262config CRYPTO_ECRDSA
263 tristate "EC-RDSA (GOST 34.10) algorithm"
264 select CRYPTO_ECC
265 select CRYPTO_AKCIPHER
266 select CRYPTO_STREEBOG
267 select OID_REGISTRY
268 select ASN1
269 help
270 Elliptic Curve Russian Digital Signature Algorithm (GOST R 34.10-2012,
271 RFC 7091, ISO/IEC 14888-3:2018) is one of the Russian cryptographic
272 standard algorithms (called GOST algorithms). Only signature verification
273 is implemented.
274
256comment "Authenticated Encryption with Associated Data" 275comment "Authenticated Encryption with Associated Data"
257 276
258config CRYPTO_CCM 277config CRYPTO_CCM
@@ -310,25 +329,25 @@ config CRYPTO_AEGIS128_AESNI_SSE2
310 tristate "AEGIS-128 AEAD algorithm (x86_64 AESNI+SSE2 implementation)" 329 tristate "AEGIS-128 AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
311 depends on X86 && 64BIT 330 depends on X86 && 64BIT
312 select CRYPTO_AEAD 331 select CRYPTO_AEAD
313 select CRYPTO_CRYPTD 332 select CRYPTO_SIMD
314 help 333 help
315 AESNI+SSE2 implementation of the AEGSI-128 dedicated AEAD algorithm. 334 AESNI+SSE2 implementation of the AEGIS-128 dedicated AEAD algorithm.
316 335
317config CRYPTO_AEGIS128L_AESNI_SSE2 336config CRYPTO_AEGIS128L_AESNI_SSE2
318 tristate "AEGIS-128L AEAD algorithm (x86_64 AESNI+SSE2 implementation)" 337 tristate "AEGIS-128L AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
319 depends on X86 && 64BIT 338 depends on X86 && 64BIT
320 select CRYPTO_AEAD 339 select CRYPTO_AEAD
321 select CRYPTO_CRYPTD 340 select CRYPTO_SIMD
322 help 341 help
323 AESNI+SSE2 implementation of the AEGSI-128L dedicated AEAD algorithm. 342 AESNI+SSE2 implementation of the AEGIS-128L dedicated AEAD algorithm.
324 343
325config CRYPTO_AEGIS256_AESNI_SSE2 344config CRYPTO_AEGIS256_AESNI_SSE2
326 tristate "AEGIS-256 AEAD algorithm (x86_64 AESNI+SSE2 implementation)" 345 tristate "AEGIS-256 AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
327 depends on X86 && 64BIT 346 depends on X86 && 64BIT
328 select CRYPTO_AEAD 347 select CRYPTO_AEAD
329 select CRYPTO_CRYPTD 348 select CRYPTO_SIMD
330 help 349 help
331 AESNI+SSE2 implementation of the AEGSI-256 dedicated AEAD algorithm. 350 AESNI+SSE2 implementation of the AEGIS-256 dedicated AEAD algorithm.
332 351
333config CRYPTO_MORUS640 352config CRYPTO_MORUS640
334 tristate "MORUS-640 AEAD algorithm" 353 tristate "MORUS-640 AEAD algorithm"
@@ -340,7 +359,7 @@ config CRYPTO_MORUS640_GLUE
340 tristate 359 tristate
341 depends on X86 360 depends on X86
342 select CRYPTO_AEAD 361 select CRYPTO_AEAD
343 select CRYPTO_CRYPTD 362 select CRYPTO_SIMD
344 help 363 help
345 Common glue for SIMD optimizations of the MORUS-640 dedicated AEAD 364 Common glue for SIMD optimizations of the MORUS-640 dedicated AEAD
346 algorithm. 365 algorithm.
@@ -363,7 +382,7 @@ config CRYPTO_MORUS1280_GLUE
363 tristate 382 tristate
364 depends on X86 383 depends on X86
365 select CRYPTO_AEAD 384 select CRYPTO_AEAD
366 select CRYPTO_CRYPTD 385 select CRYPTO_SIMD
367 help 386 help
368 Common glue for SIMD optimizations of the MORUS-1280 dedicated AEAD 387 Common glue for SIMD optimizations of the MORUS-1280 dedicated AEAD
369 algorithm. 388 algorithm.
diff --git a/crypto/Makefile b/crypto/Makefile
index fb5bf2a3a666..266a4cdbb9e2 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -147,12 +147,20 @@ obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
147obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o 147obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o
148obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o 148obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o
149obj-$(CONFIG_CRYPTO_OFB) += ofb.o 149obj-$(CONFIG_CRYPTO_OFB) += ofb.o
150obj-$(CONFIG_CRYPTO_ECC) += ecc.o
150 151
151ecdh_generic-y := ecc.o
152ecdh_generic-y += ecdh.o 152ecdh_generic-y += ecdh.o
153ecdh_generic-y += ecdh_helper.o 153ecdh_generic-y += ecdh_helper.o
154obj-$(CONFIG_CRYPTO_ECDH) += ecdh_generic.o 154obj-$(CONFIG_CRYPTO_ECDH) += ecdh_generic.o
155 155
156$(obj)/ecrdsa_params.asn1.o: $(obj)/ecrdsa_params.asn1.c $(obj)/ecrdsa_params.asn1.h
157$(obj)/ecrdsa_pub_key.asn1.o: $(obj)/ecrdsa_pub_key.asn1.c $(obj)/ecrdsa_pub_key.asn1.h
158$(obj)/ecrdsa.o: $(obj)/ecrdsa_params.asn1.h $(obj)/ecrdsa_pub_key.asn1.h
159ecrdsa_generic-y += ecrdsa.o
160ecrdsa_generic-y += ecrdsa_params.asn1.o
161ecrdsa_generic-y += ecrdsa_pub_key.asn1.o
162obj-$(CONFIG_CRYPTO_ECRDSA) += ecrdsa_generic.o
163
156# 164#
157# generic algorithms and the async_tx api 165# generic algorithms and the async_tx api
158# 166#
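
The explicit .asn1.o dependency lines mirror what the rsa and pkcs7 code already does: the grammars are run through scripts/asn1_compiler at build time, producing a table-driven decoder named after the source file. A rough consumer-side sketch, assuming the generated ecrdsa_params_decoder symbol and a hypothetical context struct (the real action callbacks live in ecrdsa.c, which is not shown in this hunk):

#include <linux/asn1_decoder.h>
#include "ecrdsa_params.asn1.h"	/* generated; declares ecrdsa_params_decoder */

struct ecrdsa_example_ctx {
	/* filled in by the action callbacks named in the .asn1 grammar */
	unsigned int curve_oid;
};

static int example_parse_params(struct ecrdsa_example_ctx *ctx,
				const void *params, size_t paramlen)
{
	/* Walks the BER/DER input against the compiled grammar and invokes
	 * the grammar's action callbacks with each matched value. */
	return asn1_ber_decoder(&ecrdsa_params_decoder, ctx, params, paramlen);
}
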
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index 5564e73266a6..395a3ddd3707 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -265,7 +265,6 @@ static int adiantum_hash_message(struct skcipher_request *req,
265 int err; 265 int err;
266 266
267 hash_desc->tfm = tctx->hash; 267 hash_desc->tfm = tctx->hash;
268 hash_desc->flags = 0;
269 268
270 err = crypto_shash_init(hash_desc); 269 err = crypto_shash_init(hash_desc);
271 if (err) 270 if (err)
@@ -659,7 +658,7 @@ static void __exit adiantum_module_exit(void)
659 crypto_unregister_template(&adiantum_tmpl); 658 crypto_unregister_template(&adiantum_tmpl);
660} 659}
661 660
662module_init(adiantum_module_init); 661subsys_initcall(adiantum_module_init);
663module_exit(adiantum_module_exit); 662module_exit(adiantum_module_exit);
664 663
665MODULE_DESCRIPTION("Adiantum length-preserving encryption mode"); 664MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
diff --git a/crypto/aegis128.c b/crypto/aegis128.c
index 3718a8341303..d78f77fc5dd1 100644
--- a/crypto/aegis128.c
+++ b/crypto/aegis128.c
@@ -448,7 +448,7 @@ static void __exit crypto_aegis128_module_exit(void)
448 crypto_unregister_aead(&crypto_aegis128_alg); 448 crypto_unregister_aead(&crypto_aegis128_alg);
449} 449}
450 450
451module_init(crypto_aegis128_module_init); 451subsys_initcall(crypto_aegis128_module_init);
452module_exit(crypto_aegis128_module_exit); 452module_exit(crypto_aegis128_module_exit);
453 453
454MODULE_LICENSE("GPL"); 454MODULE_LICENSE("GPL");
diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c
index 275a8616d71b..9bca3d619a22 100644
--- a/crypto/aegis128l.c
+++ b/crypto/aegis128l.c
@@ -512,7 +512,7 @@ static void __exit crypto_aegis128l_module_exit(void)
512 crypto_unregister_aead(&crypto_aegis128l_alg); 512 crypto_unregister_aead(&crypto_aegis128l_alg);
513} 513}
514 514
515module_init(crypto_aegis128l_module_init); 515subsys_initcall(crypto_aegis128l_module_init);
516module_exit(crypto_aegis128l_module_exit); 516module_exit(crypto_aegis128l_module_exit);
517 517
518MODULE_LICENSE("GPL"); 518MODULE_LICENSE("GPL");
diff --git a/crypto/aegis256.c b/crypto/aegis256.c
index ecd6b7f34a2d..b47fd39595ad 100644
--- a/crypto/aegis256.c
+++ b/crypto/aegis256.c
@@ -463,7 +463,7 @@ static void __exit crypto_aegis256_module_exit(void)
463 crypto_unregister_aead(&crypto_aegis256_alg); 463 crypto_unregister_aead(&crypto_aegis256_alg);
464} 464}
465 465
466module_init(crypto_aegis256_module_init); 466subsys_initcall(crypto_aegis256_module_init);
467module_exit(crypto_aegis256_module_exit); 467module_exit(crypto_aegis256_module_exit);
468 468
469MODULE_LICENSE("GPL"); 469MODULE_LICENSE("GPL");
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index 13df33aca463..f217568917e4 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -64,7 +64,7 @@ static inline u8 byte(const u32 x, const unsigned n)
64static const u32 rco_tab[10] = { 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 }; 64static const u32 rco_tab[10] = { 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 };
65 65
66/* cacheline-aligned to facilitate prefetching into cache */ 66/* cacheline-aligned to facilitate prefetching into cache */
67__visible const u32 crypto_ft_tab[4][256] __cacheline_aligned = { 67__visible const u32 crypto_ft_tab[4][256] ____cacheline_aligned = {
68 { 68 {
69 0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6, 69 0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6,
70 0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591, 70 0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591,
@@ -328,7 +328,7 @@ __visible const u32 crypto_ft_tab[4][256] __cacheline_aligned = {
328 } 328 }
329}; 329};
330 330
331__visible const u32 crypto_fl_tab[4][256] __cacheline_aligned = { 331__visible const u32 crypto_fl_tab[4][256] ____cacheline_aligned = {
332 { 332 {
333 0x00000063, 0x0000007c, 0x00000077, 0x0000007b, 333 0x00000063, 0x0000007c, 0x00000077, 0x0000007b,
334 0x000000f2, 0x0000006b, 0x0000006f, 0x000000c5, 334 0x000000f2, 0x0000006b, 0x0000006f, 0x000000c5,
@@ -592,7 +592,7 @@ __visible const u32 crypto_fl_tab[4][256] __cacheline_aligned = {
592 } 592 }
593}; 593};
594 594
595__visible const u32 crypto_it_tab[4][256] __cacheline_aligned = { 595__visible const u32 crypto_it_tab[4][256] ____cacheline_aligned = {
596 { 596 {
597 0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a, 597 0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a,
598 0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b, 598 0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b,
@@ -856,7 +856,7 @@ __visible const u32 crypto_it_tab[4][256] __cacheline_aligned = {
856 } 856 }
857}; 857};
858 858
859__visible const u32 crypto_il_tab[4][256] __cacheline_aligned = { 859__visible const u32 crypto_il_tab[4][256] ____cacheline_aligned = {
860 { 860 {
861 0x00000052, 0x00000009, 0x0000006a, 0x000000d5, 861 0x00000052, 0x00000009, 0x0000006a, 0x000000d5,
862 0x00000030, 0x00000036, 0x000000a5, 0x00000038, 862 0x00000030, 0x00000036, 0x000000a5, 0x00000038,
@@ -1470,7 +1470,7 @@ static void __exit aes_fini(void)
1470 crypto_unregister_alg(&aes_alg); 1470 crypto_unregister_alg(&aes_alg);
1471} 1471}
1472 1472
1473module_init(aes_init); 1473subsys_initcall(aes_init);
1474module_exit(aes_fini); 1474module_exit(aes_fini);
1475 1475
1476MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); 1476MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index 0cbeae137e0a..780daa436dac 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -119,10 +119,24 @@ static void akcipher_prepare_alg(struct akcipher_alg *alg)
119 base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER; 119 base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
120} 120}
121 121
122static int akcipher_default_op(struct akcipher_request *req)
123{
124 return -ENOSYS;
125}
126
122int crypto_register_akcipher(struct akcipher_alg *alg) 127int crypto_register_akcipher(struct akcipher_alg *alg)
123{ 128{
124 struct crypto_alg *base = &alg->base; 129 struct crypto_alg *base = &alg->base;
125 130
131 if (!alg->sign)
132 alg->sign = akcipher_default_op;
133 if (!alg->verify)
134 alg->verify = akcipher_default_op;
135 if (!alg->encrypt)
136 alg->encrypt = akcipher_default_op;
137 if (!alg->decrypt)
138 alg->decrypt = akcipher_default_op;
139
126 akcipher_prepare_alg(alg); 140 akcipher_prepare_alg(alg);
127 return crypto_register_alg(base); 141 return crypto_register_alg(base);
128} 142}
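
With the stub installed at registration time, an akcipher_alg may leave any of sign/verify/encrypt/decrypt NULL (as the verify-only ecrdsa does), and callers no longer need to check for missing operations: the request simply fails with -ENOSYS. A hedged caller-side sketch:

#include <crypto/akcipher.h>
#include <linux/errno.h>
#include <linux/printk.h>

static int example_try_sign(struct akcipher_request *req)
{
	int ret;

	/* Safe even for algorithms that implement no signing: the core
	 * substitutes akcipher_default_op() for unset callbacks. */
	ret = crypto_akcipher_sign(req);
	if (ret == -ENOSYS)
		pr_info("algorithm does not implement signing\n");
	return ret;
}
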
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 527b44d0af21..bb97cfb38836 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -296,7 +296,13 @@ static void __exit cryptomgr_exit(void)
296 BUG_ON(err); 296 BUG_ON(err);
297} 297}
298 298
299subsys_initcall(cryptomgr_init); 299/*
300 * This is arch_initcall() so that the crypto self-tests are run on algorithms
301 * registered early by subsys_initcall(). subsys_initcall() is needed for
302 * generic implementations so that they're available for comparison tests when
303 * other implementations are registered later by module_init().
304 */
305arch_initcall(cryptomgr_init);
300module_exit(cryptomgr_exit); 306module_exit(cryptomgr_exit);
301 307
302MODULE_LICENSE("GPL"); 308MODULE_LICENSE("GPL");
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index eff337ce9003..e7c43ea4ce9d 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -472,7 +472,7 @@ MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
472MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>"); 472MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
473module_param(dbg, int, 0); 473module_param(dbg, int, 0);
474MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)"); 474MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
475module_init(prng_mod_init); 475subsys_initcall(prng_mod_init);
476module_exit(prng_mod_fini); 476module_exit(prng_mod_fini);
477MODULE_ALIAS_CRYPTO("stdrng"); 477MODULE_ALIAS_CRYPTO("stdrng");
478MODULE_ALIAS_CRYPTO("ansi_cprng"); 478MODULE_ALIAS_CRYPTO("ansi_cprng");
diff --git a/crypto/anubis.c b/crypto/anubis.c
index 4bb187c2a902..673927de0eb9 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -699,7 +699,7 @@ static void __exit anubis_mod_fini(void)
699 crypto_unregister_alg(&anubis_alg); 699 crypto_unregister_alg(&anubis_alg);
700} 700}
701 701
702module_init(anubis_mod_init); 702subsys_initcall(anubis_mod_init);
703module_exit(anubis_mod_fini); 703module_exit(anubis_mod_fini);
704 704
705MODULE_LICENSE("GPL"); 705MODULE_LICENSE("GPL");
diff --git a/crypto/arc4.c b/crypto/arc4.c
index 6c93342e3405..2233d36456e2 100644
--- a/crypto/arc4.c
+++ b/crypto/arc4.c
@@ -163,7 +163,7 @@ static void __exit arc4_exit(void)
163 crypto_unregister_skcipher(&arc4_skcipher); 163 crypto_unregister_skcipher(&arc4_skcipher);
164} 164}
165 165
166module_init(arc4_init); 166subsys_initcall(arc4_init);
167module_exit(arc4_exit); 167module_exit(arc4_exit);
168 168
169MODULE_LICENSE("GPL"); 169MODULE_LICENSE("GPL");
diff --git a/crypto/asymmetric_keys/asym_tpm.c b/crypto/asymmetric_keys/asym_tpm.c
index 5d4c270463f6..76d2ce3a1b5b 100644
--- a/crypto/asymmetric_keys/asym_tpm.c
+++ b/crypto/asymmetric_keys/asym_tpm.c
@@ -276,6 +276,10 @@ static int tpm_sign(struct tpm_buf *tb,
276 276
277 return datalen; 277 return datalen;
278} 278}
279
280/* Room to fit two u32 zeros for algo id and parameters length. */
281#define SETKEY_PARAMS_SIZE (sizeof(u32) * 2)
282
279/* 283/*
280 * Maximum buffer size for the BER/DER encoded public key. The public key 284 * Maximum buffer size for the BER/DER encoded public key. The public key
281 * is of the form SEQUENCE { INTEGER n, INTEGER e } where n is a maximum 2048 285 * is of the form SEQUENCE { INTEGER n, INTEGER e } where n is a maximum 2048
@@ -286,8 +290,9 @@ static int tpm_sign(struct tpm_buf *tb,
286 * - 257 bytes of n 290 * - 257 bytes of n
287 * - max 2 bytes for INTEGER e type/length 291 * - max 2 bytes for INTEGER e type/length
288 * - 3 bytes of e 292 * - 3 bytes of e
293 * - 4+4 of zeros for set_pub_key parameters (SETKEY_PARAMS_SIZE)
289 */ 294 */
290#define PUB_KEY_BUF_SIZE (4 + 4 + 257 + 2 + 3) 295#define PUB_KEY_BUF_SIZE (4 + 4 + 257 + 2 + 3 + SETKEY_PARAMS_SIZE)
291 296
292/* 297/*
293 * Provide a part of a description of the key for /proc/keys. 298 * Provide a part of a description of the key for /proc/keys.
@@ -364,6 +369,8 @@ static uint32_t derive_pub_key(const void *pub_key, uint32_t len, uint8_t *buf)
364 cur = encode_tag_length(cur, 0x02, sizeof(e)); 369 cur = encode_tag_length(cur, 0x02, sizeof(e));
365 memcpy(cur, e, sizeof(e)); 370 memcpy(cur, e, sizeof(e));
366 cur += sizeof(e); 371 cur += sizeof(e);
372 /* Zero parameters to satisfy set_pub_key ABI. */
373 memset(cur, 0, SETKEY_PARAMS_SIZE);
367 374
368 return cur - buf; 375 return cur - buf;
369} 376}
@@ -744,12 +751,10 @@ static int tpm_key_verify_signature(const struct key *key,
744 struct crypto_wait cwait; 751 struct crypto_wait cwait;
745 struct crypto_akcipher *tfm; 752 struct crypto_akcipher *tfm;
746 struct akcipher_request *req; 753 struct akcipher_request *req;
747 struct scatterlist sig_sg, digest_sg; 754 struct scatterlist src_sg[2];
748 char alg_name[CRYPTO_MAX_ALG_NAME]; 755 char alg_name[CRYPTO_MAX_ALG_NAME];
749 uint8_t der_pub_key[PUB_KEY_BUF_SIZE]; 756 uint8_t der_pub_key[PUB_KEY_BUF_SIZE];
750 uint32_t der_pub_key_len; 757 uint32_t der_pub_key_len;
751 void *output;
752 unsigned int outlen;
753 int ret; 758 int ret;
754 759
755 pr_devel("==>%s()\n", __func__); 760 pr_devel("==>%s()\n", __func__);
@@ -781,37 +786,17 @@ static int tpm_key_verify_signature(const struct key *key,
781 if (!req) 786 if (!req)
782 goto error_free_tfm; 787 goto error_free_tfm;
783 788
784 ret = -ENOMEM; 789 sg_init_table(src_sg, 2);
785 outlen = crypto_akcipher_maxsize(tfm); 790 sg_set_buf(&src_sg[0], sig->s, sig->s_size);
786 output = kmalloc(outlen, GFP_KERNEL); 791 sg_set_buf(&src_sg[1], sig->digest, sig->digest_size);
787 if (!output) 792 akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size,
788 goto error_free_req; 793 sig->digest_size);
789
790 sg_init_one(&sig_sg, sig->s, sig->s_size);
791 sg_init_one(&digest_sg, output, outlen);
792 akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
793 outlen);
794 crypto_init_wait(&cwait); 794 crypto_init_wait(&cwait);
795 akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | 795 akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
796 CRYPTO_TFM_REQ_MAY_SLEEP, 796 CRYPTO_TFM_REQ_MAY_SLEEP,
797 crypto_req_done, &cwait); 797 crypto_req_done, &cwait);
798
799 /* Perform the verification calculation. This doesn't actually do the
800 * verification, but rather calculates the hash expected by the
801 * signature and returns that to us.
802 */
803 ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait); 798 ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
804 if (ret)
805 goto out_free_output;
806
807 /* Do the actual verification step. */
808 if (req->dst_len != sig->digest_size ||
809 memcmp(sig->digest, output, sig->digest_size) != 0)
810 ret = -EKEYREJECTED;
811 799
812out_free_output:
813 kfree(output);
814error_free_req:
815 akcipher_request_free(req); 800 akcipher_request_free(req);
816error_free_tfm: 801error_free_tfm:
817 crypto_free_akcipher(tfm); 802 crypto_free_akcipher(tfm);
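
This is one half of the akcipher verify API change noted in the pull request (the other is in public_key.c below): crypto_akcipher_verify() now takes the signature and the expected digest together as two source scatterlist entries and returns 0 only when they match, so the caller-side kmalloc/memcmp dance disappears. A condensed sketch of the new calling convention, using a synchronous wait as both converted call sites do (names hypothetical):

#include <crypto/akcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int example_verify(struct crypto_akcipher *tfm,
			  const void *s, unsigned int s_size,
			  const void *digest, unsigned int digest_size)
{
	struct akcipher_request *req;
	struct scatterlist src_sg[2];
	struct crypto_wait cwait;
	int ret;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* src carries signature then digest; dst is no longer used. */
	sg_init_table(src_sg, 2);
	sg_set_buf(&src_sg[0], s, s_size);
	sg_set_buf(&src_sg[1], digest, digest_size);
	akcipher_request_set_crypt(req, src_sg, NULL, s_size, digest_size);

	crypto_init_wait(&cwait);
	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &cwait);
	ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);

	akcipher_request_free(req);
	return ret;	/* 0 on match; an error (e.g. -EKEYREJECTED) if not */
}
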
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
index 97c77f66b20d..f7b0980bf02d 100644
--- a/crypto/asymmetric_keys/pkcs7_verify.c
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
@@ -56,7 +56,6 @@ static int pkcs7_digest(struct pkcs7_message *pkcs7,
56 goto error_no_desc; 56 goto error_no_desc;
57 57
58 desc->tfm = tfm; 58 desc->tfm = tfm;
59 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
60 59
61 /* Digest the message [RFC2315 9.3] */ 60 /* Digest the message [RFC2315 9.3] */
62 ret = crypto_shash_digest(desc, pkcs7->data, pkcs7->data_len, 61 ret = crypto_shash_digest(desc, pkcs7->data, pkcs7->data_len,
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index f5d85b47fcc6..77e0ae7840ff 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -45,6 +45,7 @@ void public_key_free(struct public_key *key)
45{ 45{
46 if (key) { 46 if (key) {
47 kfree(key->key); 47 kfree(key->key);
48 kfree(key->params);
48 kfree(key); 49 kfree(key);
49 } 50 }
50} 51}
@@ -94,6 +95,12 @@ int software_key_determine_akcipher(const char *encoding,
94 return -ENOPKG; 95 return -ENOPKG;
95} 96}
96 97
98static u8 *pkey_pack_u32(u8 *dst, u32 val)
99{
100 memcpy(dst, &val, sizeof(val));
101 return dst + sizeof(val);
102}
103
97/* 104/*
98 * Query information about a key. 105 * Query information about a key.
99 */ 106 */
@@ -103,6 +110,7 @@ static int software_key_query(const struct kernel_pkey_params *params,
103 struct crypto_akcipher *tfm; 110 struct crypto_akcipher *tfm;
104 struct public_key *pkey = params->key->payload.data[asym_crypto]; 111 struct public_key *pkey = params->key->payload.data[asym_crypto];
105 char alg_name[CRYPTO_MAX_ALG_NAME]; 112 char alg_name[CRYPTO_MAX_ALG_NAME];
113 u8 *key, *ptr;
106 int ret, len; 114 int ret, len;
107 115
108 ret = software_key_determine_akcipher(params->encoding, 116 ret = software_key_determine_akcipher(params->encoding,
@@ -115,14 +123,22 @@ static int software_key_query(const struct kernel_pkey_params *params,
115 if (IS_ERR(tfm)) 123 if (IS_ERR(tfm))
116 return PTR_ERR(tfm); 124 return PTR_ERR(tfm);
117 125
126 key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
127 GFP_KERNEL);
128 if (!key)
129 goto error_free_tfm;
130 memcpy(key, pkey->key, pkey->keylen);
131 ptr = key + pkey->keylen;
132 ptr = pkey_pack_u32(ptr, pkey->algo);
133 ptr = pkey_pack_u32(ptr, pkey->paramlen);
134 memcpy(ptr, pkey->params, pkey->paramlen);
135
118 if (pkey->key_is_private) 136 if (pkey->key_is_private)
119 ret = crypto_akcipher_set_priv_key(tfm, 137 ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
120 pkey->key, pkey->keylen);
121 else 138 else
122 ret = crypto_akcipher_set_pub_key(tfm, 139 ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
123 pkey->key, pkey->keylen);
124 if (ret < 0) 140 if (ret < 0)
125 goto error_free_tfm; 141 goto error_free_key;
126 142
127 len = crypto_akcipher_maxsize(tfm); 143 len = crypto_akcipher_maxsize(tfm);
128 info->key_size = len * 8; 144 info->key_size = len * 8;
@@ -137,6 +153,8 @@ static int software_key_query(const struct kernel_pkey_params *params,
137 KEYCTL_SUPPORTS_SIGN); 153 KEYCTL_SUPPORTS_SIGN);
138 ret = 0; 154 ret = 0;
139 155
156error_free_key:
157 kfree(key);
140error_free_tfm: 158error_free_tfm:
141 crypto_free_akcipher(tfm); 159 crypto_free_akcipher(tfm);
142 pr_devel("<==%s() = %d\n", __func__, ret); 160 pr_devel("<==%s() = %d\n", __func__, ret);
@@ -155,6 +173,7 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
155 struct crypto_wait cwait; 173 struct crypto_wait cwait;
156 struct scatterlist in_sg, out_sg; 174 struct scatterlist in_sg, out_sg;
157 char alg_name[CRYPTO_MAX_ALG_NAME]; 175 char alg_name[CRYPTO_MAX_ALG_NAME];
176 char *key, *ptr;
158 int ret; 177 int ret;
159 178
160 pr_devel("==>%s()\n", __func__); 179 pr_devel("==>%s()\n", __func__);
@@ -173,14 +192,23 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
173 if (!req) 192 if (!req)
174 goto error_free_tfm; 193 goto error_free_tfm;
175 194
195 key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
196 GFP_KERNEL);
197 if (!key)
198 goto error_free_req;
199
200 memcpy(key, pkey->key, pkey->keylen);
201 ptr = key + pkey->keylen;
202 ptr = pkey_pack_u32(ptr, pkey->algo);
203 ptr = pkey_pack_u32(ptr, pkey->paramlen);
204 memcpy(ptr, pkey->params, pkey->paramlen);
205
176 if (pkey->key_is_private) 206 if (pkey->key_is_private)
177 ret = crypto_akcipher_set_priv_key(tfm, 207 ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
178 pkey->key, pkey->keylen);
179 else 208 else
180 ret = crypto_akcipher_set_pub_key(tfm, 209 ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
181 pkey->key, pkey->keylen);
182 if (ret) 210 if (ret)
183 goto error_free_req; 211 goto error_free_key;
184 212
185 sg_init_one(&in_sg, in, params->in_len); 213 sg_init_one(&in_sg, in, params->in_len);
186 sg_init_one(&out_sg, out, params->out_len); 214 sg_init_one(&out_sg, out, params->out_len);
@@ -210,6 +238,8 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
210 if (ret == 0) 238 if (ret == 0)
211 ret = req->dst_len; 239 ret = req->dst_len;
212 240
241error_free_key:
242 kfree(key);
213error_free_req: 243error_free_req:
214 akcipher_request_free(req); 244 akcipher_request_free(req);
215error_free_tfm: 245error_free_tfm:
@@ -227,10 +257,9 @@ int public_key_verify_signature(const struct public_key *pkey,
227 struct crypto_wait cwait; 257 struct crypto_wait cwait;
228 struct crypto_akcipher *tfm; 258 struct crypto_akcipher *tfm;
229 struct akcipher_request *req; 259 struct akcipher_request *req;
230 struct scatterlist sig_sg, digest_sg; 260 struct scatterlist src_sg[2];
231 char alg_name[CRYPTO_MAX_ALG_NAME]; 261 char alg_name[CRYPTO_MAX_ALG_NAME];
232 void *output; 262 char *key, *ptr;
233 unsigned int outlen;
234 int ret; 263 int ret;
235 264
236 pr_devel("==>%s()\n", __func__); 265 pr_devel("==>%s()\n", __func__);
@@ -254,45 +283,37 @@ int public_key_verify_signature(const struct public_key *pkey,
254 if (!req) 283 if (!req)
255 goto error_free_tfm; 284 goto error_free_tfm;
256 285
286 key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
287 GFP_KERNEL);
288 if (!key)
289 goto error_free_req;
290
291 memcpy(key, pkey->key, pkey->keylen);
292 ptr = key + pkey->keylen;
293 ptr = pkey_pack_u32(ptr, pkey->algo);
294 ptr = pkey_pack_u32(ptr, pkey->paramlen);
295 memcpy(ptr, pkey->params, pkey->paramlen);
296
257 if (pkey->key_is_private) 297 if (pkey->key_is_private)
258 ret = crypto_akcipher_set_priv_key(tfm, 298 ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
259 pkey->key, pkey->keylen);
260 else 299 else
261 ret = crypto_akcipher_set_pub_key(tfm, 300 ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
262 pkey->key, pkey->keylen);
263 if (ret) 301 if (ret)
264 goto error_free_req; 302 goto error_free_key;
265
266 ret = -ENOMEM;
267 outlen = crypto_akcipher_maxsize(tfm);
268 output = kmalloc(outlen, GFP_KERNEL);
269 if (!output)
270 goto error_free_req;
271 303
272 sg_init_one(&sig_sg, sig->s, sig->s_size); 304 sg_init_table(src_sg, 2);
273 sg_init_one(&digest_sg, output, outlen); 305 sg_set_buf(&src_sg[0], sig->s, sig->s_size);
274 akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size, 306 sg_set_buf(&src_sg[1], sig->digest, sig->digest_size);
275 outlen); 307 akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size,
308 sig->digest_size);
276 crypto_init_wait(&cwait); 309 crypto_init_wait(&cwait);
277 akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | 310 akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
278 CRYPTO_TFM_REQ_MAY_SLEEP, 311 CRYPTO_TFM_REQ_MAY_SLEEP,
279 crypto_req_done, &cwait); 312 crypto_req_done, &cwait);
280
281 /* Perform the verification calculation. This doesn't actually do the
282 * verification, but rather calculates the hash expected by the
283 * signature and returns that to us.
284 */
285 ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait); 313 ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
286 if (ret)
287 goto out_free_output;
288
289 /* Do the actual verification step. */
290 if (req->dst_len != sig->digest_size ||
291 memcmp(sig->digest, output, sig->digest_size) != 0)
292 ret = -EKEYREJECTED;
293 314
294out_free_output: 315error_free_key:
295 kfree(output); 316 kfree(key);
296error_free_req: 317error_free_req:
297 akcipher_request_free(req); 318 akcipher_request_free(req);
298error_free_tfm: 319error_free_tfm:
diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c
index d178650fd524..f8e4a932bcfb 100644
--- a/crypto/asymmetric_keys/verify_pefile.c
+++ b/crypto/asymmetric_keys/verify_pefile.c
@@ -354,7 +354,6 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
354 goto error_no_desc; 354 goto error_no_desc;
355 355
356 desc->tfm = tfm; 356 desc->tfm = tfm;
357 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
358 ret = crypto_shash_init(desc); 357 ret = crypto_shash_init(desc);
359 if (ret < 0) 358 if (ret < 0)
360 goto error; 359 goto error;
diff --git a/crypto/asymmetric_keys/x509.asn1 b/crypto/asymmetric_keys/x509.asn1
index aae0cde414e2..5c9f4e4a5231 100644
--- a/crypto/asymmetric_keys/x509.asn1
+++ b/crypto/asymmetric_keys/x509.asn1
@@ -22,7 +22,7 @@ CertificateSerialNumber ::= INTEGER
22 22
23AlgorithmIdentifier ::= SEQUENCE { 23AlgorithmIdentifier ::= SEQUENCE {
24 algorithm OBJECT IDENTIFIER ({ x509_note_OID }), 24 algorithm OBJECT IDENTIFIER ({ x509_note_OID }),
25 parameters ANY OPTIONAL 25 parameters ANY OPTIONAL ({ x509_note_params })
26} 26}
27 27
28Name ::= SEQUENCE OF RelativeDistinguishedName 28Name ::= SEQUENCE OF RelativeDistinguishedName
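
The parenthesized name added to the grammar is an ASN.1 action annotation: the generated parser invokes a C callback of the standard five-argument shape whenever the annotated element is matched. The callback itself, x509_note_params(), is added in x509_cert_parser.c below with this prototype:

int x509_note_params(void *context, size_t hdrlen,
		     unsigned char tag,
		     const void *value, size_t vlen);
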
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 991f4d735a4e..5b7bfd95c334 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -26,6 +26,9 @@ struct x509_parse_context {
26 const void *cert_start; /* Start of cert content */ 26 const void *cert_start; /* Start of cert content */
27 const void *key; /* Key data */ 27 const void *key; /* Key data */
28 size_t key_size; /* Size of key data */ 28 size_t key_size; /* Size of key data */
29 const void *params; /* Key parameters */
30 size_t params_size; /* Size of key parameters */
31 enum OID key_algo; /* Public key algorithm */
29 enum OID last_oid; /* Last OID encountered */ 32 enum OID last_oid; /* Last OID encountered */
30 enum OID algo_oid; /* Algorithm OID */ 33 enum OID algo_oid; /* Algorithm OID */
31 unsigned char nr_mpi; /* Number of MPIs stored */ 34 unsigned char nr_mpi; /* Number of MPIs stored */
@@ -109,6 +112,13 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
109 112
110 cert->pub->keylen = ctx->key_size; 113 cert->pub->keylen = ctx->key_size;
111 114
115 cert->pub->params = kmemdup(ctx->params, ctx->params_size, GFP_KERNEL);
116 if (!cert->pub->params)
117 goto error_decode;
118
119 cert->pub->paramlen = ctx->params_size;
120 cert->pub->algo = ctx->key_algo;
121
112 /* Grab the signature bits */ 122 /* Grab the signature bits */
113 ret = x509_get_sig_params(cert); 123 ret = x509_get_sig_params(cert);
114 if (ret < 0) 124 if (ret < 0)
@@ -220,6 +230,14 @@ int x509_note_pkey_algo(void *context, size_t hdrlen,
220 case OID_sha224WithRSAEncryption: 230 case OID_sha224WithRSAEncryption:
221 ctx->cert->sig->hash_algo = "sha224"; 231 ctx->cert->sig->hash_algo = "sha224";
222 goto rsa_pkcs1; 232 goto rsa_pkcs1;
233
234 case OID_gost2012Signature256:
235 ctx->cert->sig->hash_algo = "streebog256";
236 goto ecrdsa;
237
238 case OID_gost2012Signature512:
239 ctx->cert->sig->hash_algo = "streebog512";
240 goto ecrdsa;
223 } 241 }
224 242
225rsa_pkcs1: 243rsa_pkcs1:
@@ -227,6 +245,11 @@ rsa_pkcs1:
227 ctx->cert->sig->encoding = "pkcs1"; 245 ctx->cert->sig->encoding = "pkcs1";
228 ctx->algo_oid = ctx->last_oid; 246 ctx->algo_oid = ctx->last_oid;
229 return 0; 247 return 0;
248ecrdsa:
249 ctx->cert->sig->pkey_algo = "ecrdsa";
250 ctx->cert->sig->encoding = "raw";
251 ctx->algo_oid = ctx->last_oid;
252 return 0;
230} 253}
231 254
232/* 255/*
@@ -246,7 +269,8 @@ int x509_note_signature(void *context, size_t hdrlen,
246 return -EINVAL; 269 return -EINVAL;
247 } 270 }
248 271
249 if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0) { 272 if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0 ||
273 strcmp(ctx->cert->sig->pkey_algo, "ecrdsa") == 0) {
250 /* Discard the BIT STRING metadata */ 274 /* Discard the BIT STRING metadata */
251 if (vlen < 1 || *(const u8 *)value != 0) 275 if (vlen < 1 || *(const u8 *)value != 0)
252 return -EBADMSG; 276 return -EBADMSG;
@@ -401,6 +425,27 @@ int x509_note_subject(void *context, size_t hdrlen,
401} 425}
402 426
403/* 427/*
428 * Extract the parameters for the public key
429 */
430int x509_note_params(void *context, size_t hdrlen,
431 unsigned char tag,
432 const void *value, size_t vlen)
433{
434 struct x509_parse_context *ctx = context;
435
436 /*
 437 * AlgorithmIdentifier appears three times in an X.509 certificate; skip
 438 * the first and ignore the third, using the second one, which sits after
 439 * the subject and before subjectPublicKey.
440 */
441 if (!ctx->cert->raw_subject || ctx->key)
442 return 0;
443 ctx->params = value - hdrlen;
444 ctx->params_size = vlen + hdrlen;
445 return 0;
446}
447
448/*
404 * Extract the data for the public key algorithm 449 * Extract the data for the public key algorithm
405 */ 450 */
406int x509_extract_key_data(void *context, size_t hdrlen, 451int x509_extract_key_data(void *context, size_t hdrlen,
@@ -409,11 +454,15 @@ int x509_extract_key_data(void *context, size_t hdrlen,
409{ 454{
410 struct x509_parse_context *ctx = context; 455 struct x509_parse_context *ctx = context;
411 456
412 if (ctx->last_oid != OID_rsaEncryption) 457 ctx->key_algo = ctx->last_oid;
458 if (ctx->last_oid == OID_rsaEncryption)
459 ctx->cert->pub->pkey_algo = "rsa";
460 else if (ctx->last_oid == OID_gost2012PKey256 ||
461 ctx->last_oid == OID_gost2012PKey512)
462 ctx->cert->pub->pkey_algo = "ecrdsa";
463 else
413 return -ENOPKG; 464 return -ENOPKG;
414 465
415 ctx->cert->pub->pkey_algo = "rsa";
416
417 /* Discard the BIT STRING metadata */ 466 /* Discard the BIT STRING metadata */
418 if (vlen < 1 || *(const u8 *)value != 0) 467 if (vlen < 1 || *(const u8 *)value != 0)
419 return -EBADMSG; 468 return -EBADMSG;
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 9338b4558cdc..bd96683d8cde 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -77,7 +77,6 @@ int x509_get_sig_params(struct x509_certificate *cert)
77 goto error; 77 goto error;
78 78
79 desc->tfm = tfm; 79 desc->tfm = tfm;
80 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
81 80
82 ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, sig->digest); 81 ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, sig->digest);
83 if (ret < 0) 82 if (ret < 0)
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 4be293a4b5f0..b3eddac7fa3a 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -508,7 +508,7 @@ static void __exit crypto_authenc_module_exit(void)
508 crypto_unregister_template(&crypto_authenc_tmpl); 508 crypto_unregister_template(&crypto_authenc_tmpl);
509} 509}
510 510
511module_init(crypto_authenc_module_init); 511subsys_initcall(crypto_authenc_module_init);
512module_exit(crypto_authenc_module_exit); 512module_exit(crypto_authenc_module_exit);
513 513
514MODULE_LICENSE("GPL"); 514MODULE_LICENSE("GPL");
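The module_init -> subsys_initcall conversions that repeat through this series only change behavior for built-in code, where registration moves to an earlier initcall level; built as a module, subsys_initcall is defined to fall back to module_init. A minimal sketch of the idiom (hypothetical example module, not from this series):

    #include <linux/init.h>
    #include <linux/module.h>

    static int __init example_mod_init(void)
    {
        return 0;    /* register algorithms/templates here */
    }

    static void __exit example_mod_exit(void)
    {
        /* unregister here */
    }

    /* Built-in: runs at the subsys initcall level, ahead of device and
     * late initcalls; as a module this is equivalent to module_init(). */
    subsys_initcall(example_mod_init);
    module_exit(example_mod_exit);

    MODULE_LICENSE("GPL");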
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 4741fe89ba2c..58074308e535 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -523,7 +523,7 @@ static void __exit crypto_authenc_esn_module_exit(void)
523 crypto_unregister_template(&crypto_authenc_esn_tmpl); 523 crypto_unregister_template(&crypto_authenc_esn_tmpl);
524} 524}
525 525
526module_init(crypto_authenc_esn_module_init); 526subsys_initcall(crypto_authenc_esn_module_init);
527module_exit(crypto_authenc_esn_module_exit); 527module_exit(crypto_authenc_esn_module_exit);
528 528
529MODULE_LICENSE("GPL"); 529MODULE_LICENSE("GPL");
diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
index 87b392a77a93..8548ced8b074 100644
--- a/crypto/blowfish_generic.c
+++ b/crypto/blowfish_generic.c
@@ -133,7 +133,7 @@ static void __exit blowfish_mod_fini(void)
133 crypto_unregister_alg(&alg); 133 crypto_unregister_alg(&alg);
134} 134}
135 135
136module_init(blowfish_mod_init); 136subsys_initcall(blowfish_mod_init);
137module_exit(blowfish_mod_fini); 137module_exit(blowfish_mod_fini);
138 138
139MODULE_LICENSE("GPL"); 139MODULE_LICENSE("GPL");
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index 32ddd4836ff5..15ce1281f5d9 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -1092,7 +1092,7 @@ static void __exit camellia_fini(void)
1092 crypto_unregister_alg(&camellia_alg); 1092 crypto_unregister_alg(&camellia_alg);
1093} 1093}
1094 1094
1095module_init(camellia_init); 1095subsys_initcall(camellia_init);
1096module_exit(camellia_fini); 1096module_exit(camellia_fini);
1097 1097
1098MODULE_DESCRIPTION("Camellia Cipher Algorithm"); 1098MODULE_DESCRIPTION("Camellia Cipher Algorithm");
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index 66169c178314..24bc7d4e33be 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -543,7 +543,7 @@ static void __exit cast5_mod_fini(void)
543 crypto_unregister_alg(&alg); 543 crypto_unregister_alg(&alg);
544} 544}
545 545
546module_init(cast5_mod_init); 546subsys_initcall(cast5_mod_init);
547module_exit(cast5_mod_fini); 547module_exit(cast5_mod_fini);
548 548
549MODULE_LICENSE("GPL"); 549MODULE_LICENSE("GPL");
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index c8e5ec69790e..edd59cc34991 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -285,7 +285,7 @@ static void __exit cast6_mod_fini(void)
285 crypto_unregister_alg(&alg); 285 crypto_unregister_alg(&alg);
286} 286}
287 287
288module_init(cast6_mod_init); 288subsys_initcall(cast6_mod_init);
289module_exit(cast6_mod_fini); 289module_exit(cast6_mod_fini);
290 290
291MODULE_LICENSE("GPL"); 291MODULE_LICENSE("GPL");
diff --git a/crypto/cbc.c b/crypto/cbc.c
index d12efaac9230..129f79d03365 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -98,7 +98,7 @@ static void __exit crypto_cbc_module_exit(void)
98 crypto_unregister_template(&crypto_cbc_tmpl); 98 crypto_unregister_template(&crypto_cbc_tmpl);
99} 99}
100 100
101module_init(crypto_cbc_module_init); 101subsys_initcall(crypto_cbc_module_init);
102module_exit(crypto_cbc_module_exit); 102module_exit(crypto_cbc_module_exit);
103 103
104MODULE_LICENSE("GPL"); 104MODULE_LICENSE("GPL");
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 50df8f001c1c..c1ef9d0b4271 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -458,7 +458,6 @@ static void crypto_ccm_free(struct aead_instance *inst)
458 458
459static int crypto_ccm_create_common(struct crypto_template *tmpl, 459static int crypto_ccm_create_common(struct crypto_template *tmpl,
460 struct rtattr **tb, 460 struct rtattr **tb,
461 const char *full_name,
462 const char *ctr_name, 461 const char *ctr_name,
463 const char *mac_name) 462 const char *mac_name)
464{ 463{
@@ -486,7 +485,8 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
486 485
487 mac = __crypto_hash_alg_common(mac_alg); 486 mac = __crypto_hash_alg_common(mac_alg);
488 err = -EINVAL; 487 err = -EINVAL;
489 if (mac->digestsize != 16) 488 if (strncmp(mac->base.cra_name, "cbcmac(", 7) != 0 ||
489 mac->digestsize != 16)
490 goto out_put_mac; 490 goto out_put_mac;
491 491
492 inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); 492 inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
@@ -509,23 +509,27 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
509 509
510 ctr = crypto_spawn_skcipher_alg(&ictx->ctr); 510 ctr = crypto_spawn_skcipher_alg(&ictx->ctr);
511 511
512 /* Not a stream cipher? */ 512 /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
513 err = -EINVAL; 513 err = -EINVAL;
514 if (ctr->base.cra_blocksize != 1) 514 if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
515 crypto_skcipher_alg_ivsize(ctr) != 16 ||
516 ctr->base.cra_blocksize != 1)
515 goto err_drop_ctr; 517 goto err_drop_ctr;
516 518
517 /* We want the real thing! */ 519 /* ctr and cbcmac must use the same underlying block cipher. */
518 if (crypto_skcipher_alg_ivsize(ctr) != 16) 520 if (strcmp(ctr->base.cra_name + 4, mac->base.cra_name + 7) != 0)
519 goto err_drop_ctr; 521 goto err_drop_ctr;
520 522
521 err = -ENAMETOOLONG; 523 err = -ENAMETOOLONG;
524 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
525 "ccm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
526 goto err_drop_ctr;
527
522 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, 528 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
523 "ccm_base(%s,%s)", ctr->base.cra_driver_name, 529 "ccm_base(%s,%s)", ctr->base.cra_driver_name,
524 mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) 530 mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
525 goto err_drop_ctr; 531 goto err_drop_ctr;
526 532
527 memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
528
529 inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC; 533 inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC;
530 inst->alg.base.cra_priority = (mac->base.cra_priority + 534 inst->alg.base.cra_priority = (mac->base.cra_priority +
531 ctr->base.cra_priority) / 2; 535 ctr->base.cra_priority) / 2;
@@ -567,7 +571,6 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
567 const char *cipher_name; 571 const char *cipher_name;
568 char ctr_name[CRYPTO_MAX_ALG_NAME]; 572 char ctr_name[CRYPTO_MAX_ALG_NAME];
569 char mac_name[CRYPTO_MAX_ALG_NAME]; 573 char mac_name[CRYPTO_MAX_ALG_NAME];
570 char full_name[CRYPTO_MAX_ALG_NAME];
571 574
572 cipher_name = crypto_attr_alg_name(tb[1]); 575 cipher_name = crypto_attr_alg_name(tb[1]);
573 if (IS_ERR(cipher_name)) 576 if (IS_ERR(cipher_name))
@@ -581,35 +584,24 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
581 cipher_name) >= CRYPTO_MAX_ALG_NAME) 584 cipher_name) >= CRYPTO_MAX_ALG_NAME)
582 return -ENAMETOOLONG; 585 return -ENAMETOOLONG;
583 586
584 if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >= 587 return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
585 CRYPTO_MAX_ALG_NAME)
586 return -ENAMETOOLONG;
587
588 return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
589 mac_name);
590} 588}
591 589
592static int crypto_ccm_base_create(struct crypto_template *tmpl, 590static int crypto_ccm_base_create(struct crypto_template *tmpl,
593 struct rtattr **tb) 591 struct rtattr **tb)
594{ 592{
595 const char *ctr_name; 593 const char *ctr_name;
596 const char *cipher_name; 594 const char *mac_name;
597 char full_name[CRYPTO_MAX_ALG_NAME];
598 595
599 ctr_name = crypto_attr_alg_name(tb[1]); 596 ctr_name = crypto_attr_alg_name(tb[1]);
600 if (IS_ERR(ctr_name)) 597 if (IS_ERR(ctr_name))
601 return PTR_ERR(ctr_name); 598 return PTR_ERR(ctr_name);
602 599
603 cipher_name = crypto_attr_alg_name(tb[2]); 600 mac_name = crypto_attr_alg_name(tb[2]);
604 if (IS_ERR(cipher_name)) 601 if (IS_ERR(mac_name))
605 return PTR_ERR(cipher_name); 602 return PTR_ERR(mac_name);
606
607 if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
608 ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
609 return -ENAMETOOLONG;
610 603
611 return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name, 604 return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
612 cipher_name);
613} 605}
614 606
615static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key, 607static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key,
@@ -1014,7 +1006,7 @@ static void __exit crypto_ccm_module_exit(void)
1014 ARRAY_SIZE(crypto_ccm_tmpls)); 1006 ARRAY_SIZE(crypto_ccm_tmpls));
1015} 1007}
1016 1008
1017module_init(crypto_ccm_module_init); 1009subsys_initcall(crypto_ccm_module_init);
1018module_exit(crypto_ccm_module_exit); 1010module_exit(crypto_ccm_module_exit);
1019 1011
1020MODULE_LICENSE("GPL"); 1012MODULE_LICENSE("GPL");
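The + 4 and + 7 pointer arithmetic above relies on "ctr(" and "cbcmac(" being exactly 4 and 7 bytes, so the remaining suffixes name the same inner cipher, and the "ccm(%s" format deliberately reuses the ctr suffix's closing parenthesis. A standalone sketch of the string handling (hypothetical names, plain userspace C):

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *ctr = "ctr(aes)";       /* hypothetical resolved names */
        const char *mac = "cbcmac(aes)";
        char name[64];

        /* Same underlying block cipher? Both suffixes are "aes)". */
        assert(strcmp(ctr + 4, mac + 7) == 0);

        /* Derive "ccm(aes)" by reusing the ctr suffix, ')' included. */
        snprintf(name, sizeof(name), "ccm(%s", ctr + 4);
        assert(strcmp(name, "ccm(aes)") == 0);
        return 0;
    }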
diff --git a/crypto/cfb.c b/crypto/cfb.c
index 03ac847f6d6a..7b68fbb61732 100644
--- a/crypto/cfb.c
+++ b/crypto/cfb.c
@@ -243,7 +243,7 @@ static void __exit crypto_cfb_module_exit(void)
243 crypto_unregister_template(&crypto_cfb_tmpl); 243 crypto_unregister_template(&crypto_cfb_tmpl);
244} 244}
245 245
246module_init(crypto_cfb_module_init); 246subsys_initcall(crypto_cfb_module_init);
247module_exit(crypto_cfb_module_exit); 247module_exit(crypto_cfb_module_exit);
248 248
249MODULE_LICENSE("GPL"); 249MODULE_LICENSE("GPL");
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index ed2e12e26dd8..e38a2d61819a 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -645,8 +645,8 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
645 645
646 err = -ENAMETOOLONG; 646 err = -ENAMETOOLONG;
647 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, 647 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
648 "%s(%s,%s)", name, chacha_name, 648 "%s(%s,%s)", name, chacha->base.cra_name,
649 poly_name) >= CRYPTO_MAX_ALG_NAME) 649 poly->cra_name) >= CRYPTO_MAX_ALG_NAME)
650 goto out_drop_chacha; 650 goto out_drop_chacha;
651 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, 651 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
652 "%s(%s,%s)", name, chacha->base.cra_driver_name, 652 "%s(%s,%s)", name, chacha->base.cra_driver_name,
@@ -725,7 +725,7 @@ static void __exit chacha20poly1305_module_exit(void)
725 ARRAY_SIZE(rfc7539_tmpls)); 725 ARRAY_SIZE(rfc7539_tmpls));
726} 726}
727 727
728module_init(chacha20poly1305_module_init); 728subsys_initcall(chacha20poly1305_module_init);
729module_exit(chacha20poly1305_module_exit); 729module_exit(chacha20poly1305_module_exit);
730 730
731MODULE_LICENSE("GPL"); 731MODULE_LICENSE("GPL");
diff --git a/crypto/chacha_generic.c b/crypto/chacha_generic.c
index 35b583101f4f..d2ec04997832 100644
--- a/crypto/chacha_generic.c
+++ b/crypto/chacha_generic.c
@@ -22,18 +22,16 @@ static void chacha_docrypt(u32 *state, u8 *dst, const u8 *src,
22 /* aligned to potentially speed up crypto_xor() */ 22 /* aligned to potentially speed up crypto_xor() */
23 u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long)); 23 u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long));
24 24
25 if (dst != src)
26 memcpy(dst, src, bytes);
27
28 while (bytes >= CHACHA_BLOCK_SIZE) { 25 while (bytes >= CHACHA_BLOCK_SIZE) {
29 chacha_block(state, stream, nrounds); 26 chacha_block(state, stream, nrounds);
30 crypto_xor(dst, stream, CHACHA_BLOCK_SIZE); 27 crypto_xor_cpy(dst, src, stream, CHACHA_BLOCK_SIZE);
31 bytes -= CHACHA_BLOCK_SIZE; 28 bytes -= CHACHA_BLOCK_SIZE;
32 dst += CHACHA_BLOCK_SIZE; 29 dst += CHACHA_BLOCK_SIZE;
30 src += CHACHA_BLOCK_SIZE;
33 } 31 }
34 if (bytes) { 32 if (bytes) {
35 chacha_block(state, stream, nrounds); 33 chacha_block(state, stream, nrounds);
36 crypto_xor(dst, stream, bytes); 34 crypto_xor_cpy(dst, src, stream, bytes);
37 } 35 }
38} 36}
39 37
@@ -52,7 +50,7 @@ static int chacha_stream_xor(struct skcipher_request *req,
52 unsigned int nbytes = walk.nbytes; 50 unsigned int nbytes = walk.nbytes;
53 51
54 if (nbytes < walk.total) 52 if (nbytes < walk.total)
55 nbytes = round_down(nbytes, walk.stride); 53 nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
56 54
57 chacha_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr, 55 chacha_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
58 nbytes, ctx->nrounds); 56 nbytes, ctx->nrounds);
@@ -203,7 +201,7 @@ static void __exit chacha_generic_mod_fini(void)
203 crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); 201 crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
204} 202}
205 203
206module_init(chacha_generic_mod_init); 204subsys_initcall(chacha_generic_mod_init);
207module_exit(chacha_generic_mod_fini); 205module_exit(chacha_generic_mod_fini);
208 206
209MODULE_LICENSE("GPL"); 207MODULE_LICENSE("GPL");
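The chacha_docrypt() change fuses the removed memcpy with the xor: crypto_xor_cpy() computes dst = src ^ keystream in a single pass instead of copying first and xoring in place. A userspace sketch of the equivalence (xor_cpy is a stand-in, assuming that semantic):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static void xor_cpy(uint8_t *dst, const uint8_t *src,
                        const uint8_t *ks, size_t n)
    {
        for (size_t i = 0; i < n; i++)
            dst[i] = src[i] ^ ks[i];        /* fused copy + xor */
    }

    int main(void)
    {
        uint8_t src[8] = "ABCDEFG", ks[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        uint8_t a[8], b[8];

        xor_cpy(a, src, ks, sizeof(a));     /* new, fused form */
        memcpy(b, src, sizeof(b));          /* old form: copy ... */
        for (int i = 0; i < 8; i++)
            b[i] ^= ks[i];                  /* ... then xor in place */
        assert(memcmp(a, b, sizeof(a)) == 0);
        return 0;
    }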
diff --git a/crypto/cmac.c b/crypto/cmac.c
index 16301f52858c..c60b6c011ec6 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -313,7 +313,7 @@ static void __exit crypto_cmac_module_exit(void)
313 crypto_unregister_template(&crypto_cmac_tmpl); 313 crypto_unregister_template(&crypto_cmac_tmpl);
314} 314}
315 315
316module_init(crypto_cmac_module_init); 316subsys_initcall(crypto_cmac_module_init);
317module_exit(crypto_cmac_module_exit); 317module_exit(crypto_cmac_module_exit);
318 318
319MODULE_LICENSE("GPL"); 319MODULE_LICENSE("GPL");
diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c
index 00facd27bcc2..9e97912280bd 100644
--- a/crypto/crc32_generic.c
+++ b/crypto/crc32_generic.c
@@ -146,7 +146,7 @@ static void __exit crc32_mod_fini(void)
146 crypto_unregister_shash(&alg); 146 crypto_unregister_shash(&alg);
147} 147}
148 148
149module_init(crc32_mod_init); 149subsys_initcall(crc32_mod_init);
150module_exit(crc32_mod_fini); 150module_exit(crc32_mod_fini);
151 151
152MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>"); 152MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index 7283066ecc98..ad26f15d4c7b 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -165,7 +165,7 @@ static void __exit crc32c_mod_fini(void)
165 crypto_unregister_shash(&alg); 165 crypto_unregister_shash(&alg);
166} 166}
167 167
168module_init(crc32c_mod_init); 168subsys_initcall(crc32c_mod_init);
169module_exit(crc32c_mod_fini); 169module_exit(crc32c_mod_fini);
170 170
171MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>"); 171MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index 8e94e29dc6fc..d90c0070710e 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -65,10 +65,9 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
65 return 0; 65 return 0;
66} 66}
67 67
68static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len, 68static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
69 u8 *out)
70{ 69{
71 *(__u16 *)out = crc_t10dif_generic(*crcp, data, len); 70 *(__u16 *)out = crc_t10dif_generic(crc, data, len);
72 return 0; 71 return 0;
73} 72}
74 73
@@ -77,15 +76,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
77{ 76{
78 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); 77 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
79 78
80 return __chksum_finup(&ctx->crc, data, len, out); 79 return __chksum_finup(ctx->crc, data, len, out);
81} 80}
82 81
83static int chksum_digest(struct shash_desc *desc, const u8 *data, 82static int chksum_digest(struct shash_desc *desc, const u8 *data,
84 unsigned int length, u8 *out) 83 unsigned int length, u8 *out)
85{ 84{
86 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); 85 return __chksum_finup(0, data, length, out);
87
88 return __chksum_finup(&ctx->crc, data, length, out);
89} 86}
90 87
91static struct shash_alg alg = { 88static struct shash_alg alg = {
@@ -115,7 +112,7 @@ static void __exit crct10dif_mod_fini(void)
115 crypto_unregister_shash(&alg); 112 crypto_unregister_shash(&alg);
116} 113}
117 114
118module_init(crct10dif_mod_init); 115subsys_initcall(crct10dif_mod_init);
119module_exit(crct10dif_mod_fini); 116module_exit(crct10dif_mod_fini);
120 117
121MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>"); 118MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 5640e5db7bdb..b3bb99390ae7 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -65,15 +65,6 @@ struct aead_instance_ctx {
65 struct cryptd_queue *queue; 65 struct cryptd_queue *queue;
66}; 66};
67 67
68struct cryptd_blkcipher_ctx {
69 atomic_t refcnt;
70 struct crypto_blkcipher *child;
71};
72
73struct cryptd_blkcipher_request_ctx {
74 crypto_completion_t complete;
75};
76
77struct cryptd_skcipher_ctx { 68struct cryptd_skcipher_ctx {
78 atomic_t refcnt; 69 atomic_t refcnt;
79 struct crypto_sync_skcipher *child; 70 struct crypto_sync_skcipher *child;
@@ -216,129 +207,6 @@ static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
216 *mask |= algt->mask & CRYPTO_ALG_INTERNAL; 207 *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
217} 208}
218 209
219static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
220 const u8 *key, unsigned int keylen)
221{
222 struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
223 struct crypto_blkcipher *child = ctx->child;
224 int err;
225
226 crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
227 crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
228 CRYPTO_TFM_REQ_MASK);
229 err = crypto_blkcipher_setkey(child, key, keylen);
230 crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
231 CRYPTO_TFM_RES_MASK);
232 return err;
233}
234
235static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
236 struct crypto_blkcipher *child,
237 int err,
238 int (*crypt)(struct blkcipher_desc *desc,
239 struct scatterlist *dst,
240 struct scatterlist *src,
241 unsigned int len))
242{
243 struct cryptd_blkcipher_request_ctx *rctx;
244 struct cryptd_blkcipher_ctx *ctx;
245 struct crypto_ablkcipher *tfm;
246 struct blkcipher_desc desc;
247 int refcnt;
248
249 rctx = ablkcipher_request_ctx(req);
250
251 if (unlikely(err == -EINPROGRESS))
252 goto out;
253
254 desc.tfm = child;
255 desc.info = req->info;
256 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
257
258 err = crypt(&desc, req->dst, req->src, req->nbytes);
259
260 req->base.complete = rctx->complete;
261
262out:
263 tfm = crypto_ablkcipher_reqtfm(req);
264 ctx = crypto_ablkcipher_ctx(tfm);
265 refcnt = atomic_read(&ctx->refcnt);
266
267 local_bh_disable();
268 rctx->complete(&req->base, err);
269 local_bh_enable();
270
271 if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
272 crypto_free_ablkcipher(tfm);
273}
274
275static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
276{
277 struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
278 struct crypto_blkcipher *child = ctx->child;
279
280 cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
281 crypto_blkcipher_crt(child)->encrypt);
282}
283
284static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
285{
286 struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
287 struct crypto_blkcipher *child = ctx->child;
288
289 cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
290 crypto_blkcipher_crt(child)->decrypt);
291}
292
293static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
294 crypto_completion_t compl)
295{
296 struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
297 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
298 struct cryptd_queue *queue;
299
300 queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
301 rctx->complete = req->base.complete;
302 req->base.complete = compl;
303
304 return cryptd_enqueue_request(queue, &req->base);
305}
306
307static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
308{
309 return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
310}
311
312static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
313{
314 return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
315}
316
317static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
318{
319 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
320 struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
321 struct crypto_spawn *spawn = &ictx->spawn;
322 struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
323 struct crypto_blkcipher *cipher;
324
325 cipher = crypto_spawn_blkcipher(spawn);
326 if (IS_ERR(cipher))
327 return PTR_ERR(cipher);
328
329 ctx->child = cipher;
330 tfm->crt_ablkcipher.reqsize =
331 sizeof(struct cryptd_blkcipher_request_ctx);
332 return 0;
333}
334
335static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
336{
337 struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
338
339 crypto_free_blkcipher(ctx->child);
340}
341
342static int cryptd_init_instance(struct crypto_instance *inst, 210static int cryptd_init_instance(struct crypto_instance *inst,
343 struct crypto_alg *alg) 211 struct crypto_alg *alg)
344{ 212{
@@ -382,67 +250,6 @@ out_free_inst:
382 goto out; 250 goto out;
383} 251}
384 252
385static int cryptd_create_blkcipher(struct crypto_template *tmpl,
386 struct rtattr **tb,
387 struct cryptd_queue *queue)
388{
389 struct cryptd_instance_ctx *ctx;
390 struct crypto_instance *inst;
391 struct crypto_alg *alg;
392 u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
393 u32 mask = CRYPTO_ALG_TYPE_MASK;
394 int err;
395
396 cryptd_check_internal(tb, &type, &mask);
397
398 alg = crypto_get_attr_alg(tb, type, mask);
399 if (IS_ERR(alg))
400 return PTR_ERR(alg);
401
402 inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
403 err = PTR_ERR(inst);
404 if (IS_ERR(inst))
405 goto out_put_alg;
406
407 ctx = crypto_instance_ctx(inst);
408 ctx->queue = queue;
409
410 err = crypto_init_spawn(&ctx->spawn, alg, inst,
411 CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
412 if (err)
413 goto out_free_inst;
414
415 type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
416 if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
417 type |= CRYPTO_ALG_INTERNAL;
418 inst->alg.cra_flags = type;
419 inst->alg.cra_type = &crypto_ablkcipher_type;
420
421 inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
422 inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
423 inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
424
425 inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);
426
427 inst->alg.cra_init = cryptd_blkcipher_init_tfm;
428 inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;
429
430 inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
431 inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
432 inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
433
434 err = crypto_register_instance(tmpl, inst);
435 if (err) {
436 crypto_drop_spawn(&ctx->spawn);
437out_free_inst:
438 kfree(inst);
439 }
440
441out_put_alg:
442 crypto_mod_put(alg);
443 return err;
444}
445
446static int cryptd_skcipher_setkey(struct crypto_skcipher *parent, 253static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
447 const u8 *key, unsigned int keylen) 254 const u8 *key, unsigned int keylen)
448{ 255{
@@ -738,7 +545,6 @@ static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
738 goto out; 545 goto out;
739 546
740 desc->tfm = child; 547 desc->tfm = child;
741 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
742 548
743 err = crypto_shash_init(desc); 549 err = crypto_shash_init(desc);
744 550
@@ -830,7 +636,6 @@ static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
830 goto out; 636 goto out;
831 637
832 desc->tfm = child; 638 desc->tfm = child;
833 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
834 639
835 err = shash_ahash_digest(req, desc); 640 err = shash_ahash_digest(req, desc);
836 641
@@ -859,7 +664,6 @@ static int cryptd_hash_import(struct ahash_request *req, const void *in)
859 struct shash_desc *desc = cryptd_shash_desc(req); 664 struct shash_desc *desc = cryptd_shash_desc(req);
860 665
861 desc->tfm = ctx->child; 666 desc->tfm = ctx->child;
862 desc->flags = req->base.flags;
863 667
864 return crypto_shash_import(desc, in); 668 return crypto_shash_import(desc, in);
865} 669}
@@ -1118,10 +922,6 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
1118 922
1119 switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { 923 switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
1120 case CRYPTO_ALG_TYPE_BLKCIPHER: 924 case CRYPTO_ALG_TYPE_BLKCIPHER:
1121 if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
1122 CRYPTO_ALG_TYPE_BLKCIPHER)
1123 return cryptd_create_blkcipher(tmpl, tb, &queue);
1124
1125 return cryptd_create_skcipher(tmpl, tb, &queue); 925 return cryptd_create_skcipher(tmpl, tb, &queue);
1126 case CRYPTO_ALG_TYPE_DIGEST: 926 case CRYPTO_ALG_TYPE_DIGEST:
1127 return cryptd_create_hash(tmpl, tb, &queue); 927 return cryptd_create_hash(tmpl, tb, &queue);
@@ -1160,58 +960,6 @@ static struct crypto_template cryptd_tmpl = {
1160 .module = THIS_MODULE, 960 .module = THIS_MODULE,
1161}; 961};
1162 962
1163struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
1164 u32 type, u32 mask)
1165{
1166 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
1167 struct cryptd_blkcipher_ctx *ctx;
1168 struct crypto_tfm *tfm;
1169
1170 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
1171 "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
1172 return ERR_PTR(-EINVAL);
1173 type = crypto_skcipher_type(type);
1174 mask &= ~CRYPTO_ALG_TYPE_MASK;
1175 mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
1176 tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
1177 if (IS_ERR(tfm))
1178 return ERR_CAST(tfm);
1179 if (tfm->__crt_alg->cra_module != THIS_MODULE) {
1180 crypto_free_tfm(tfm);
1181 return ERR_PTR(-EINVAL);
1182 }
1183
1184 ctx = crypto_tfm_ctx(tfm);
1185 atomic_set(&ctx->refcnt, 1);
1186
1187 return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
1188}
1189EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
1190
1191struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
1192{
1193 struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
1194 return ctx->child;
1195}
1196EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
1197
1198bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
1199{
1200 struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
1201
1202 return atomic_read(&ctx->refcnt) - 1;
1203}
1204EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);
1205
1206void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
1207{
1208 struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
1209
1210 if (atomic_dec_and_test(&ctx->refcnt))
1211 crypto_free_ablkcipher(&tfm->base);
1212}
1213EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
1214
1215struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name, 963struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
1216 u32 type, u32 mask) 964 u32 type, u32 mask)
1217{ 965{
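With the blkcipher plumbing gone, the skcipher entry points just below are all that remain; a hedged sketch of what a former cryptd_alloc_ablkcipher() caller would do instead (the algorithm name here is hypothetical):

    #include <crypto/cryptd.h>
    #include <linux/crypto.h>
    #include <linux/err.h>

    static int example_use_cryptd(void)
    {
        struct cryptd_skcipher *tfm;

        tfm = cryptd_alloc_skcipher("__ecb-aes-example",
                                    CRYPTO_ALG_INTERNAL,
                                    CRYPTO_ALG_INTERNAL);
        if (IS_ERR(tfm))
            return PTR_ERR(tfm);

        /* ... drive it through the crypto_skcipher API on &tfm->base ... */

        cryptd_free_skcipher(tfm);
        return 0;
    }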
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 01630a9c7e01..9320d4eaa4a8 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -220,7 +220,7 @@ static void __exit crypto_null_mod_fini(void)
220 crypto_unregister_skcipher(&skcipher_null); 220 crypto_unregister_skcipher(&skcipher_null);
221} 221}
222 222
223module_init(crypto_null_mod_init); 223subsys_initcall(crypto_null_mod_init);
224module_exit(crypto_null_mod_fini); 224module_exit(crypto_null_mod_fini);
225 225
226MODULE_LICENSE("GPL"); 226MODULE_LICENSE("GPL");
diff --git a/crypto/ctr.c b/crypto/ctr.c
index ec8f8b67473a..52cdf2c5605f 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -384,7 +384,7 @@ static void __exit crypto_ctr_module_exit(void)
384 ARRAY_SIZE(crypto_ctr_tmpls)); 384 ARRAY_SIZE(crypto_ctr_tmpls));
385} 385}
386 386
387module_init(crypto_ctr_module_init); 387subsys_initcall(crypto_ctr_module_init);
388module_exit(crypto_ctr_module_exit); 388module_exit(crypto_ctr_module_exit);
389 389
390MODULE_LICENSE("GPL"); 390MODULE_LICENSE("GPL");
diff --git a/crypto/cts.c b/crypto/cts.c
index 4e28d83ae37d..6b6087dbb62a 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -152,12 +152,14 @@ static int crypto_cts_encrypt(struct skcipher_request *req)
152 struct skcipher_request *subreq = &rctx->subreq; 152 struct skcipher_request *subreq = &rctx->subreq;
153 int bsize = crypto_skcipher_blocksize(tfm); 153 int bsize = crypto_skcipher_blocksize(tfm);
154 unsigned int nbytes = req->cryptlen; 154 unsigned int nbytes = req->cryptlen;
155 int cbc_blocks = (nbytes + bsize - 1) / bsize - 1;
156 unsigned int offset; 155 unsigned int offset;
157 156
158 skcipher_request_set_tfm(subreq, ctx->child); 157 skcipher_request_set_tfm(subreq, ctx->child);
159 158
160 if (cbc_blocks <= 0) { 159 if (nbytes < bsize)
160 return -EINVAL;
161
162 if (nbytes == bsize) {
161 skcipher_request_set_callback(subreq, req->base.flags, 163 skcipher_request_set_callback(subreq, req->base.flags,
162 req->base.complete, 164 req->base.complete,
163 req->base.data); 165 req->base.data);
@@ -166,7 +168,7 @@ static int crypto_cts_encrypt(struct skcipher_request *req)
166 return crypto_skcipher_encrypt(subreq); 168 return crypto_skcipher_encrypt(subreq);
167 } 169 }
168 170
169 offset = cbc_blocks * bsize; 171 offset = rounddown(nbytes - 1, bsize);
170 rctx->offset = offset; 172 rctx->offset = offset;
171 173
172 skcipher_request_set_callback(subreq, req->base.flags, 174 skcipher_request_set_callback(subreq, req->base.flags,
@@ -244,13 +246,15 @@ static int crypto_cts_decrypt(struct skcipher_request *req)
244 struct skcipher_request *subreq = &rctx->subreq; 246 struct skcipher_request *subreq = &rctx->subreq;
245 int bsize = crypto_skcipher_blocksize(tfm); 247 int bsize = crypto_skcipher_blocksize(tfm);
246 unsigned int nbytes = req->cryptlen; 248 unsigned int nbytes = req->cryptlen;
247 int cbc_blocks = (nbytes + bsize - 1) / bsize - 1;
248 unsigned int offset; 249 unsigned int offset;
249 u8 *space; 250 u8 *space;
250 251
251 skcipher_request_set_tfm(subreq, ctx->child); 252 skcipher_request_set_tfm(subreq, ctx->child);
252 253
253 if (cbc_blocks <= 0) { 254 if (nbytes < bsize)
255 return -EINVAL;
256
257 if (nbytes == bsize) {
254 skcipher_request_set_callback(subreq, req->base.flags, 258 skcipher_request_set_callback(subreq, req->base.flags,
255 req->base.complete, 259 req->base.complete,
256 req->base.data); 260 req->base.data);
@@ -264,10 +268,10 @@ static int crypto_cts_decrypt(struct skcipher_request *req)
264 268
265 space = crypto_cts_reqctx_space(req); 269 space = crypto_cts_reqctx_space(req);
266 270
267 offset = cbc_blocks * bsize; 271 offset = rounddown(nbytes - 1, bsize);
268 rctx->offset = offset; 272 rctx->offset = offset;
269 273
270 if (cbc_blocks <= 1) 274 if (offset <= bsize)
271 memcpy(space, req->iv, bsize); 275 memcpy(space, req->iv, bsize);
272 else 276 else
273 scatterwalk_map_and_copy(space, req->src, offset - 2 * bsize, 277 scatterwalk_map_and_copy(space, req->src, offset - 2 * bsize,
@@ -419,7 +423,7 @@ static void __exit crypto_cts_module_exit(void)
419 crypto_unregister_template(&crypto_cts_tmpl); 423 crypto_unregister_template(&crypto_cts_tmpl);
420} 424}
421 425
422module_init(crypto_cts_module_init); 426subsys_initcall(crypto_cts_module_init);
423module_exit(crypto_cts_module_exit); 427module_exit(crypto_cts_module_exit);
424 428
425MODULE_LICENSE("Dual BSD/GPL"); 429MODULE_LICENSE("Dual BSD/GPL");
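The cts rewrite swaps the signed cbc_blocks computation for rounddown(nbytes - 1, bsize). For nbytes > bsize the two produce the same offset, since ceil(n/b) - 1 equals floor((n-1)/b), while nbytes < bsize now fails with -EINVAL instead of silently taking the single-block path. A quick check of the arithmetic (userspace C):

    #include <assert.h>

    #define rounddown(x, y) ((x) - ((x) % (y)))

    int main(void)
    {
        const unsigned int bsize = 16;

        /* e.g. nbytes = 100: old (100 + 15)/16 - 1 = 6 blocks -> 96;
         * new rounddown(99, 16) = 96. */
        for (unsigned int nbytes = bsize + 1; nbytes <= 4096; nbytes++) {
            int cbc_blocks = (nbytes + bsize - 1) / bsize - 1;

            assert((unsigned int)cbc_blocks * bsize ==
                   rounddown(nbytes - 1, bsize));
        }
        return 0;
    }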
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 94ec3b36a8e8..aab089cde1bf 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -334,7 +334,7 @@ static void __exit deflate_mod_fini(void)
334 crypto_unregister_scomps(scomp, ARRAY_SIZE(scomp)); 334 crypto_unregister_scomps(scomp, ARRAY_SIZE(scomp));
335} 335}
336 336
337module_init(deflate_mod_init); 337subsys_initcall(deflate_mod_init);
338module_exit(deflate_mod_fini); 338module_exit(deflate_mod_fini);
339 339
340MODULE_LICENSE("GPL"); 340MODULE_LICENSE("GPL");
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index 1e6621665dd9..d7a88b4fa611 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -862,14 +862,11 @@ static void des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
862int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key, 862int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key,
863 unsigned int keylen) 863 unsigned int keylen)
864{ 864{
865 const u32 *K = (const u32 *)key; 865 int err;
866 866
867 if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || 867 err = __des3_verify_key(flags, key);
868 !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && 868 if (unlikely(err))
869 (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { 869 return err;
870 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
871 return -EINVAL;
872 }
873 870
874 des_ekey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE; 871 des_ekey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
875 dkey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE; 872 dkey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
@@ -993,7 +990,7 @@ static void __exit des_generic_mod_fini(void)
993 crypto_unregister_algs(des_algs, ARRAY_SIZE(des_algs)); 990 crypto_unregister_algs(des_algs, ARRAY_SIZE(des_algs));
994} 991}
995 992
996module_init(des_generic_mod_init); 993subsys_initcall(des_generic_mod_init);
997module_exit(des_generic_mod_fini); 994module_exit(des_generic_mod_fini);
998 995
999MODULE_LICENSE("GPL"); 996MODULE_LICENSE("GPL");
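The dropped open-coded test rejected keys whose halves collapse 3DES to a weaker construction, K1 == K2 or K2 == K3; __des3_verify_key() is assumed to centralize that same comparison plus the FIPS policy mentioned in the pull text. A sketch of just the comparison:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* A 3DES key is three 8-byte DES keys K1 | K2 | K3. */
    static bool des3_key_is_weak(const uint8_t *key)
    {
        return memcmp(key, key + 8, 8) == 0 ||      /* K1 == K2 */
               memcmp(key + 8, key + 16, 8) == 0;   /* K2 == K3 */
    }

    int main(void)
    {
        uint8_t k[24] = { 0 };

        memcpy(k + 8,  "distinct", 8);
        memcpy(k + 16, "another!", 8);
        assert(!des3_key_is_weak(k));

        memcpy(k + 16, k + 8, 8);       /* force K2 == K3 */
        assert(des3_key_is_weak(k));
        return 0;
    }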
diff --git a/crypto/dh.c b/crypto/dh.c
index 09a44de4209d..ce77fb4ee8b3 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -236,7 +236,7 @@ static void dh_exit(void)
236 crypto_unregister_kpp(&dh); 236 crypto_unregister_kpp(&dh);
237} 237}
238 238
239module_init(dh_init); 239subsys_initcall(dh_init);
240module_exit(dh_exit); 240module_exit(dh_exit);
241MODULE_ALIAS_CRYPTO("dh"); 241MODULE_ALIAS_CRYPTO("dh");
242MODULE_LICENSE("GPL"); 242MODULE_LICENSE("GPL");
diff --git a/crypto/drbg.c b/crypto/drbg.c
index bc52d9562611..2a5b16bb000c 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1587,7 +1587,6 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg)
1587 } 1587 }
1588 1588
1589 sdesc->shash.tfm = tfm; 1589 sdesc->shash.tfm = tfm;
1590 sdesc->shash.flags = 0;
1591 drbg->priv_data = sdesc; 1590 drbg->priv_data = sdesc;
1592 1591
1593 return crypto_shash_alignmask(tfm); 1592 return crypto_shash_alignmask(tfm);
@@ -2039,7 +2038,7 @@ static void __exit drbg_exit(void)
2039 crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2)); 2038 crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
2040} 2039}
2041 2040
2042module_init(drbg_init); 2041subsys_initcall(drbg_init);
2043module_exit(drbg_exit); 2042module_exit(drbg_exit);
2044#ifndef CRYPTO_DRBG_HASH_STRING 2043#ifndef CRYPTO_DRBG_HASH_STRING
2045#define CRYPTO_DRBG_HASH_STRING "" 2044#define CRYPTO_DRBG_HASH_STRING ""
diff --git a/crypto/ecb.c b/crypto/ecb.c
index 0732715c8d91..de839129d151 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -101,7 +101,7 @@ static void __exit crypto_ecb_module_exit(void)
101 crypto_unregister_template(&crypto_ecb_tmpl); 101 crypto_unregister_template(&crypto_ecb_tmpl);
102} 102}
103 103
104module_init(crypto_ecb_module_init); 104subsys_initcall(crypto_ecb_module_init);
105module_exit(crypto_ecb_module_exit); 105module_exit(crypto_ecb_module_exit);
106 106
107MODULE_LICENSE("GPL"); 107MODULE_LICENSE("GPL");
diff --git a/crypto/ecc.c b/crypto/ecc.c
index ed1237115066..dfe114bc0c4a 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (c) 2013, Kenneth MacKay 2 * Copyright (c) 2013, 2014 Kenneth MacKay. All rights reserved.
3 * All rights reserved. 3 * Copyright (c) 2019 Vitaly Chikunov <vt@altlinux.org>
4 * 4 *
5 * Redistribution and use in source and binary forms, with or without 5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are 6 * modification, are permitted provided that the following conditions are
@@ -24,12 +24,15 @@
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */ 25 */
26 26
27#include <linux/module.h>
27#include <linux/random.h> 28#include <linux/random.h>
28#include <linux/slab.h> 29#include <linux/slab.h>
29#include <linux/swab.h> 30#include <linux/swab.h>
30#include <linux/fips.h> 31#include <linux/fips.h>
31#include <crypto/ecdh.h> 32#include <crypto/ecdh.h>
32#include <crypto/rng.h> 33#include <crypto/rng.h>
34#include <asm/unaligned.h>
35#include <linux/ratelimit.h>
33 36
34#include "ecc.h" 37#include "ecc.h"
35#include "ecc_curve_defs.h" 38#include "ecc_curve_defs.h"
@@ -112,7 +115,7 @@ static void vli_clear(u64 *vli, unsigned int ndigits)
112} 115}
113 116
114/* Returns true if vli == 0, false otherwise. */ 117/* Returns true if vli == 0, false otherwise. */
115static bool vli_is_zero(const u64 *vli, unsigned int ndigits) 118bool vli_is_zero(const u64 *vli, unsigned int ndigits)
116{ 119{
117 int i; 120 int i;
118 121
@@ -123,6 +126,7 @@ static bool vli_is_zero(const u64 *vli, unsigned int ndigits)
123 126
124 return true; 127 return true;
125} 128}
129EXPORT_SYMBOL(vli_is_zero);
126 130
127/* Returns nonzero if bit bit of vli is set. */ 131/* Returns nonzero if bit bit of vli is set. */
128static u64 vli_test_bit(const u64 *vli, unsigned int bit) 132static u64 vli_test_bit(const u64 *vli, unsigned int bit)
@@ -130,6 +134,11 @@ static u64 vli_test_bit(const u64 *vli, unsigned int bit)
130 return (vli[bit / 64] & ((u64)1 << (bit % 64))); 134 return (vli[bit / 64] & ((u64)1 << (bit % 64)));
131} 135}
132 136
137static bool vli_is_negative(const u64 *vli, unsigned int ndigits)
138{
139 return vli_test_bit(vli, ndigits * 64 - 1);
140}
141
133/* Counts the number of 64-bit "digits" in vli. */ 142/* Counts the number of 64-bit "digits" in vli. */
134static unsigned int vli_num_digits(const u64 *vli, unsigned int ndigits) 143static unsigned int vli_num_digits(const u64 *vli, unsigned int ndigits)
135{ 144{
@@ -161,6 +170,27 @@ static unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits)
161 return ((num_digits - 1) * 64 + i); 170 return ((num_digits - 1) * 64 + i);
162} 171}
163 172
173/* Set dest from unaligned bit string src. */
174void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits)
175{
176 int i;
177 const u64 *from = src;
178
179 for (i = 0; i < ndigits; i++)
180 dest[i] = get_unaligned_be64(&from[ndigits - 1 - i]);
181}
182EXPORT_SYMBOL(vli_from_be64);
183
184void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits)
185{
186 int i;
187 const u64 *from = src;
188
189 for (i = 0; i < ndigits; i++)
190 dest[i] = get_unaligned_le64(&from[i]);
191}
192EXPORT_SYMBOL(vli_from_le64);
193
164/* Sets dest = src. */ 194/* Sets dest = src. */
165static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits) 195static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits)
166{ 196{
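vli_from_be64() above maps a big-endian byte string onto digits with digit 0 least significant, which is why it reads from[ndigits - 1 - i]. A userspace sketch of that digit ordering:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t get_be64(const uint8_t *p)      /* unaligned BE load */
    {
        uint64_t v = 0;

        for (int i = 0; i < 8; i++)
            v = (v << 8) | p[i];
        return v;
    }

    static void from_be64(uint64_t *dest, const uint8_t *src,
                          unsigned int ndigits)
    {
        for (unsigned int i = 0; i < ndigits; i++)
            dest[i] = get_be64(src + 8 * (ndigits - 1 - i));
    }

    int main(void)
    {
        uint8_t buf[16];                /* BE value 0x0102...0f10 */
        uint64_t d[2];

        for (int i = 0; i < 16; i++)
            buf[i] = i + 1;
        from_be64(d, buf, 2);
        assert(d[1] == 0x0102030405060708ull);  /* most significant digit */
        assert(d[0] == 0x090a0b0c0d0e0f10ull);  /* least significant digit */
        return 0;
    }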
@@ -171,7 +201,7 @@ static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits)
171} 201}
172 202
173/* Returns sign of left - right. */ 203/* Returns sign of left - right. */
174static int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits) 204int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
175{ 205{
176 int i; 206 int i;
177 207
@@ -184,6 +214,7 @@ static int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
184 214
185 return 0; 215 return 0;
186} 216}
217EXPORT_SYMBOL(vli_cmp);
187 218
188/* Computes result = in << c, returning carry. Can modify in place 219/* Computes result = in << c, returning carry. Can modify in place
189 * (if result == in). 0 < shift < 64. 220 * (if result == in). 0 < shift < 64.
@@ -239,8 +270,30 @@ static u64 vli_add(u64 *result, const u64 *left, const u64 *right,
239 return carry; 270 return carry;
240} 271}
241 272
273/* Computes result = left + right, returning carry. Can modify in place. */
274static u64 vli_uadd(u64 *result, const u64 *left, u64 right,
275 unsigned int ndigits)
276{
277 u64 carry = right;
278 int i;
279
280 for (i = 0; i < ndigits; i++) {
281 u64 sum;
282
283 sum = left[i] + carry;
284 if (sum != left[i])
285 carry = (sum < left[i]);
286 else
287 carry = !!carry;
288
289 result[i] = sum;
290 }
291
292 return carry;
293}
294
242/* Computes result = left - right, returning borrow. Can modify in place. */ 295/* Computes result = left - right, returning borrow. Can modify in place. */
243static u64 vli_sub(u64 *result, const u64 *left, const u64 *right, 296u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
244 unsigned int ndigits) 297 unsigned int ndigits)
245{ 298{
246 u64 borrow = 0; 299 u64 borrow = 0;
@@ -258,9 +311,37 @@ static u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
258 311
259 return borrow; 312 return borrow;
260} 313}
314EXPORT_SYMBOL(vli_sub);
315
316/* Computes result = left - right, returning borrow. Can modify in place. */
317static u64 vli_usub(u64 *result, const u64 *left, u64 right,
318 unsigned int ndigits)
319{
320 u64 borrow = right;
321 int i;
322
323 for (i = 0; i < ndigits; i++) {
324 u64 diff;
325
326 diff = left[i] - borrow;
327 if (diff != left[i])
328 borrow = (diff > left[i]);
329
330 result[i] = diff;
331 }
332
333 return borrow;
334}
261 335
262static uint128_t mul_64_64(u64 left, u64 right) 336static uint128_t mul_64_64(u64 left, u64 right)
263{ 337{
338 uint128_t result;
339#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
340 unsigned __int128 m = (unsigned __int128)left * right;
341
342 result.m_low = m;
343 result.m_high = m >> 64;
344#else
264 u64 a0 = left & 0xffffffffull; 345 u64 a0 = left & 0xffffffffull;
265 u64 a1 = left >> 32; 346 u64 a1 = left >> 32;
266 u64 b0 = right & 0xffffffffull; 347 u64 b0 = right & 0xffffffffull;
@@ -269,7 +350,6 @@ static uint128_t mul_64_64(u64 left, u64 right)
269 u64 m1 = a0 * b1; 350 u64 m1 = a0 * b1;
270 u64 m2 = a1 * b0; 351 u64 m2 = a1 * b0;
271 u64 m3 = a1 * b1; 352 u64 m3 = a1 * b1;
272 uint128_t result;
273 353
274 m2 += (m0 >> 32); 354 m2 += (m0 >> 32);
275 m2 += m1; 355 m2 += m1;
@@ -280,7 +360,7 @@ static uint128_t mul_64_64(u64 left, u64 right)
280 360
281 result.m_low = (m0 & 0xffffffffull) | (m2 << 32); 361 result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
282 result.m_high = m3 + (m2 >> 32); 362 result.m_high = m3 + (m2 >> 32);
283 363#endif
284 return result; 364 return result;
285} 365}
286 366
@@ -330,6 +410,28 @@ static void vli_mult(u64 *result, const u64 *left, const u64 *right,
330 result[ndigits * 2 - 1] = r01.m_low; 410 result[ndigits * 2 - 1] = r01.m_low;
331} 411}
332 412
413/* Compute product = left * right, for a small right value. */
414static void vli_umult(u64 *result, const u64 *left, u32 right,
415 unsigned int ndigits)
416{
417 uint128_t r01 = { 0 };
418 unsigned int k;
419
420 for (k = 0; k < ndigits; k++) {
421 uint128_t product;
422
423 product = mul_64_64(left[k], right);
424 r01 = add_128_128(r01, product);
425 /* no carry */
426 result[k] = r01.m_low;
427 r01.m_low = r01.m_high;
428 r01.m_high = 0;
429 }
430 result[k] = r01.m_low;
431 for (++k; k < ndigits * 2; k++)
432 result[k] = 0;
433}
434
333static void vli_square(u64 *result, const u64 *left, unsigned int ndigits) 435static void vli_square(u64 *result, const u64 *left, unsigned int ndigits)
334{ 436{
335 uint128_t r01 = { 0, 0 }; 437 uint128_t r01 = { 0, 0 };
@@ -402,6 +504,170 @@ static void vli_mod_sub(u64 *result, const u64 *left, const u64 *right,
402 vli_add(result, result, mod, ndigits); 504 vli_add(result, result, mod, ndigits);
403} 505}
404 506
507/*
508 * Computes result = product % mod
509 * for special form moduli: p = 2^k-c, for small c (note the minus sign)
510 *
511 * References:
512 * R. Crandall, C. Pomerance. Prime Numbers: A Computational Perspective.
513 * 9 Fast Algorithms for Large-Integer Arithmetic. 9.2.3 Moduli of special form
514 * Algorithm 9.2.13 (Fast mod operation for special-form moduli).
515 */
516static void vli_mmod_special(u64 *result, const u64 *product,
517 const u64 *mod, unsigned int ndigits)
518{
519 u64 c = -mod[0];
520 u64 t[ECC_MAX_DIGITS * 2];
521 u64 r[ECC_MAX_DIGITS * 2];
522
523 vli_set(r, product, ndigits * 2);
524 while (!vli_is_zero(r + ndigits, ndigits)) {
525 vli_umult(t, r + ndigits, c, ndigits);
526 vli_clear(r + ndigits, ndigits);
527 vli_add(r, r, t, ndigits * 2);
528 }
529 vli_set(t, mod, ndigits);
530 vli_clear(t + ndigits, ndigits);
531 while (vli_cmp(r, t, ndigits * 2) >= 0)
532 vli_sub(r, r, t, ndigits * 2);
533 vli_set(result, r, ndigits);
534}
535
536/*
537 * Computes result = product % mod
538 * for special form moduli: p = 2^{k-1}+c, for small c (note the plus sign)
 539 * where k-1 misses a qword boundary by one bit (such as 255).
 540 *
541 * References (loosely based on):
542 * A. Menezes, P. van Oorschot, S. Vanstone. Handbook of Applied Cryptography.
543 * 14.3.4 Reduction methods for moduli of special form. Algorithm 14.47.
544 * URL: http://cacr.uwaterloo.ca/hac/about/chap14.pdf
545 *
546 * H. Cohen, G. Frey, R. Avanzi, C. Doche, T. Lange, K. Nguyen, F. Vercauteren.
547 * Handbook of Elliptic and Hyperelliptic Curve Cryptography.
548 * Algorithm 10.25 Fast reduction for special form moduli
549 */
550static void vli_mmod_special2(u64 *result, const u64 *product,
551 const u64 *mod, unsigned int ndigits)
552{
553 u64 c2 = mod[0] * 2;
554 u64 q[ECC_MAX_DIGITS];
555 u64 r[ECC_MAX_DIGITS * 2];
556 u64 m[ECC_MAX_DIGITS * 2]; /* expanded mod */
557 int carry; /* last bit that doesn't fit into q */
558 int i;
559
560 vli_set(m, mod, ndigits);
561 vli_clear(m + ndigits, ndigits);
562
563 vli_set(r, product, ndigits);
564 /* q and carry are top bits */
565 vli_set(q, product + ndigits, ndigits);
566 vli_clear(r + ndigits, ndigits);
567 carry = vli_is_negative(r, ndigits);
568 if (carry)
569 r[ndigits - 1] &= (1ull << 63) - 1;
570 for (i = 1; carry || !vli_is_zero(q, ndigits); i++) {
571 u64 qc[ECC_MAX_DIGITS * 2];
572
573 vli_umult(qc, q, c2, ndigits);
574 if (carry)
575 vli_uadd(qc, qc, mod[0], ndigits * 2);
576 vli_set(q, qc + ndigits, ndigits);
577 vli_clear(qc + ndigits, ndigits);
578 carry = vli_is_negative(qc, ndigits);
579 if (carry)
580 qc[ndigits - 1] &= (1ull << 63) - 1;
581 if (i & 1)
582 vli_sub(r, r, qc, ndigits * 2);
583 else
584 vli_add(r, r, qc, ndigits * 2);
585 }
586 while (vli_is_negative(r, ndigits * 2))
587 vli_add(r, r, m, ndigits * 2);
588 while (vli_cmp(r, m, ndigits * 2) >= 0)
589 vli_sub(r, r, m, ndigits * 2);
590
591 vli_set(result, r, ndigits);
592}
593
594/*
595 * Computes result = product % mod, where product is 2N words long.
596 * Reference: Ken MacKay's micro-ecc.
597 * Currently only designed to work for curve_p or curve_n.
598 */
599static void vli_mmod_slow(u64 *result, u64 *product, const u64 *mod,
600 unsigned int ndigits)
601{
602 u64 mod_m[2 * ECC_MAX_DIGITS];
603 u64 tmp[2 * ECC_MAX_DIGITS];
604 u64 *v[2] = { tmp, product };
605 u64 carry = 0;
606 unsigned int i;
607 /* Shift mod so its highest set bit is at the maximum position. */
608 int shift = (ndigits * 2 * 64) - vli_num_bits(mod, ndigits);
609 int word_shift = shift / 64;
610 int bit_shift = shift % 64;
611
612 vli_clear(mod_m, word_shift);
613 if (bit_shift > 0) {
614 for (i = 0; i < ndigits; ++i) {
615 mod_m[word_shift + i] = (mod[i] << bit_shift) | carry;
616 carry = mod[i] >> (64 - bit_shift);
617 }
618 } else
619 vli_set(mod_m + word_shift, mod, ndigits);
620
621 for (i = 1; shift >= 0; --shift) {
622 u64 borrow = 0;
623 unsigned int j;
624
625 for (j = 0; j < ndigits * 2; ++j) {
626 u64 diff = v[i][j] - mod_m[j] - borrow;
627
628 if (diff != v[i][j])
629 borrow = (diff > v[i][j]);
630 v[1 - i][j] = diff;
631 }
632 i = !(i ^ borrow); /* Swap the index if there was no borrow */
633 vli_rshift1(mod_m, ndigits);
634 mod_m[ndigits - 1] |= mod_m[ndigits] << (64 - 1);
635 vli_rshift1(mod_m + ndigits, ndigits);
636 }
637 vli_set(result, v[i], ndigits);
638}
639
640/* Computes result = product % mod using Barrett's reduction with precomputed
 641 * value mu appended to the mod after ndigits; mu = (2^{2w} / mod) has
 642 * length ndigits + 1, and mu * (2^w - 1) must not overflow the ndigits
 643 * boundary.
644 *
645 * Reference:
646 * R. Brent, P. Zimmermann. Modern Computer Arithmetic. 2010.
647 * 2.4.1 Barrett's algorithm. Algorithm 2.5.
648 */
649static void vli_mmod_barrett(u64 *result, u64 *product, const u64 *mod,
650 unsigned int ndigits)
651{
652 u64 q[ECC_MAX_DIGITS * 2];
653 u64 r[ECC_MAX_DIGITS * 2];
654 const u64 *mu = mod + ndigits;
655
656 vli_mult(q, product + ndigits, mu, ndigits);
657 if (mu[ndigits])
658 vli_add(q + ndigits, q + ndigits, product + ndigits, ndigits);
659 vli_mult(r, mod, q + ndigits, ndigits);
660 vli_sub(r, product, r, ndigits * 2);
661 while (!vli_is_zero(r + ndigits, ndigits) ||
662 vli_cmp(r, mod, ndigits) != -1) {
663 u64 carry;
664
665 carry = vli_sub(r, r, mod, ndigits);
666 vli_usub(r + ndigits, r + ndigits, carry, ndigits);
667 }
668 vli_set(result, r, ndigits);
669}
670
405/* Computes p_result = p_product % curve_p. 671/* Computes p_result = p_product % curve_p.
406 * See algorithm 5 and 6 from 672 * See algorithm 5 and 6 from
407 * http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf 673 * http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf
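The trick behind vli_mmod_special(): for p = 2^k - c, 2^k is congruent to c (mod p), so a double-width value a_hi*2^k + a_lo folds to a_hi*c + a_lo without changing its residue, until the high half is zero and plain subtraction finishes the job; vli_mmod_barrett() instead estimates the quotient from the precomputed mu described in its comment. An 8-bit analogue of the folding (userspace C, p = 2^8 - 5):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t mod_special(uint32_t x)     /* x < 2^16 */
    {
        const uint32_t p = 251, c = 5;          /* p = 2^8 - c */

        while (x >> 8)                          /* fold high byte down */
            x = (x >> 8) * c + (x & 0xff);
        while (x >= p)                          /* final subtraction */
            x -= p;
        return x;
    }

    int main(void)
    {
        for (uint32_t x = 0; x < 1u << 16; x++)
            assert(mod_special(x) == x % 251);
        return 0;
    }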
@@ -509,14 +775,33 @@ static void vli_mmod_fast_256(u64 *result, const u64 *product,
509 } 775 }
510} 776}
511 777
512/* Computes result = product % curve_prime 778/* Computes result = product % curve_prime for different curve_primes.
513 * from http://www.nsa.gov/ia/_files/nist-routines.pdf 779 *
514*/ 780 * Note that curve_primes are distinguished only by a heuristic check,
 781 * not by a complete conformance check.
782 */
515static bool vli_mmod_fast(u64 *result, u64 *product, 783static bool vli_mmod_fast(u64 *result, u64 *product,
516 const u64 *curve_prime, unsigned int ndigits) 784 const u64 *curve_prime, unsigned int ndigits)
517{ 785{
518 u64 tmp[2 * ECC_MAX_DIGITS]; 786 u64 tmp[2 * ECC_MAX_DIGITS];
519 787
788 /* Currently, both NIST primes have -1 in lowest qword. */
789 if (curve_prime[0] != -1ull) {
 790 /* Try to handle Pseudo-Mersenne primes. */
791 if (curve_prime[ndigits - 1] == -1ull) {
792 vli_mmod_special(result, product, curve_prime,
793 ndigits);
794 return true;
795 } else if (curve_prime[ndigits - 1] == 1ull << 63 &&
796 curve_prime[ndigits - 2] == 0) {
797 vli_mmod_special2(result, product, curve_prime,
798 ndigits);
799 return true;
800 }
801 vli_mmod_barrett(result, product, curve_prime, ndigits);
802 return true;
803 }
804
520 switch (ndigits) { 805 switch (ndigits) {
521 case 3: 806 case 3:
522 vli_mmod_fast_192(result, product, curve_prime, tmp); 807 vli_mmod_fast_192(result, product, curve_prime, tmp);
@@ -525,13 +810,26 @@ static bool vli_mmod_fast(u64 *result, u64 *product,
525 vli_mmod_fast_256(result, product, curve_prime, tmp); 810 vli_mmod_fast_256(result, product, curve_prime, tmp);
526 break; 811 break;
527 default: 812 default:
528 pr_err("unsupports digits size!\n"); 813 pr_err_ratelimited("ecc: unsupported digits size!\n");
529 return false; 814 return false;
530 } 815 }
531 816
532 return true; 817 return true;
533} 818}
534 819
820/* Computes result = (left * right) % mod.
 821 * Assumes that mod is big enough (e.g., a curve group order).
822 */
823void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
824 const u64 *mod, unsigned int ndigits)
825{
826 u64 product[ECC_MAX_DIGITS * 2];
827
828 vli_mult(product, left, right, ndigits);
829 vli_mmod_slow(result, product, mod, ndigits);
830}
831EXPORT_SYMBOL(vli_mod_mult_slow);
832
535/* Computes result = (left * right) % curve_prime. */ 833/* Computes result = (left * right) % curve_prime. */
536static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right, 834static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
537 const u64 *curve_prime, unsigned int ndigits) 835 const u64 *curve_prime, unsigned int ndigits)
@@ -557,7 +855,7 @@ static void vli_mod_square_fast(u64 *result, const u64 *left,
557 * See "From Euclid's GCD to Montgomery Multiplication to the Great Divide" 855 * See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
558 * https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf 856 * https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf
559 */ 857 */
560static void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod, 858void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
561 unsigned int ndigits) 859 unsigned int ndigits)
562{ 860{
563 u64 a[ECC_MAX_DIGITS], b[ECC_MAX_DIGITS]; 861 u64 a[ECC_MAX_DIGITS], b[ECC_MAX_DIGITS];
@@ -630,6 +928,7 @@ static void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
630 928
631 vli_set(result, u, ndigits); 929 vli_set(result, u, ndigits);
632} 930}
931EXPORT_SYMBOL(vli_mod_inv);
633 932
634/* ------ Point operations ------ */ 933/* ------ Point operations ------ */
635 934
@@ -903,6 +1202,85 @@ static void ecc_point_mult(struct ecc_point *result,
903 vli_set(result->y, ry[0], ndigits); 1202 vli_set(result->y, ry[0], ndigits);
904} 1203}
905 1204
1205/* Computes R = P + Q mod p */
1206static void ecc_point_add(const struct ecc_point *result,
1207 const struct ecc_point *p, const struct ecc_point *q,
1208 const struct ecc_curve *curve)
1209{
1210 u64 z[ECC_MAX_DIGITS];
1211 u64 px[ECC_MAX_DIGITS];
1212 u64 py[ECC_MAX_DIGITS];
1213 unsigned int ndigits = curve->g.ndigits;
1214
1215 vli_set(result->x, q->x, ndigits);
1216 vli_set(result->y, q->y, ndigits);
1217 vli_mod_sub(z, result->x, p->x, curve->p, ndigits);
1218 vli_set(px, p->x, ndigits);
1219 vli_set(py, p->y, ndigits);
1220 xycz_add(px, py, result->x, result->y, curve->p, ndigits);
1221 vli_mod_inv(z, z, curve->p, ndigits);
1222 apply_z(result->x, result->y, z, curve->p, ndigits);
1223}
1224
1225/* Computes R = u1P + u2Q mod p using Shamir's trick.
1226 * Based on: Kenneth MacKay's micro-ecc (2014).
1227 */
1228void ecc_point_mult_shamir(const struct ecc_point *result,
1229 const u64 *u1, const struct ecc_point *p,
1230 const u64 *u2, const struct ecc_point *q,
1231 const struct ecc_curve *curve)
1232{
1233 u64 z[ECC_MAX_DIGITS];
1234 u64 sump[2][ECC_MAX_DIGITS];
1235 u64 *rx = result->x;
1236 u64 *ry = result->y;
1237 unsigned int ndigits = curve->g.ndigits;
1238 unsigned int num_bits;
1239 struct ecc_point sum = ECC_POINT_INIT(sump[0], sump[1], ndigits);
1240 const struct ecc_point *points[4];
1241 const struct ecc_point *point;
1242 unsigned int idx;
1243 int i;
1244
1245 ecc_point_add(&sum, p, q, curve);
1246 points[0] = NULL;
1247 points[1] = p;
1248 points[2] = q;
1249 points[3] = &sum;
1250
1251 num_bits = max(vli_num_bits(u1, ndigits),
1252 vli_num_bits(u2, ndigits));
1253 i = num_bits - 1;
1254 idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
1255 point = points[idx];
1256
1257 vli_set(rx, point->x, ndigits);
1258 vli_set(ry, point->y, ndigits);
1259 vli_clear(z + 1, ndigits - 1);
1260 z[0] = 1;
1261
1262 for (--i; i >= 0; i--) {
1263 ecc_point_double_jacobian(rx, ry, z, curve->p, ndigits);
1264 idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
1265 point = points[idx];
1266 if (point) {
1267 u64 tx[ECC_MAX_DIGITS];
1268 u64 ty[ECC_MAX_DIGITS];
1269 u64 tz[ECC_MAX_DIGITS];
1270
1271 vli_set(tx, point->x, ndigits);
1272 vli_set(ty, point->y, ndigits);
1273 apply_z(tx, ty, z, curve->p, ndigits);
1274 vli_mod_sub(tz, rx, tx, curve->p, ndigits);
1275 xycz_add(tx, ty, rx, ry, curve->p, ndigits);
1276 vli_mod_mult_fast(z, z, tz, curve->p, ndigits);
1277 }
1278 }
1279 vli_mod_inv(z, z, curve->p, ndigits);
1280 apply_z(rx, ry, z, curve->p, ndigits);
1281}
1282EXPORT_SYMBOL(ecc_point_mult_shamir);
1283
906static inline void ecc_swap_digits(const u64 *in, u64 *out, 1284static inline void ecc_swap_digits(const u64 *in, u64 *out,
907 unsigned int ndigits) 1285 unsigned int ndigits)
908{ 1286{
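ecc_point_mult_shamir() evaluates u1*P + u2*Q in one double-and-add scan over the joint bits of u1 and u2, picking each step's addend from the precomputed table {0, P, Q, P+Q}. The same scan over plain integers (where doubling is r += r and "point add" is +) shows the control flow; this is purely illustrative, since the real code also skips leading zero bits and works through xycz_add()/apply_z():

    #include <assert.h>
    #include <stdint.h>

    static uint64_t shamir(uint64_t u1, uint64_t P, uint64_t u2, uint64_t Q)
    {
        const uint64_t table[4] = { 0, P, Q, P + Q };
        uint64_t r = 0;

        for (int i = 63; i >= 0; i--) {
            unsigned int idx = ((u1 >> i) & 1) | (((u2 >> i) & 1) << 1);

            r += r;             /* "point double" */
            r += table[idx];    /* "point add" from the shared table */
        }
        return r;
    }

    int main(void)
    {
        assert(shamir(123, 7, 456, 11) == 123 * 7 + 456 * 11);
        return 0;
    }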
@@ -948,6 +1326,7 @@ int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
948 1326
949 return __ecc_is_key_valid(curve, private_key, ndigits); 1327 return __ecc_is_key_valid(curve, private_key, ndigits);
950} 1328}
1329EXPORT_SYMBOL(ecc_is_key_valid);
951 1330
952/* 1331/*
953 * ECC private keys are generated using the method of extra random bits, 1332 * ECC private keys are generated using the method of extra random bits,
@@ -1000,6 +1379,7 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey)
1000 1379
1001 return 0; 1380 return 0;
1002} 1381}
1382EXPORT_SYMBOL(ecc_gen_privkey);
1003 1383
1004int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits, 1384int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
1005 const u64 *private_key, u64 *public_key) 1385 const u64 *private_key, u64 *public_key)
@@ -1036,13 +1416,17 @@ err_free_point:
1036out: 1416out:
1037 return ret; 1417 return ret;
1038} 1418}
1419EXPORT_SYMBOL(ecc_make_pub_key);
1039 1420
1040/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */ 1421/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
1041static int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve, 1422int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
1042 struct ecc_point *pk) 1423 struct ecc_point *pk)
1043{ 1424{
1044 u64 yy[ECC_MAX_DIGITS], xxx[ECC_MAX_DIGITS], w[ECC_MAX_DIGITS]; 1425 u64 yy[ECC_MAX_DIGITS], xxx[ECC_MAX_DIGITS], w[ECC_MAX_DIGITS];
1045 1426
1427 if (WARN_ON(pk->ndigits != curve->g.ndigits))
1428 return -EINVAL;
1429
1046 /* Check 1: Verify key is not the zero point. */ 1430 /* Check 1: Verify key is not the zero point. */
1047 if (ecc_point_is_zero(pk)) 1431 if (ecc_point_is_zero(pk))
1048 return -EINVAL; 1432 return -EINVAL;
@@ -1064,8 +1448,8 @@ static int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
1064 return -EINVAL; 1448 return -EINVAL;
1065 1449
1066 return 0; 1450 return 0;
1067
1068} 1451}
1452EXPORT_SYMBOL(ecc_is_pubkey_valid_partial);
1069 1453
1070int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, 1454int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
1071 const u64 *private_key, const u64 *public_key, 1455 const u64 *private_key, const u64 *public_key,
@@ -1121,3 +1505,6 @@ err_alloc_product:
1121out: 1505out:
1122 return ret; 1506 return ret;
1123} 1507}
1508EXPORT_SYMBOL(crypto_ecdh_shared_secret);
1509
1510MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/ecc.h b/crypto/ecc.h
index f75a86baa3bd..ab0eb70b9c09 100644
--- a/crypto/ecc.h
+++ b/crypto/ecc.h
@@ -26,13 +26,51 @@
26#ifndef _CRYPTO_ECC_H 26#ifndef _CRYPTO_ECC_H
27#define _CRYPTO_ECC_H 27#define _CRYPTO_ECC_H
28 28
 29/* One digit is a u64 qword. */
29#define ECC_CURVE_NIST_P192_DIGITS 3 30#define ECC_CURVE_NIST_P192_DIGITS 3
30#define ECC_CURVE_NIST_P256_DIGITS 4 31#define ECC_CURVE_NIST_P256_DIGITS 4
31#define ECC_MAX_DIGITS ECC_CURVE_NIST_P256_DIGITS 32#define ECC_MAX_DIGITS (512 / 64)
32 33
33#define ECC_DIGITS_TO_BYTES_SHIFT 3 34#define ECC_DIGITS_TO_BYTES_SHIFT 3
34 35
35/** 36/**
37 * struct ecc_point - elliptic curve point in affine coordinates
38 *
39 * @x: X coordinate in vli form.
40 * @y: Y coordinate in vli form.
41 * @ndigits: Length of vlis in u64 qwords.
42 */
43struct ecc_point {
44 u64 *x;
45 u64 *y;
46 u8 ndigits;
47};
48
49#define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits }
50
51/**
52 * struct ecc_curve - definition of elliptic curve
53 *
54 * @name: Short name of the curve.
55 * @g: Generator point of the curve.
 56 * @p: Prime number; if Barrett's reduction is used for this curve,
 57 *	the pre-calculated value 'mu' is appended to @p after ndigits.
58 * Use of Barrett's reduction is heuristically determined in
59 * vli_mmod_fast().
60 * @n: Order of the curve group.
61 * @a: Curve parameter a.
62 * @b: Curve parameter b.
63 */
64struct ecc_curve {
65 char *name;
66 struct ecc_point g;
67 u64 *p;
68 u64 *n;
69 u64 *a;
70 u64 *b;
71};
72
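For reference, the 'mu' mentioned in the @p description is the classical Barrett constant (a textbook formula; the exact word layout appended to @p is implementation-specific):

    \mu = \lfloor 2^{2k} / p \rfloor    (k = bit length of p)

and reduction of an x < p^2 is approximated by

    x \bmod p \approx x - \lfloor x \mu / 2^{2k} \rfloor \cdot p

followed by at most two conditional subtractions of p.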
73/**
36 * ecc_is_key_valid() - Validate a given ECDH private key 74 * ecc_is_key_valid() - Validate a given ECDH private key
37 * 75 *
38 * @curve_id: id representing the curve to use 76 * @curve_id: id representing the curve to use
@@ -91,4 +129,117 @@ int ecc_make_pub_key(const unsigned int curve_id, unsigned int ndigits,
91int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, 129int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
92 const u64 *private_key, const u64 *public_key, 130 const u64 *private_key, const u64 *public_key,
93 u64 *secret); 131 u64 *secret);
132
133/**
134 * ecc_is_pubkey_valid_partial() - Partial public key validation
135 *
136 * @curve: elliptic curve domain parameters
137 * @pk: public key as a point
138 *
 139 * Validate public key according to SP800-56A section 5.6.2.3.4 ECC Partial
140 * Public-Key Validation Routine.
141 *
142 * Note: There is no check that the public key is in the correct elliptic curve
143 * subgroup.
144 *
 145 * Return: 0 if validation is successful, -EINVAL if validation fails.
146 */
147int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
148 struct ecc_point *pk);
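Spelled out, the partial validation for a candidate key Q = (x, y) on y^2 = x^3 + ax + b over GF(p) amounts to (per SP800-56A; the elided middle of the ecc.c hunk above performs checks 2 and 3):

    1. Q is not the point at infinity;
    2. 0 <= x < p and 0 <= y < p;
    3. y^2 \equiv x^3 + ax + b (mod p).

Full validation would additionally check nQ = O, the subgroup membership test that the note above says is omitted.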
149
150/**
 151 * vli_is_zero() - Determine if vli is zero
152 *
153 * @vli: vli to check.
154 * @ndigits: length of the @vli
155 */
156bool vli_is_zero(const u64 *vli, unsigned int ndigits);
157
158/**
159 * vli_cmp() - compare left and right vlis
160 *
161 * @left: vli
162 * @right: vli
163 * @ndigits: length of both vlis
164 *
165 * Returns sign of @left - @right, i.e. -1 if @left < @right,
166 * 0 if @left == @right, 1 if @left > @right.
167 */
168int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits);
169
170/**
171 * vli_sub() - Subtracts right from left
172 *
173 * @result: where to write result
174 * @left: vli
 175 * @right: vli
176 * @ndigits: length of all vlis
177 *
178 * Note: can modify in-place.
179 *
180 * Return: carry bit.
181 */
182u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
183 unsigned int ndigits);
184
185/**
186 * vli_from_be64() - Load vli from big-endian u64 array
187 *
188 * @dest: destination vli
189 * @src: source array of u64 BE values
190 * @ndigits: length of both vli and array
191 */
192void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits);
193
194/**
195 * vli_from_le64() - Load vli from little-endian u64 array
196 *
197 * @dest: destination vli
198 * @src: source array of u64 LE values
199 * @ndigits: length of both vli and array
200 */
201void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits);
202
203/**
204 * vli_mod_inv() - Modular inversion
205 *
206 * @result: where to write vli number
207 * @input: vli value to operate on
208 * @mod: modulus
209 * @ndigits: length of all vlis
210 */
211void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
212 unsigned int ndigits);
213
214/**
215 * vli_mod_mult_slow() - Modular multiplication
216 *
217 * @result: where to write result value
218 * @left: vli number to multiply with @right
219 * @right: vli number to multiply with @left
220 * @mod: modulus
221 * @ndigits: length of all vlis
222 *
 223 * Note: Assumes that @mod is a sufficiently large modulus, e.g. a curve order.
224 */
225void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
226 const u64 *mod, unsigned int ndigits);
227
228/**
229 * ecc_point_mult_shamir() - Add two points multiplied by scalars
230 *
231 * @result: resulting point
232 * @x: scalar to multiply with @p
233 * @p: point to multiply with @x
234 * @y: scalar to multiply with @q
235 * @q: point to multiply with @y
236 * @curve: curve
237 *
 238 * Returns result = x * p + y * q over the curve.
 239 * This is faster than two separate multiplications followed by an addition.
240 */
241void ecc_point_mult_shamir(const struct ecc_point *result,
242 const u64 *x, const struct ecc_point *p,
243 const u64 *y, const struct ecc_point *q,
244 const struct ecc_curve *curve);
94#endif 245#endif
diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h
index 336ab1805639..69be6c7d228f 100644
--- a/crypto/ecc_curve_defs.h
+++ b/crypto/ecc_curve_defs.h
@@ -2,21 +2,6 @@
2#ifndef _CRYTO_ECC_CURVE_DEFS_H 2#ifndef _CRYTO_ECC_CURVE_DEFS_H
3#define _CRYTO_ECC_CURVE_DEFS_H 3#define _CRYTO_ECC_CURVE_DEFS_H
4 4
5struct ecc_point {
6 u64 *x;
7 u64 *y;
8 u8 ndigits;
9};
10
11struct ecc_curve {
12 char *name;
13 struct ecc_point g;
14 u64 *p;
15 u64 *n;
16 u64 *a;
17 u64 *b;
18};
19
20/* NIST P-192: a = p - 3 */ 5/* NIST P-192: a = p - 3 */
21static u64 nist_p192_g_x[] = { 0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull, 6static u64 nist_p192_g_x[] = { 0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull,
22 0x188DA80EB03090F6ull }; 7 0x188DA80EB03090F6ull };
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index bf6300175b9c..890092bd8989 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -166,7 +166,7 @@ static void ecdh_exit(void)
166 crypto_unregister_kpp(&ecdh); 166 crypto_unregister_kpp(&ecdh);
167} 167}
168 168
169module_init(ecdh_init); 169subsys_initcall(ecdh_init);
170module_exit(ecdh_exit); 170module_exit(ecdh_exit);
171MODULE_ALIAS_CRYPTO("ecdh"); 171MODULE_ALIAS_CRYPTO("ecdh");
172MODULE_LICENSE("GPL"); 172MODULE_LICENSE("GPL");
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index 77e607fdbfb7..e71d1bc8d850 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -174,7 +174,7 @@ static void __exit echainiv_module_exit(void)
174 crypto_unregister_template(&echainiv_tmpl); 174 crypto_unregister_template(&echainiv_tmpl);
175} 175}
176 176
177module_init(echainiv_module_init); 177subsys_initcall(echainiv_module_init);
178module_exit(echainiv_module_exit); 178module_exit(echainiv_module_exit);
179 179
180MODULE_LICENSE("GPL"); 180MODULE_LICENSE("GPL");
diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
new file mode 100644
index 000000000000..887ec21aee49
--- /dev/null
+++ b/crypto/ecrdsa.c
@@ -0,0 +1,296 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Elliptic Curve (Russian) Digital Signature Algorithm for Cryptographic API
4 *
5 * Copyright (c) 2019 Vitaly Chikunov <vt@altlinux.org>
6 *
7 * References:
8 * GOST 34.10-2018, GOST R 34.10-2012, RFC 7091, ISO/IEC 14888-3:2018.
9 *
10 * Historical references:
11 * GOST R 34.10-2001, RFC 4357, ISO/IEC 14888-3:2006/Amd 1:2010.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the Free
15 * Software Foundation; either version 2 of the License, or (at your option)
16 * any later version.
17 */
18
19#include <linux/module.h>
20#include <linux/crypto.h>
21#include <crypto/streebog.h>
22#include <crypto/internal/akcipher.h>
23#include <crypto/akcipher.h>
24#include <linux/oid_registry.h>
25#include "ecrdsa_params.asn1.h"
26#include "ecrdsa_pub_key.asn1.h"
27#include "ecc.h"
28#include "ecrdsa_defs.h"
29
30#define ECRDSA_MAX_SIG_SIZE (2 * 512 / 8)
31#define ECRDSA_MAX_DIGITS (512 / 64)
32
33struct ecrdsa_ctx {
34 enum OID algo_oid; /* overall public key oid */
35 enum OID curve_oid; /* parameter */
36 enum OID digest_oid; /* parameter */
37 const struct ecc_curve *curve; /* curve from oid */
38 unsigned int digest_len; /* parameter (bytes) */
39 const char *digest; /* digest name from oid */
40 unsigned int key_len; /* @key length (bytes) */
41 const char *key; /* raw public key */
42 struct ecc_point pub_key;
43 u64 _pubp[2][ECRDSA_MAX_DIGITS]; /* point storage for @pub_key */
44};
45
46static const struct ecc_curve *get_curve_by_oid(enum OID oid)
47{
48 switch (oid) {
49 case OID_gostCPSignA:
50 case OID_gostTC26Sign256B:
51 return &gost_cp256a;
52 case OID_gostCPSignB:
53 case OID_gostTC26Sign256C:
54 return &gost_cp256b;
55 case OID_gostCPSignC:
56 case OID_gostTC26Sign256D:
57 return &gost_cp256c;
58 case OID_gostTC26Sign512A:
59 return &gost_tc512a;
60 case OID_gostTC26Sign512B:
61 return &gost_tc512b;
62 /* The following two aren't implemented: */
63 case OID_gostTC26Sign256A:
64 case OID_gostTC26Sign512C:
65 default:
66 return NULL;
67 }
68}
69
70static int ecrdsa_verify(struct akcipher_request *req)
71{
72 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
73 struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
74 unsigned char sig[ECRDSA_MAX_SIG_SIZE];
75 unsigned char digest[STREEBOG512_DIGEST_SIZE];
76 unsigned int ndigits = req->dst_len / sizeof(u64);
77 u64 r[ECRDSA_MAX_DIGITS]; /* witness (r) */
78 u64 _r[ECRDSA_MAX_DIGITS]; /* -r */
79 u64 s[ECRDSA_MAX_DIGITS]; /* second part of sig (s) */
80 u64 e[ECRDSA_MAX_DIGITS]; /* h \mod q */
81 u64 *v = e; /* e^{-1} \mod q */
82 u64 z1[ECRDSA_MAX_DIGITS];
83 u64 *z2 = _r;
84 struct ecc_point cc = ECC_POINT_INIT(s, e, ndigits); /* reuse s, e */
85
86 /*
87 * Digest value, digest algorithm, and curve (modulus) should have the
 88	 * same length (256 or 512 bits); the public key and signature should be
 89	 * twice that length.
90 */
91 if (!ctx->curve ||
92 !ctx->digest ||
93 !req->src ||
94 !ctx->pub_key.x ||
95 req->dst_len != ctx->digest_len ||
96 req->dst_len != ctx->curve->g.ndigits * sizeof(u64) ||
97 ctx->pub_key.ndigits != ctx->curve->g.ndigits ||
98 req->dst_len * 2 != req->src_len ||
99 WARN_ON(req->src_len > sizeof(sig)) ||
100 WARN_ON(req->dst_len > sizeof(digest)))
101 return -EBADMSG;
102
103 sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, req->src_len),
104 sig, req->src_len);
105 sg_pcopy_to_buffer(req->src,
106 sg_nents_for_len(req->src,
107 req->src_len + req->dst_len),
108 digest, req->dst_len, req->src_len);
109
110 vli_from_be64(s, sig, ndigits);
111 vli_from_be64(r, sig + ndigits * sizeof(u64), ndigits);
112
113 /* Step 1: verify that 0 < r < q, 0 < s < q */
114 if (vli_is_zero(r, ndigits) ||
115 vli_cmp(r, ctx->curve->n, ndigits) == 1 ||
116 vli_is_zero(s, ndigits) ||
117 vli_cmp(s, ctx->curve->n, ndigits) == 1)
118 return -EKEYREJECTED;
119
120 /* Step 2: calculate hash (h) of the message (passed as input) */
121 /* Step 3: calculate e = h \mod q */
122 vli_from_le64(e, digest, ndigits);
123 if (vli_cmp(e, ctx->curve->n, ndigits) == 1)
124 vli_sub(e, e, ctx->curve->n, ndigits);
125 if (vli_is_zero(e, ndigits))
126 e[0] = 1;
127
128 /* Step 4: calculate v = e^{-1} \mod q */
129 vli_mod_inv(v, e, ctx->curve->n, ndigits);
130
131 /* Step 5: calculate z_1 = sv \mod q, z_2 = -rv \mod q */
132 vli_mod_mult_slow(z1, s, v, ctx->curve->n, ndigits);
133 vli_sub(_r, ctx->curve->n, r, ndigits);
134 vli_mod_mult_slow(z2, _r, v, ctx->curve->n, ndigits);
135
136 /* Step 6: calculate point C = z_1P + z_2Q, and R = x_c \mod q */
137 ecc_point_mult_shamir(&cc, z1, &ctx->curve->g, z2, &ctx->pub_key,
138 ctx->curve);
139 if (vli_cmp(cc.x, ctx->curve->n, ndigits) == 1)
140 vli_sub(cc.x, cc.x, ctx->curve->n, ndigits);
141
142 /* Step 7: if R == r signature is valid */
143 if (!vli_cmp(cc.x, r, ndigits))
144 return 0;
145 else
146 return -EKEYREJECTED;
147}
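Collecting the step comments above, the verification equation is (P the generator, Q the public key, q the group order):

    e = h mod q    (with e := 1 if e = 0)
    v = e^{-1} mod q
    z_1 = s v mod q,    z_2 = -r v mod q
    C = z_1 P + z_2 Q,  R = x_C mod q

and the signature (r, s) is accepted iff R = r.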
148
149int ecrdsa_param_curve(void *context, size_t hdrlen, unsigned char tag,
150 const void *value, size_t vlen)
151{
152 struct ecrdsa_ctx *ctx = context;
153
154 ctx->curve_oid = look_up_OID(value, vlen);
155 if (!ctx->curve_oid)
156 return -EINVAL;
157 ctx->curve = get_curve_by_oid(ctx->curve_oid);
158 return 0;
159}
160
 161/* Optional. If present, it should match the expected digest algo OID. */
162int ecrdsa_param_digest(void *context, size_t hdrlen, unsigned char tag,
163 const void *value, size_t vlen)
164{
165 struct ecrdsa_ctx *ctx = context;
166 int digest_oid = look_up_OID(value, vlen);
167
168 if (digest_oid != ctx->digest_oid)
169 return -EINVAL;
170 return 0;
171}
172
173int ecrdsa_parse_pub_key(void *context, size_t hdrlen, unsigned char tag,
174 const void *value, size_t vlen)
175{
176 struct ecrdsa_ctx *ctx = context;
177
178 ctx->key = value;
179 ctx->key_len = vlen;
180 return 0;
181}
182
183static u8 *ecrdsa_unpack_u32(u32 *dst, void *src)
184{
185 memcpy(dst, src, sizeof(u32));
186 return src + sizeof(u32);
187}
188
189/* Parse BER encoded subjectPublicKey. */
190static int ecrdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
191 unsigned int keylen)
192{
193 struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
194 unsigned int ndigits;
195 u32 algo, paramlen;
196 u8 *params;
197 int err;
198
199 err = asn1_ber_decoder(&ecrdsa_pub_key_decoder, ctx, key, keylen);
200 if (err < 0)
201 return err;
202
 203	/* Key parameters are in the key after keylen. */
204 params = ecrdsa_unpack_u32(&paramlen,
205 ecrdsa_unpack_u32(&algo, (u8 *)key + keylen));
206
207 if (algo == OID_gost2012PKey256) {
208 ctx->digest = "streebog256";
209 ctx->digest_oid = OID_gost2012Digest256;
210 ctx->digest_len = 256 / 8;
211 } else if (algo == OID_gost2012PKey512) {
212 ctx->digest = "streebog512";
213 ctx->digest_oid = OID_gost2012Digest512;
214 ctx->digest_len = 512 / 8;
215 } else
216 return -ENOPKG;
217 ctx->algo_oid = algo;
218
219 /* Parse SubjectPublicKeyInfo.AlgorithmIdentifier.parameters. */
220 err = asn1_ber_decoder(&ecrdsa_params_decoder, ctx, params, paramlen);
221 if (err < 0)
222 return err;
223 /*
 224	 * The algorithm's digest size (set in digest_len) and the curve
 225	 * size should match each other.
226 */
227 if (!ctx->curve ||
228 ctx->curve->g.ndigits * sizeof(u64) != ctx->digest_len)
229 return -ENOPKG;
230 /*
 231	 * The key is two 256- or 512-bit coordinates, which should match
 232	 * the curve size.
233 */
234 if ((ctx->key_len != (2 * 256 / 8) &&
235 ctx->key_len != (2 * 512 / 8)) ||
236 ctx->key_len != ctx->curve->g.ndigits * sizeof(u64) * 2)
237 return -ENOPKG;
238
239 ndigits = ctx->key_len / sizeof(u64) / 2;
240 ctx->pub_key = ECC_POINT_INIT(ctx->_pubp[0], ctx->_pubp[1], ndigits);
241 vli_from_le64(ctx->pub_key.x, ctx->key, ndigits);
242 vli_from_le64(ctx->pub_key.y, ctx->key + ndigits * sizeof(u64),
243 ndigits);
244
245 if (ecc_is_pubkey_valid_partial(ctx->curve, &ctx->pub_key))
246 return -EKEYREJECTED;
247
248 return 0;
249}
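As a layout aid, here is a hypothetical userspace sketch of the raw key split done at the end of the function (split_raw_pubkey() is a made-up name; the real vli_from_le64() additionally byte-swaps on big-endian hosts):

#include <stdint.h>
#include <string.h>

/* key = x || y, each coordinate keylen/2 bytes of little-endian words;
 * e.g. for a 256-bit curve keylen is 64 and ndigits = 64 / 8 / 2 = 4. */
static void split_raw_pubkey(const uint8_t *key, size_t keylen,
			     uint64_t *x, uint64_t *y)
{
	size_t half = keylen / 2;

	memcpy(x, key, half);
	memcpy(y, key + half, half);
}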
250
251static unsigned int ecrdsa_max_size(struct crypto_akcipher *tfm)
252{
253 struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
254
255 /*
256 * Verify doesn't need any output, so it's just informational
257 * for keyctl to determine the key bit size.
258 */
259 return ctx->pub_key.ndigits * sizeof(u64);
260}
261
262static void ecrdsa_exit_tfm(struct crypto_akcipher *tfm)
263{
264}
265
266static struct akcipher_alg ecrdsa_alg = {
267 .verify = ecrdsa_verify,
268 .set_pub_key = ecrdsa_set_pub_key,
269 .max_size = ecrdsa_max_size,
270 .exit = ecrdsa_exit_tfm,
271 .base = {
272 .cra_name = "ecrdsa",
273 .cra_driver_name = "ecrdsa-generic",
274 .cra_priority = 100,
275 .cra_module = THIS_MODULE,
276 .cra_ctxsize = sizeof(struct ecrdsa_ctx),
277 },
278};
279
280static int __init ecrdsa_mod_init(void)
281{
282 return crypto_register_akcipher(&ecrdsa_alg);
283}
284
285static void __exit ecrdsa_mod_fini(void)
286{
287 crypto_unregister_akcipher(&ecrdsa_alg);
288}
289
290module_init(ecrdsa_mod_init);
291module_exit(ecrdsa_mod_fini);
292
293MODULE_LICENSE("GPL");
294MODULE_AUTHOR("Vitaly Chikunov <vt@altlinux.org>");
295MODULE_DESCRIPTION("EC-RDSA generic algorithm");
296MODULE_ALIAS_CRYPTO("ecrdsa-generic");
diff --git a/crypto/ecrdsa_defs.h b/crypto/ecrdsa_defs.h
new file mode 100644
index 000000000000..170baf039007
--- /dev/null
+++ b/crypto/ecrdsa_defs.h
@@ -0,0 +1,225 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * Definitions of EC-RDSA Curve Parameters
4 *
5 * Copyright (c) 2019 Vitaly Chikunov <vt@altlinux.org>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version.
11 */
12
13#ifndef _CRYTO_ECRDSA_DEFS_H
14#define _CRYTO_ECRDSA_DEFS_H
15
16#include "ecc.h"
17
18#define ECRDSA_MAX_SIG_SIZE (2 * 512 / 8)
19#define ECRDSA_MAX_DIGITS (512 / 64)
20
21/*
22 * EC-RDSA uses its own set of curves.
23 *
 24 * cp256{a,b,c} curves were first defined for GOST R 34.10-2001 in RFC 4357 (as
 25 * the 256-bit {A,B,C}-ParamSet), then inherited by GOST R 34.10-2012 and
26 * proposed for use in R 50.1.114-2016 and RFC 7836 as the 256-bit curves.
27 */
28/* OID_gostCPSignA 1.2.643.2.2.35.1 */
29static u64 cp256a_g_x[] = {
30 0x0000000000000001ull, 0x0000000000000000ull,
31 0x0000000000000000ull, 0x0000000000000000ull, };
32static u64 cp256a_g_y[] = {
33 0x22ACC99C9E9F1E14ull, 0x35294F2DDF23E3B1ull,
34 0x27DF505A453F2B76ull, 0x8D91E471E0989CDAull, };
35static u64 cp256a_p[] = { /* p = 2^256 - 617 */
36 0xFFFFFFFFFFFFFD97ull, 0xFFFFFFFFFFFFFFFFull,
37 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
38static u64 cp256a_n[] = {
39 0x45841B09B761B893ull, 0x6C611070995AD100ull,
40 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
41static u64 cp256a_a[] = { /* a = p - 3 */
42 0xFFFFFFFFFFFFFD94ull, 0xFFFFFFFFFFFFFFFFull,
43 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
44static u64 cp256a_b[] = {
45 0x00000000000000a6ull, 0x0000000000000000ull,
46 0x0000000000000000ull, 0x0000000000000000ull };
47
48static struct ecc_curve gost_cp256a = {
49 .name = "cp256a",
50 .g = {
51 .x = cp256a_g_x,
52 .y = cp256a_g_y,
53 .ndigits = 256 / 64,
54 },
55 .p = cp256a_p,
56 .n = cp256a_n,
57 .a = cp256a_a,
58 .b = cp256a_b
59};
60
61/* OID_gostCPSignB 1.2.643.2.2.35.2 */
62static u64 cp256b_g_x[] = {
63 0x0000000000000001ull, 0x0000000000000000ull,
64 0x0000000000000000ull, 0x0000000000000000ull, };
65static u64 cp256b_g_y[] = {
66 0x744BF8D717717EFCull, 0xC545C9858D03ECFBull,
67 0xB83D1C3EB2C070E5ull, 0x3FA8124359F96680ull, };
68static u64 cp256b_p[] = { /* p = 2^255 + 3225 */
69 0x0000000000000C99ull, 0x0000000000000000ull,
70 0x0000000000000000ull, 0x8000000000000000ull, };
71static u64 cp256b_n[] = {
72 0xE497161BCC8A198Full, 0x5F700CFFF1A624E5ull,
73 0x0000000000000001ull, 0x8000000000000000ull, };
74static u64 cp256b_a[] = { /* a = p - 3 */
75 0x0000000000000C96ull, 0x0000000000000000ull,
76 0x0000000000000000ull, 0x8000000000000000ull, };
77static u64 cp256b_b[] = {
78 0x2F49D4CE7E1BBC8Bull, 0xE979259373FF2B18ull,
79 0x66A7D3C25C3DF80Aull, 0x3E1AF419A269A5F8ull, };
80
81static struct ecc_curve gost_cp256b = {
82 .name = "cp256b",
83 .g = {
84 .x = cp256b_g_x,
85 .y = cp256b_g_y,
86 .ndigits = 256 / 64,
87 },
88 .p = cp256b_p,
89 .n = cp256b_n,
90 .a = cp256b_a,
91 .b = cp256b_b
92};
93
94/* OID_gostCPSignC 1.2.643.2.2.35.3 */
95static u64 cp256c_g_x[] = {
96 0x0000000000000000ull, 0x0000000000000000ull,
97 0x0000000000000000ull, 0x0000000000000000ull, };
98static u64 cp256c_g_y[] = {
99 0x366E550DFDB3BB67ull, 0x4D4DC440D4641A8Full,
100 0x3CBF3783CD08C0EEull, 0x41ECE55743711A8Cull, };
101static u64 cp256c_p[] = {
102 0x7998F7B9022D759Bull, 0xCF846E86789051D3ull,
103 0xAB1EC85E6B41C8AAull, 0x9B9F605F5A858107ull,
104 /* pre-computed value for Barrett's reduction */
105 0xedc283cdd217b5a2ull, 0xbac48fc06398ae59ull,
106 0x405384d55f9f3b73ull, 0xa51f176161f1d734ull,
107 0x0000000000000001ull, };
108static u64 cp256c_n[] = {
109 0xF02F3A6598980BB9ull, 0x582CA3511EDDFB74ull,
110 0xAB1EC85E6B41C8AAull, 0x9B9F605F5A858107ull, };
111static u64 cp256c_a[] = { /* a = p - 3 */
112 0x7998F7B9022D7598ull, 0xCF846E86789051D3ull,
113 0xAB1EC85E6B41C8AAull, 0x9B9F605F5A858107ull, };
114static u64 cp256c_b[] = {
115 0x000000000000805aull, 0x0000000000000000ull,
116 0x0000000000000000ull, 0x0000000000000000ull, };
117
118static struct ecc_curve gost_cp256c = {
119 .name = "cp256c",
120 .g = {
121 .x = cp256c_g_x,
122 .y = cp256c_g_y,
123 .ndigits = 256 / 64,
124 },
125 .p = cp256c_p,
126 .n = cp256c_n,
127 .a = cp256c_a,
128 .b = cp256c_b
129};
130
 131/* tc512{a,b} curves were first recommended in 2013 and later standardized in
132 * R 50.1.114-2016 and RFC 7836 for use with GOST R 34.10-2012 (as TC26
133 * 512-bit ParamSet{A,B}).
134 */
135/* OID_gostTC26Sign512A 1.2.643.7.1.2.1.2.1 */
136static u64 tc512a_g_x[] = {
137 0x0000000000000003ull, 0x0000000000000000ull,
138 0x0000000000000000ull, 0x0000000000000000ull,
139 0x0000000000000000ull, 0x0000000000000000ull,
140 0x0000000000000000ull, 0x0000000000000000ull, };
141static u64 tc512a_g_y[] = {
142 0x89A589CB5215F2A4ull, 0x8028FE5FC235F5B8ull,
143 0x3D75E6A50E3A41E9ull, 0xDF1626BE4FD036E9ull,
144 0x778064FDCBEFA921ull, 0xCE5E1C93ACF1ABC1ull,
145 0xA61B8816E25450E6ull, 0x7503CFE87A836AE3ull, };
146static u64 tc512a_p[] = { /* p = 2^512 - 569 */
147 0xFFFFFFFFFFFFFDC7ull, 0xFFFFFFFFFFFFFFFFull,
148 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull,
149 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull,
150 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull, };
151static u64 tc512a_n[] = {
152 0xCACDB1411F10B275ull, 0x9B4B38ABFAD2B85Dull,
153 0x6FF22B8D4E056060ull, 0x27E69532F48D8911ull,
154 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull,
155 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull, };
156static u64 tc512a_a[] = { /* a = p - 3 */
157 0xFFFFFFFFFFFFFDC4ull, 0xFFFFFFFFFFFFFFFFull,
158 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull,
159 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull,
160 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull, };
161static u64 tc512a_b[] = {
162 0x503190785A71C760ull, 0x862EF9D4EBEE4761ull,
163 0x4CB4574010DA90DDull, 0xEE3CB090F30D2761ull,
164 0x79BD081CFD0B6265ull, 0x34B82574761CB0E8ull,
165 0xC1BD0B2B6667F1DAull, 0xE8C2505DEDFC86DDull, };
166
167static struct ecc_curve gost_tc512a = {
168 .name = "tc512a",
169 .g = {
170 .x = tc512a_g_x,
171 .y = tc512a_g_y,
172 .ndigits = 512 / 64,
173 },
174 .p = tc512a_p,
175 .n = tc512a_n,
176 .a = tc512a_a,
177 .b = tc512a_b
178};
179
180/* OID_gostTC26Sign512B 1.2.643.7.1.2.1.2.2 */
181static u64 tc512b_g_x[] = {
182 0x0000000000000002ull, 0x0000000000000000ull,
183 0x0000000000000000ull, 0x0000000000000000ull,
184 0x0000000000000000ull, 0x0000000000000000ull,
185 0x0000000000000000ull, 0x0000000000000000ull, };
186static u64 tc512b_g_y[] = {
187 0x7E21340780FE41BDull, 0x28041055F94CEEECull,
188 0x152CBCAAF8C03988ull, 0xDCB228FD1EDF4A39ull,
189 0xBE6DD9E6C8EC7335ull, 0x3C123B697578C213ull,
190 0x2C071E3647A8940Full, 0x1A8F7EDA389B094Cull, };
191static u64 tc512b_p[] = { /* p = 2^511 + 111 */
192 0x000000000000006Full, 0x0000000000000000ull,
193 0x0000000000000000ull, 0x0000000000000000ull,
194 0x0000000000000000ull, 0x0000000000000000ull,
195 0x0000000000000000ull, 0x8000000000000000ull, };
196static u64 tc512b_n[] = {
197 0xC6346C54374F25BDull, 0x8B996712101BEA0Eull,
198 0xACFDB77BD9D40CFAull, 0x49A1EC142565A545ull,
199 0x0000000000000001ull, 0x0000000000000000ull,
200 0x0000000000000000ull, 0x8000000000000000ull, };
201static u64 tc512b_a[] = { /* a = p - 3 */
202 0x000000000000006Cull, 0x0000000000000000ull,
203 0x0000000000000000ull, 0x0000000000000000ull,
204 0x0000000000000000ull, 0x0000000000000000ull,
205 0x0000000000000000ull, 0x8000000000000000ull, };
206static u64 tc512b_b[] = {
207 0xFB8CCBC7C5140116ull, 0x50F78BEE1FA3106Eull,
208 0x7F8B276FAD1AB69Cull, 0x3E965D2DB1416D21ull,
209 0xBF85DC806C4B289Full, 0xB97C7D614AF138BCull,
210 0x7E3E06CF6F5E2517ull, 0x687D1B459DC84145ull, };
211
212static struct ecc_curve gost_tc512b = {
213 .name = "tc512b",
214 .g = {
215 .x = tc512b_g_x,
216 .y = tc512b_g_y,
217 .ndigits = 512 / 64,
218 },
219 .p = tc512b_p,
220 .n = tc512b_n,
221 .a = tc512b_a,
222 .b = tc512b_b
223};
224
225#endif
diff --git a/crypto/ecrdsa_params.asn1 b/crypto/ecrdsa_params.asn1
new file mode 100644
index 000000000000..aba99c3763cf
--- /dev/null
+++ b/crypto/ecrdsa_params.asn1
@@ -0,0 +1,4 @@
1EcrdsaParams ::= SEQUENCE {
2 curve OBJECT IDENTIFIER ({ ecrdsa_param_curve }),
3 digest OBJECT IDENTIFIER OPTIONAL ({ ecrdsa_param_digest })
4}
diff --git a/crypto/ecrdsa_pub_key.asn1 b/crypto/ecrdsa_pub_key.asn1
new file mode 100644
index 000000000000..048cb646bce4
--- /dev/null
+++ b/crypto/ecrdsa_pub_key.asn1
@@ -0,0 +1 @@
EcrdsaPubKey ::= OCTET STRING ({ ecrdsa_parse_pub_key })
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
index 77286ea28865..4e8704405a3b 100644
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -414,7 +414,7 @@ static void __exit fcrypt_mod_fini(void)
414 crypto_unregister_alg(&fcrypt_alg); 414 crypto_unregister_alg(&fcrypt_alg);
415} 415}
416 416
417module_init(fcrypt_mod_init); 417subsys_initcall(fcrypt_mod_init);
418module_exit(fcrypt_mod_fini); 418module_exit(fcrypt_mod_fini);
419 419
420MODULE_LICENSE("Dual BSD/GPL"); 420MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/fips.c b/crypto/fips.c
index 9d627c1cf8bc..9dfed122d6da 100644
--- a/crypto/fips.c
+++ b/crypto/fips.c
@@ -74,5 +74,5 @@ static void __exit fips_exit(void)
74 crypto_proc_fips_exit(); 74 crypto_proc_fips_exit();
75} 75}
76 76
77module_init(fips_init); 77subsys_initcall(fips_init);
78module_exit(fips_exit); 78module_exit(fips_exit);
diff --git a/crypto/gcm.c b/crypto/gcm.c
index e1a11f529d25..33f45a980967 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -597,7 +597,6 @@ static void crypto_gcm_free(struct aead_instance *inst)
597 597
598static int crypto_gcm_create_common(struct crypto_template *tmpl, 598static int crypto_gcm_create_common(struct crypto_template *tmpl,
599 struct rtattr **tb, 599 struct rtattr **tb,
600 const char *full_name,
601 const char *ctr_name, 600 const char *ctr_name,
602 const char *ghash_name) 601 const char *ghash_name)
603{ 602{
@@ -638,7 +637,8 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
638 goto err_free_inst; 637 goto err_free_inst;
639 638
640 err = -EINVAL; 639 err = -EINVAL;
641 if (ghash->digestsize != 16) 640 if (strcmp(ghash->base.cra_name, "ghash") != 0 ||
641 ghash->digestsize != 16)
642 goto err_drop_ghash; 642 goto err_drop_ghash;
643 643
644 crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst)); 644 crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
@@ -650,24 +650,24 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
650 650
651 ctr = crypto_spawn_skcipher_alg(&ctx->ctr); 651 ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
652 652
653 /* We only support 16-byte blocks. */ 653 /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
654 err = -EINVAL; 654 err = -EINVAL;
655 if (crypto_skcipher_alg_ivsize(ctr) != 16) 655 if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
656 crypto_skcipher_alg_ivsize(ctr) != 16 ||
657 ctr->base.cra_blocksize != 1)
656 goto out_put_ctr; 658 goto out_put_ctr;
657 659
658 /* Not a stream cipher? */ 660 err = -ENAMETOOLONG;
659 if (ctr->base.cra_blocksize != 1) 661 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
662 "gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
660 goto out_put_ctr; 663 goto out_put_ctr;
661 664
662 err = -ENAMETOOLONG;
663 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, 665 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
664 "gcm_base(%s,%s)", ctr->base.cra_driver_name, 666 "gcm_base(%s,%s)", ctr->base.cra_driver_name,
665 ghash_alg->cra_driver_name) >= 667 ghash_alg->cra_driver_name) >=
666 CRYPTO_MAX_ALG_NAME) 668 CRYPTO_MAX_ALG_NAME)
667 goto out_put_ctr; 669 goto out_put_ctr;
668 670
669 memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
670
671 inst->alg.base.cra_flags = (ghash->base.cra_flags | 671 inst->alg.base.cra_flags = (ghash->base.cra_flags |
672 ctr->base.cra_flags) & CRYPTO_ALG_ASYNC; 672 ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
673 inst->alg.base.cra_priority = (ghash->base.cra_priority + 673 inst->alg.base.cra_priority = (ghash->base.cra_priority +
@@ -709,7 +709,6 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
709{ 709{
710 const char *cipher_name; 710 const char *cipher_name;
711 char ctr_name[CRYPTO_MAX_ALG_NAME]; 711 char ctr_name[CRYPTO_MAX_ALG_NAME];
712 char full_name[CRYPTO_MAX_ALG_NAME];
713 712
714 cipher_name = crypto_attr_alg_name(tb[1]); 713 cipher_name = crypto_attr_alg_name(tb[1]);
715 if (IS_ERR(cipher_name)) 714 if (IS_ERR(cipher_name))
@@ -719,12 +718,7 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
719 CRYPTO_MAX_ALG_NAME) 718 CRYPTO_MAX_ALG_NAME)
720 return -ENAMETOOLONG; 719 return -ENAMETOOLONG;
721 720
722 if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >= 721 return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash");
723 CRYPTO_MAX_ALG_NAME)
724 return -ENAMETOOLONG;
725
726 return crypto_gcm_create_common(tmpl, tb, full_name,
727 ctr_name, "ghash");
728} 722}
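The '+ 4' in the snprintf above is what replaces the removed full_name plumbing: the CTR algorithm's cra_name is known (and now checked) to start with "ctr(", so skipping those four characters and prepending "gcm(" reuses the original closing parenthesis. A hypothetical userspace illustration:

#include <stdio.h>

int main(void)
{
	const char *ctr_name = "ctr(aes)";	/* ctr->base.cra_name */
	char gcm_name[64];

	snprintf(gcm_name, sizeof(gcm_name), "gcm(%s", ctr_name + 4);
	printf("%s\n", gcm_name);		/* prints "gcm(aes)" */
	return 0;
}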
729 723
730static int crypto_gcm_base_create(struct crypto_template *tmpl, 724static int crypto_gcm_base_create(struct crypto_template *tmpl,
@@ -732,7 +726,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
732{ 726{
733 const char *ctr_name; 727 const char *ctr_name;
734 const char *ghash_name; 728 const char *ghash_name;
735 char full_name[CRYPTO_MAX_ALG_NAME];
736 729
737 ctr_name = crypto_attr_alg_name(tb[1]); 730 ctr_name = crypto_attr_alg_name(tb[1]);
738 if (IS_ERR(ctr_name)) 731 if (IS_ERR(ctr_name))
@@ -742,12 +735,7 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
742 if (IS_ERR(ghash_name)) 735 if (IS_ERR(ghash_name))
743 return PTR_ERR(ghash_name); 736 return PTR_ERR(ghash_name);
744 737
745 if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)", 738 return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name);
746 ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
747 return -ENAMETOOLONG;
748
749 return crypto_gcm_create_common(tmpl, tb, full_name,
750 ctr_name, ghash_name);
751} 739}
752 740
753static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key, 741static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key,
@@ -1258,7 +1246,7 @@ static void __exit crypto_gcm_module_exit(void)
1258 ARRAY_SIZE(crypto_gcm_tmpls)); 1246 ARRAY_SIZE(crypto_gcm_tmpls));
1259} 1247}
1260 1248
1261module_init(crypto_gcm_module_init); 1249subsys_initcall(crypto_gcm_module_init);
1262module_exit(crypto_gcm_module_exit); 1250module_exit(crypto_gcm_module_exit);
1263 1251
1264MODULE_LICENSE("GPL"); 1252MODULE_LICENSE("GPL");
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index d9f192b953b2..e6307935413c 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -149,7 +149,7 @@ static void __exit ghash_mod_exit(void)
149 crypto_unregister_shash(&ghash_alg); 149 crypto_unregister_shash(&ghash_alg);
150} 150}
151 151
152module_init(ghash_mod_init); 152subsys_initcall(ghash_mod_init);
153module_exit(ghash_mod_exit); 153module_exit(ghash_mod_exit);
154 154
155MODULE_LICENSE("GPL"); 155MODULE_LICENSE("GPL");
diff --git a/crypto/hmac.c b/crypto/hmac.c
index e74730224f0a..a68c1266121f 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -57,8 +57,6 @@ static int hmac_setkey(struct crypto_shash *parent,
57 unsigned int i; 57 unsigned int i;
58 58
59 shash->tfm = hash; 59 shash->tfm = hash;
60 shash->flags = crypto_shash_get_flags(parent)
61 & CRYPTO_TFM_REQ_MAY_SLEEP;
62 60
63 if (keylen > bs) { 61 if (keylen > bs) {
64 int err; 62 int err;
@@ -91,8 +89,6 @@ static int hmac_export(struct shash_desc *pdesc, void *out)
91{ 89{
92 struct shash_desc *desc = shash_desc_ctx(pdesc); 90 struct shash_desc *desc = shash_desc_ctx(pdesc);
93 91
94 desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
95
96 return crypto_shash_export(desc, out); 92 return crypto_shash_export(desc, out);
97} 93}
98 94
@@ -102,7 +98,6 @@ static int hmac_import(struct shash_desc *pdesc, const void *in)
102 struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm); 98 struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm);
103 99
104 desc->tfm = ctx->hash; 100 desc->tfm = ctx->hash;
105 desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
106 101
107 return crypto_shash_import(desc, in); 102 return crypto_shash_import(desc, in);
108} 103}
@@ -117,8 +112,6 @@ static int hmac_update(struct shash_desc *pdesc,
117{ 112{
118 struct shash_desc *desc = shash_desc_ctx(pdesc); 113 struct shash_desc *desc = shash_desc_ctx(pdesc);
119 114
120 desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
121
122 return crypto_shash_update(desc, data, nbytes); 115 return crypto_shash_update(desc, data, nbytes);
123} 116}
124 117
@@ -130,8 +123,6 @@ static int hmac_final(struct shash_desc *pdesc, u8 *out)
130 char *opad = crypto_shash_ctx_aligned(parent) + ss; 123 char *opad = crypto_shash_ctx_aligned(parent) + ss;
131 struct shash_desc *desc = shash_desc_ctx(pdesc); 124 struct shash_desc *desc = shash_desc_ctx(pdesc);
132 125
133 desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
134
135 return crypto_shash_final(desc, out) ?: 126 return crypto_shash_final(desc, out) ?:
136 crypto_shash_import(desc, opad) ?: 127 crypto_shash_import(desc, opad) ?:
137 crypto_shash_finup(desc, out, ds, out); 128 crypto_shash_finup(desc, out, ds, out);
@@ -147,8 +138,6 @@ static int hmac_finup(struct shash_desc *pdesc, const u8 *data,
147 char *opad = crypto_shash_ctx_aligned(parent) + ss; 138 char *opad = crypto_shash_ctx_aligned(parent) + ss;
148 struct shash_desc *desc = shash_desc_ctx(pdesc); 139 struct shash_desc *desc = shash_desc_ctx(pdesc);
149 140
150 desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
151
152 return crypto_shash_finup(desc, data, nbytes, out) ?: 141 return crypto_shash_finup(desc, data, nbytes, out) ?:
153 crypto_shash_import(desc, opad) ?: 142 crypto_shash_import(desc, opad) ?:
154 crypto_shash_finup(desc, out, ds, out); 143 crypto_shash_finup(desc, out, ds, out);
@@ -268,7 +257,7 @@ static void __exit hmac_module_exit(void)
268 crypto_unregister_template(&hmac_tmpl); 257 crypto_unregister_template(&hmac_tmpl);
269} 258}
270 259
271module_init(hmac_module_init); 260subsys_initcall(hmac_module_init);
272module_exit(hmac_module_exit); 261module_exit(hmac_module_exit);
273 262
274MODULE_LICENSE("GPL"); 263MODULE_LICENSE("GPL");
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index 787dccca3715..6ea1a270b8dc 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -198,7 +198,7 @@ static void __exit jent_mod_exit(void)
198 crypto_unregister_rng(&jent_alg); 198 crypto_unregister_rng(&jent_alg);
199} 199}
200 200
201module_init(jent_mod_init); 201subsys_initcall(jent_mod_init);
202module_exit(jent_mod_exit); 202module_exit(jent_mod_exit);
203 203
204MODULE_LICENSE("Dual BSD/GPL"); 204MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
index a5cfe610d8f4..a155c88105ea 100644
--- a/crypto/keywrap.c
+++ b/crypto/keywrap.c
@@ -310,7 +310,7 @@ static void __exit crypto_kw_exit(void)
310 crypto_unregister_template(&crypto_kw_tmpl); 310 crypto_unregister_template(&crypto_kw_tmpl);
311} 311}
312 312
313module_init(crypto_kw_init); 313subsys_initcall(crypto_kw_init);
314module_exit(crypto_kw_exit); 314module_exit(crypto_kw_exit);
315 315
316MODULE_LICENSE("Dual BSD/GPL"); 316MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/khazad.c b/crypto/khazad.c
index 873eb5ded6d7..b50aa8a3ab4c 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -875,7 +875,7 @@ static void __exit khazad_mod_fini(void)
875} 875}
876 876
877 877
878module_init(khazad_mod_init); 878subsys_initcall(khazad_mod_init);
879module_exit(khazad_mod_fini); 879module_exit(khazad_mod_fini);
880 880
881MODULE_LICENSE("GPL"); 881MODULE_LICENSE("GPL");
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 08a0e458bc3e..fa302f3f161e 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -162,8 +162,10 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass)
162 } 162 }
163 163
164 err = skcipher_walk_virt(&w, req, false); 164 err = skcipher_walk_virt(&w, req, false);
165 iv = (__be32 *)w.iv; 165 if (err)
166 return err;
166 167
168 iv = (__be32 *)w.iv;
167 counter[0] = be32_to_cpu(iv[3]); 169 counter[0] = be32_to_cpu(iv[3]);
168 counter[1] = be32_to_cpu(iv[2]); 170 counter[1] = be32_to_cpu(iv[2]);
169 counter[2] = be32_to_cpu(iv[1]); 171 counter[2] = be32_to_cpu(iv[1]);
@@ -435,7 +437,7 @@ static void __exit crypto_module_exit(void)
435 crypto_unregister_template(&crypto_tmpl); 437 crypto_unregister_template(&crypto_tmpl);
436} 438}
437 439
438module_init(crypto_module_init); 440subsys_initcall(crypto_module_init);
439module_exit(crypto_module_exit); 441module_exit(crypto_module_exit);
440 442
441MODULE_LICENSE("GPL"); 443MODULE_LICENSE("GPL");
diff --git a/crypto/lz4.c b/crypto/lz4.c
index c160dfdbf2e0..1e35134d0a98 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -164,7 +164,7 @@ static void __exit lz4_mod_fini(void)
164 crypto_unregister_scomp(&scomp); 164 crypto_unregister_scomp(&scomp);
165} 165}
166 166
167module_init(lz4_mod_init); 167subsys_initcall(lz4_mod_init);
168module_exit(lz4_mod_fini); 168module_exit(lz4_mod_fini);
169 169
170MODULE_LICENSE("GPL"); 170MODULE_LICENSE("GPL");
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index 583b5e013d7a..4a220b628fe7 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -165,7 +165,7 @@ static void __exit lz4hc_mod_fini(void)
165 crypto_unregister_scomp(&scomp); 165 crypto_unregister_scomp(&scomp);
166} 166}
167 167
168module_init(lz4hc_mod_init); 168subsys_initcall(lz4hc_mod_init);
169module_exit(lz4hc_mod_fini); 169module_exit(lz4hc_mod_fini);
170 170
171MODULE_LICENSE("GPL"); 171MODULE_LICENSE("GPL");
diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c
index ea9c75b1db49..4c82bf18440f 100644
--- a/crypto/lzo-rle.c
+++ b/crypto/lzo-rle.c
@@ -167,7 +167,7 @@ static void __exit lzorle_mod_fini(void)
167 crypto_unregister_scomp(&scomp); 167 crypto_unregister_scomp(&scomp);
168} 168}
169 169
170module_init(lzorle_mod_init); 170subsys_initcall(lzorle_mod_init);
171module_exit(lzorle_mod_fini); 171module_exit(lzorle_mod_fini);
172 172
173MODULE_LICENSE("GPL"); 173MODULE_LICENSE("GPL");
diff --git a/crypto/lzo.c b/crypto/lzo.c
index 218567d717d6..4a6ac8f247d0 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -167,7 +167,7 @@ static void __exit lzo_mod_fini(void)
167 crypto_unregister_scomp(&scomp); 167 crypto_unregister_scomp(&scomp);
168} 168}
169 169
170module_init(lzo_mod_init); 170subsys_initcall(lzo_mod_init);
171module_exit(lzo_mod_fini); 171module_exit(lzo_mod_fini);
172 172
173MODULE_LICENSE("GPL"); 173MODULE_LICENSE("GPL");
diff --git a/crypto/md4.c b/crypto/md4.c
index 9965ec40d9f9..9a1a228a0c69 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -232,7 +232,7 @@ static void __exit md4_mod_fini(void)
232 crypto_unregister_shash(&alg); 232 crypto_unregister_shash(&alg);
233} 233}
234 234
235module_init(md4_mod_init); 235subsys_initcall(md4_mod_init);
236module_exit(md4_mod_fini); 236module_exit(md4_mod_fini);
237 237
238MODULE_LICENSE("GPL"); 238MODULE_LICENSE("GPL");
diff --git a/crypto/md5.c b/crypto/md5.c
index 94dd78144ba3..221c2c0932f8 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -244,7 +244,7 @@ static void __exit md5_mod_fini(void)
244 crypto_unregister_shash(&alg); 244 crypto_unregister_shash(&alg);
245} 245}
246 246
247module_init(md5_mod_init); 247subsys_initcall(md5_mod_init);
248module_exit(md5_mod_fini); 248module_exit(md5_mod_fini);
249 249
250MODULE_LICENSE("GPL"); 250MODULE_LICENSE("GPL");
diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
index 46195e0d0f4d..538ae7933795 100644
--- a/crypto/michael_mic.c
+++ b/crypto/michael_mic.c
@@ -178,7 +178,7 @@ static void __exit michael_mic_exit(void)
178} 178}
179 179
180 180
181module_init(michael_mic_init); 181subsys_initcall(michael_mic_init);
182module_exit(michael_mic_exit); 182module_exit(michael_mic_exit);
183 183
184MODULE_LICENSE("GPL v2"); 184MODULE_LICENSE("GPL v2");
diff --git a/crypto/morus1280.c b/crypto/morus1280.c
index 0747732d5b78..f8734c6576af 100644
--- a/crypto/morus1280.c
+++ b/crypto/morus1280.c
@@ -532,7 +532,7 @@ static void __exit crypto_morus1280_module_exit(void)
532 crypto_unregister_aead(&crypto_morus1280_alg); 532 crypto_unregister_aead(&crypto_morus1280_alg);
533} 533}
534 534
535module_init(crypto_morus1280_module_init); 535subsys_initcall(crypto_morus1280_module_init);
536module_exit(crypto_morus1280_module_exit); 536module_exit(crypto_morus1280_module_exit);
537 537
538MODULE_LICENSE("GPL"); 538MODULE_LICENSE("GPL");
diff --git a/crypto/morus640.c b/crypto/morus640.c
index 1617a1eb8be1..ae5aa9482cb4 100644
--- a/crypto/morus640.c
+++ b/crypto/morus640.c
@@ -523,7 +523,7 @@ static void __exit crypto_morus640_module_exit(void)
523 crypto_unregister_aead(&crypto_morus640_alg); 523 crypto_unregister_aead(&crypto_morus640_alg);
524} 524}
525 525
526module_init(crypto_morus640_module_init); 526subsys_initcall(crypto_morus640_module_init);
527module_exit(crypto_morus640_module_exit); 527module_exit(crypto_morus640_module_exit);
528 528
529MODULE_LICENSE("GPL"); 529MODULE_LICENSE("GPL");
diff --git a/crypto/nhpoly1305.c b/crypto/nhpoly1305.c
index ec831a5594d8..9ab4e07cde4d 100644
--- a/crypto/nhpoly1305.c
+++ b/crypto/nhpoly1305.c
@@ -244,7 +244,7 @@ static void __exit nhpoly1305_mod_exit(void)
244 crypto_unregister_shash(&nhpoly1305_alg); 244 crypto_unregister_shash(&nhpoly1305_alg);
245} 245}
246 246
247module_init(nhpoly1305_mod_init); 247subsys_initcall(nhpoly1305_mod_init);
248module_exit(nhpoly1305_mod_exit); 248module_exit(nhpoly1305_mod_exit);
249 249
250MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function"); 250MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function");
diff --git a/crypto/ofb.c b/crypto/ofb.c
index 34b6e1f426f7..133ff4c7f2c6 100644
--- a/crypto/ofb.c
+++ b/crypto/ofb.c
@@ -95,7 +95,7 @@ static void __exit crypto_ofb_module_exit(void)
95 crypto_unregister_template(&crypto_ofb_tmpl); 95 crypto_unregister_template(&crypto_ofb_tmpl);
96} 96}
97 97
98module_init(crypto_ofb_module_init); 98subsys_initcall(crypto_ofb_module_init);
99module_exit(crypto_ofb_module_exit); 99module_exit(crypto_ofb_module_exit);
100 100
101MODULE_LICENSE("GPL"); 101MODULE_LICENSE("GPL");
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index 2fa03fc576fe..31b3ce948474 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -191,7 +191,7 @@ static void __exit crypto_pcbc_module_exit(void)
191 crypto_unregister_template(&crypto_pcbc_tmpl); 191 crypto_unregister_template(&crypto_pcbc_tmpl);
192} 192}
193 193
194module_init(crypto_pcbc_module_init); 194subsys_initcall(crypto_pcbc_module_init);
195module_exit(crypto_pcbc_module_exit); 195module_exit(crypto_pcbc_module_exit);
196 196
197MODULE_LICENSE("GPL"); 197MODULE_LICENSE("GPL");
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index d47cfc47b1b1..0e9ce329fd47 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -512,7 +512,7 @@ static void __exit pcrypt_exit(void)
512 crypto_unregister_template(&pcrypt_tmpl); 512 crypto_unregister_template(&pcrypt_tmpl);
513} 513}
514 514
515module_init(pcrypt_init); 515subsys_initcall(pcrypt_init);
516module_exit(pcrypt_exit); 516module_exit(pcrypt_exit);
517 517
518MODULE_LICENSE("GPL"); 518MODULE_LICENSE("GPL");
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index 2a06874204e8..adc40298c749 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -318,7 +318,7 @@ static void __exit poly1305_mod_exit(void)
318 crypto_unregister_shash(&poly1305_alg); 318 crypto_unregister_shash(&poly1305_alg);
319} 319}
320 320
321module_init(poly1305_mod_init); 321subsys_initcall(poly1305_mod_init);
322module_exit(poly1305_mod_exit); 322module_exit(poly1305_mod_exit);
323 323
324MODULE_LICENSE("GPL"); 324MODULE_LICENSE("GPL");
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
index 5f4472256e27..faf4252c4b85 100644
--- a/crypto/rmd128.c
+++ b/crypto/rmd128.c
@@ -318,7 +318,7 @@ static void __exit rmd128_mod_fini(void)
318 crypto_unregister_shash(&alg); 318 crypto_unregister_shash(&alg);
319} 319}
320 320
321module_init(rmd128_mod_init); 321subsys_initcall(rmd128_mod_init);
322module_exit(rmd128_mod_fini); 322module_exit(rmd128_mod_fini);
323 323
324MODULE_LICENSE("GPL"); 324MODULE_LICENSE("GPL");
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index 737645344d1c..b33309916d4f 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -362,7 +362,7 @@ static void __exit rmd160_mod_fini(void)
362 crypto_unregister_shash(&alg); 362 crypto_unregister_shash(&alg);
363} 363}
364 364
365module_init(rmd160_mod_init); 365subsys_initcall(rmd160_mod_init);
366module_exit(rmd160_mod_fini); 366module_exit(rmd160_mod_fini);
367 367
368MODULE_LICENSE("GPL"); 368MODULE_LICENSE("GPL");
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
index 0e9d30676a01..2a643250c9a5 100644
--- a/crypto/rmd256.c
+++ b/crypto/rmd256.c
@@ -337,7 +337,7 @@ static void __exit rmd256_mod_fini(void)
337 crypto_unregister_shash(&alg); 337 crypto_unregister_shash(&alg);
338} 338}
339 339
340module_init(rmd256_mod_init); 340subsys_initcall(rmd256_mod_init);
341module_exit(rmd256_mod_fini); 341module_exit(rmd256_mod_fini);
342 342
343MODULE_LICENSE("GPL"); 343MODULE_LICENSE("GPL");
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
index 3ae1df5bb48c..2f062574fc8c 100644
--- a/crypto/rmd320.c
+++ b/crypto/rmd320.c
@@ -386,7 +386,7 @@ static void __exit rmd320_mod_fini(void)
386 crypto_unregister_shash(&alg); 386 crypto_unregister_shash(&alg);
387} 387}
388 388
389module_init(rmd320_mod_init); 389subsys_initcall(rmd320_mod_init);
390module_exit(rmd320_mod_fini); 390module_exit(rmd320_mod_fini);
391 391
392MODULE_LICENSE("GPL"); 392MODULE_LICENSE("GPL");
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 0a6680ca8cb6..29c336068dc0 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -429,7 +429,7 @@ static int pkcs1pad_sign(struct akcipher_request *req)
429 akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg, 429 akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
430 req->dst, ctx->key_size - 1, req->dst_len); 430 req->dst, ctx->key_size - 1, req->dst_len);
431 431
432 err = crypto_akcipher_sign(&req_ctx->child_req); 432 err = crypto_akcipher_decrypt(&req_ctx->child_req);
433 if (err != -EINPROGRESS && err != -EBUSY) 433 if (err != -EINPROGRESS && err != -EBUSY)
434 return pkcs1pad_encrypt_sign_complete(req, err); 434 return pkcs1pad_encrypt_sign_complete(req, err);
435 435
@@ -488,14 +488,21 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
488 488
489 err = 0; 489 err = 0;
490 490
491 if (req->dst_len < dst_len - pos) 491 if (req->dst_len != dst_len - pos) {
492 err = -EOVERFLOW; 492 err = -EKEYREJECTED;
493 req->dst_len = dst_len - pos; 493 req->dst_len = dst_len - pos;
494 494 goto done;
495 if (!err) 495 }
496 sg_copy_from_buffer(req->dst, 496 /* Extract appended digest. */
497 sg_nents_for_len(req->dst, req->dst_len), 497 sg_pcopy_to_buffer(req->src,
498 out_buf + pos, req->dst_len); 498 sg_nents_for_len(req->src,
499 req->src_len + req->dst_len),
500 req_ctx->out_buf + ctx->key_size,
501 req->dst_len, ctx->key_size);
502 /* Do the actual verification step. */
503 if (memcmp(req_ctx->out_buf + ctx->key_size, out_buf + pos,
504 req->dst_len) != 0)
505 err = -EKEYREJECTED;
499done: 506done:
500 kzfree(req_ctx->out_buf); 507 kzfree(req_ctx->out_buf);
501 508
@@ -532,10 +539,12 @@ static int pkcs1pad_verify(struct akcipher_request *req)
532 struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); 539 struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
533 int err; 540 int err;
534 541
535 if (!ctx->key_size || req->src_len < ctx->key_size) 542 if (WARN_ON(req->dst) ||
543 WARN_ON(!req->dst_len) ||
544 !ctx->key_size || req->src_len < ctx->key_size)
536 return -EINVAL; 545 return -EINVAL;
537 546
538 req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL); 547 req_ctx->out_buf = kmalloc(ctx->key_size + req->dst_len, GFP_KERNEL);
539 if (!req_ctx->out_buf) 548 if (!req_ctx->out_buf)
540 return -ENOMEM; 549 return -ENOMEM;
541 550
@@ -551,7 +560,7 @@ static int pkcs1pad_verify(struct akcipher_request *req)
551 req_ctx->out_sg, req->src_len, 560 req_ctx->out_sg, req->src_len,
552 ctx->key_size); 561 ctx->key_size);
553 562
554 err = crypto_akcipher_verify(&req_ctx->child_req); 563 err = crypto_akcipher_encrypt(&req_ctx->child_req);
555 if (err != -EINPROGRESS && err != -EBUSY) 564 if (err != -EINPROGRESS && err != -EBUSY)
556 return pkcs1pad_verify_complete(req, err); 565 return pkcs1pad_verify_complete(req, err);
557 566
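Net effect of this hunk together with the rsa.c hunk below: the raw RSA sign/verify entry points are removed, so pkcs1pad drives the primitives directly. Verification now runs the public-key operation RSAVP1,

    m = s^e \bmod n,

strips the PKCS#1 v1.5 padding from m, and memcmp()s the recovered digest against the caller-supplied digest appended after the signature in req->src; any mismatch returns -EKEYREJECTED.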
diff --git a/crypto/rsa.c b/crypto/rsa.c
index 4167980c243d..dcbb03431778 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -50,34 +50,6 @@ static int _rsa_dec(const struct rsa_mpi_key *key, MPI m, MPI c)
50 return mpi_powm(m, c, key->d, key->n); 50 return mpi_powm(m, c, key->d, key->n);
51} 51}
52 52
53/*
54 * RSASP1 function [RFC3447 sec 5.2.1]
55 * s = m^d mod n
56 */
57static int _rsa_sign(const struct rsa_mpi_key *key, MPI s, MPI m)
58{
59 /* (1) Validate 0 <= m < n */
60 if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0)
61 return -EINVAL;
62
63 /* (2) s = m^d mod n */
64 return mpi_powm(s, m, key->d, key->n);
65}
66
67/*
68 * RSAVP1 function [RFC3447 sec 5.2.2]
69 * m = s^e mod n;
70 */
71static int _rsa_verify(const struct rsa_mpi_key *key, MPI m, MPI s)
72{
73 /* (1) Validate 0 <= s < n */
74 if (mpi_cmp_ui(s, 0) < 0 || mpi_cmp(s, key->n) >= 0)
75 return -EINVAL;
76
77 /* (2) m = s^e mod n */
78 return mpi_powm(m, s, key->e, key->n);
79}
80
81static inline struct rsa_mpi_key *rsa_get_key(struct crypto_akcipher *tfm) 53static inline struct rsa_mpi_key *rsa_get_key(struct crypto_akcipher *tfm)
82{ 54{
83 return akcipher_tfm_ctx(tfm); 55 return akcipher_tfm_ctx(tfm);
@@ -160,85 +132,6 @@ err_free_m:
160 return ret; 132 return ret;
161} 133}
162 134
163static int rsa_sign(struct akcipher_request *req)
164{
165 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
166 const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
167 MPI m, s = mpi_alloc(0);
168 int ret = 0;
169 int sign;
170
171 if (!s)
172 return -ENOMEM;
173
174 if (unlikely(!pkey->n || !pkey->d)) {
175 ret = -EINVAL;
176 goto err_free_s;
177 }
178
179 ret = -ENOMEM;
180 m = mpi_read_raw_from_sgl(req->src, req->src_len);
181 if (!m)
182 goto err_free_s;
183
184 ret = _rsa_sign(pkey, s, m);
185 if (ret)
186 goto err_free_m;
187
188 ret = mpi_write_to_sgl(s, req->dst, req->dst_len, &sign);
189 if (ret)
190 goto err_free_m;
191
192 if (sign < 0)
193 ret = -EBADMSG;
194
195err_free_m:
196 mpi_free(m);
197err_free_s:
198 mpi_free(s);
199 return ret;
200}
201
202static int rsa_verify(struct akcipher_request *req)
203{
204 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
205 const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
206 MPI s, m = mpi_alloc(0);
207 int ret = 0;
208 int sign;
209
210 if (!m)
211 return -ENOMEM;
212
213 if (unlikely(!pkey->n || !pkey->e)) {
214 ret = -EINVAL;
215 goto err_free_m;
216 }
217
218 s = mpi_read_raw_from_sgl(req->src, req->src_len);
219 if (!s) {
220 ret = -ENOMEM;
221 goto err_free_m;
222 }
223
224 ret = _rsa_verify(pkey, m, s);
225 if (ret)
226 goto err_free_s;
227
228 ret = mpi_write_to_sgl(m, req->dst, req->dst_len, &sign);
229 if (ret)
230 goto err_free_s;
231
232 if (sign < 0)
233 ret = -EBADMSG;
234
235err_free_s:
236 mpi_free(s);
237err_free_m:
238 mpi_free(m);
239 return ret;
240}
241
242static void rsa_free_mpi_key(struct rsa_mpi_key *key) 135static void rsa_free_mpi_key(struct rsa_mpi_key *key)
243{ 136{
244 mpi_free(key->d); 137 mpi_free(key->d);
@@ -353,8 +246,6 @@ static void rsa_exit_tfm(struct crypto_akcipher *tfm)
353static struct akcipher_alg rsa = { 246static struct akcipher_alg rsa = {
354 .encrypt = rsa_enc, 247 .encrypt = rsa_enc,
355 .decrypt = rsa_dec, 248 .decrypt = rsa_dec,
356 .sign = rsa_sign,
357 .verify = rsa_verify,
358 .set_priv_key = rsa_set_priv_key, 249 .set_priv_key = rsa_set_priv_key,
359 .set_pub_key = rsa_set_pub_key, 250 .set_pub_key = rsa_set_pub_key,
360 .max_size = rsa_max_size, 251 .max_size = rsa_max_size,
@@ -391,7 +282,7 @@ static void rsa_exit(void)
391 crypto_unregister_akcipher(&rsa); 282 crypto_unregister_akcipher(&rsa);
392} 283}
393 284
394module_init(rsa_init); 285subsys_initcall(rsa_init);
395module_exit(rsa_exit); 286module_exit(rsa_exit);
396MODULE_ALIAS_CRYPTO("rsa"); 287MODULE_ALIAS_CRYPTO("rsa");
397MODULE_LICENSE("GPL"); 288MODULE_LICENSE("GPL");
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index 00fce32ae17a..c81a44404086 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -86,18 +86,17 @@ static void salsa20_docrypt(u32 *state, u8 *dst, const u8 *src,
86{ 86{
87 __le32 stream[SALSA20_BLOCK_SIZE / sizeof(__le32)]; 87 __le32 stream[SALSA20_BLOCK_SIZE / sizeof(__le32)];
88 88
89 if (dst != src)
90 memcpy(dst, src, bytes);
91
92 while (bytes >= SALSA20_BLOCK_SIZE) { 89 while (bytes >= SALSA20_BLOCK_SIZE) {
93 salsa20_block(state, stream); 90 salsa20_block(state, stream);
94 crypto_xor(dst, (const u8 *)stream, SALSA20_BLOCK_SIZE); 91 crypto_xor_cpy(dst, src, (const u8 *)stream,
92 SALSA20_BLOCK_SIZE);
95 bytes -= SALSA20_BLOCK_SIZE; 93 bytes -= SALSA20_BLOCK_SIZE;
96 dst += SALSA20_BLOCK_SIZE; 94 dst += SALSA20_BLOCK_SIZE;
95 src += SALSA20_BLOCK_SIZE;
97 } 96 }
98 if (bytes) { 97 if (bytes) {
99 salsa20_block(state, stream); 98 salsa20_block(state, stream);
100 crypto_xor(dst, (const u8 *)stream, bytes); 99 crypto_xor_cpy(dst, src, (const u8 *)stream, bytes);
101 } 100 }
102} 101}
103 102
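crypto_xor_cpy() fuses the removed memcpy() + in-place crypto_xor() pair into a single pass over the data. A minimal userspace equivalent (xor_cpy() is a made-up name):

#include <stddef.h>
#include <stdint.h>

/* dst[i] = src[i] ^ keystream[i], without first copying src into dst */
static void xor_cpy(uint8_t *dst, const uint8_t *src,
		    const uint8_t *keystream, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[i] = src[i] ^ keystream[i];
}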
@@ -161,7 +160,7 @@ static int salsa20_crypt(struct skcipher_request *req)
161 160
162 err = skcipher_walk_virt(&walk, req, false); 161 err = skcipher_walk_virt(&walk, req, false);
163 162
164 salsa20_init(state, ctx, walk.iv); 163 salsa20_init(state, ctx, req->iv);
165 164
166 while (walk.nbytes > 0) { 165 while (walk.nbytes > 0) {
167 unsigned int nbytes = walk.nbytes; 166 unsigned int nbytes = walk.nbytes;
@@ -204,7 +203,7 @@ static void __exit salsa20_generic_mod_fini(void)
204 crypto_unregister_skcipher(&alg); 203 crypto_unregister_skcipher(&alg);
205} 204}
206 205
207module_init(salsa20_generic_mod_init); 206subsys_initcall(salsa20_generic_mod_init);
208module_exit(salsa20_generic_mod_fini); 207module_exit(salsa20_generic_mod_fini);
209 208
210MODULE_LICENSE("GPL"); 209MODULE_LICENSE("GPL");
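[Two independent fixes meet in this file. First, the up-front memcpy() of src to dst is gone: crypto_xor_cpy() (a helper from include/crypto/algapi.h) writes dst = src ^ keystream in one pass, so the walk now advances src and dst together. Second, salsa20_init() reads the IV from req->iv rather than walk.iv, because if skcipher_walk_virt() fails, walk.iv can point at already-freed memory. Conceptually, the helper behaves like this byte-at-a-time sketch (the real one works word-at-a-time where alignment allows):

	/* Naive equivalent of crypto_xor_cpy(), for illustration only */
	static inline void xor_cpy_sketch(u8 *dst, const u8 *src,
					  const u8 *keystream, unsigned int len)
	{
		while (len--)
			*dst++ = *src++ ^ *keystream++;
	}
]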
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 6f8305f8c300..712b4c2ea021 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -29,9 +29,17 @@
29#include <crypto/internal/scompress.h> 29#include <crypto/internal/scompress.h>
30#include "internal.h" 30#include "internal.h"
31 31
32struct scomp_scratch {
33 spinlock_t lock;
34 void *src;
35 void *dst;
36};
37
38static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
39 .lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
40};
41
32static const struct crypto_type crypto_scomp_type; 42static const struct crypto_type crypto_scomp_type;
33static void * __percpu *scomp_src_scratches;
34static void * __percpu *scomp_dst_scratches;
35static int scomp_scratch_users; 43static int scomp_scratch_users;
36static DEFINE_MUTEX(scomp_lock); 44static DEFINE_MUTEX(scomp_lock);
37 45
@@ -62,76 +70,53 @@ static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
62 seq_puts(m, "type : scomp\n"); 70 seq_puts(m, "type : scomp\n");
63} 71}
64 72
65static void crypto_scomp_free_scratches(void * __percpu *scratches) 73static void crypto_scomp_free_scratches(void)
66{ 74{
75 struct scomp_scratch *scratch;
67 int i; 76 int i;
68 77
69 if (!scratches) 78 for_each_possible_cpu(i) {
70 return; 79 scratch = per_cpu_ptr(&scomp_scratch, i);
71
72 for_each_possible_cpu(i)
73 vfree(*per_cpu_ptr(scratches, i));
74 80
75 free_percpu(scratches); 81 vfree(scratch->src);
82 vfree(scratch->dst);
83 scratch->src = NULL;
84 scratch->dst = NULL;
85 }
76} 86}
77 87
78static void * __percpu *crypto_scomp_alloc_scratches(void) 88static int crypto_scomp_alloc_scratches(void)
79{ 89{
80 void * __percpu *scratches; 90 struct scomp_scratch *scratch;
81 int i; 91 int i;
82 92
83 scratches = alloc_percpu(void *);
84 if (!scratches)
85 return NULL;
86
87 for_each_possible_cpu(i) { 93 for_each_possible_cpu(i) {
88 void *scratch; 94 void *mem;
89
90 scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
91 if (!scratch)
92 goto error;
93 *per_cpu_ptr(scratches, i) = scratch;
94 }
95
96 return scratches;
97
98error:
99 crypto_scomp_free_scratches(scratches);
100 return NULL;
101}
102 95
103static void crypto_scomp_free_all_scratches(void) 96 scratch = per_cpu_ptr(&scomp_scratch, i);
104{
105 if (!--scomp_scratch_users) {
106 crypto_scomp_free_scratches(scomp_src_scratches);
107 crypto_scomp_free_scratches(scomp_dst_scratches);
108 scomp_src_scratches = NULL;
109 scomp_dst_scratches = NULL;
110 }
111}
112 97
113static int crypto_scomp_alloc_all_scratches(void) 98 mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
114{ 99 if (!mem)
115 if (!scomp_scratch_users++) { 100 goto error;
116 scomp_src_scratches = crypto_scomp_alloc_scratches(); 101 scratch->src = mem;
117 if (!scomp_src_scratches) 102 mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
118 return -ENOMEM; 103 if (!mem)
119 scomp_dst_scratches = crypto_scomp_alloc_scratches(); 104 goto error;
120 if (!scomp_dst_scratches) { 105 scratch->dst = mem;
121 crypto_scomp_free_scratches(scomp_src_scratches);
122 scomp_src_scratches = NULL;
123 return -ENOMEM;
124 }
125 } 106 }
126 return 0; 107 return 0;
108error:
109 crypto_scomp_free_scratches();
110 return -ENOMEM;
127} 111}
128 112
129static int crypto_scomp_init_tfm(struct crypto_tfm *tfm) 113static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
130{ 114{
131 int ret; 115 int ret = 0;
132 116
133 mutex_lock(&scomp_lock); 117 mutex_lock(&scomp_lock);
134 ret = crypto_scomp_alloc_all_scratches(); 118 if (!scomp_scratch_users++)
119 ret = crypto_scomp_alloc_scratches();
135 mutex_unlock(&scomp_lock); 120 mutex_unlock(&scomp_lock);
136 121
137 return ret; 122 return ret;
@@ -143,42 +128,41 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
143 void **tfm_ctx = acomp_tfm_ctx(tfm); 128 void **tfm_ctx = acomp_tfm_ctx(tfm);
144 struct crypto_scomp *scomp = *tfm_ctx; 129 struct crypto_scomp *scomp = *tfm_ctx;
145 void **ctx = acomp_request_ctx(req); 130 void **ctx = acomp_request_ctx(req);
146 const int cpu = get_cpu(); 131 struct scomp_scratch *scratch;
147 u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
148 u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
149 int ret; 132 int ret;
150 133
151 if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) { 134 if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
152 ret = -EINVAL; 135 return -EINVAL;
153 goto out;
154 }
155 136
156 if (req->dst && !req->dlen) { 137 if (req->dst && !req->dlen)
157 ret = -EINVAL; 138 return -EINVAL;
158 goto out;
159 }
160 139
161 if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE) 140 if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
162 req->dlen = SCOMP_SCRATCH_SIZE; 141 req->dlen = SCOMP_SCRATCH_SIZE;
163 142
164 scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0); 143 scratch = raw_cpu_ptr(&scomp_scratch);
144 spin_lock(&scratch->lock);
145
146 scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0);
165 if (dir) 147 if (dir)
166 ret = crypto_scomp_compress(scomp, scratch_src, req->slen, 148 ret = crypto_scomp_compress(scomp, scratch->src, req->slen,
167 scratch_dst, &req->dlen, *ctx); 149 scratch->dst, &req->dlen, *ctx);
168 else 150 else
169 ret = crypto_scomp_decompress(scomp, scratch_src, req->slen, 151 ret = crypto_scomp_decompress(scomp, scratch->src, req->slen,
170 scratch_dst, &req->dlen, *ctx); 152 scratch->dst, &req->dlen, *ctx);
171 if (!ret) { 153 if (!ret) {
172 if (!req->dst) { 154 if (!req->dst) {
173 req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL); 155 req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
174 if (!req->dst) 156 if (!req->dst) {
157 ret = -ENOMEM;
175 goto out; 158 goto out;
159 }
176 } 160 }
177 scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen, 161 scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
178 1); 162 1);
179 } 163 }
180out: 164out:
181 put_cpu(); 165 spin_unlock(&scratch->lock);
182 return ret; 166 return ret;
183} 167}
184 168
@@ -199,7 +183,8 @@ static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
199 crypto_free_scomp(*ctx); 183 crypto_free_scomp(*ctx);
200 184
201 mutex_lock(&scomp_lock); 185 mutex_lock(&scomp_lock);
202 crypto_scomp_free_all_scratches(); 186 if (!--scomp_scratch_users)
187 crypto_scomp_free_scratches();
203 mutex_unlock(&scomp_lock); 188 mutex_unlock(&scomp_lock);
204} 189}
205 190
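[The rework above replaces two independent percpu pointer tables, previously guarded only by get_cpu()/put_cpu(), with one per-CPU struct protected by a spinlock. That ties correctness to an explicit lock rather than to preemption being off: raw_cpu_ptr() may race with a migration, but whichever CPU's scratch gets picked, the lock guarantees exclusive use of it. The resulting access pattern, restated as a minimal sketch of what scomp_acomp_comp_decomp() now does:

	struct scomp_scratch *s = raw_cpu_ptr(&scomp_scratch);	/* no get_cpu() */

	spin_lock(&s->lock);	/* serializes users even across a migration */
	/* copy the request in via s->src, (de)compress into s->dst, copy out */
	spin_unlock(&s->lock);
]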
diff --git a/crypto/seed.c b/crypto/seed.c
index c6ba8438be43..a75ac50fa4fd 100644
--- a/crypto/seed.c
+++ b/crypto/seed.c
@@ -470,7 +470,7 @@ static void __exit seed_fini(void)
470 crypto_unregister_alg(&seed_alg); 470 crypto_unregister_alg(&seed_alg);
471} 471}
472 472
473module_init(seed_init); 473subsys_initcall(seed_init);
474module_exit(seed_fini); 474module_exit(seed_fini);
475 475
476MODULE_DESCRIPTION("SEED Cipher Algorithm"); 476MODULE_DESCRIPTION("SEED Cipher Algorithm");
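[This module_init() -> subsys_initcall() conversion repeats for nearly every generic algorithm in the rest of this pull (seqiv, serpent, the SHA families, sm3, sm4, streebog, tcrypt, tea, ...). The point is ordering: when built in, the generic implementations now register at initcall level 4, ahead of device-level drivers, so that by the time an architecture-optimized implementation registers, the self-tests -- including the new testmgr comparison fuzzing further down -- can find the corresponding "-generic" algorithm to test against. Built as modules, nothing changes: both macros degrade to the same load-time hook. For reference (levels per include/linux/init.h; accel_drv_init is a stand-in name):

	/* Built-in initcall ordering, earliest first (sketch): */
	subsys_initcall(seed_init);	 /* level 4: generic crypto algs now here */
	device_initcall(accel_drv_init); /* level 6: what module_init() expands to
					  * for built-in code; SIMD/HW drivers */
]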
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index ed1b0e9f2436..3f2fad615d26 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -211,7 +211,7 @@ static void __exit seqiv_module_exit(void)
211 crypto_unregister_template(&seqiv_tmpl); 211 crypto_unregister_template(&seqiv_tmpl);
212} 212}
213 213
214module_init(seqiv_module_init); 214subsys_initcall(seqiv_module_init);
215module_exit(seqiv_module_exit); 215module_exit(seqiv_module_exit);
216 216
217MODULE_LICENSE("GPL"); 217MODULE_LICENSE("GPL");
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index 7c3382facc82..ec4ec89ad108 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -664,7 +664,7 @@ static void __exit serpent_mod_fini(void)
664 crypto_unregister_algs(srp_algs, ARRAY_SIZE(srp_algs)); 664 crypto_unregister_algs(srp_algs, ARRAY_SIZE(srp_algs));
665} 665}
666 666
667module_init(serpent_mod_init); 667subsys_initcall(serpent_mod_init);
668module_exit(serpent_mod_fini); 668module_exit(serpent_mod_fini);
669 669
670MODULE_LICENSE("GPL"); 670MODULE_LICENSE("GPL");
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 2af64ef81f40..1b806d4584b2 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -92,7 +92,7 @@ static void __exit sha1_generic_mod_fini(void)
92 crypto_unregister_shash(&alg); 92 crypto_unregister_shash(&alg);
93} 93}
94 94
95module_init(sha1_generic_mod_init); 95subsys_initcall(sha1_generic_mod_init);
96module_exit(sha1_generic_mod_fini); 96module_exit(sha1_generic_mod_fini);
97 97
98MODULE_LICENSE("GPL"); 98MODULE_LICENSE("GPL");
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 1e5ba6649e8d..5844e9a469e8 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -301,7 +301,7 @@ static void __exit sha256_generic_mod_fini(void)
301 crypto_unregister_shashes(sha256_algs, ARRAY_SIZE(sha256_algs)); 301 crypto_unregister_shashes(sha256_algs, ARRAY_SIZE(sha256_algs));
302} 302}
303 303
304module_init(sha256_generic_mod_init); 304subsys_initcall(sha256_generic_mod_init);
305module_exit(sha256_generic_mod_fini); 305module_exit(sha256_generic_mod_fini);
306 306
307MODULE_LICENSE("GPL"); 307MODULE_LICENSE("GPL");
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 7ed98367d4fb..60fd2be609d8 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -294,7 +294,7 @@ static void __exit sha3_generic_mod_fini(void)
294 crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); 294 crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
295} 295}
296 296
297module_init(sha3_generic_mod_init); 297subsys_initcall(sha3_generic_mod_init);
298module_exit(sha3_generic_mod_fini); 298module_exit(sha3_generic_mod_fini);
299 299
300MODULE_LICENSE("GPL"); 300MODULE_LICENSE("GPL");
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 4097cd555eb6..0193ecb8ae10 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -223,7 +223,7 @@ static void __exit sha512_generic_mod_fini(void)
223 crypto_unregister_shashes(sha512_algs, ARRAY_SIZE(sha512_algs)); 223 crypto_unregister_shashes(sha512_algs, ARRAY_SIZE(sha512_algs));
224} 224}
225 225
226module_init(sha512_generic_mod_init); 226subsys_initcall(sha512_generic_mod_init);
227module_exit(sha512_generic_mod_fini); 227module_exit(sha512_generic_mod_fini);
228 228
229MODULE_LICENSE("GPL"); 229MODULE_LICENSE("GPL");
diff --git a/crypto/shash.c b/crypto/shash.c
index 15b369c4745f..e55c1f558bc3 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -238,7 +238,6 @@ static int shash_async_init(struct ahash_request *req)
238 struct shash_desc *desc = ahash_request_ctx(req); 238 struct shash_desc *desc = ahash_request_ctx(req);
239 239
240 desc->tfm = *ctx; 240 desc->tfm = *ctx;
241 desc->flags = req->base.flags;
242 241
243 return crypto_shash_init(desc); 242 return crypto_shash_init(desc);
244} 243}
@@ -293,7 +292,6 @@ static int shash_async_finup(struct ahash_request *req)
293 struct shash_desc *desc = ahash_request_ctx(req); 292 struct shash_desc *desc = ahash_request_ctx(req);
294 293
295 desc->tfm = *ctx; 294 desc->tfm = *ctx;
296 desc->flags = req->base.flags;
297 295
298 return shash_ahash_finup(req, desc); 296 return shash_ahash_finup(req, desc);
299} 297}
@@ -307,14 +305,13 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
307 305
308 if (nbytes && 306 if (nbytes &&
309 (sg = req->src, offset = sg->offset, 307 (sg = req->src, offset = sg->offset,
310 nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) { 308 nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
311 void *data; 309 void *data;
312 310
313 data = kmap_atomic(sg_page(sg)); 311 data = kmap_atomic(sg_page(sg));
314 err = crypto_shash_digest(desc, data + offset, nbytes, 312 err = crypto_shash_digest(desc, data + offset, nbytes,
315 req->result); 313 req->result);
316 kunmap_atomic(data); 314 kunmap_atomic(data);
317 crypto_yield(desc->flags);
318 } else 315 } else
319 err = crypto_shash_init(desc) ?: 316 err = crypto_shash_init(desc) ?:
320 shash_ahash_finup(req, desc); 317 shash_ahash_finup(req, desc);
@@ -329,7 +326,6 @@ static int shash_async_digest(struct ahash_request *req)
329 struct shash_desc *desc = ahash_request_ctx(req); 326 struct shash_desc *desc = ahash_request_ctx(req);
330 327
331 desc->tfm = *ctx; 328 desc->tfm = *ctx;
332 desc->flags = req->base.flags;
333 329
334 return shash_ahash_digest(req, desc); 330 return shash_ahash_digest(req, desc);
335} 331}
@@ -345,7 +341,6 @@ static int shash_async_import(struct ahash_request *req, const void *in)
345 struct shash_desc *desc = ahash_request_ctx(req); 341 struct shash_desc *desc = ahash_request_ctx(req);
346 342
347 desc->tfm = *ctx; 343 desc->tfm = *ctx;
348 desc->flags = req->base.flags;
349 344
350 return crypto_shash_import(desc, in); 345 return crypto_shash_import(desc, in);
351} 346}
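[Two separate cleanups land in shash.c. The desc->flags assignments disappear because shash_desc loses its flags field elsewhere in this series; its only consumer was the crypto_yield(desc->flags) call deleted above. The '<' -> '<=' change in shash_ahash_digest() fixes an off-by-one: the kmap_atomic() fast path is valid whenever the data sits entirely within one page, and the old test needlessly rejected the exact-fit case. A sketch of the boundary condition, assuming sg->length is large enough:

	/* Sketch: when does the single-page digest fast path apply? */
	static bool fits_one_page(struct scatterlist *sg, unsigned int nbytes)
	{
		unsigned int offset = sg->offset;

		/* '<=' (was '<'): data ending exactly at the page boundary,
		 * e.g. offset 0 with nbytes == PAGE_SIZE, still fits. */
		return nbytes && nbytes <= min(sg->length,
					       (unsigned int)PAGE_SIZE - offset);
	}
]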
diff --git a/crypto/simd.c b/crypto/simd.c
index 78e8d037ae2b..3e3b1d1a6b1f 100644
--- a/crypto/simd.c
+++ b/crypto/simd.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> 4 * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
5 * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au> 5 * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
6 * Copyright (c) 2019 Google LLC
6 * 7 *
7 * Based on aesni-intel_glue.c by: 8 * Based on aesni-intel_glue.c by:
8 * Copyright (C) 2008, Intel Corp. 9 * Copyright (C) 2008, Intel Corp.
@@ -20,10 +21,26 @@
20 * 21 *
21 * You should have received a copy of the GNU General Public License 22 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>. 23 * along with this program. If not, see <http://www.gnu.org/licenses/>.
24 */
25
26/*
27 * Shared crypto SIMD helpers. These functions dynamically create and register
28 * an skcipher or AEAD algorithm that wraps another, internal algorithm. The
29 * wrapper ensures that the internal algorithm is only executed in a context
30 * where SIMD instructions are usable, i.e. where may_use_simd() returns true.
31 * If SIMD is already usable, the wrapper directly calls the internal algorithm.
32 * Otherwise it defers execution to a workqueue via cryptd.
23 * 33 *
34 * This is an alternative to the internal algorithm implementing a fallback for
35 * the !may_use_simd() case itself.
36 *
37 * Note that the wrapper algorithm is asynchronous, i.e. it has the
38 * CRYPTO_ALG_ASYNC flag set. Therefore it won't be found by users who
39 * explicitly allocate a synchronous algorithm.
24 */ 40 */
25 41
26#include <crypto/cryptd.h> 42#include <crypto/cryptd.h>
43#include <crypto/internal/aead.h>
27#include <crypto/internal/simd.h> 44#include <crypto/internal/simd.h>
28#include <crypto/internal/skcipher.h> 45#include <crypto/internal/skcipher.h>
29#include <linux/kernel.h> 46#include <linux/kernel.h>
@@ -31,6 +48,8 @@
31#include <linux/preempt.h> 48#include <linux/preempt.h>
32#include <asm/simd.h> 49#include <asm/simd.h>
33 50
51/* skcipher support */
52
34struct simd_skcipher_alg { 53struct simd_skcipher_alg {
35 const char *ialg_name; 54 const char *ialg_name;
36 struct skcipher_alg alg; 55 struct skcipher_alg alg;
@@ -66,7 +85,7 @@ static int simd_skcipher_encrypt(struct skcipher_request *req)
66 subreq = skcipher_request_ctx(req); 85 subreq = skcipher_request_ctx(req);
67 *subreq = *req; 86 *subreq = *req;
68 87
69 if (!may_use_simd() || 88 if (!crypto_simd_usable() ||
70 (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm))) 89 (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
71 child = &ctx->cryptd_tfm->base; 90 child = &ctx->cryptd_tfm->base;
72 else 91 else
@@ -87,7 +106,7 @@ static int simd_skcipher_decrypt(struct skcipher_request *req)
87 subreq = skcipher_request_ctx(req); 106 subreq = skcipher_request_ctx(req);
88 *subreq = *req; 107 *subreq = *req;
89 108
90 if (!may_use_simd() || 109 if (!crypto_simd_usable() ||
91 (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm))) 110 (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
92 child = &ctx->cryptd_tfm->base; 111 child = &ctx->cryptd_tfm->base;
93 else 112 else
@@ -272,4 +291,254 @@ void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
272} 291}
273EXPORT_SYMBOL_GPL(simd_unregister_skciphers); 292EXPORT_SYMBOL_GPL(simd_unregister_skciphers);
274 293
294/* AEAD support */
295
296struct simd_aead_alg {
297 const char *ialg_name;
298 struct aead_alg alg;
299};
300
301struct simd_aead_ctx {
302 struct cryptd_aead *cryptd_tfm;
303};
304
305static int simd_aead_setkey(struct crypto_aead *tfm, const u8 *key,
306 unsigned int key_len)
307{
308 struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
309 struct crypto_aead *child = &ctx->cryptd_tfm->base;
310 int err;
311
312 crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
313 crypto_aead_set_flags(child, crypto_aead_get_flags(tfm) &
314 CRYPTO_TFM_REQ_MASK);
315 err = crypto_aead_setkey(child, key, key_len);
316 crypto_aead_set_flags(tfm, crypto_aead_get_flags(child) &
317 CRYPTO_TFM_RES_MASK);
318 return err;
319}
320
321static int simd_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
322{
323 struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
324 struct crypto_aead *child = &ctx->cryptd_tfm->base;
325
326 return crypto_aead_setauthsize(child, authsize);
327}
328
329static int simd_aead_encrypt(struct aead_request *req)
330{
331 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
332 struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
333 struct aead_request *subreq;
334 struct crypto_aead *child;
335
336 subreq = aead_request_ctx(req);
337 *subreq = *req;
338
339 if (!crypto_simd_usable() ||
340 (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
341 child = &ctx->cryptd_tfm->base;
342 else
343 child = cryptd_aead_child(ctx->cryptd_tfm);
344
345 aead_request_set_tfm(subreq, child);
346
347 return crypto_aead_encrypt(subreq);
348}
349
350static int simd_aead_decrypt(struct aead_request *req)
351{
352 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
353 struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
354 struct aead_request *subreq;
355 struct crypto_aead *child;
356
357 subreq = aead_request_ctx(req);
358 *subreq = *req;
359
360 if (!crypto_simd_usable() ||
361 (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
362 child = &ctx->cryptd_tfm->base;
363 else
364 child = cryptd_aead_child(ctx->cryptd_tfm);
365
366 aead_request_set_tfm(subreq, child);
367
368 return crypto_aead_decrypt(subreq);
369}
370
371static void simd_aead_exit(struct crypto_aead *tfm)
372{
373 struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
374
375 cryptd_free_aead(ctx->cryptd_tfm);
376}
377
378static int simd_aead_init(struct crypto_aead *tfm)
379{
380 struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
381 struct cryptd_aead *cryptd_tfm;
382 struct simd_aead_alg *salg;
383 struct aead_alg *alg;
384 unsigned reqsize;
385
386 alg = crypto_aead_alg(tfm);
387 salg = container_of(alg, struct simd_aead_alg, alg);
388
389 cryptd_tfm = cryptd_alloc_aead(salg->ialg_name, CRYPTO_ALG_INTERNAL,
390 CRYPTO_ALG_INTERNAL);
391 if (IS_ERR(cryptd_tfm))
392 return PTR_ERR(cryptd_tfm);
393
394 ctx->cryptd_tfm = cryptd_tfm;
395
396 reqsize = crypto_aead_reqsize(cryptd_aead_child(cryptd_tfm));
397 reqsize = max(reqsize, crypto_aead_reqsize(&cryptd_tfm->base));
398 reqsize += sizeof(struct aead_request);
399
400 crypto_aead_set_reqsize(tfm, reqsize);
401
402 return 0;
403}
404
405struct simd_aead_alg *simd_aead_create_compat(const char *algname,
406 const char *drvname,
407 const char *basename)
408{
409 struct simd_aead_alg *salg;
410 struct crypto_aead *tfm;
411 struct aead_alg *ialg;
412 struct aead_alg *alg;
413 int err;
414
415 tfm = crypto_alloc_aead(basename, CRYPTO_ALG_INTERNAL,
416 CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
417 if (IS_ERR(tfm))
418 return ERR_CAST(tfm);
419
420 ialg = crypto_aead_alg(tfm);
421
422 salg = kzalloc(sizeof(*salg), GFP_KERNEL);
423 if (!salg) {
424 salg = ERR_PTR(-ENOMEM);
425 goto out_put_tfm;
426 }
427
428 salg->ialg_name = basename;
429 alg = &salg->alg;
430
431 err = -ENAMETOOLONG;
432 if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
433 CRYPTO_MAX_ALG_NAME)
434 goto out_free_salg;
435
436 if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
437 drvname) >= CRYPTO_MAX_ALG_NAME)
438 goto out_free_salg;
439
440 alg->base.cra_flags = CRYPTO_ALG_ASYNC;
441 alg->base.cra_priority = ialg->base.cra_priority;
442 alg->base.cra_blocksize = ialg->base.cra_blocksize;
443 alg->base.cra_alignmask = ialg->base.cra_alignmask;
444 alg->base.cra_module = ialg->base.cra_module;
445 alg->base.cra_ctxsize = sizeof(struct simd_aead_ctx);
446
447 alg->ivsize = ialg->ivsize;
448 alg->maxauthsize = ialg->maxauthsize;
449 alg->chunksize = ialg->chunksize;
450
451 alg->init = simd_aead_init;
452 alg->exit = simd_aead_exit;
453
454 alg->setkey = simd_aead_setkey;
455 alg->setauthsize = simd_aead_setauthsize;
456 alg->encrypt = simd_aead_encrypt;
457 alg->decrypt = simd_aead_decrypt;
458
459 err = crypto_register_aead(alg);
460 if (err)
461 goto out_free_salg;
462
463out_put_tfm:
464 crypto_free_aead(tfm);
465 return salg;
466
467out_free_salg:
468 kfree(salg);
469 salg = ERR_PTR(err);
470 goto out_put_tfm;
471}
472EXPORT_SYMBOL_GPL(simd_aead_create_compat);
473
474struct simd_aead_alg *simd_aead_create(const char *algname,
475 const char *basename)
476{
477 char drvname[CRYPTO_MAX_ALG_NAME];
478
479 if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
480 CRYPTO_MAX_ALG_NAME)
481 return ERR_PTR(-ENAMETOOLONG);
482
483 return simd_aead_create_compat(algname, drvname, basename);
484}
485EXPORT_SYMBOL_GPL(simd_aead_create);
486
487void simd_aead_free(struct simd_aead_alg *salg)
488{
489 crypto_unregister_aead(&salg->alg);
490 kfree(salg);
491}
492EXPORT_SYMBOL_GPL(simd_aead_free);
493
494int simd_register_aeads_compat(struct aead_alg *algs, int count,
495 struct simd_aead_alg **simd_algs)
496{
497 int err;
498 int i;
499 const char *algname;
500 const char *drvname;
501 const char *basename;
502 struct simd_aead_alg *simd;
503
504 err = crypto_register_aeads(algs, count);
505 if (err)
506 return err;
507
508 for (i = 0; i < count; i++) {
509 WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
510 WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
511 algname = algs[i].base.cra_name + 2;
512 drvname = algs[i].base.cra_driver_name + 2;
513 basename = algs[i].base.cra_driver_name;
514 simd = simd_aead_create_compat(algname, drvname, basename);
515 err = PTR_ERR(simd);
516 if (IS_ERR(simd))
517 goto err_unregister;
518 simd_algs[i] = simd;
519 }
520 return 0;
521
522err_unregister:
523 simd_unregister_aeads(algs, count, simd_algs);
524 return err;
525}
526EXPORT_SYMBOL_GPL(simd_register_aeads_compat);
527
528void simd_unregister_aeads(struct aead_alg *algs, int count,
529 struct simd_aead_alg **simd_algs)
530{
531 int i;
532
533 crypto_unregister_aeads(algs, count);
534
535 for (i = 0; i < count; i++) {
536 if (simd_algs[i]) {
537 simd_aead_free(simd_algs[i]);
538 simd_algs[i] = NULL;
539 }
540 }
541}
542EXPORT_SYMBOL_GPL(simd_unregister_aeads);
543
275MODULE_LICENSE("GPL"); 544MODULE_LICENSE("GPL");
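[The AEAD half mirrors the existing skcipher support one-for-one, which is what lets the x86 AEAD glue code elsewhere in this pull shed its hand-rolled cryptd plumbing. A registration sketch for a hypothetical arch module -- the algorithm definition and names here are illustrative; only the helper calls come from the code above:

	/* Inner algorithm: "__" prefix + CRYPTO_ALG_INTERNAL keep it hidden
	 * from ordinary users; it assumes SIMD is usable whenever called. */
	static struct aead_alg my_aeads[] = { {
		.base.cra_name		= "__gcm(aes)",
		.base.cra_driver_name	= "__gcm-aes-myarch",
		.base.cra_flags		= CRYPTO_ALG_INTERNAL,
		/* .ivsize, .maxauthsize, .setkey, .encrypt, .decrypt, ... */
	} };
	static struct simd_aead_alg *my_simd_aeads[ARRAY_SIZE(my_aeads)];

	/* Registers the internal algs plus, for each, a public "gcm(aes)"
	 * wrapper that calls the inner alg directly when crypto_simd_usable()
	 * and defers to cryptd otherwise: */
	err = simd_register_aeads_compat(my_aeads, ARRAY_SIZE(my_aeads),
					 my_simd_aeads);
]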
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index bcf13d95f54a..2e66f312e2c4 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -131,8 +131,13 @@ unmap_src:
131 memcpy(walk->dst.virt.addr, walk->page, n); 131 memcpy(walk->dst.virt.addr, walk->page, n);
132 skcipher_unmap_dst(walk); 132 skcipher_unmap_dst(walk);
133 } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) { 133 } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
134 if (WARN_ON(err)) { 134 if (err) {
135 /* unexpected case; didn't process all bytes */ 135 /*
136 * Didn't process all bytes. Either the algorithm is
137 * broken, or this was the last step and it turned out
138 * the message wasn't evenly divisible into blocks but
139 * the algorithm requires it.
140 */
136 err = -EINVAL; 141 err = -EINVAL;
137 goto finish; 142 goto finish;
138 } 143 }
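[The demoted WARN_ON() was reachable from userspace: feeding a block-only algorithm a length that is not a multiple of its block size lands in this branch on the final walk step, which is a malformed request, not a kernel bug. After this change it fails quietly. For instance (a sketch; 17 is just any non-multiple of the AES block size):

	/* cbc(aes) requires whole 16-byte blocks */
	skcipher_request_set_crypt(req, src, dst, 17, iv);
	err = crypto_skcipher_encrypt(req);	/* -EINVAL, no WARN backtrace */
]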
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index c0cf87ae7ef6..e227bcada2a2 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -199,7 +199,7 @@ static void __exit sm3_generic_mod_fini(void)
199 crypto_unregister_shash(&sm3_alg); 199 crypto_unregister_shash(&sm3_alg);
200} 200}
201 201
202module_init(sm3_generic_mod_init); 202subsys_initcall(sm3_generic_mod_init);
203module_exit(sm3_generic_mod_fini); 203module_exit(sm3_generic_mod_fini);
204 204
205MODULE_LICENSE("GPL v2"); 205MODULE_LICENSE("GPL v2");
diff --git a/crypto/sm4_generic.c b/crypto/sm4_generic.c
index c18eebfd5edd..71ffb343709a 100644
--- a/crypto/sm4_generic.c
+++ b/crypto/sm4_generic.c
@@ -237,7 +237,7 @@ static void __exit sm4_fini(void)
237 crypto_unregister_alg(&sm4_alg); 237 crypto_unregister_alg(&sm4_alg);
238} 238}
239 239
240module_init(sm4_init); 240subsys_initcall(sm4_init);
241module_exit(sm4_fini); 241module_exit(sm4_fini);
242 242
243MODULE_DESCRIPTION("SM4 Cipher Algorithm"); 243MODULE_DESCRIPTION("SM4 Cipher Algorithm");
diff --git a/crypto/streebog_generic.c b/crypto/streebog_generic.c
index 5a2eafed9c29..63663c3bab7e 100644
--- a/crypto/streebog_generic.c
+++ b/crypto/streebog_generic.c
@@ -996,7 +996,7 @@ static void streebog_add512(const struct streebog_uint512 *x,
996 996
997static void streebog_g(struct streebog_uint512 *h, 997static void streebog_g(struct streebog_uint512 *h,
998 const struct streebog_uint512 *N, 998 const struct streebog_uint512 *N,
999 const u8 *m) 999 const struct streebog_uint512 *m)
1000{ 1000{
1001 struct streebog_uint512 Ki, data; 1001 struct streebog_uint512 Ki, data;
1002 unsigned int i; 1002 unsigned int i;
@@ -1005,7 +1005,7 @@ static void streebog_g(struct streebog_uint512 *h,
1005 1005
1006 /* Starting E() */ 1006 /* Starting E() */
1007 Ki = data; 1007 Ki = data;
1008 streebog_xlps(&Ki, (const struct streebog_uint512 *)&m[0], &data); 1008 streebog_xlps(&Ki, m, &data);
1009 1009
1010 for (i = 0; i < 11; i++) 1010 for (i = 0; i < 11; i++)
1011 streebog_round(i, &Ki, &data); 1011 streebog_round(i, &Ki, &data);
@@ -1015,16 +1015,19 @@ static void streebog_g(struct streebog_uint512 *h,
1015 /* E() done */ 1015 /* E() done */
1016 1016
1017 streebog_xor(&data, h, &data); 1017 streebog_xor(&data, h, &data);
1018 streebog_xor(&data, (const struct streebog_uint512 *)&m[0], h); 1018 streebog_xor(&data, m, h);
1019} 1019}
1020 1020
1021static void streebog_stage2(struct streebog_state *ctx, const u8 *data) 1021static void streebog_stage2(struct streebog_state *ctx, const u8 *data)
1022{ 1022{
1023 streebog_g(&ctx->h, &ctx->N, data); 1023 struct streebog_uint512 m;
1024
1025 memcpy(&m, data, sizeof(m));
1026
1027 streebog_g(&ctx->h, &ctx->N, &m);
1024 1028
1025 streebog_add512(&ctx->N, &buffer512, &ctx->N); 1029 streebog_add512(&ctx->N, &buffer512, &ctx->N);
1026 streebog_add512(&ctx->Sigma, (const struct streebog_uint512 *)data, 1030 streebog_add512(&ctx->Sigma, &m, &ctx->Sigma);
1027 &ctx->Sigma);
1028} 1031}
1029 1032
1030static void streebog_stage3(struct streebog_state *ctx) 1033static void streebog_stage3(struct streebog_state *ctx)
@@ -1034,13 +1037,11 @@ static void streebog_stage3(struct streebog_state *ctx)
1034 buf.qword[0] = cpu_to_le64(ctx->fillsize << 3); 1037 buf.qword[0] = cpu_to_le64(ctx->fillsize << 3);
1035 streebog_pad(ctx); 1038 streebog_pad(ctx);
1036 1039
1037 streebog_g(&ctx->h, &ctx->N, (const u8 *)&ctx->buffer); 1040 streebog_g(&ctx->h, &ctx->N, &ctx->m);
1038 streebog_add512(&ctx->N, &buf, &ctx->N); 1041 streebog_add512(&ctx->N, &buf, &ctx->N);
1039 streebog_add512(&ctx->Sigma, 1042 streebog_add512(&ctx->Sigma, &ctx->m, &ctx->Sigma);
1040 (const struct streebog_uint512 *)&ctx->buffer[0], 1043 streebog_g(&ctx->h, &buffer0, &ctx->N);
1041 &ctx->Sigma); 1044 streebog_g(&ctx->h, &buffer0, &ctx->Sigma);
1042 streebog_g(&ctx->h, &buffer0, (const u8 *)&ctx->N);
1043 streebog_g(&ctx->h, &buffer0, (const u8 *)&ctx->Sigma);
1044 memcpy(&ctx->hash, &ctx->h, sizeof(struct streebog_uint512)); 1045 memcpy(&ctx->hash, &ctx->h, sizeof(struct streebog_uint512));
1045} 1046}
1046 1047
@@ -1127,7 +1128,7 @@ static void __exit streebog_mod_fini(void)
1127 crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); 1128 crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
1128} 1129}
1129 1130
1130module_init(streebog_mod_init); 1131subsys_initcall(streebog_mod_init);
1131module_exit(streebog_mod_fini); 1132module_exit(streebog_mod_fini);
1132 1133
1133MODULE_LICENSE("GPL"); 1134MODULE_LICENSE("GPL");
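[The streebog changes are an unaligned-access fix: the old code cast the raw u8 message pointer to struct streebog_uint512 and did 64-bit loads through it, which is undefined on strict-alignment architectures whenever the buffer isn't 8-byte aligned. Copying into an aligned stack variable first costs a memcpy but is safe everywhere. In miniature (qword is the struct's actual field; 'data' stands for arbitrarily aligned scatterlist memory):

	const struct streebog_uint512 *bad = (const void *)data;
	u64 v = le64_to_cpu(bad->qword[0]);	/* 8-byte load at data's alignment:
						 * may trap on e.g. sparc */

	struct streebog_uint512 m;
	memcpy(&m, data, sizeof(m));		/* byte-wise: alignment-safe */
	v = le64_to_cpu(m.qword[0]);		/* aligned load from the copy */
]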
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 1ea2d5007ff5..798253f05203 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -3053,7 +3053,7 @@ err_free_tv:
3053 */ 3053 */
3054static void __exit tcrypt_mod_fini(void) { } 3054static void __exit tcrypt_mod_fini(void) { }
3055 3055
3056module_init(tcrypt_mod_init); 3056subsys_initcall(tcrypt_mod_init);
3057module_exit(tcrypt_mod_fini); 3057module_exit(tcrypt_mod_fini);
3058 3058
3059module_param(alg, charp, 0); 3059module_param(alg, charp, 0);
diff --git a/crypto/tea.c b/crypto/tea.c
index b70b441c7d1e..786b589e1399 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -274,7 +274,7 @@ MODULE_ALIAS_CRYPTO("tea");
274MODULE_ALIAS_CRYPTO("xtea"); 274MODULE_ALIAS_CRYPTO("xtea");
275MODULE_ALIAS_CRYPTO("xeta"); 275MODULE_ALIAS_CRYPTO("xeta");
276 276
277module_init(tea_mod_init); 277subsys_initcall(tea_mod_init);
278module_exit(tea_mod_fini); 278module_exit(tea_mod_fini);
279 279
280MODULE_LICENSE("GPL"); 280MODULE_LICENSE("GPL");
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 8386038d67c7..c9e67c2bd725 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -37,6 +37,7 @@
37#include <crypto/akcipher.h> 37#include <crypto/akcipher.h>
38#include <crypto/kpp.h> 38#include <crypto/kpp.h>
39#include <crypto/acompress.h> 39#include <crypto/acompress.h>
40#include <crypto/internal/simd.h>
40 41
41#include "internal.h" 42#include "internal.h"
42 43
@@ -44,6 +45,9 @@ static bool notests;
44module_param(notests, bool, 0644); 45module_param(notests, bool, 0644);
45MODULE_PARM_DESC(notests, "disable crypto self-tests"); 46MODULE_PARM_DESC(notests, "disable crypto self-tests");
46 47
48static bool panic_on_fail;
49module_param(panic_on_fail, bool, 0444);
50
47#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS 51#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
48static bool noextratests; 52static bool noextratests;
49module_param(noextratests, bool, 0644); 53module_param(noextratests, bool, 0644);
@@ -52,6 +56,9 @@ MODULE_PARM_DESC(noextratests, "disable expensive crypto self-tests");
52static unsigned int fuzz_iterations = 100; 56static unsigned int fuzz_iterations = 100;
53module_param(fuzz_iterations, uint, 0644); 57module_param(fuzz_iterations, uint, 0644);
54MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations"); 58MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations");
59
60DEFINE_PER_CPU(bool, crypto_simd_disabled_for_test);
61EXPORT_PER_CPU_SYMBOL_GPL(crypto_simd_disabled_for_test);
55#endif 62#endif
56 63
57#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS 64#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
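[Two new testing knobs appear here. panic_on_fail (mode 0444, so boot-time only) escalates any self-test failure into a panic, for rigs where a broken primitive should halt the machine rather than scroll past in dmesg. The per-CPU crypto_simd_disabled_for_test flag is the hook behind the may_use_simd() -> crypto_simd_usable() conversions throughout this pull: testmgr can flip it to force an implementation down its no-SIMD fallback path. The helper presumably reduces to something like this (assumed shape; the real definition lives in include/crypto/internal/simd.h):

	static inline bool crypto_simd_usable(void)
	{
		return may_use_simd() &&
		       !this_cpu_read(crypto_simd_disabled_for_test);
	}
]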
@@ -121,6 +128,7 @@ struct kpp_test_suite {
121 128
122struct alg_test_desc { 129struct alg_test_desc {
123 const char *alg; 130 const char *alg;
131 const char *generic_driver;
124 int (*test)(const struct alg_test_desc *desc, const char *driver, 132 int (*test)(const struct alg_test_desc *desc, const char *driver,
125 u32 type, u32 mask); 133 u32 type, u32 mask);
126 int fips_allowed; /* set if alg is allowed in fips mode */ 134 int fips_allowed; /* set if alg is allowed in fips mode */
@@ -230,12 +238,14 @@ enum finalization_type {
230 * @offset 238 * @offset
231 * @flush_type: for hashes, whether an update() should be done now vs. 239 * @flush_type: for hashes, whether an update() should be done now vs.
232 * continuing to accumulate data 240 * continuing to accumulate data
241 * @nosimd: if doing the pending update(), do it with SIMD disabled?
233 */ 242 */
234struct test_sg_division { 243struct test_sg_division {
235 unsigned int proportion_of_total; 244 unsigned int proportion_of_total;
236 unsigned int offset; 245 unsigned int offset;
237 bool offset_relative_to_alignmask; 246 bool offset_relative_to_alignmask;
238 enum flush_type flush_type; 247 enum flush_type flush_type;
248 bool nosimd;
239}; 249};
240 250
241/** 251/**
@@ -255,6 +265,7 @@ struct test_sg_division {
255 * @iv_offset_relative_to_alignmask: if true, add the algorithm's alignmask to 265 * @iv_offset_relative_to_alignmask: if true, add the algorithm's alignmask to
256 * the @iv_offset 266 * the @iv_offset
257 * @finalization_type: what finalization function to use for hashes 267 * @finalization_type: what finalization function to use for hashes
268 * @nosimd: execute with SIMD disabled? Requires !CRYPTO_TFM_REQ_MAY_SLEEP.
258 */ 269 */
259struct testvec_config { 270struct testvec_config {
260 const char *name; 271 const char *name;
@@ -265,6 +276,7 @@ struct testvec_config {
265 unsigned int iv_offset; 276 unsigned int iv_offset;
266 bool iv_offset_relative_to_alignmask; 277 bool iv_offset_relative_to_alignmask;
267 enum finalization_type finalization_type; 278 enum finalization_type finalization_type;
279 bool nosimd;
268}; 280};
269 281
270#define TESTVEC_CONFIG_NAMELEN 192 282#define TESTVEC_CONFIG_NAMELEN 192
@@ -416,8 +428,11 @@ static unsigned int count_test_sg_divisions(const struct test_sg_division *divs)
416 return ndivs; 428 return ndivs;
417} 429}
418 430
431#define SGDIVS_HAVE_FLUSHES BIT(0)
432#define SGDIVS_HAVE_NOSIMD BIT(1)
433
419static bool valid_sg_divisions(const struct test_sg_division *divs, 434static bool valid_sg_divisions(const struct test_sg_division *divs,
420 unsigned int count, bool *any_flushes_ret) 435 unsigned int count, int *flags_ret)
421{ 436{
422 unsigned int total = 0; 437 unsigned int total = 0;
423 unsigned int i; 438 unsigned int i;
@@ -428,7 +443,9 @@ static bool valid_sg_divisions(const struct test_sg_division *divs,
428 return false; 443 return false;
429 total += divs[i].proportion_of_total; 444 total += divs[i].proportion_of_total;
430 if (divs[i].flush_type != FLUSH_TYPE_NONE) 445 if (divs[i].flush_type != FLUSH_TYPE_NONE)
431 *any_flushes_ret = true; 446 *flags_ret |= SGDIVS_HAVE_FLUSHES;
447 if (divs[i].nosimd)
448 *flags_ret |= SGDIVS_HAVE_NOSIMD;
432 } 449 }
433 return total == TEST_SG_TOTAL && 450 return total == TEST_SG_TOTAL &&
434 memchr_inv(&divs[i], 0, (count - i) * sizeof(divs[0])) == NULL; 451 memchr_inv(&divs[i], 0, (count - i) * sizeof(divs[0])) == NULL;
@@ -441,19 +458,18 @@ static bool valid_sg_divisions(const struct test_sg_division *divs,
441 */ 458 */
442static bool valid_testvec_config(const struct testvec_config *cfg) 459static bool valid_testvec_config(const struct testvec_config *cfg)
443{ 460{
444 bool any_flushes = false; 461 int flags = 0;
445 462
446 if (cfg->name == NULL) 463 if (cfg->name == NULL)
447 return false; 464 return false;
448 465
449 if (!valid_sg_divisions(cfg->src_divs, ARRAY_SIZE(cfg->src_divs), 466 if (!valid_sg_divisions(cfg->src_divs, ARRAY_SIZE(cfg->src_divs),
450 &any_flushes)) 467 &flags))
451 return false; 468 return false;
452 469
453 if (cfg->dst_divs[0].proportion_of_total) { 470 if (cfg->dst_divs[0].proportion_of_total) {
454 if (!valid_sg_divisions(cfg->dst_divs, 471 if (!valid_sg_divisions(cfg->dst_divs,
455 ARRAY_SIZE(cfg->dst_divs), 472 ARRAY_SIZE(cfg->dst_divs), &flags))
456 &any_flushes))
457 return false; 473 return false;
458 } else { 474 } else {
459 if (memchr_inv(cfg->dst_divs, 0, sizeof(cfg->dst_divs))) 475 if (memchr_inv(cfg->dst_divs, 0, sizeof(cfg->dst_divs)))
@@ -466,7 +482,12 @@ static bool valid_testvec_config(const struct testvec_config *cfg)
466 MAX_ALGAPI_ALIGNMASK + 1) 482 MAX_ALGAPI_ALIGNMASK + 1)
467 return false; 483 return false;
468 484
469 if (any_flushes && cfg->finalization_type == FINALIZATION_TYPE_DIGEST) 485 if ((flags & (SGDIVS_HAVE_FLUSHES | SGDIVS_HAVE_NOSIMD)) &&
486 cfg->finalization_type == FINALIZATION_TYPE_DIGEST)
487 return false;
488
489 if ((cfg->nosimd || (flags & SGDIVS_HAVE_NOSIMD)) &&
490 (cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP))
470 return false; 491 return false;
471 492
472 return true; 493 return true;
@@ -725,15 +746,101 @@ static int build_cipher_test_sglists(struct cipher_test_sglists *tsgls,
725} 746}
726 747
727#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS 748#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
749
750/* Generate a random length in range [0, max_len], but prefer smaller values */
751static unsigned int generate_random_length(unsigned int max_len)
752{
753 unsigned int len = prandom_u32() % (max_len + 1);
754
755 switch (prandom_u32() % 4) {
756 case 0:
757 return len % 64;
758 case 1:
759 return len % 256;
760 case 2:
761 return len % 1024;
762 default:
763 return len;
764 }
765}
766
767/* Sometimes make some random changes to the given data buffer */
768static void mutate_buffer(u8 *buf, size_t count)
769{
770 size_t num_flips;
771 size_t i;
772 size_t pos;
773
774 /* Sometimes flip some bits */
775 if (prandom_u32() % 4 == 0) {
776 num_flips = min_t(size_t, 1 << (prandom_u32() % 8), count * 8);
777 for (i = 0; i < num_flips; i++) {
778 pos = prandom_u32() % (count * 8);
779 buf[pos / 8] ^= 1 << (pos % 8);
780 }
781 }
782
783 /* Sometimes flip some bytes */
784 if (prandom_u32() % 4 == 0) {
785 num_flips = min_t(size_t, 1 << (prandom_u32() % 8), count);
786 for (i = 0; i < num_flips; i++)
787 buf[prandom_u32() % count] ^= 0xff;
788 }
789}
790
791/* Randomly generate 'count' bytes, but sometimes make them "interesting" */
792static void generate_random_bytes(u8 *buf, size_t count)
793{
794 u8 b;
795 u8 increment;
796 size_t i;
797
798 if (count == 0)
799 return;
800
801 switch (prandom_u32() % 8) { /* Choose a generation strategy */
802 case 0:
803 case 1:
804 /* All the same byte, plus optional mutations */
805 switch (prandom_u32() % 4) {
806 case 0:
807 b = 0x00;
808 break;
809 case 1:
810 b = 0xff;
811 break;
812 default:
813 b = (u8)prandom_u32();
814 break;
815 }
816 memset(buf, b, count);
817 mutate_buffer(buf, count);
818 break;
819 case 2:
820 /* Ascending or descending bytes, plus optional mutations */
821 increment = (u8)prandom_u32();
822 b = (u8)prandom_u32();
823 for (i = 0; i < count; i++, b += increment)
824 buf[i] = b;
825 mutate_buffer(buf, count);
826 break;
827 default:
828 /* Fully random bytes */
829 for (i = 0; i < count; i++)
830 buf[i] = (u8)prandom_u32();
831 }
832}
833
728static char *generate_random_sgl_divisions(struct test_sg_division *divs, 834static char *generate_random_sgl_divisions(struct test_sg_division *divs,
729 size_t max_divs, char *p, char *end, 835 size_t max_divs, char *p, char *end,
730 bool gen_flushes) 836 bool gen_flushes, u32 req_flags)
731{ 837{
732 struct test_sg_division *div = divs; 838 struct test_sg_division *div = divs;
733 unsigned int remaining = TEST_SG_TOTAL; 839 unsigned int remaining = TEST_SG_TOTAL;
734 840
735 do { 841 do {
736 unsigned int this_len; 842 unsigned int this_len;
843 const char *flushtype_str;
737 844
738 if (div == &divs[max_divs - 1] || prandom_u32() % 2 == 0) 845 if (div == &divs[max_divs - 1] || prandom_u32() % 2 == 0)
739 this_len = remaining; 846 this_len = remaining;
@@ -762,11 +869,31 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
762 } 869 }
763 } 870 }
764 871
872 if (div->flush_type != FLUSH_TYPE_NONE &&
873 !(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
874 prandom_u32() % 2 == 0)
875 div->nosimd = true;
876
877 switch (div->flush_type) {
878 case FLUSH_TYPE_FLUSH:
879 if (div->nosimd)
880 flushtype_str = "<flush,nosimd>";
881 else
882 flushtype_str = "<flush>";
883 break;
884 case FLUSH_TYPE_REIMPORT:
885 if (div->nosimd)
886 flushtype_str = "<reimport,nosimd>";
887 else
888 flushtype_str = "<reimport>";
889 break;
890 default:
891 flushtype_str = "";
892 break;
893 }
894
765 BUILD_BUG_ON(TEST_SG_TOTAL != 10000); /* for "%u.%u%%" */ 895 BUILD_BUG_ON(TEST_SG_TOTAL != 10000); /* for "%u.%u%%" */
766 p += scnprintf(p, end - p, "%s%u.%u%%@%s+%u%s", 896 p += scnprintf(p, end - p, "%s%u.%u%%@%s+%u%s", flushtype_str,
767 div->flush_type == FLUSH_TYPE_NONE ? "" :
768 div->flush_type == FLUSH_TYPE_FLUSH ?
769 "<flush> " : "<reimport> ",
770 this_len / 100, this_len % 100, 897 this_len / 100, this_len % 100,
771 div->offset_relative_to_alignmask ? 898 div->offset_relative_to_alignmask ?
772 "alignmask" : "", 899 "alignmask" : "",
@@ -816,18 +943,26 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
816 break; 943 break;
817 } 944 }
818 945
946 if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
947 prandom_u32() % 2 == 0) {
948 cfg->nosimd = true;
949 p += scnprintf(p, end - p, " nosimd");
950 }
951
819 p += scnprintf(p, end - p, " src_divs=["); 952 p += scnprintf(p, end - p, " src_divs=[");
820 p = generate_random_sgl_divisions(cfg->src_divs, 953 p = generate_random_sgl_divisions(cfg->src_divs,
821 ARRAY_SIZE(cfg->src_divs), p, end, 954 ARRAY_SIZE(cfg->src_divs), p, end,
822 (cfg->finalization_type != 955 (cfg->finalization_type !=
823 FINALIZATION_TYPE_DIGEST)); 956 FINALIZATION_TYPE_DIGEST),
957 cfg->req_flags);
824 p += scnprintf(p, end - p, "]"); 958 p += scnprintf(p, end - p, "]");
825 959
826 if (!cfg->inplace && prandom_u32() % 2 == 0) { 960 if (!cfg->inplace && prandom_u32() % 2 == 0) {
827 p += scnprintf(p, end - p, " dst_divs=["); 961 p += scnprintf(p, end - p, " dst_divs=[");
828 p = generate_random_sgl_divisions(cfg->dst_divs, 962 p = generate_random_sgl_divisions(cfg->dst_divs,
829 ARRAY_SIZE(cfg->dst_divs), 963 ARRAY_SIZE(cfg->dst_divs),
830 p, end, false); 964 p, end, false,
965 cfg->req_flags);
831 p += scnprintf(p, end - p, "]"); 966 p += scnprintf(p, end - p, "]");
832 } 967 }
833 968
@@ -838,21 +973,100 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
838 973
839 WARN_ON_ONCE(!valid_testvec_config(cfg)); 974 WARN_ON_ONCE(!valid_testvec_config(cfg));
840} 975}
841#endif /* CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ 976
977static void crypto_disable_simd_for_test(void)
978{
979 preempt_disable();
980 __this_cpu_write(crypto_simd_disabled_for_test, true);
981}
982
983static void crypto_reenable_simd_for_test(void)
984{
985 __this_cpu_write(crypto_simd_disabled_for_test, false);
986 preempt_enable();
987}
988
989/*
990 * Given an algorithm name, build the name of the generic implementation of that
991 * algorithm, assuming the usual naming convention. Specifically, this appends
992 * "-generic" to every part of the name that is not a template name. Examples:
993 *
994 * aes => aes-generic
995 * cbc(aes) => cbc(aes-generic)
996 * cts(cbc(aes)) => cts(cbc(aes-generic))
997 * rfc7539(chacha20,poly1305) => rfc7539(chacha20-generic,poly1305-generic)
998 *
999 * Return: 0 on success, or -ENAMETOOLONG if the generic name would be too long
1000 */
1001static int build_generic_driver_name(const char *algname,
1002 char driver_name[CRYPTO_MAX_ALG_NAME])
1003{
1004 const char *in = algname;
1005 char *out = driver_name;
1006 size_t len = strlen(algname);
1007
1008 if (len >= CRYPTO_MAX_ALG_NAME)
1009 goto too_long;
1010 do {
1011 const char *in_saved = in;
1012
1013 while (*in && *in != '(' && *in != ')' && *in != ',')
1014 *out++ = *in++;
1015 if (*in != '(' && in > in_saved) {
1016 len += 8;
1017 if (len >= CRYPTO_MAX_ALG_NAME)
1018 goto too_long;
1019 memcpy(out, "-generic", 8);
1020 out += 8;
1021 }
1022 } while ((*out++ = *in++) != '\0');
1023 return 0;
1024
1025too_long:
1026 pr_err("alg: generic driver name for \"%s\" would be too long\n",
1027 algname);
1028 return -ENAMETOOLONG;
1029}
1030#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
1031static void crypto_disable_simd_for_test(void)
1032{
1033}
1034
1035static void crypto_reenable_simd_for_test(void)
1036{
1037}
1038#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
1039
1040static int do_ahash_op(int (*op)(struct ahash_request *req),
1041 struct ahash_request *req,
1042 struct crypto_wait *wait, bool nosimd)
1043{
1044 int err;
1045
1046 if (nosimd)
1047 crypto_disable_simd_for_test();
1048
1049 err = op(req);
1050
1051 if (nosimd)
1052 crypto_reenable_simd_for_test();
1053
1054 return crypto_wait_req(err, wait);
1055}
842 1056
843static int check_nonfinal_hash_op(const char *op, int err, 1057static int check_nonfinal_hash_op(const char *op, int err,
844 u8 *result, unsigned int digestsize, 1058 u8 *result, unsigned int digestsize,
845 const char *driver, unsigned int vec_num, 1059 const char *driver, const char *vec_name,
846 const struct testvec_config *cfg) 1060 const struct testvec_config *cfg)
847{ 1061{
848 if (err) { 1062 if (err) {
849 pr_err("alg: hash: %s %s() failed with err %d on test vector %u, cfg=\"%s\"\n", 1063 pr_err("alg: hash: %s %s() failed with err %d on test vector %s, cfg=\"%s\"\n",
850 driver, op, err, vec_num, cfg->name); 1064 driver, op, err, vec_name, cfg->name);
851 return err; 1065 return err;
852 } 1066 }
853 if (!testmgr_is_poison(result, digestsize)) { 1067 if (!testmgr_is_poison(result, digestsize)) {
854 pr_err("alg: hash: %s %s() used result buffer on test vector %u, cfg=\"%s\"\n", 1068 pr_err("alg: hash: %s %s() used result buffer on test vector %s, cfg=\"%s\"\n",
855 driver, op, vec_num, cfg->name); 1069 driver, op, vec_name, cfg->name);
856 return -EINVAL; 1070 return -EINVAL;
857 } 1071 }
858 return 0; 1072 return 0;
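[The disable/reenable pair above is what makes the nosimd constraints in valid_testvec_config() necessary: the flag is per-CPU and only means anything while the task stays put, so preemption is held off between the two calls, and nothing run in between may sleep -- hence nosimd requires !CRYPTO_TFM_REQ_MAY_SLEEP. do_ahash_op() brackets each hash operation accordingly; restated:

	crypto_disable_simd_for_test();	/* preempt_disable(); this CPU's flag on */
	err = crypto_ahash_update(req);	/* crypto_simd_usable() reads false ->
					 * the no-SIMD path runs; must not sleep */
	crypto_reenable_simd_for_test();/* flag off; preempt_enable() */
	err = crypto_wait_req(err, &wait);

build_generic_driver_name() supports the other new facility in this hunk: given "cts(cbc(aes))" it derives "cts(cbc(aes-generic))", the name under which the comparison tests below look up the reference implementation.]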
@@ -860,7 +1074,7 @@ static int check_nonfinal_hash_op(const char *op, int err,
860 1074
861static int test_hash_vec_cfg(const char *driver, 1075static int test_hash_vec_cfg(const char *driver,
862 const struct hash_testvec *vec, 1076 const struct hash_testvec *vec,
863 unsigned int vec_num, 1077 const char *vec_name,
864 const struct testvec_config *cfg, 1078 const struct testvec_config *cfg,
865 struct ahash_request *req, 1079 struct ahash_request *req,
866 struct test_sglist *tsgl, 1080 struct test_sglist *tsgl,
@@ -885,11 +1099,18 @@ static int test_hash_vec_cfg(const char *driver,
885 if (vec->ksize) { 1099 if (vec->ksize) {
886 err = crypto_ahash_setkey(tfm, vec->key, vec->ksize); 1100 err = crypto_ahash_setkey(tfm, vec->key, vec->ksize);
887 if (err) { 1101 if (err) {
888 pr_err("alg: hash: %s setkey failed with err %d on test vector %u; flags=%#x\n", 1102 if (err == vec->setkey_error)
889 driver, err, vec_num, 1103 return 0;
1104 pr_err("alg: hash: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
1105 driver, vec_name, vec->setkey_error, err,
890 crypto_ahash_get_flags(tfm)); 1106 crypto_ahash_get_flags(tfm));
891 return err; 1107 return err;
892 } 1108 }
1109 if (vec->setkey_error) {
1110 pr_err("alg: hash: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
1111 driver, vec_name, vec->setkey_error);
1112 return -EINVAL;
1113 }
893 } 1114 }
894 1115
895 /* Build the scatterlist for the source data */ 1116 /* Build the scatterlist for the source data */
@@ -899,8 +1120,8 @@ static int test_hash_vec_cfg(const char *driver,
899 err = build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize, 1120 err = build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize,
900 &input, divs); 1121 &input, divs);
901 if (err) { 1122 if (err) {
902 pr_err("alg: hash: %s: error preparing scatterlist for test vector %u, cfg=\"%s\"\n", 1123 pr_err("alg: hash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n",
903 driver, vec_num, cfg->name); 1124 driver, vec_name, cfg->name);
904 return err; 1125 return err;
905 } 1126 }
906 1127
@@ -909,17 +1130,26 @@ static int test_hash_vec_cfg(const char *driver,
909 testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm)); 1130 testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm));
910 testmgr_poison(result, digestsize + TESTMGR_POISON_LEN); 1131 testmgr_poison(result, digestsize + TESTMGR_POISON_LEN);
911 1132
912 if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST) { 1133 if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST ||
1134 vec->digest_error) {
913 /* Just using digest() */ 1135 /* Just using digest() */
914 ahash_request_set_callback(req, req_flags, crypto_req_done, 1136 ahash_request_set_callback(req, req_flags, crypto_req_done,
915 &wait); 1137 &wait);
916 ahash_request_set_crypt(req, tsgl->sgl, result, vec->psize); 1138 ahash_request_set_crypt(req, tsgl->sgl, result, vec->psize);
917 err = crypto_wait_req(crypto_ahash_digest(req), &wait); 1139 err = do_ahash_op(crypto_ahash_digest, req, &wait, cfg->nosimd);
918 if (err) { 1140 if (err) {
919 pr_err("alg: hash: %s digest() failed with err %d on test vector %u, cfg=\"%s\"\n", 1141 if (err == vec->digest_error)
920 driver, err, vec_num, cfg->name); 1142 return 0;
1143 pr_err("alg: hash: %s digest() failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
1144 driver, vec_name, vec->digest_error, err,
1145 cfg->name);
921 return err; 1146 return err;
922 } 1147 }
1148 if (vec->digest_error) {
1149 pr_err("alg: hash: %s digest() unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
1150 driver, vec_name, vec->digest_error, cfg->name);
1151 return -EINVAL;
1152 }
923 goto result_ready; 1153 goto result_ready;
924 } 1154 }
925 1155
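[setkey_error and digest_error are new hash_testvec fields: a vector may now assert that an operation fails with one specific error code, and -- per the added checks -- an unexpected success is itself a test failure. The fuzzing code later in this file fills these fields in from the generic implementation's observed behavior. A hand-written vector could use them like this (hypothetical values; only the field names come from the extended struct):

	static const struct hash_testvec reject_short_key_vec = {
		.key		= "\x01\x02\x03",
		.ksize		= 3,	    /* assumed invalid for the alg under test */
		.setkey_error	= -EINVAL,  /* setkey must fail with exactly -EINVAL */
	};
]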
@@ -927,9 +1157,9 @@ static int test_hash_vec_cfg(const char *driver,
927 1157
928 ahash_request_set_callback(req, req_flags, crypto_req_done, &wait); 1158 ahash_request_set_callback(req, req_flags, crypto_req_done, &wait);
929 ahash_request_set_crypt(req, NULL, result, 0); 1159 ahash_request_set_crypt(req, NULL, result, 0);
930 err = crypto_wait_req(crypto_ahash_init(req), &wait); 1160 err = do_ahash_op(crypto_ahash_init, req, &wait, cfg->nosimd);
931 err = check_nonfinal_hash_op("init", err, result, digestsize, 1161 err = check_nonfinal_hash_op("init", err, result, digestsize,
932 driver, vec_num, cfg); 1162 driver, vec_name, cfg);
933 if (err) 1163 if (err)
934 return err; 1164 return err;
935 1165
@@ -943,10 +1173,11 @@ static int test_hash_vec_cfg(const char *driver,
943 crypto_req_done, &wait); 1173 crypto_req_done, &wait);
944 ahash_request_set_crypt(req, pending_sgl, result, 1174 ahash_request_set_crypt(req, pending_sgl, result,
945 pending_len); 1175 pending_len);
946 err = crypto_wait_req(crypto_ahash_update(req), &wait); 1176 err = do_ahash_op(crypto_ahash_update, req, &wait,
1177 divs[i]->nosimd);
947 err = check_nonfinal_hash_op("update", err, 1178 err = check_nonfinal_hash_op("update", err,
948 result, digestsize, 1179 result, digestsize,
949 driver, vec_num, cfg); 1180 driver, vec_name, cfg);
950 if (err) 1181 if (err)
951 return err; 1182 return err;
952 pending_sgl = NULL; 1183 pending_sgl = NULL;
@@ -959,13 +1190,13 @@ static int test_hash_vec_cfg(const char *driver,
959 err = crypto_ahash_export(req, hashstate); 1190 err = crypto_ahash_export(req, hashstate);
960 err = check_nonfinal_hash_op("export", err, 1191 err = check_nonfinal_hash_op("export", err,
961 result, digestsize, 1192 result, digestsize,
962 driver, vec_num, cfg); 1193 driver, vec_name, cfg);
963 if (err) 1194 if (err)
964 return err; 1195 return err;
965 if (!testmgr_is_poison(hashstate + statesize, 1196 if (!testmgr_is_poison(hashstate + statesize,
966 TESTMGR_POISON_LEN)) { 1197 TESTMGR_POISON_LEN)) {
967 pr_err("alg: hash: %s export() overran state buffer on test vector %u, cfg=\"%s\"\n", 1198 pr_err("alg: hash: %s export() overran state buffer on test vector %s, cfg=\"%s\"\n",
968 driver, vec_num, cfg->name); 1199 driver, vec_name, cfg->name);
969 return -EOVERFLOW; 1200 return -EOVERFLOW;
970 } 1201 }
971 1202
@@ -973,7 +1204,7 @@ static int test_hash_vec_cfg(const char *driver,
973 err = crypto_ahash_import(req, hashstate); 1204 err = crypto_ahash_import(req, hashstate);
974 err = check_nonfinal_hash_op("import", err, 1205 err = check_nonfinal_hash_op("import", err,
975 result, digestsize, 1206 result, digestsize,
976 driver, vec_num, cfg); 1207 driver, vec_name, cfg);
977 if (err) 1208 if (err)
978 return err; 1209 return err;
979 } 1210 }
@@ -986,23 +1217,23 @@ static int test_hash_vec_cfg(const char *driver,
986 ahash_request_set_crypt(req, pending_sgl, result, pending_len); 1217 ahash_request_set_crypt(req, pending_sgl, result, pending_len);
987 if (cfg->finalization_type == FINALIZATION_TYPE_FINAL) { 1218 if (cfg->finalization_type == FINALIZATION_TYPE_FINAL) {
988 /* finish with update() and final() */ 1219 /* finish with update() and final() */
989 err = crypto_wait_req(crypto_ahash_update(req), &wait); 1220 err = do_ahash_op(crypto_ahash_update, req, &wait, cfg->nosimd);
990 err = check_nonfinal_hash_op("update", err, result, digestsize, 1221 err = check_nonfinal_hash_op("update", err, result, digestsize,
991 driver, vec_num, cfg); 1222 driver, vec_name, cfg);
992 if (err) 1223 if (err)
993 return err; 1224 return err;
994 err = crypto_wait_req(crypto_ahash_final(req), &wait); 1225 err = do_ahash_op(crypto_ahash_final, req, &wait, cfg->nosimd);
995 if (err) { 1226 if (err) {
996 pr_err("alg: hash: %s final() failed with err %d on test vector %u, cfg=\"%s\"\n", 1227 pr_err("alg: hash: %s final() failed with err %d on test vector %s, cfg=\"%s\"\n",
997 driver, err, vec_num, cfg->name); 1228 driver, err, vec_name, cfg->name);
998 return err; 1229 return err;
999 } 1230 }
1000 } else { 1231 } else {
1001 /* finish with finup() */ 1232 /* finish with finup() */
1002 err = crypto_wait_req(crypto_ahash_finup(req), &wait); 1233 err = do_ahash_op(crypto_ahash_finup, req, &wait, cfg->nosimd);
1003 if (err) { 1234 if (err) {
1004 pr_err("alg: hash: %s finup() failed with err %d on test vector %u, cfg=\"%s\"\n", 1235 pr_err("alg: hash: %s finup() failed with err %d on test vector %s, cfg=\"%s\"\n",
1005 driver, err, vec_num, cfg->name); 1236 driver, err, vec_name, cfg->name);
1006 return err; 1237 return err;
1007 } 1238 }
1008 } 1239 }
@@ -1010,13 +1241,13 @@ static int test_hash_vec_cfg(const char *driver,
1010result_ready: 1241result_ready:
1011 /* Check that the algorithm produced the correct digest */ 1242 /* Check that the algorithm produced the correct digest */
1012 if (memcmp(result, vec->digest, digestsize) != 0) { 1243 if (memcmp(result, vec->digest, digestsize) != 0) {
1013 pr_err("alg: hash: %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n", 1244 pr_err("alg: hash: %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
1014 driver, vec_num, cfg->name); 1245 driver, vec_name, cfg->name);
1015 return -EINVAL; 1246 return -EINVAL;
1016 } 1247 }
1017 if (!testmgr_is_poison(&result[digestsize], TESTMGR_POISON_LEN)) { 1248 if (!testmgr_is_poison(&result[digestsize], TESTMGR_POISON_LEN)) {
1018 pr_err("alg: hash: %s overran result buffer on test vector %u, cfg=\"%s\"\n", 1249 pr_err("alg: hash: %s overran result buffer on test vector %s, cfg=\"%s\"\n",
1019 driver, vec_num, cfg->name); 1250 driver, vec_name, cfg->name);
1020 return -EOVERFLOW; 1251 return -EOVERFLOW;
1021 } 1252 }
1022 1253
@@ -1027,11 +1258,14 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
1027 unsigned int vec_num, struct ahash_request *req, 1258 unsigned int vec_num, struct ahash_request *req,
1028 struct test_sglist *tsgl, u8 *hashstate) 1259 struct test_sglist *tsgl, u8 *hashstate)
1029{ 1260{
1261 char vec_name[16];
1030 unsigned int i; 1262 unsigned int i;
1031 int err; 1263 int err;
1032 1264
1265 sprintf(vec_name, "%u", vec_num);
1266
1033 for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) { 1267 for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) {
1034 err = test_hash_vec_cfg(driver, vec, vec_num, 1268 err = test_hash_vec_cfg(driver, vec, vec_name,
1035 &default_hash_testvec_configs[i], 1269 &default_hash_testvec_configs[i],
1036 req, tsgl, hashstate); 1270 req, tsgl, hashstate);
1037 if (err) 1271 if (err)
@@ -1046,7 +1280,7 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
1046 for (i = 0; i < fuzz_iterations; i++) { 1280 for (i = 0; i < fuzz_iterations; i++) {
1047 generate_random_testvec_config(&cfg, cfgname, 1281 generate_random_testvec_config(&cfg, cfgname,
1048 sizeof(cfgname)); 1282 sizeof(cfgname));
1049 err = test_hash_vec_cfg(driver, vec, vec_num, &cfg, 1283 err = test_hash_vec_cfg(driver, vec, vec_name, &cfg,
1050 req, tsgl, hashstate); 1284 req, tsgl, hashstate);
1051 if (err) 1285 if (err)
1052 return err; 1286 return err;
@@ -1056,9 +1290,168 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
1056 return 0; 1290 return 0;
1057} 1291}
1058 1292
1293#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
1294/*
1295 * Generate a hash test vector from the given implementation.
1296 * Assumes the buffers in 'vec' were already allocated.
1297 */
1298static void generate_random_hash_testvec(struct crypto_shash *tfm,
1299 struct hash_testvec *vec,
1300 unsigned int maxkeysize,
1301 unsigned int maxdatasize,
1302 char *name, size_t max_namelen)
1303{
1304 SHASH_DESC_ON_STACK(desc, tfm);
1305
1306 /* Data */
1307 vec->psize = generate_random_length(maxdatasize);
1308 generate_random_bytes((u8 *)vec->plaintext, vec->psize);
1309
1310 /*
1311 * Key: length in range [1, maxkeysize], but usually choose maxkeysize.
1312 * If algorithm is unkeyed, then maxkeysize == 0 and set ksize = 0.
1313 */
1314 vec->setkey_error = 0;
1315 vec->ksize = 0;
1316 if (maxkeysize) {
1317 vec->ksize = maxkeysize;
1318 if (prandom_u32() % 4 == 0)
1319 vec->ksize = 1 + (prandom_u32() % maxkeysize);
1320 generate_random_bytes((u8 *)vec->key, vec->ksize);
1321
1322 vec->setkey_error = crypto_shash_setkey(tfm, vec->key,
1323 vec->ksize);
1324 /* If the key couldn't be set, no need to continue to digest. */
1325 if (vec->setkey_error)
1326 goto done;
1327 }
1328
1329 /* Digest */
1330 desc->tfm = tfm;
1331 vec->digest_error = crypto_shash_digest(desc, vec->plaintext,
1332 vec->psize, (u8 *)vec->digest);
1333done:
1334 snprintf(name, max_namelen, "\"random: psize=%u ksize=%u\"",
1335 vec->psize, vec->ksize);
1336}
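The prandom_u32() arithmetic above is deliberately skewed: the generated key usually has the maximum length, and only about one call in four picks a shorter random length. A minimal user-space sketch of the same distribution, with rand() standing in for prandom_u32():

#include <stdlib.h>

static unsigned int pick_ksize(unsigned int maxkeysize)
{
	unsigned int ksize = maxkeysize;	/* ~75% of calls */

	if (rand() % 4 == 0)			/* ~25%: shorter random key */
		ksize = 1 + (rand() % maxkeysize);
	return ksize;
}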
1337
1338/*
1339 * Test the hash algorithm represented by @req against the corresponding generic
1340 * implementation, if one is available.
1341 */
1342static int test_hash_vs_generic_impl(const char *driver,
1343 const char *generic_driver,
1344 unsigned int maxkeysize,
1345 struct ahash_request *req,
1346 struct test_sglist *tsgl,
1347 u8 *hashstate)
1348{
1349 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1350 const unsigned int digestsize = crypto_ahash_digestsize(tfm);
1351 const unsigned int blocksize = crypto_ahash_blocksize(tfm);
1352 const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
1353 const char *algname = crypto_hash_alg_common(tfm)->base.cra_name;
1354 char _generic_driver[CRYPTO_MAX_ALG_NAME];
1355 struct crypto_shash *generic_tfm = NULL;
1356 unsigned int i;
1357 struct hash_testvec vec = { 0 };
1358 char vec_name[64];
1359 struct testvec_config cfg;
1360 char cfgname[TESTVEC_CONFIG_NAMELEN];
1361 int err;
1362
1363 if (noextratests)
1364 return 0;
1365
1366 if (!generic_driver) { /* Use default naming convention? */
1367 err = build_generic_driver_name(algname, _generic_driver);
1368 if (err)
1369 return err;
1370 generic_driver = _generic_driver;
1371 }
1372
1373 if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
1374 return 0;
1375
1376 generic_tfm = crypto_alloc_shash(generic_driver, 0, 0);
1377 if (IS_ERR(generic_tfm)) {
1378 err = PTR_ERR(generic_tfm);
1379 if (err == -ENOENT) {
1380 pr_warn("alg: hash: skipping comparison tests for %s because %s is unavailable\n",
1381 driver, generic_driver);
1382 return 0;
1383 }
1384 pr_err("alg: hash: error allocating %s (generic impl of %s): %d\n",
1385 generic_driver, algname, err);
1386 return err;
1387 }
1388
1389 /* Check the algorithm properties for consistency. */
1390
1391 if (digestsize != crypto_shash_digestsize(generic_tfm)) {
1392 pr_err("alg: hash: digestsize for %s (%u) doesn't match generic impl (%u)\n",
1393 driver, digestsize,
1394 crypto_shash_digestsize(generic_tfm));
1395 err = -EINVAL;
1396 goto out;
1397 }
1398
1399 if (blocksize != crypto_shash_blocksize(generic_tfm)) {
1400 pr_err("alg: hash: blocksize for %s (%u) doesn't match generic impl (%u)\n",
1401 driver, blocksize, crypto_shash_blocksize(generic_tfm));
1402 err = -EINVAL;
1403 goto out;
1404 }
1405
1406 /*
1407 * Now generate test vectors using the generic implementation, and test
1408 * the other implementation against them.
1409 */
1410
1411 vec.key = kmalloc(maxkeysize, GFP_KERNEL);
1412 vec.plaintext = kmalloc(maxdatasize, GFP_KERNEL);
1413 vec.digest = kmalloc(digestsize, GFP_KERNEL);
1414 if (!vec.key || !vec.plaintext || !vec.digest) {
1415 err = -ENOMEM;
1416 goto out;
1417 }
1418
1419 for (i = 0; i < fuzz_iterations * 8; i++) {
1420 generate_random_hash_testvec(generic_tfm, &vec,
1421 maxkeysize, maxdatasize,
1422 vec_name, sizeof(vec_name));
1423 generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname));
1424
1425 err = test_hash_vec_cfg(driver, &vec, vec_name, &cfg,
1426 req, tsgl, hashstate);
1427 if (err)
1428 goto out;
1429 cond_resched();
1430 }
1431 err = 0;
1432out:
1433 kfree(vec.key);
1434 kfree(vec.plaintext);
1435 kfree(vec.digest);
1436 crypto_free_shash(generic_tfm);
1437 return err;
1438}
1439#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
1440static int test_hash_vs_generic_impl(const char *driver,
1441 const char *generic_driver,
1442 unsigned int maxkeysize,
1443 struct ahash_request *req,
1444 struct test_sglist *tsgl,
1445 u8 *hashstate)
1446{
1447 return 0;
1448}
1449#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
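build_generic_driver_name(), called by the function above, derives the generic driver name from the algorithm's cra_name whenever a test entry sets no explicit .generic_driver. The convention appends "-generic" to each leaf component; a user-space approximation of it (no bounds checking, unlike the kernel helper):

#include <string.h>

static void generic_name(const char *alg, char *out)
{
	while (*alg) {
		const char *start = alg;

		while (*alg && *alg != '(' && *alg != ')' && *alg != ',')
			*out++ = *alg++;
		if (alg > start && *alg != '(') {	/* leaf, not a template */
			strcpy(out, "-generic");
			out += 8;
		}
		if (*alg)
			*out++ = *alg++;
	}
	*out = '\0';
}

So "sha256" maps to "sha256-generic" and "hmac(sha256)" to "hmac(sha256-generic)"; the .generic_driver overrides added to alg_test_descs[] later in this patch cover algorithms such as gcm(aes), whose real generic driver is gcm_base(ctr(aes-generic),ghash-generic).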
1450
1059static int __alg_test_hash(const struct hash_testvec *vecs, 1451static int __alg_test_hash(const struct hash_testvec *vecs,
1060 unsigned int num_vecs, const char *driver, 1452 unsigned int num_vecs, const char *driver,
1061 u32 type, u32 mask) 1453 u32 type, u32 mask,
1454 const char *generic_driver, unsigned int maxkeysize)
1062{ 1455{
1063 struct crypto_ahash *tfm; 1456 struct crypto_ahash *tfm;
1064 struct ahash_request *req = NULL; 1457 struct ahash_request *req = NULL;
@@ -1106,7 +1499,8 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
1106 if (err) 1499 if (err)
1107 goto out; 1500 goto out;
1108 } 1501 }
1109 err = 0; 1502 err = test_hash_vs_generic_impl(driver, generic_driver, maxkeysize, req,
1503 tsgl, hashstate);
1110out: 1504out:
1111 kfree(hashstate); 1505 kfree(hashstate);
1112 if (tsgl) { 1506 if (tsgl) {
@@ -1124,6 +1518,7 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
1124 const struct hash_testvec *template = desc->suite.hash.vecs; 1518 const struct hash_testvec *template = desc->suite.hash.vecs;
1125 unsigned int tcount = desc->suite.hash.count; 1519 unsigned int tcount = desc->suite.hash.count;
1126 unsigned int nr_unkeyed, nr_keyed; 1520 unsigned int nr_unkeyed, nr_keyed;
1521 unsigned int maxkeysize = 0;
1127 int err; 1522 int err;
1128 1523
1129 /* 1524 /*
@@ -1142,23 +1537,27 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
1142 "unkeyed ones must come first\n", desc->alg); 1537 "unkeyed ones must come first\n", desc->alg);
1143 return -EINVAL; 1538 return -EINVAL;
1144 } 1539 }
1540 maxkeysize = max_t(unsigned int, maxkeysize,
1541 template[nr_unkeyed + nr_keyed].ksize);
1145 } 1542 }
1146 1543
1147 err = 0; 1544 err = 0;
1148 if (nr_unkeyed) { 1545 if (nr_unkeyed) {
1149 err = __alg_test_hash(template, nr_unkeyed, driver, type, mask); 1546 err = __alg_test_hash(template, nr_unkeyed, driver, type, mask,
1547 desc->generic_driver, maxkeysize);
1150 template += nr_unkeyed; 1548 template += nr_unkeyed;
1151 } 1549 }
1152 1550
1153 if (!err && nr_keyed) 1551 if (!err && nr_keyed)
1154 err = __alg_test_hash(template, nr_keyed, driver, type, mask); 1552 err = __alg_test_hash(template, nr_keyed, driver, type, mask,
1553 desc->generic_driver, maxkeysize);
1155 1554
1156 return err; 1555 return err;
1157} 1556}
1158 1557
1159static int test_aead_vec_cfg(const char *driver, int enc, 1558static int test_aead_vec_cfg(const char *driver, int enc,
1160 const struct aead_testvec *vec, 1559 const struct aead_testvec *vec,
1161 unsigned int vec_num, 1560 const char *vec_name,
1162 const struct testvec_config *cfg, 1561 const struct testvec_config *cfg,
1163 struct aead_request *req, 1562 struct aead_request *req,
1164 struct cipher_test_sglists *tsgls) 1563 struct cipher_test_sglists *tsgls)
@@ -1175,6 +1574,7 @@ static int test_aead_vec_cfg(const char *driver, int enc,
1175 cfg->iv_offset + 1574 cfg->iv_offset +
1176 (cfg->iv_offset_relative_to_alignmask ? alignmask : 0); 1575 (cfg->iv_offset_relative_to_alignmask ? alignmask : 0);
1177 struct kvec input[2]; 1576 struct kvec input[2];
1577 int expected_error;
1178 int err; 1578 int err;
1179 1579
1180 /* Set the key */ 1580 /* Set the key */
@@ -1183,26 +1583,33 @@ static int test_aead_vec_cfg(const char *driver, int enc,
1183 else 1583 else
1184 crypto_aead_clear_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); 1584 crypto_aead_clear_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
1185 err = crypto_aead_setkey(tfm, vec->key, vec->klen); 1585 err = crypto_aead_setkey(tfm, vec->key, vec->klen);
1186 if (err) { 1586 if (err && err != vec->setkey_error) {
1187 if (vec->fail) /* expectedly failed to set key? */ 1587 pr_err("alg: aead: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
1188 return 0; 1588 driver, vec_name, vec->setkey_error, err,
1189 pr_err("alg: aead: %s setkey failed with err %d on test vector %u; flags=%#x\n", 1589 crypto_aead_get_flags(tfm));
1190 driver, err, vec_num, crypto_aead_get_flags(tfm));
1191 return err; 1590 return err;
1192 } 1591 }
1193 if (vec->fail) { 1592 if (!err && vec->setkey_error) {
1194 pr_err("alg: aead: %s setkey unexpectedly succeeded on test vector %u\n", 1593 pr_err("alg: aead: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
1195 driver, vec_num); 1594 driver, vec_name, vec->setkey_error);
1196 return -EINVAL; 1595 return -EINVAL;
1197 } 1596 }
1198 1597
1199 /* Set the authentication tag size */ 1598 /* Set the authentication tag size */
1200 err = crypto_aead_setauthsize(tfm, authsize); 1599 err = crypto_aead_setauthsize(tfm, authsize);
1201 if (err) { 1600 if (err && err != vec->setauthsize_error) {
1202 pr_err("alg: aead: %s setauthsize failed with err %d on test vector %u\n", 1601 pr_err("alg: aead: %s setauthsize failed on test vector %s; expected_error=%d, actual_error=%d\n",
1203 driver, err, vec_num); 1602 driver, vec_name, vec->setauthsize_error, err);
1204 return err; 1603 return err;
1205 } 1604 }
1605 if (!err && vec->setauthsize_error) {
1606 pr_err("alg: aead: %s setauthsize unexpectedly succeeded on test vector %s; expected_error=%d\n",
1607 driver, vec_name, vec->setauthsize_error);
1608 return -EINVAL;
1609 }
1610
1611 if (vec->setkey_error || vec->setauthsize_error)
1612 return 0;
1206 1613
1207 /* The IV must be copied to a buffer, as the algorithm may modify it */ 1614 /* The IV must be copied to a buffer, as the algorithm may modify it */
1208 if (WARN_ON(ivsize > MAX_IVLEN)) 1615 if (WARN_ON(ivsize > MAX_IVLEN))
@@ -1224,8 +1631,8 @@ static int test_aead_vec_cfg(const char *driver, int enc,
1224 vec->plen), 1631 vec->plen),
1225 input, 2); 1632 input, 2);
1226 if (err) { 1633 if (err) {
1227 pr_err("alg: aead: %s %s: error preparing scatterlists for test vector %u, cfg=\"%s\"\n", 1634 pr_err("alg: aead: %s %s: error preparing scatterlists for test vector %s, cfg=\"%s\"\n",
1228 driver, op, vec_num, cfg->name); 1635 driver, op, vec_name, cfg->name);
1229 return err; 1636 return err;
1230 } 1637 }
1231 1638
@@ -1235,23 +1642,12 @@ static int test_aead_vec_cfg(const char *driver, int enc,
1235 aead_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr, 1642 aead_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr,
1236 enc ? vec->plen : vec->clen, iv); 1643 enc ? vec->plen : vec->clen, iv);
1237 aead_request_set_ad(req, vec->alen); 1644 aead_request_set_ad(req, vec->alen);
1238 err = crypto_wait_req(enc ? crypto_aead_encrypt(req) : 1645 if (cfg->nosimd)
1239 crypto_aead_decrypt(req), &wait); 1646 crypto_disable_simd_for_test();
1240 1647 err = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
1241 aead_request_set_tfm(req, tfm); /* TODO: get rid of this */ 1648 if (cfg->nosimd)
1242 1649 crypto_reenable_simd_for_test();
1243 if (err) { 1650 err = crypto_wait_req(err, &wait);
1244 if (err == -EBADMSG && vec->novrfy)
1245 return 0;
1246 pr_err("alg: aead: %s %s failed with err %d on test vector %u, cfg=\"%s\"\n",
1247 driver, op, err, vec_num, cfg->name);
1248 return err;
1249 }
1250 if (vec->novrfy) {
1251 pr_err("alg: aead: %s %s unexpectedly succeeded on test vector %u, cfg=\"%s\"\n",
1252 driver, op, vec_num, cfg->name);
1253 return -EINVAL;
1254 }
1255 1651
1256 /* Check that the algorithm didn't overwrite things it shouldn't have */ 1652 /* Check that the algorithm didn't overwrite things it shouldn't have */
1257 if (req->cryptlen != (enc ? vec->plen : vec->clen) || 1653 if (req->cryptlen != (enc ? vec->plen : vec->clen) ||
@@ -1263,8 +1659,8 @@ static int test_aead_vec_cfg(const char *driver, int enc,
1263 req->base.complete != crypto_req_done || 1659 req->base.complete != crypto_req_done ||
1264 req->base.flags != req_flags || 1660 req->base.flags != req_flags ||
1265 req->base.data != &wait) { 1661 req->base.data != &wait) {
1266 pr_err("alg: aead: %s %s corrupted request struct on test vector %u, cfg=\"%s\"\n", 1662 pr_err("alg: aead: %s %s corrupted request struct on test vector %s, cfg=\"%s\"\n",
1267 driver, op, vec_num, cfg->name); 1663 driver, op, vec_name, cfg->name);
1268 if (req->cryptlen != (enc ? vec->plen : vec->clen)) 1664 if (req->cryptlen != (enc ? vec->plen : vec->clen))
1269 pr_err("alg: aead: changed 'req->cryptlen'\n"); 1665 pr_err("alg: aead: changed 'req->cryptlen'\n");
1270 if (req->assoclen != vec->alen) 1666 if (req->assoclen != vec->alen)
@@ -1286,14 +1682,29 @@ static int test_aead_vec_cfg(const char *driver, int enc,
1286 return -EINVAL; 1682 return -EINVAL;
1287 } 1683 }
1288 if (is_test_sglist_corrupted(&tsgls->src)) { 1684 if (is_test_sglist_corrupted(&tsgls->src)) {
1289 pr_err("alg: aead: %s %s corrupted src sgl on test vector %u, cfg=\"%s\"\n", 1685 pr_err("alg: aead: %s %s corrupted src sgl on test vector %s, cfg=\"%s\"\n",
1290 driver, op, vec_num, cfg->name); 1686 driver, op, vec_name, cfg->name);
1291 return -EINVAL; 1687 return -EINVAL;
1292 } 1688 }
1293 if (tsgls->dst.sgl_ptr != tsgls->src.sgl && 1689 if (tsgls->dst.sgl_ptr != tsgls->src.sgl &&
1294 is_test_sglist_corrupted(&tsgls->dst)) { 1690 is_test_sglist_corrupted(&tsgls->dst)) {
1295 pr_err("alg: aead: %s %s corrupted dst sgl on test vector %u, cfg=\"%s\"\n", 1691 pr_err("alg: aead: %s %s corrupted dst sgl on test vector %s, cfg=\"%s\"\n",
1296 driver, op, vec_num, cfg->name); 1692 driver, op, vec_name, cfg->name);
1693 return -EINVAL;
1694 }
1695
1696 /* Check for success or failure */
1697 expected_error = vec->novrfy ? -EBADMSG : vec->crypt_error;
1698 if (err) {
1699 if (err == expected_error)
1700 return 0;
1701 pr_err("alg: aead: %s %s failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
1702 driver, op, vec_name, expected_error, err, cfg->name);
1703 return err;
1704 }
1705 if (expected_error) {
1706 pr_err("alg: aead: %s %s unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
1707 driver, op, vec_name, expected_error, cfg->name);
1297 return -EINVAL; 1708 return -EINVAL;
1298 } 1709 }
1299 1710
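The deleted -EBADMSG special case is folded into a single expected-error idiom: a decryption vector with novrfy set expects -EBADMSG, and every other vector expects vec->crypt_error (0 for success). The idiom, extracted as a standalone sketch:

/* Both a wrong error code and an unexpected success fail the test. */
static int check_expected(int err, int expected_error)
{
	if (err)
		return err == expected_error ? 0 : err;
	return expected_error ? -EINVAL : 0;
}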
@@ -1302,13 +1713,13 @@ static int test_aead_vec_cfg(const char *driver, int enc,
1302 enc ? vec->clen : vec->plen, 1713 enc ? vec->clen : vec->plen,
1303 vec->alen, enc || !cfg->inplace); 1714 vec->alen, enc || !cfg->inplace);
1304 if (err == -EOVERFLOW) { 1715 if (err == -EOVERFLOW) {
1305 pr_err("alg: aead: %s %s overran dst buffer on test vector %u, cfg=\"%s\"\n", 1716 pr_err("alg: aead: %s %s overran dst buffer on test vector %s, cfg=\"%s\"\n",
1306 driver, op, vec_num, cfg->name); 1717 driver, op, vec_name, cfg->name);
1307 return err; 1718 return err;
1308 } 1719 }
1309 if (err) { 1720 if (err) {
1310 pr_err("alg: aead: %s %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n", 1721 pr_err("alg: aead: %s %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
1311 driver, op, vec_num, cfg->name); 1722 driver, op, vec_name, cfg->name);
1312 return err; 1723 return err;
1313 } 1724 }
1314 1725
@@ -1320,14 +1731,17 @@ static int test_aead_vec(const char *driver, int enc,
1320 struct aead_request *req, 1731 struct aead_request *req,
1321 struct cipher_test_sglists *tsgls) 1732 struct cipher_test_sglists *tsgls)
1322{ 1733{
1734 char vec_name[16];
1323 unsigned int i; 1735 unsigned int i;
1324 int err; 1736 int err;
1325 1737
1326 if (enc && vec->novrfy) 1738 if (enc && vec->novrfy)
1327 return 0; 1739 return 0;
1328 1740
1741 sprintf(vec_name, "%u", vec_num);
1742
1329 for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) { 1743 for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) {
1330 err = test_aead_vec_cfg(driver, enc, vec, vec_num, 1744 err = test_aead_vec_cfg(driver, enc, vec, vec_name,
1331 &default_cipher_testvec_configs[i], 1745 &default_cipher_testvec_configs[i],
1332 req, tsgls); 1746 req, tsgls);
1333 if (err) 1747 if (err)
@@ -1342,7 +1756,7 @@ static int test_aead_vec(const char *driver, int enc,
1342 for (i = 0; i < fuzz_iterations; i++) { 1756 for (i = 0; i < fuzz_iterations; i++) {
1343 generate_random_testvec_config(&cfg, cfgname, 1757 generate_random_testvec_config(&cfg, cfgname,
1344 sizeof(cfgname)); 1758 sizeof(cfgname));
1345 err = test_aead_vec_cfg(driver, enc, vec, vec_num, 1759 err = test_aead_vec_cfg(driver, enc, vec, vec_name,
1346 &cfg, req, tsgls); 1760 &cfg, req, tsgls);
1347 if (err) 1761 if (err)
1348 return err; 1762 return err;
@@ -1352,6 +1766,226 @@ static int test_aead_vec(const char *driver, int enc,
1352 return 0; 1766 return 0;
1353} 1767}
1354 1768
1769#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
1770/*
1771 * Generate an AEAD test vector from the given implementation.
1772 * Assumes the buffers in 'vec' were already allocated.
1773 */
1774static void generate_random_aead_testvec(struct aead_request *req,
1775 struct aead_testvec *vec,
1776 unsigned int maxkeysize,
1777 unsigned int maxdatasize,
1778 char *name, size_t max_namelen)
1779{
1780 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1781 const unsigned int ivsize = crypto_aead_ivsize(tfm);
1782 unsigned int maxauthsize = crypto_aead_alg(tfm)->maxauthsize;
1783 unsigned int authsize;
1784 unsigned int total_len;
1785 int i;
1786 struct scatterlist src[2], dst;
1787 u8 iv[MAX_IVLEN];
1788 DECLARE_CRYPTO_WAIT(wait);
1789
1790 /* Key: length in [0, maxkeysize], but usually choose maxkeysize */
1791 vec->klen = maxkeysize;
1792 if (prandom_u32() % 4 == 0)
1793 vec->klen = prandom_u32() % (maxkeysize + 1);
1794 generate_random_bytes((u8 *)vec->key, vec->klen);
1795 vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen);
1796
1797 /* IV */
1798 generate_random_bytes((u8 *)vec->iv, ivsize);
1799
1800 /* Tag length: in [0, maxauthsize], but usually choose maxauthsize */
1801 authsize = maxauthsize;
1802 if (prandom_u32() % 4 == 0)
1803 authsize = prandom_u32() % (maxauthsize + 1);
1804 if (WARN_ON(authsize > maxdatasize))
1805 authsize = maxdatasize;
1806 maxdatasize -= authsize;
1807 vec->setauthsize_error = crypto_aead_setauthsize(tfm, authsize);
1808
1809 /* Plaintext and associated data */
1810 total_len = generate_random_length(maxdatasize);
1811 if (prandom_u32() % 4 == 0)
1812 vec->alen = 0;
1813 else
1814 vec->alen = generate_random_length(total_len);
1815 vec->plen = total_len - vec->alen;
1816 generate_random_bytes((u8 *)vec->assoc, vec->alen);
1817 generate_random_bytes((u8 *)vec->ptext, vec->plen);
1818
1819 vec->clen = vec->plen + authsize;
1820
1821 /*
1822 * If the key or authentication tag size couldn't be set, no need to
1823 * continue to encrypt.
1824 */
1825 if (vec->setkey_error || vec->setauthsize_error)
1826 goto done;
1827
1828 /* Ciphertext */
1829 sg_init_table(src, 2);
1830 i = 0;
1831 if (vec->alen)
1832 sg_set_buf(&src[i++], vec->assoc, vec->alen);
1833 if (vec->plen)
1834 sg_set_buf(&src[i++], vec->ptext, vec->plen);
1835 sg_init_one(&dst, vec->ctext, vec->alen + vec->clen);
1836 memcpy(iv, vec->iv, ivsize);
1837 aead_request_set_callback(req, 0, crypto_req_done, &wait);
1838 aead_request_set_crypt(req, src, &dst, vec->plen, iv);
1839 aead_request_set_ad(req, vec->alen);
1840 vec->crypt_error = crypto_wait_req(crypto_aead_encrypt(req), &wait);
1841 if (vec->crypt_error == 0)
1842 memmove((u8 *)vec->ctext, vec->ctext + vec->alen, vec->clen);
1843done:
1844 snprintf(name, max_namelen,
1845 "\"random: alen=%u plen=%u authsize=%u klen=%u\"",
1846 vec->alen, vec->plen, authsize, vec->klen);
1847}
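One subtlety above: AEAD encryption passes the associated data through to the destination, so dst ends up as alen bytes of copied AAD followed by the clen ciphertext-plus-tag bytes. The memmove() strips that prefix so vec->ctext holds the ciphertext alone; in isolation:

#include <string.h>

/* dst layout after crypto_aead_encrypt(): [ AAD (alen) | ctext+tag (clen) ] */
static void strip_copied_aad(unsigned char *buf, unsigned int alen,
			     unsigned int clen)
{
	memmove(buf, buf + alen, clen);	/* regions overlap; memmove is safe */
}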
1848
1849/*
1850 * Test the AEAD algorithm represented by @req against the corresponding generic
1851 * implementation, if one is available.
1852 */
1853static int test_aead_vs_generic_impl(const char *driver,
1854 const struct alg_test_desc *test_desc,
1855 struct aead_request *req,
1856 struct cipher_test_sglists *tsgls)
1857{
1858 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1859 const unsigned int ivsize = crypto_aead_ivsize(tfm);
1860 const unsigned int maxauthsize = crypto_aead_alg(tfm)->maxauthsize;
1861 const unsigned int blocksize = crypto_aead_blocksize(tfm);
1862 const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
1863 const char *algname = crypto_aead_alg(tfm)->base.cra_name;
1864 const char *generic_driver = test_desc->generic_driver;
1865 char _generic_driver[CRYPTO_MAX_ALG_NAME];
1866 struct crypto_aead *generic_tfm = NULL;
1867 struct aead_request *generic_req = NULL;
1868 unsigned int maxkeysize;
1869 unsigned int i;
1870 struct aead_testvec vec = { 0 };
1871 char vec_name[64];
1872 struct testvec_config cfg;
1873 char cfgname[TESTVEC_CONFIG_NAMELEN];
1874 int err;
1875
1876 if (noextratests)
1877 return 0;
1878
1879 if (!generic_driver) { /* Use default naming convention? */
1880 err = build_generic_driver_name(algname, _generic_driver);
1881 if (err)
1882 return err;
1883 generic_driver = _generic_driver;
1884 }
1885
1886 if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
1887 return 0;
1888
1889 generic_tfm = crypto_alloc_aead(generic_driver, 0, 0);
1890 if (IS_ERR(generic_tfm)) {
1891 err = PTR_ERR(generic_tfm);
1892 if (err == -ENOENT) {
1893 pr_warn("alg: aead: skipping comparison tests for %s because %s is unavailable\n",
1894 driver, generic_driver);
1895 return 0;
1896 }
1897 pr_err("alg: aead: error allocating %s (generic impl of %s): %d\n",
1898 generic_driver, algname, err);
1899 return err;
1900 }
1901
1902 generic_req = aead_request_alloc(generic_tfm, GFP_KERNEL);
1903 if (!generic_req) {
1904 err = -ENOMEM;
1905 goto out;
1906 }
1907
1908 /* Check the algorithm properties for consistency. */
1909
1910 if (maxauthsize != crypto_aead_alg(generic_tfm)->maxauthsize) {
1911 pr_err("alg: aead: maxauthsize for %s (%u) doesn't match generic impl (%u)\n",
1912 driver, maxauthsize,
1913 crypto_aead_alg(generic_tfm)->maxauthsize);
1914 err = -EINVAL;
1915 goto out;
1916 }
1917
1918 if (ivsize != crypto_aead_ivsize(generic_tfm)) {
1919 pr_err("alg: aead: ivsize for %s (%u) doesn't match generic impl (%u)\n",
1920 driver, ivsize, crypto_aead_ivsize(generic_tfm));
1921 err = -EINVAL;
1922 goto out;
1923 }
1924
1925 if (blocksize != crypto_aead_blocksize(generic_tfm)) {
1926 pr_err("alg: aead: blocksize for %s (%u) doesn't match generic impl (%u)\n",
1927 driver, blocksize, crypto_aead_blocksize(generic_tfm));
1928 err = -EINVAL;
1929 goto out;
1930 }
1931
1932 /*
1933 * Now generate test vectors using the generic implementation, and test
1934 * the other implementation against them.
1935 */
1936
1937 maxkeysize = 0;
1938 for (i = 0; i < test_desc->suite.aead.count; i++)
1939 maxkeysize = max_t(unsigned int, maxkeysize,
1940 test_desc->suite.aead.vecs[i].klen);
1941
1942 vec.key = kmalloc(maxkeysize, GFP_KERNEL);
1943 vec.iv = kmalloc(ivsize, GFP_KERNEL);
1944 vec.assoc = kmalloc(maxdatasize, GFP_KERNEL);
1945 vec.ptext = kmalloc(maxdatasize, GFP_KERNEL);
1946 vec.ctext = kmalloc(maxdatasize, GFP_KERNEL);
1947 if (!vec.key || !vec.iv || !vec.assoc || !vec.ptext || !vec.ctext) {
1948 err = -ENOMEM;
1949 goto out;
1950 }
1951
1952 for (i = 0; i < fuzz_iterations * 8; i++) {
1953 generate_random_aead_testvec(generic_req, &vec,
1954 maxkeysize, maxdatasize,
1955 vec_name, sizeof(vec_name));
1956 generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname));
1957
1958 err = test_aead_vec_cfg(driver, ENCRYPT, &vec, vec_name, &cfg,
1959 req, tsgls);
1960 if (err)
1961 goto out;
1962 err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, &cfg,
1963 req, tsgls);
1964 if (err)
1965 goto out;
1966 cond_resched();
1967 }
1968 err = 0;
1969out:
1970 kfree(vec.key);
1971 kfree(vec.iv);
1972 kfree(vec.assoc);
1973 kfree(vec.ptext);
1974 kfree(vec.ctext);
1975 crypto_free_aead(generic_tfm);
1976 aead_request_free(generic_req);
1977 return err;
1978}
1979#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
1980static int test_aead_vs_generic_impl(const char *driver,
1981 const struct alg_test_desc *test_desc,
1982 struct aead_request *req,
1983 struct cipher_test_sglists *tsgls)
1984{
1985 return 0;
1986}
1987#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
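As in the hash code, the #else branch compiles the comparison test down to a stub that always succeeds, so alg_test_aead() below can call it unconditionally. The pattern in isolation (generic names, not this file's):

#ifdef CONFIG_SOME_FEATURE
static int extra_tests(void)
{
	/* ...the real comparison/fuzz testing goes here... */
	return 0;
}
#else
static int extra_tests(void)
{
	return 0;	/* stub: callers need no #ifdefs of their own */
}
#endif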
1988
1355static int test_aead(const char *driver, int enc, 1989static int test_aead(const char *driver, int enc,
1356 const struct aead_test_suite *suite, 1990 const struct aead_test_suite *suite,
1357 struct aead_request *req, 1991 struct aead_request *req,
@@ -1411,6 +2045,10 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
1411 goto out; 2045 goto out;
1412 2046
1413 err = test_aead(driver, DECRYPT, suite, req, tsgls); 2047 err = test_aead(driver, DECRYPT, suite, req, tsgls);
2048 if (err)
2049 goto out;
2050
2051 err = test_aead_vs_generic_impl(driver, desc, req, tsgls);
1414out: 2052out:
1415 free_cipher_test_sglists(tsgls); 2053 free_cipher_test_sglists(tsgls);
1416 aead_request_free(req); 2054 aead_request_free(req);
@@ -1462,13 +2100,20 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
1462 2100
1463 ret = crypto_cipher_setkey(tfm, template[i].key, 2101 ret = crypto_cipher_setkey(tfm, template[i].key,
1464 template[i].klen); 2102 template[i].klen);
1465 if (template[i].fail == !ret) { 2103 if (ret) {
1466 printk(KERN_ERR "alg: cipher: setkey failed " 2104 if (ret == template[i].setkey_error)
1467 "on test %d for %s: flags=%x\n", j, 2105 continue;
1468 algo, crypto_cipher_get_flags(tfm)); 2106 pr_err("alg: cipher: %s setkey failed on test vector %u; expected_error=%d, actual_error=%d, flags=%#x\n",
2107 algo, j, template[i].setkey_error, ret,
2108 crypto_cipher_get_flags(tfm));
1469 goto out; 2109 goto out;
1470 } else if (ret) 2110 }
1471 continue; 2111 if (template[i].setkey_error) {
2112 pr_err("alg: cipher: %s setkey unexpectedly succeeded on test vector %u; expected_error=%d\n",
2113 algo, j, template[i].setkey_error);
2114 ret = -EINVAL;
2115 goto out;
2116 }
1472 2117
1473 for (k = 0; k < template[i].len; 2118 for (k = 0; k < template[i].len;
1474 k += crypto_cipher_blocksize(tfm)) { 2119 k += crypto_cipher_blocksize(tfm)) {
@@ -1500,7 +2145,7 @@ out_nobuf:
1500 2145
1501static int test_skcipher_vec_cfg(const char *driver, int enc, 2146static int test_skcipher_vec_cfg(const char *driver, int enc,
1502 const struct cipher_testvec *vec, 2147 const struct cipher_testvec *vec,
1503 unsigned int vec_num, 2148 const char *vec_name,
1504 const struct testvec_config *cfg, 2149 const struct testvec_config *cfg,
1505 struct skcipher_request *req, 2150 struct skcipher_request *req,
1506 struct cipher_test_sglists *tsgls) 2151 struct cipher_test_sglists *tsgls)
@@ -1526,15 +2171,16 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
1526 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); 2171 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
1527 err = crypto_skcipher_setkey(tfm, vec->key, vec->klen); 2172 err = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
1528 if (err) { 2173 if (err) {
1529 if (vec->fail) /* expectedly failed to set key? */ 2174 if (err == vec->setkey_error)
1530 return 0; 2175 return 0;
1531 pr_err("alg: skcipher: %s setkey failed with err %d on test vector %u; flags=%#x\n", 2176 pr_err("alg: skcipher: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
1532 driver, err, vec_num, crypto_skcipher_get_flags(tfm)); 2177 driver, vec_name, vec->setkey_error, err,
2178 crypto_skcipher_get_flags(tfm));
1533 return err; 2179 return err;
1534 } 2180 }
1535 if (vec->fail) { 2181 if (vec->setkey_error) {
1536 pr_err("alg: skcipher: %s setkey unexpectedly succeeded on test vector %u\n", 2182 pr_err("alg: skcipher: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
1537 driver, vec_num); 2183 driver, vec_name, vec->setkey_error);
1538 return -EINVAL; 2184 return -EINVAL;
1539 } 2185 }
1540 2186
@@ -1550,8 +2196,8 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
1550 memset(iv, 0, ivsize); 2196 memset(iv, 0, ivsize);
1551 } else { 2197 } else {
1552 if (vec->generates_iv) { 2198 if (vec->generates_iv) {
1553 pr_err("alg: skcipher: %s has ivsize=0 but test vector %u generates IV!\n", 2199 pr_err("alg: skcipher: %s has ivsize=0 but test vector %s generates IV!\n",
1554 driver, vec_num); 2200 driver, vec_name);
1555 return -EINVAL; 2201 return -EINVAL;
1556 } 2202 }
1557 iv = NULL; 2203 iv = NULL;
@@ -1563,8 +2209,8 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
1563 err = build_cipher_test_sglists(tsgls, cfg, alignmask, 2209 err = build_cipher_test_sglists(tsgls, cfg, alignmask,
1564 vec->len, vec->len, &input, 1); 2210 vec->len, vec->len, &input, 1);
1565 if (err) { 2211 if (err) {
1566 pr_err("alg: skcipher: %s %s: error preparing scatterlists for test vector %u, cfg=\"%s\"\n", 2212 pr_err("alg: skcipher: %s %s: error preparing scatterlists for test vector %s, cfg=\"%s\"\n",
1567 driver, op, vec_num, cfg->name); 2213 driver, op, vec_name, cfg->name);
1568 return err; 2214 return err;
1569 } 2215 }
1570 2216
@@ -1573,13 +2219,12 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
1573 skcipher_request_set_callback(req, req_flags, crypto_req_done, &wait); 2219 skcipher_request_set_callback(req, req_flags, crypto_req_done, &wait);
1574 skcipher_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr, 2220 skcipher_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr,
1575 vec->len, iv); 2221 vec->len, iv);
1576 err = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) : 2222 if (cfg->nosimd)
1577 crypto_skcipher_decrypt(req), &wait); 2223 crypto_disable_simd_for_test();
1578 if (err) { 2224 err = enc ? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
1579 pr_err("alg: skcipher: %s %s failed with err %d on test vector %u, cfg=\"%s\"\n", 2225 if (cfg->nosimd)
1580 driver, op, err, vec_num, cfg->name); 2226 crypto_reenable_simd_for_test();
1581 return err; 2227 err = crypto_wait_req(err, &wait);
1582 }
1583 2228
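Note the ordering on the right-hand side: the SIMD override only brackets the request submission, and crypto_wait_req(), which may sleep waiting for an asynchronous completion, runs after SIMD has been re-enabled (the override is per-CPU, so the potentially sleeping wait stays outside the window). Condensed, with explanatory comments added:

if (cfg->nosimd)
	crypto_disable_simd_for_test();		/* per-CPU override begins */
err = enc ? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
if (cfg->nosimd)
	crypto_reenable_simd_for_test();	/* override ends before waiting */
err = crypto_wait_req(err, &wait);		/* may sleep, hence outside */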
1584 /* Check that the algorithm didn't overwrite things it shouldn't have */ 2229 /* Check that the algorithm didn't overwrite things it shouldn't have */
1585 if (req->cryptlen != vec->len || 2230 if (req->cryptlen != vec->len ||
@@ -1590,8 +2235,8 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
1590 req->base.complete != crypto_req_done || 2235 req->base.complete != crypto_req_done ||
1591 req->base.flags != req_flags || 2236 req->base.flags != req_flags ||
1592 req->base.data != &wait) { 2237 req->base.data != &wait) {
1593 pr_err("alg: skcipher: %s %s corrupted request struct on test vector %u, cfg=\"%s\"\n", 2238 pr_err("alg: skcipher: %s %s corrupted request struct on test vector %s, cfg=\"%s\"\n",
1594 driver, op, vec_num, cfg->name); 2239 driver, op, vec_name, cfg->name);
1595 if (req->cryptlen != vec->len) 2240 if (req->cryptlen != vec->len)
1596 pr_err("alg: skcipher: changed 'req->cryptlen'\n"); 2241 pr_err("alg: skcipher: changed 'req->cryptlen'\n");
1597 if (req->iv != iv) 2242 if (req->iv != iv)
@@ -1611,14 +2256,28 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
1611 return -EINVAL; 2256 return -EINVAL;
1612 } 2257 }
1613 if (is_test_sglist_corrupted(&tsgls->src)) { 2258 if (is_test_sglist_corrupted(&tsgls->src)) {
1614 pr_err("alg: skcipher: %s %s corrupted src sgl on test vector %u, cfg=\"%s\"\n", 2259 pr_err("alg: skcipher: %s %s corrupted src sgl on test vector %s, cfg=\"%s\"\n",
1615 driver, op, vec_num, cfg->name); 2260 driver, op, vec_name, cfg->name);
1616 return -EINVAL; 2261 return -EINVAL;
1617 } 2262 }
1618 if (tsgls->dst.sgl_ptr != tsgls->src.sgl && 2263 if (tsgls->dst.sgl_ptr != tsgls->src.sgl &&
1619 is_test_sglist_corrupted(&tsgls->dst)) { 2264 is_test_sglist_corrupted(&tsgls->dst)) {
1620 pr_err("alg: skcipher: %s %s corrupted dst sgl on test vector %u, cfg=\"%s\"\n", 2265 pr_err("alg: skcipher: %s %s corrupted dst sgl on test vector %s, cfg=\"%s\"\n",
1621 driver, op, vec_num, cfg->name); 2266 driver, op, vec_name, cfg->name);
2267 return -EINVAL;
2268 }
2269
2270 /* Check for success or failure */
2271 if (err) {
2272 if (err == vec->crypt_error)
2273 return 0;
2274 pr_err("alg: skcipher: %s %s failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
2275 driver, op, vec_name, vec->crypt_error, err, cfg->name);
2276 return err;
2277 }
2278 if (vec->crypt_error) {
2279 pr_err("alg: skcipher: %s %s unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
2280 driver, op, vec_name, vec->crypt_error, cfg->name);
1622 return -EINVAL; 2281 return -EINVAL;
1623 } 2282 }
1624 2283
@@ -1626,20 +2285,20 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
1626 err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext, 2285 err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext,
1627 vec->len, 0, true); 2286 vec->len, 0, true);
1628 if (err == -EOVERFLOW) { 2287 if (err == -EOVERFLOW) {
1629 pr_err("alg: skcipher: %s %s overran dst buffer on test vector %u, cfg=\"%s\"\n", 2288 pr_err("alg: skcipher: %s %s overran dst buffer on test vector %s, cfg=\"%s\"\n",
1630 driver, op, vec_num, cfg->name); 2289 driver, op, vec_name, cfg->name);
1631 return err; 2290 return err;
1632 } 2291 }
1633 if (err) { 2292 if (err) {
1634 pr_err("alg: skcipher: %s %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n", 2293 pr_err("alg: skcipher: %s %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
1635 driver, op, vec_num, cfg->name); 2294 driver, op, vec_name, cfg->name);
1636 return err; 2295 return err;
1637 } 2296 }
1638 2297
1639 /* If applicable, check that the algorithm generated the correct IV */ 2298 /* If applicable, check that the algorithm generated the correct IV */
1640 if (vec->iv_out && memcmp(iv, vec->iv_out, ivsize) != 0) { 2299 if (vec->iv_out && memcmp(iv, vec->iv_out, ivsize) != 0) {
1641 pr_err("alg: skcipher: %s %s test failed (wrong output IV) on test vector %u, cfg=\"%s\"\n", 2300 pr_err("alg: skcipher: %s %s test failed (wrong output IV) on test vector %s, cfg=\"%s\"\n",
1642 driver, op, vec_num, cfg->name); 2301 driver, op, vec_name, cfg->name);
1643 hexdump(iv, ivsize); 2302 hexdump(iv, ivsize);
1644 return -EINVAL; 2303 return -EINVAL;
1645 } 2304 }
@@ -1653,14 +2312,17 @@ static int test_skcipher_vec(const char *driver, int enc,
1653 struct skcipher_request *req, 2312 struct skcipher_request *req,
1654 struct cipher_test_sglists *tsgls) 2313 struct cipher_test_sglists *tsgls)
1655{ 2314{
2315 char vec_name[16];
1656 unsigned int i; 2316 unsigned int i;
1657 int err; 2317 int err;
1658 2318
1659 if (fips_enabled && vec->fips_skip) 2319 if (fips_enabled && vec->fips_skip)
1660 return 0; 2320 return 0;
1661 2321
2322 sprintf(vec_name, "%u", vec_num);
2323
1662 for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) { 2324 for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) {
1663 err = test_skcipher_vec_cfg(driver, enc, vec, vec_num, 2325 err = test_skcipher_vec_cfg(driver, enc, vec, vec_name,
1664 &default_cipher_testvec_configs[i], 2326 &default_cipher_testvec_configs[i],
1665 req, tsgls); 2327 req, tsgls);
1666 if (err) 2328 if (err)
@@ -1675,7 +2337,7 @@ static int test_skcipher_vec(const char *driver, int enc,
1675 for (i = 0; i < fuzz_iterations; i++) { 2337 for (i = 0; i < fuzz_iterations; i++) {
1676 generate_random_testvec_config(&cfg, cfgname, 2338 generate_random_testvec_config(&cfg, cfgname,
1677 sizeof(cfgname)); 2339 sizeof(cfgname));
1678 err = test_skcipher_vec_cfg(driver, enc, vec, vec_num, 2340 err = test_skcipher_vec_cfg(driver, enc, vec, vec_name,
1679 &cfg, req, tsgls); 2341 &cfg, req, tsgls);
1680 if (err) 2342 if (err)
1681 return err; 2343 return err;
@@ -1685,6 +2347,186 @@ static int test_skcipher_vec(const char *driver, int enc,
1685 return 0; 2347 return 0;
1686} 2348}
1687 2349
2350#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
2351/*
2352 * Generate a symmetric cipher test vector from the given implementation.
2353 * Assumes the buffers in 'vec' were already allocated.
2354 */
2355static void generate_random_cipher_testvec(struct skcipher_request *req,
2356 struct cipher_testvec *vec,
2357 unsigned int maxdatasize,
2358 char *name, size_t max_namelen)
2359{
2360 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
2361 const unsigned int maxkeysize = tfm->keysize;
2362 const unsigned int ivsize = crypto_skcipher_ivsize(tfm);
2363 struct scatterlist src, dst;
2364 u8 iv[MAX_IVLEN];
2365 DECLARE_CRYPTO_WAIT(wait);
2366
2367 /* Key: length in [0, maxkeysize], but usually choose maxkeysize */
2368 vec->klen = maxkeysize;
2369 if (prandom_u32() % 4 == 0)
2370 vec->klen = prandom_u32() % (maxkeysize + 1);
2371 generate_random_bytes((u8 *)vec->key, vec->klen);
2372 vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
2373
2374 /* IV */
2375 generate_random_bytes((u8 *)vec->iv, ivsize);
2376
2377 /* Plaintext */
2378 vec->len = generate_random_length(maxdatasize);
2379 generate_random_bytes((u8 *)vec->ptext, vec->len);
2380
2381 /* If the key couldn't be set, no need to continue to encrypt. */
2382 if (vec->setkey_error)
2383 goto done;
2384
2385 /* Ciphertext */
2386 sg_init_one(&src, vec->ptext, vec->len);
2387 sg_init_one(&dst, vec->ctext, vec->len);
2388 memcpy(iv, vec->iv, ivsize);
2389 skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
2390 skcipher_request_set_crypt(req, &src, &dst, vec->len, iv);
2391 vec->crypt_error = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
2392done:
2393 snprintf(name, max_namelen, "\"random: len=%u klen=%u\"",
2394 vec->len, vec->klen);
2395}
2396
2397/*
2398 * Test the skcipher algorithm represented by @req against the corresponding
2399 * generic implementation, if one is available.
2400 */
2401static int test_skcipher_vs_generic_impl(const char *driver,
2402 const char *generic_driver,
2403 struct skcipher_request *req,
2404 struct cipher_test_sglists *tsgls)
2405{
2406 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
2407 const unsigned int ivsize = crypto_skcipher_ivsize(tfm);
2408 const unsigned int blocksize = crypto_skcipher_blocksize(tfm);
2409 const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
2410 const char *algname = crypto_skcipher_alg(tfm)->base.cra_name;
2411 char _generic_driver[CRYPTO_MAX_ALG_NAME];
2412 struct crypto_skcipher *generic_tfm = NULL;
2413 struct skcipher_request *generic_req = NULL;
2414 unsigned int i;
2415 struct cipher_testvec vec = { 0 };
2416 char vec_name[64];
2417 struct testvec_config cfg;
2418 char cfgname[TESTVEC_CONFIG_NAMELEN];
2419 int err;
2420
2421 if (noextratests)
2422 return 0;
2423
2424 /* Keywrap isn't supported here yet as it handles its IV differently. */
2425 if (strncmp(algname, "kw(", 3) == 0)
2426 return 0;
2427
2428 if (!generic_driver) { /* Use default naming convention? */
2429 err = build_generic_driver_name(algname, _generic_driver);
2430 if (err)
2431 return err;
2432 generic_driver = _generic_driver;
2433 }
2434
2435 if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
2436 return 0;
2437
2438 generic_tfm = crypto_alloc_skcipher(generic_driver, 0, 0);
2439 if (IS_ERR(generic_tfm)) {
2440 err = PTR_ERR(generic_tfm);
2441 if (err == -ENOENT) {
2442 pr_warn("alg: skcipher: skipping comparison tests for %s because %s is unavailable\n",
2443 driver, generic_driver);
2444 return 0;
2445 }
2446 pr_err("alg: skcipher: error allocating %s (generic impl of %s): %d\n",
2447 generic_driver, algname, err);
2448 return err;
2449 }
2450
2451 generic_req = skcipher_request_alloc(generic_tfm, GFP_KERNEL);
2452 if (!generic_req) {
2453 err = -ENOMEM;
2454 goto out;
2455 }
2456
2457 /* Check the algorithm properties for consistency. */
2458
2459 if (tfm->keysize != generic_tfm->keysize) {
2460 pr_err("alg: skcipher: max keysize for %s (%u) doesn't match generic impl (%u)\n",
2461 driver, tfm->keysize, generic_tfm->keysize);
2462 err = -EINVAL;
2463 goto out;
2464 }
2465
2466 if (ivsize != crypto_skcipher_ivsize(generic_tfm)) {
2467 pr_err("alg: skcipher: ivsize for %s (%u) doesn't match generic impl (%u)\n",
2468 driver, ivsize, crypto_skcipher_ivsize(generic_tfm));
2469 err = -EINVAL;
2470 goto out;
2471 }
2472
2473 if (blocksize != crypto_skcipher_blocksize(generic_tfm)) {
2474 pr_err("alg: skcipher: blocksize for %s (%u) doesn't match generic impl (%u)\n",
2475 driver, blocksize,
2476 crypto_skcipher_blocksize(generic_tfm));
2477 err = -EINVAL;
2478 goto out;
2479 }
2480
2481 /*
2482 * Now generate test vectors using the generic implementation, and test
2483 * the other implementation against them.
2484 */
2485
2486 vec.key = kmalloc(tfm->keysize, GFP_KERNEL);
2487 vec.iv = kmalloc(ivsize, GFP_KERNEL);
2488 vec.ptext = kmalloc(maxdatasize, GFP_KERNEL);
2489 vec.ctext = kmalloc(maxdatasize, GFP_KERNEL);
2490 if (!vec.key || !vec.iv || !vec.ptext || !vec.ctext) {
2491 err = -ENOMEM;
2492 goto out;
2493 }
2494
2495 for (i = 0; i < fuzz_iterations * 8; i++) {
2496 generate_random_cipher_testvec(generic_req, &vec, maxdatasize,
2497 vec_name, sizeof(vec_name));
2498 generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname));
2499
2500 err = test_skcipher_vec_cfg(driver, ENCRYPT, &vec, vec_name,
2501 &cfg, req, tsgls);
2502 if (err)
2503 goto out;
2504 err = test_skcipher_vec_cfg(driver, DECRYPT, &vec, vec_name,
2505 &cfg, req, tsgls);
2506 if (err)
2507 goto out;
2508 cond_resched();
2509 }
2510 err = 0;
2511out:
2512 kfree(vec.key);
2513 kfree(vec.iv);
2514 kfree(vec.ptext);
2515 kfree(vec.ctext);
2516 crypto_free_skcipher(generic_tfm);
2517 skcipher_request_free(generic_req);
2518 return err;
2519}
2520#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
2521static int test_skcipher_vs_generic_impl(const char *driver,
2522 const char *generic_driver,
2523 struct skcipher_request *req,
2524 struct cipher_test_sglists *tsgls)
2525{
2526 return 0;
2527}
2528#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
2529
1688static int test_skcipher(const char *driver, int enc, 2530static int test_skcipher(const char *driver, int enc,
1689 const struct cipher_test_suite *suite, 2531 const struct cipher_test_suite *suite,
1690 struct skcipher_request *req, 2532 struct skcipher_request *req,
@@ -1744,6 +2586,11 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
1744 goto out; 2586 goto out;
1745 2587
1746 err = test_skcipher(driver, DECRYPT, suite, req, tsgls); 2588 err = test_skcipher(driver, DECRYPT, suite, req, tsgls);
2589 if (err)
2590 goto out;
2591
2592 err = test_skcipher_vs_generic_impl(driver, desc->generic_driver, req,
2593 tsgls);
1747out: 2594out:
1748 free_cipher_test_sglists(tsgls); 2595 free_cipher_test_sglists(tsgls);
1749 skcipher_request_free(req); 2596 skcipher_request_free(req);
@@ -2179,7 +3026,6 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
2179 u32 *ctx = (u32 *)shash_desc_ctx(shash); 3026 u32 *ctx = (u32 *)shash_desc_ctx(shash);
2180 3027
2181 shash->tfm = tfm; 3028 shash->tfm = tfm;
2182 shash->flags = 0;
2183 3029
2184 *ctx = 420553207; 3030 *ctx = 420553207;
2185 err = crypto_shash_final(shash, (u8 *)&val); 3031 err = crypto_shash_final(shash, (u8 *)&val);
@@ -2493,6 +3339,12 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
2493 return err; 3339 return err;
2494} 3340}
2495 3341
3342static u8 *test_pack_u32(u8 *dst, u32 val)
3343{
3344 memcpy(dst, &val, sizeof(val));
3345 return dst + sizeof(val);
3346}
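test_pack_u32() supports the new EC-RDSA vectors: the blob handed to setkey below is the raw key followed by two native-endian u32 values, the algorithm OID enum and the parameter length, then the parameters themselves. The layout, using the field names from this file:

/*
 *   +-----------------+----------+---------------+---------------------+
 *   | key             | u32 algo | u32 param_len | params              |
 *   | (key_len bytes) |          |               | (param_len bytes)   |
 *   +-----------------+----------+---------------+---------------------+
 *
 * hence the key_len + sizeof(u32) * 2 + param_len allocation a few
 * lines below.
 */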
3347
2496static int test_akcipher_one(struct crypto_akcipher *tfm, 3348static int test_akcipher_one(struct crypto_akcipher *tfm,
2497 const struct akcipher_testvec *vecs) 3349 const struct akcipher_testvec *vecs)
2498{ 3350{
@@ -2503,10 +3355,11 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
2503 struct crypto_wait wait; 3355 struct crypto_wait wait;
2504 unsigned int out_len_max, out_len = 0; 3356 unsigned int out_len_max, out_len = 0;
2505 int err = -ENOMEM; 3357 int err = -ENOMEM;
2506 struct scatterlist src, dst, src_tab[2]; 3358 struct scatterlist src, dst, src_tab[3];
2507 const char *m, *c; 3359 const char *m, *c;
2508 unsigned int m_size, c_size; 3360 unsigned int m_size, c_size;
2509 const char *op; 3361 const char *op;
3362 u8 *key, *ptr;
2510 3363
2511 if (testmgr_alloc_buf(xbuf)) 3364 if (testmgr_alloc_buf(xbuf))
2512 return err; 3365 return err;
@@ -2517,22 +3370,29 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
2517 3370
2518 crypto_init_wait(&wait); 3371 crypto_init_wait(&wait);
2519 3372
3373 key = kmalloc(vecs->key_len + sizeof(u32) * 2 + vecs->param_len,
3374 GFP_KERNEL);
3375 if (!key)
3376 goto free_xbuf;
3377 memcpy(key, vecs->key, vecs->key_len);
3378 ptr = key + vecs->key_len;
3379 ptr = test_pack_u32(ptr, vecs->algo);
3380 ptr = test_pack_u32(ptr, vecs->param_len);
3381 memcpy(ptr, vecs->params, vecs->param_len);
3382
2520 if (vecs->public_key_vec) 3383 if (vecs->public_key_vec)
2521 err = crypto_akcipher_set_pub_key(tfm, vecs->key, 3384 err = crypto_akcipher_set_pub_key(tfm, key, vecs->key_len);
2522 vecs->key_len);
2523 else 3385 else
2524 err = crypto_akcipher_set_priv_key(tfm, vecs->key, 3386 err = crypto_akcipher_set_priv_key(tfm, key, vecs->key_len);
2525 vecs->key_len);
2526 if (err) 3387 if (err)
2527 goto free_req; 3388 goto free_req;
2528 3389
2529 err = -ENOMEM;
2530 out_len_max = crypto_akcipher_maxsize(tfm);
2531
2532 /* 3390 /*
 2533	 * First run tests which do not require a private key, such as 3391	 * First run tests which do not require a private key, such as
2534 * encrypt or verify. 3392 * encrypt or verify.
2535 */ 3393 */
3394 err = -ENOMEM;
3395 out_len_max = crypto_akcipher_maxsize(tfm);
2536 outbuf_enc = kzalloc(out_len_max, GFP_KERNEL); 3396 outbuf_enc = kzalloc(out_len_max, GFP_KERNEL);
2537 if (!outbuf_enc) 3397 if (!outbuf_enc)
2538 goto free_req; 3398 goto free_req;
@@ -2558,12 +3418,20 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
2558 goto free_all; 3418 goto free_all;
2559 memcpy(xbuf[0], m, m_size); 3419 memcpy(xbuf[0], m, m_size);
2560 3420
2561 sg_init_table(src_tab, 2); 3421 sg_init_table(src_tab, 3);
2562 sg_set_buf(&src_tab[0], xbuf[0], 8); 3422 sg_set_buf(&src_tab[0], xbuf[0], 8);
2563 sg_set_buf(&src_tab[1], xbuf[0] + 8, m_size - 8); 3423 sg_set_buf(&src_tab[1], xbuf[0] + 8, m_size - 8);
2564 sg_init_one(&dst, outbuf_enc, out_len_max); 3424 if (vecs->siggen_sigver_test) {
2565 akcipher_request_set_crypt(req, src_tab, &dst, m_size, 3425 if (WARN_ON(c_size > PAGE_SIZE))
2566 out_len_max); 3426 goto free_all;
3427 memcpy(xbuf[1], c, c_size);
3428 sg_set_buf(&src_tab[2], xbuf[1], c_size);
3429 akcipher_request_set_crypt(req, src_tab, NULL, m_size, c_size);
3430 } else {
3431 sg_init_one(&dst, outbuf_enc, out_len_max);
3432 akcipher_request_set_crypt(req, src_tab, &dst, m_size,
3433 out_len_max);
3434 }
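Two details of this scatterlist setup: the message is split at byte 8 across src_tab[0] and src_tab[1], apparently to exercise multi-entry scatterlist walking, and for siggen_sigver vectors the signature rides along as a third entry with no destination, matching the changed akcipher verify() convention. Schematically, with msg and sig standing in for xbuf[0] and xbuf[1]:

sg_init_table(src_tab, 3);
sg_set_buf(&src_tab[0], msg, 8);		/* message, first 8 bytes */
sg_set_buf(&src_tab[1], msg + 8, m_size - 8);	/* message, remainder */
sg_set_buf(&src_tab[2], sig, c_size);		/* signature (verify only) */
akcipher_request_set_crypt(req, src_tab, NULL, m_size, c_size);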
2567 akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 3435 akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2568 crypto_req_done, &wait); 3436 crypto_req_done, &wait);
2569 3437
@@ -2576,18 +3444,21 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
2576 pr_err("alg: akcipher: %s test failed. err %d\n", op, err); 3444 pr_err("alg: akcipher: %s test failed. err %d\n", op, err);
2577 goto free_all; 3445 goto free_all;
2578 } 3446 }
2579 if (req->dst_len != c_size) { 3447 if (!vecs->siggen_sigver_test) {
2580 pr_err("alg: akcipher: %s test failed. Invalid output len\n", 3448 if (req->dst_len != c_size) {
2581 op); 3449 pr_err("alg: akcipher: %s test failed. Invalid output len\n",
2582 err = -EINVAL; 3450 op);
2583 goto free_all; 3451 err = -EINVAL;
2584 } 3452 goto free_all;
2585 /* verify that encrypted message is equal to expected */ 3453 }
2586 if (memcmp(c, outbuf_enc, c_size)) { 3454 /* verify that encrypted message is equal to expected */
2587 pr_err("alg: akcipher: %s test failed. Invalid output\n", op); 3455 if (memcmp(c, outbuf_enc, c_size) != 0) {
2588 hexdump(outbuf_enc, c_size); 3456 pr_err("alg: akcipher: %s test failed. Invalid output\n",
2589 err = -EINVAL; 3457 op);
2590 goto free_all; 3458 hexdump(outbuf_enc, c_size);
3459 err = -EINVAL;
3460 goto free_all;
3461 }
2591 } 3462 }
2592 3463
2593 /* 3464 /*
@@ -2642,6 +3513,7 @@ free_all:
2642 kfree(outbuf_enc); 3513 kfree(outbuf_enc);
2643free_req: 3514free_req:
2644 akcipher_request_free(req); 3515 akcipher_request_free(req);
3516 kfree(key);
2645free_xbuf: 3517free_xbuf:
2646 testmgr_free_buf(xbuf); 3518 testmgr_free_buf(xbuf);
2647 return err; 3519 return err;
@@ -2699,12 +3571,14 @@ static int alg_test_null(const struct alg_test_desc *desc,
2699static const struct alg_test_desc alg_test_descs[] = { 3571static const struct alg_test_desc alg_test_descs[] = {
2700 { 3572 {
2701 .alg = "adiantum(xchacha12,aes)", 3573 .alg = "adiantum(xchacha12,aes)",
3574 .generic_driver = "adiantum(xchacha12-generic,aes-generic,nhpoly1305-generic)",
2702 .test = alg_test_skcipher, 3575 .test = alg_test_skcipher,
2703 .suite = { 3576 .suite = {
2704 .cipher = __VECS(adiantum_xchacha12_aes_tv_template) 3577 .cipher = __VECS(adiantum_xchacha12_aes_tv_template)
2705 }, 3578 },
2706 }, { 3579 }, {
2707 .alg = "adiantum(xchacha20,aes)", 3580 .alg = "adiantum(xchacha20,aes)",
3581 .generic_driver = "adiantum(xchacha20-generic,aes-generic,nhpoly1305-generic)",
2708 .test = alg_test_skcipher, 3582 .test = alg_test_skcipher,
2709 .suite = { 3583 .suite = {
2710 .cipher = __VECS(adiantum_xchacha20_aes_tv_template) 3584 .cipher = __VECS(adiantum_xchacha20_aes_tv_template)
@@ -2921,6 +3795,12 @@ static const struct alg_test_desc alg_test_descs[] = {
2921 .test = alg_test_null, 3795 .test = alg_test_null,
2922 .fips_allowed = 1, 3796 .fips_allowed = 1,
2923 }, { 3797 }, {
3798 /* Same as cbc(sm4) except the key is stored in
3799 * hardware secure memory which we reference by index
3800 */
3801 .alg = "cbc(psm4)",
3802 .test = alg_test_null,
3803 }, {
2924 .alg = "cbc(serpent)", 3804 .alg = "cbc(serpent)",
2925 .test = alg_test_skcipher, 3805 .test = alg_test_skcipher,
2926 .suite = { 3806 .suite = {
@@ -2947,6 +3827,7 @@ static const struct alg_test_desc alg_test_descs[] = {
2947 } 3827 }
2948 }, { 3828 }, {
2949 .alg = "ccm(aes)", 3829 .alg = "ccm(aes)",
3830 .generic_driver = "ccm_base(ctr(aes-generic),cbcmac(aes-generic))",
2950 .test = alg_test_aead, 3831 .test = alg_test_aead,
2951 .fips_allowed = 1, 3832 .fips_allowed = 1,
2952 .suite = { 3833 .suite = {
@@ -3055,6 +3936,13 @@ static const struct alg_test_desc alg_test_descs[] = {
3055 .test = alg_test_null, 3936 .test = alg_test_null,
3056 .fips_allowed = 1, 3937 .fips_allowed = 1,
3057 }, { 3938 }, {
3940 /* Same as ctr(sm4) except the key is stored in
3941 * hardware secure memory which we reference by index
3942 */
3943 .alg = "ctr(psm4)",
3944 .test = alg_test_null,
3945 }, {
3058 .alg = "ctr(serpent)", 3946 .alg = "ctr(serpent)",
3059 .test = alg_test_skcipher, 3947 .test = alg_test_skcipher,
3060 .suite = { 3948 .suite = {
@@ -3080,6 +3968,13 @@ static const struct alg_test_desc alg_test_descs[] = {
3080 .cipher = __VECS(cts_mode_tv_template) 3968 .cipher = __VECS(cts_mode_tv_template)
3081 } 3969 }
3082 }, { 3970 }, {
 3971	/* Same as cts(cbc(aes)) except the key is stored in
3972 * hardware secure memory which we reference by index
3973 */
3974 .alg = "cts(cbc(paes))",
3975 .test = alg_test_null,
3976 .fips_allowed = 1,
3977 }, {
3083 .alg = "deflate", 3978 .alg = "deflate",
3084 .test = alg_test_comp, 3979 .test = alg_test_comp,
3085 .fips_allowed = 1, 3980 .fips_allowed = 1,
@@ -3358,7 +4253,14 @@ static const struct alg_test_desc alg_test_descs[] = {
3358 .kpp = __VECS(ecdh_tv_template) 4253 .kpp = __VECS(ecdh_tv_template)
3359 } 4254 }
3360 }, { 4255 }, {
4256 .alg = "ecrdsa",
4257 .test = alg_test_akcipher,
4258 .suite = {
4259 .akcipher = __VECS(ecrdsa_tv_template)
4260 }
4261 }, {
3361 .alg = "gcm(aes)", 4262 .alg = "gcm(aes)",
4263 .generic_driver = "gcm_base(ctr(aes-generic),ghash-generic)",
3362 .test = alg_test_aead, 4264 .test = alg_test_aead,
3363 .fips_allowed = 1, 4265 .fips_allowed = 1,
3364 .suite = { 4266 .suite = {
@@ -3477,30 +4379,35 @@ static const struct alg_test_desc alg_test_descs[] = {
3477 } 4379 }
3478 }, { 4380 }, {
3479 .alg = "lrw(aes)", 4381 .alg = "lrw(aes)",
4382 .generic_driver = "lrw(ecb(aes-generic))",
3480 .test = alg_test_skcipher, 4383 .test = alg_test_skcipher,
3481 .suite = { 4384 .suite = {
3482 .cipher = __VECS(aes_lrw_tv_template) 4385 .cipher = __VECS(aes_lrw_tv_template)
3483 } 4386 }
3484 }, { 4387 }, {
3485 .alg = "lrw(camellia)", 4388 .alg = "lrw(camellia)",
4389 .generic_driver = "lrw(ecb(camellia-generic))",
3486 .test = alg_test_skcipher, 4390 .test = alg_test_skcipher,
3487 .suite = { 4391 .suite = {
3488 .cipher = __VECS(camellia_lrw_tv_template) 4392 .cipher = __VECS(camellia_lrw_tv_template)
3489 } 4393 }
3490 }, { 4394 }, {
3491 .alg = "lrw(cast6)", 4395 .alg = "lrw(cast6)",
4396 .generic_driver = "lrw(ecb(cast6-generic))",
3492 .test = alg_test_skcipher, 4397 .test = alg_test_skcipher,
3493 .suite = { 4398 .suite = {
3494 .cipher = __VECS(cast6_lrw_tv_template) 4399 .cipher = __VECS(cast6_lrw_tv_template)
3495 } 4400 }
3496 }, { 4401 }, {
3497 .alg = "lrw(serpent)", 4402 .alg = "lrw(serpent)",
4403 .generic_driver = "lrw(ecb(serpent-generic))",
3498 .test = alg_test_skcipher, 4404 .test = alg_test_skcipher,
3499 .suite = { 4405 .suite = {
3500 .cipher = __VECS(serpent_lrw_tv_template) 4406 .cipher = __VECS(serpent_lrw_tv_template)
3501 } 4407 }
3502 }, { 4408 }, {
3503 .alg = "lrw(twofish)", 4409 .alg = "lrw(twofish)",
4410 .generic_driver = "lrw(ecb(twofish-generic))",
3504 .test = alg_test_skcipher, 4411 .test = alg_test_skcipher,
3505 .suite = { 4412 .suite = {
3506 .cipher = __VECS(tf_lrw_tv_template) 4413 .cipher = __VECS(tf_lrw_tv_template)
@@ -3625,6 +4532,7 @@ static const struct alg_test_desc alg_test_descs[] = {
3625 } 4532 }
3626 }, { 4533 }, {
3627 .alg = "rfc4106(gcm(aes))", 4534 .alg = "rfc4106(gcm(aes))",
4535 .generic_driver = "rfc4106(gcm_base(ctr(aes-generic),ghash-generic))",
3628 .test = alg_test_aead, 4536 .test = alg_test_aead,
3629 .fips_allowed = 1, 4537 .fips_allowed = 1,
3630 .suite = { 4538 .suite = {
@@ -3632,6 +4540,7 @@ static const struct alg_test_desc alg_test_descs[] = {
3632 } 4540 }
3633 }, { 4541 }, {
3634 .alg = "rfc4309(ccm(aes))", 4542 .alg = "rfc4309(ccm(aes))",
4543 .generic_driver = "rfc4309(ccm_base(ctr(aes-generic),cbcmac(aes-generic)))",
3635 .test = alg_test_aead, 4544 .test = alg_test_aead,
3636 .fips_allowed = 1, 4545 .fips_allowed = 1,
3637 .suite = { 4546 .suite = {
@@ -3639,6 +4548,7 @@ static const struct alg_test_desc alg_test_descs[] = {
3639 } 4548 }
3640 }, { 4549 }, {
3641 .alg = "rfc4543(gcm(aes))", 4550 .alg = "rfc4543(gcm(aes))",
4551 .generic_driver = "rfc4543(gcm_base(ctr(aes-generic),ghash-generic))",
3642 .test = alg_test_aead, 4552 .test = alg_test_aead,
3643 .suite = { 4553 .suite = {
3644 .aead = __VECS(aes_gcm_rfc4543_tv_template) 4554 .aead = __VECS(aes_gcm_rfc4543_tv_template)
@@ -3835,6 +4745,7 @@ static const struct alg_test_desc alg_test_descs[] = {
3835 }, 4745 },
3836 }, { 4746 }, {
3837 .alg = "xts(aes)", 4747 .alg = "xts(aes)",
4748 .generic_driver = "xts(ecb(aes-generic))",
3838 .test = alg_test_skcipher, 4749 .test = alg_test_skcipher,
3839 .fips_allowed = 1, 4750 .fips_allowed = 1,
3840 .suite = { 4751 .suite = {
@@ -3842,12 +4753,14 @@ static const struct alg_test_desc alg_test_descs[] = {
3842 } 4753 }
3843 }, { 4754 }, {
3844 .alg = "xts(camellia)", 4755 .alg = "xts(camellia)",
4756 .generic_driver = "xts(ecb(camellia-generic))",
3845 .test = alg_test_skcipher, 4757 .test = alg_test_skcipher,
3846 .suite = { 4758 .suite = {
3847 .cipher = __VECS(camellia_xts_tv_template) 4759 .cipher = __VECS(camellia_xts_tv_template)
3848 } 4760 }
3849 }, { 4761 }, {
3850 .alg = "xts(cast6)", 4762 .alg = "xts(cast6)",
4763 .generic_driver = "xts(ecb(cast6-generic))",
3851 .test = alg_test_skcipher, 4764 .test = alg_test_skcipher,
3852 .suite = { 4765 .suite = {
3853 .cipher = __VECS(cast6_xts_tv_template) 4766 .cipher = __VECS(cast6_xts_tv_template)
@@ -3861,12 +4774,14 @@ static const struct alg_test_desc alg_test_descs[] = {
3861 .fips_allowed = 1, 4774 .fips_allowed = 1,
3862 }, { 4775 }, {
3863 .alg = "xts(serpent)", 4776 .alg = "xts(serpent)",
4777 .generic_driver = "xts(ecb(serpent-generic))",
3864 .test = alg_test_skcipher, 4778 .test = alg_test_skcipher,
3865 .suite = { 4779 .suite = {
3866 .cipher = __VECS(serpent_xts_tv_template) 4780 .cipher = __VECS(serpent_xts_tv_template)
3867 } 4781 }
3868 }, { 4782 }, {
3869 .alg = "xts(twofish)", 4783 .alg = "xts(twofish)",
4784 .generic_driver = "xts(ecb(twofish-generic))",
3870 .test = alg_test_skcipher, 4785 .test = alg_test_skcipher,
3871 .suite = { 4786 .suite = {
3872 .cipher = __VECS(tf_xts_tv_template) 4787 .cipher = __VECS(tf_xts_tv_template)
@@ -4020,8 +4935,9 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
4020 type, mask); 4935 type, mask);
4021 4936
4022test_done: 4937test_done:
4023 if (fips_enabled && rc) 4938 if (rc && (fips_enabled || panic_on_fail))
4024 panic("%s: %s alg self test failed in fips mode!\n", driver, alg); 4939 panic("alg: self-tests for %s (%s) failed in %s mode!\n",
4940 driver, alg, fips_enabled ? "fips" : "panic_on_fail");
4025 4941
4026 if (fips_enabled && !rc) 4942 if (fips_enabled && !rc)
4027 pr_info("alg: self-tests for %s (%s) passed\n", driver, alg); 4943 pr_info("alg: self-tests for %s (%s) passed\n", driver, alg);
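A minimal sketch of how the new panic_on_fail knob complements the FIPS path in the hunk above. This is an assumption-laden illustration, not a quote of the source: the declaration follows the usual module_param() pattern, and since testmgr is normally linked into the cryptomgr module, the knob would likely be set as cryptomgr.panic_on_fail=1 on the kernel command line (exact parameter path depends on the build).

#include <linux/fips.h>
#include <linux/module.h>

/* Assumed declaration of the new debug knob (read-only at runtime). */
static bool panic_on_fail;
module_param(panic_on_fail, bool, 0444);

/* Mirrors the logic in the hunk above: panic on any self-test failure
 * when either FIPS mode or the new debug knob is active. */
static void check_selftest_result(const char *driver, const char *alg, int rc)
{
	if (rc && (fips_enabled || panic_on_fail))
		panic("alg: self-tests for %s (%s) failed in %s mode!\n",
		      driver, alg, fips_enabled ? "fips" : "panic_on_fail");
}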
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index d18a37629f05..b6daae1f6a1d 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -25,6 +25,8 @@
25#ifndef _CRYPTO_TESTMGR_H 25#ifndef _CRYPTO_TESTMGR_H
26#define _CRYPTO_TESTMGR_H 26#define _CRYPTO_TESTMGR_H
27 27
28#include <linux/oid_registry.h>
29
28#define MAX_IVLEN 32 30#define MAX_IVLEN 32
29 31
30/* 32/*
@@ -34,6 +36,8 @@
34 * @digest: Pointer to expected digest 36 * @digest: Pointer to expected digest
35 * @psize: Length of source data in bytes 37 * @psize: Length of source data in bytes
36 * @ksize: Length of @key in bytes (0 if no key) 38 * @ksize: Length of @key in bytes (0 if no key)
39 * @setkey_error: Expected error from setkey()
40 * @digest_error: Expected error from digest()
37 */ 41 */
38struct hash_testvec { 42struct hash_testvec {
39 const char *key; 43 const char *key;
@@ -41,6 +45,8 @@ struct hash_testvec {
41 const char *digest; 45 const char *digest;
42 unsigned short psize; 46 unsigned short psize;
43 unsigned short ksize; 47 unsigned short ksize;
48 int setkey_error;
49 int digest_error;
44}; 50};
45 51
46/* 52/*
@@ -52,12 +58,13 @@ struct hash_testvec {
52 * @ptext: Pointer to plaintext 58 * @ptext: Pointer to plaintext
53 * @ctext: Pointer to ciphertext 59 * @ctext: Pointer to ciphertext
54 * @len: Length of @ptext and @ctext in bytes 60 * @len: Length of @ptext and @ctext in bytes
55 * @fail: If set to one, the test need to fail
56 * @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS? 61 * @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS?
57 * ( e.g. test needs to fail due to a weak key ) 62 * ( e.g. test needs to fail due to a weak key )
58 * @fips_skip: Skip the test vector in FIPS mode 63 * @fips_skip: Skip the test vector in FIPS mode
59 * @generates_iv: Encryption should ignore the given IV, and output @iv_out. 64 * @generates_iv: Encryption should ignore the given IV, and output @iv_out.
60 * Decryption takes @iv_out. Needed for AES Keywrap ("kw(aes)"). 65 * Decryption takes @iv_out. Needed for AES Keywrap ("kw(aes)").
66 * @setkey_error: Expected error from setkey()
67 * @crypt_error: Expected error from encrypt() and decrypt()
61 */ 68 */
62struct cipher_testvec { 69struct cipher_testvec {
63 const char *key; 70 const char *key;
@@ -65,12 +72,13 @@ struct cipher_testvec {
65 const char *iv_out; 72 const char *iv_out;
66 const char *ptext; 73 const char *ptext;
67 const char *ctext; 74 const char *ctext;
68 bool fail;
69 unsigned char wk; /* weak key flag */ 75 unsigned char wk; /* weak key flag */
70 unsigned char klen; 76 unsigned short klen;
71 unsigned short len; 77 unsigned short len;
72 bool fips_skip; 78 bool fips_skip;
73 bool generates_iv; 79 bool generates_iv;
80 int setkey_error;
81 int crypt_error;
74}; 82};
75 83
76/* 84/*
@@ -82,7 +90,6 @@ struct cipher_testvec {
82 * @ctext: Pointer to the full authenticated ciphertext. For AEADs that 90 * @ctext: Pointer to the full authenticated ciphertext. For AEADs that
83 * produce a separate "ciphertext" and "authentication tag", these 91 * produce a separate "ciphertext" and "authentication tag", these
84 * two parts are concatenated: ciphertext || tag. 92 * two parts are concatenated: ciphertext || tag.
85 * @fail: setkey() failure expected?
86 * @novrfy: Decryption verification failure expected? 93 * @novrfy: Decryption verification failure expected?
87 * @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS? 94 * @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS?
88 * (e.g. setkey() needs to fail due to a weak key) 95 * (e.g. setkey() needs to fail due to a weak key)
@@ -90,6 +97,9 @@ struct cipher_testvec {
90 * @plen: Length of @ptext in bytes 97 * @plen: Length of @ptext in bytes
91 * @alen: Length of @assoc in bytes 98 * @alen: Length of @assoc in bytes
92 * @clen: Length of @ctext in bytes 99 * @clen: Length of @ctext in bytes
100 * @setkey_error: Expected error from setkey()
101 * @setauthsize_error: Expected error from setauthsize()
102 * @crypt_error: Expected error from encrypt() and decrypt()
93 */ 103 */
94struct aead_testvec { 104struct aead_testvec {
95 const char *key; 105 const char *key;
@@ -97,13 +107,15 @@ struct aead_testvec {
97 const char *ptext; 107 const char *ptext;
98 const char *assoc; 108 const char *assoc;
99 const char *ctext; 109 const char *ctext;
100 bool fail;
101 unsigned char novrfy; 110 unsigned char novrfy;
102 unsigned char wk; 111 unsigned char wk;
103 unsigned char klen; 112 unsigned char klen;
104 unsigned short plen; 113 unsigned short plen;
105 unsigned short clen; 114 unsigned short clen;
106 unsigned short alen; 115 unsigned short alen;
116 int setkey_error;
117 int setauthsize_error;
118 int crypt_error;
107}; 119};
108 120
109struct cprng_testvec { 121struct cprng_testvec {
@@ -135,13 +147,16 @@ struct drbg_testvec {
135 147
136struct akcipher_testvec { 148struct akcipher_testvec {
137 const unsigned char *key; 149 const unsigned char *key;
150 const unsigned char *params;
138 const unsigned char *m; 151 const unsigned char *m;
139 const unsigned char *c; 152 const unsigned char *c;
140 unsigned int key_len; 153 unsigned int key_len;
154 unsigned int param_len;
141 unsigned int m_size; 155 unsigned int m_size;
142 unsigned int c_size; 156 unsigned int c_size;
143 bool public_key_vec; 157 bool public_key_vec;
144 bool siggen_sigver_test; 158 bool siggen_sigver_test;
159 enum OID algo;
145}; 160};
146 161
147struct kpp_testvec { 162struct kpp_testvec {
@@ -551,6 +566,160 @@ static const struct akcipher_testvec rsa_tv_template[] = {
551}; 566};
552 567
553/* 568/*
569 * EC-RDSA test vectors are generated by gost-engine.
570 */
571static const struct akcipher_testvec ecrdsa_tv_template[] = {
572 {
573 .key =
574 "\x04\x40\xd5\xa7\x77\xf9\x26\x2f\x8c\xbd\xcc\xe3\x1f\x01\x94\x05"
575 "\x3d\x2f\xec\xb5\x00\x34\xf5\x51\x6d\x3b\x90\x4b\x23\x28\x6f\x1d"
576 "\xc8\x36\x61\x60\x36\xec\xbb\xb4\x0b\x95\x4e\x54\x4f\x15\x21\x05"
577 "\xd8\x52\x66\x44\x31\x7e\x5d\xc5\xd1\x26\x00\x5f\x60\xd8\xf0\xc7"
578 "\x27\xfc",
579 .key_len = 66,
580 .params = /* OID_gostCPSignA */
581 "\x30\x13\x06\x07\x2a\x85\x03\x02\x02\x23\x01\x06\x08\x2a\x85\x03"
582 "\x07\x01\x01\x02\x02",
583 .param_len = 21,
584 .c =
585 "\x41\x32\x09\x73\xa4\xc1\x38\xd6\x63\x7d\x8b\xf7\x50\x3f\xda\x9f"
586 "\x68\x48\xc1\x50\xe3\x42\x3a\x9b\x2b\x28\x12\x2a\xa7\xc2\x75\x31"
587 "\x65\x77\x8c\x3c\x9e\x0d\x56\xb2\xf9\xdc\x04\x33\x3e\xb0\x9e\xf9"
588 "\x74\x4e\x59\xb3\x83\xf2\x91\x27\xda\x5e\xc7\x33\xc0\xc1\x8f\x41",
589 .c_size = 64,
590 .algo = OID_gost2012PKey256,
591 .m =
592 "\x75\x1b\x9b\x40\x25\xb9\x96\xd2\x9b\x00\x41\xb3\x58\xbf\x23\x14"
593 "\x79\xd2\x76\x64\xa3\xbd\x66\x10\x79\x05\x5a\x06\x42\xec\xb9\xc9",
594 .m_size = 32,
595 .public_key_vec = true,
596 .siggen_sigver_test = true,
597 },
598 {
599 .key =
600 "\x04\x40\x66\x6f\xd6\xb7\x06\xd0\xf5\xa5\x6f\x69\x5c\xa5\x13\x45"
601 "\x14\xdd\xcb\x12\x9c\x1b\xf5\x28\x64\x7a\x49\x48\x29\x14\x66\x42"
602 "\xb8\x1b\x5c\xf9\x56\x6d\x08\x3b\xce\xbb\x62\x2f\xc2\x3c\xc5\x49"
603 "\x93\x27\x70\x20\xcc\x79\xeb\xdc\x76\x8e\x48\x6e\x04\x96\xc3\x29"
604 "\xa0\x73",
605 .key_len = 66,
606 .params = /* OID_gostCPSignB */
607 "\x30\x13\x06\x07\x2a\x85\x03\x02\x02\x23\x02\x06\x08\x2a\x85\x03"
608 "\x07\x01\x01\x02\x02",
609 .param_len = 21,
610 .c =
611 "\x45\x6d\x4a\x03\x1d\x5c\x0b\x17\x79\xe7\x19\xdb\xbf\x81\x9f\x82"
612 "\xae\x06\xda\xf5\x47\x00\x05\x80\xc3\x16\x06\x9a\x8e\x7c\xb2\x8e"
613 "\x7f\x74\xaa\xec\x6b\x7b\x7f\x8b\xc6\x0b\x10\x42\x4e\x91\x2c\xdf"
614 "\x7b\x8b\x15\xf4\x9e\x59\x0f\xc7\xa4\x68\x2e\xce\x89\xdf\x84\xe9",
615 .c_size = 64,
616 .algo = OID_gost2012PKey256,
617 .m =
618 "\xd0\x54\x00\x27\x6a\xeb\xce\x6c\xf5\xf6\xfb\x57\x18\x18\x21\x13"
619 "\x11\x23\x4a\x70\x43\x52\x7a\x68\x11\x65\x45\x37\xbb\x25\xb7\x40",
620 .m_size = 32,
621 .public_key_vec = true,
622 .siggen_sigver_test = true,
623 },
624 {
625 .key =
626 "\x04\x40\x05\x91\xa9\x7d\xcb\x87\xdc\x98\xa1\xbf\xff\xdd\x20\x61"
627 "\xaa\x58\x3b\x2d\x8e\x9c\x41\x9d\x4f\xc6\x23\x17\xf9\xca\x60\x65"
628 "\xbc\x97\x97\xf6\x6b\x24\xe8\xac\xb1\xa7\x61\x29\x3c\x71\xdc\xad"
629 "\xcb\x20\xbe\x96\xe8\xf4\x44\x2e\x49\xd5\x2c\xb9\xc9\x3b\x9c\xaa"
630 "\xba\x15",
631 .key_len = 66,
632 .params = /* OID_gostCPSignC */
633 "\x30\x13\x06\x07\x2a\x85\x03\x02\x02\x23\x03\x06\x08\x2a\x85\x03"
634 "\x07\x01\x01\x02\x02",
635 .param_len = 21,
636 .c =
637 "\x3b\x2e\x2e\x74\x74\x47\xda\xea\x93\x90\x6a\xe2\xf5\xf5\xe6\x46"
638 "\x11\xfc\xab\xdc\x52\xbc\x58\xdb\x45\x44\x12\x4a\xf7\xd0\xab\xc9"
639 "\x73\xba\x64\xab\x0d\xac\x4e\x72\x10\xa8\x04\xf6\x1e\xe0\x48\x6a"
640 "\xcd\xe8\xe3\x78\x73\x77\x82\x24\x8d\xf1\xd3\xeb\x4c\x25\x7e\xc0",
641 .c_size = 64,
642 .algo = OID_gost2012PKey256,
643 .m =
644 "\x52\x33\xf4\x3f\x7b\x5d\xcf\x20\xee\xe4\x5c\xab\x0b\x3f\x14\xd6"
645 "\x9f\x16\xc6\x1c\xb1\x3f\x84\x41\x69\xec\x34\xfd\xf1\xf9\xa3\x39",
646 .m_size = 32,
647 .public_key_vec = true,
648 .siggen_sigver_test = true,
649 },
650 {
651 .key =
652 "\x04\x81\x80\x85\x46\x8f\x16\xf8\x7a\x7e\x4a\xc3\x81\x9e\xf1\x6e"
653 "\x94\x1e\x5d\x02\x87\xea\xfa\xa0\x0a\x17\x70\x49\x64\xad\x95\x68"
654 "\x60\x0a\xf0\x57\x29\x41\x79\x30\x3c\x61\x69\xf2\xa6\x94\x87\x17"
655 "\x54\xfa\x97\x2c\xe6\x1e\x0a\xbb\x55\x10\x57\xbe\xf7\xc1\x77\x2b"
656 "\x11\x74\x0a\x50\x37\x14\x10\x2a\x45\xfc\x7a\xae\x1c\x4c\xce\x08"
657 "\x05\xb7\xa4\x50\xc8\x3d\x39\x3d\xdc\x5c\x8f\x96\x6c\xe7\xfc\x21"
658 "\xc3\x2d\x1e\x9f\x11\xb3\xec\x22\x18\x8a\x8c\x08\x6b\x8b\xed\xf5"
659 "\xc5\x47\x3c\x7e\x73\x59\x44\x1e\x77\x83\x84\x52\x9e\x3b\x7d\xff"
660 "\x9d\x86\x1a",
661 .key_len = 131,
662 .params = /* OID_gostTC26Sign512A */
663 "\x30\x0b\x06\x09\x2a\x85\x03\x07\x01\x02\x01\x02\x01",
664 .param_len = 13,
665 .c =
666 "\x92\x81\x74\x5f\x95\x48\x38\x87\xd9\x8f\x5e\xc8\x8a\xbb\x01\x4e"
667 "\xb0\x75\x3c\x2f\xc7\x5a\x08\x4c\x68\xab\x75\x01\x32\x75\x75\xb5"
668 "\x37\xe0\x74\x6d\x94\x84\x31\x2a\x6b\xf4\xf7\xb7\xa7\x39\x7b\x46"
669 "\x07\xf0\x98\xbd\x33\x18\xa1\x72\xb2\x6d\x54\xe3\xde\x91\xc2\x2e"
670 "\x4f\x6a\xf8\xb7\xec\xa8\x83\xc9\x8f\xd9\xce\x7c\x45\x06\x02\xf4"
671 "\x4f\x21\xb5\x24\x3d\xb4\xb5\xd8\x58\x42\xbe\x2d\x29\xae\x93\xc0"
672 "\x13\x41\x96\x35\x08\x69\xe8\x36\xc7\xd1\x83\x81\xd7\xca\xfb\xc0"
673 "\xd2\xb7\x78\x32\x3e\x30\x1a\x1e\xce\xdc\x34\x35\xc6\xad\x68\x24",
674 .c_size = 128,
675 .algo = OID_gost2012PKey512,
676 .m =
677 "\x1f\x70\xb5\xe9\x55\x12\xd6\x88\xcc\x55\xb9\x0c\x7f\xc4\x94\xf2"
678 "\x04\x77\x41\x12\x02\xd6\xf1\x1f\x83\x56\xe9\xd6\x5a\x6a\x72\xb9"
679 "\x6e\x8e\x24\x2a\x84\xf1\xba\x67\xe8\xbf\xff\xc1\xd3\xde\xfb\xc6"
680 "\xa8\xf6\x80\x01\xb9\x27\xac\xd8\x45\x96\x66\xa1\xee\x48\x08\x3f",
681 .m_size = 64,
682 .public_key_vec = true,
683 .siggen_sigver_test = true,
684 },
685 {
686 .key =
687 "\x04\x81\x80\x28\xf3\x2b\x92\x04\x32\xea\x66\x20\xde\xa0\x2f\x74"
688 "\xbf\x2d\xf7\xb5\x30\x76\xb1\xc8\xee\x38\x9f\xea\xe5\xad\xc6\xa3"
689 "\x28\x1e\x51\x3d\x67\xa3\x41\xcc\x6b\x81\xe2\xe2\x9e\x82\xf3\x78"
690 "\x56\xd7\x2e\xb2\xb5\xbe\xb4\x50\x21\x05\xe5\x29\x82\xef\x15\x1b"
691 "\xc0\xd7\x30\xd6\x2f\x96\xe8\xff\x99\x4c\x25\xcf\x9a\xfc\x54\x30"
692 "\xce\xdf\x59\xe9\xc6\x45\xce\xe4\x22\xe8\x01\xd5\xcd\x2f\xaa\x78"
693 "\x99\xc6\x04\x1e\x6f\x4c\x25\x6a\x76\xad\xff\x48\xf3\xb3\xb4\xd6"
694 "\x14\x5c\x2c\x0e\xea\xa2\x4b\xb9\x7e\x89\x77\x02\x3a\x29\xc8\x16"
695 "\x8e\x78\x48",
696 .key_len = 131,
697 .params = /* OID_gostTC26Sign512B */
698 "\x30\x0b\x06\x09\x2a\x85\x03\x07\x01\x02\x01\x02\x02",
699 .param_len = 13,
700 .c =
701 "\x0a\xed\xb6\x27\xea\xa7\xa6\x7e\x2f\xc1\x02\x21\x74\xce\x27\xd2"
702 "\xee\x8a\x92\x4d\xa9\x43\x2d\xa4\x5b\xdc\x23\x02\xfc\x3a\xf3\xb2"
703 "\x10\x93\x0b\x40\x1b\x75\x95\x3e\x39\x41\x37\xb9\xab\x51\x09\xeb"
704 "\xf1\xb9\x49\x58\xec\x58\xc7\xf9\x2e\xb9\xc9\x40\xf2\x00\x39\x7e"
705 "\x3f\xde\x72\xe3\x85\x67\x06\xbe\xd8\xb8\xc1\x81\x1e\xe3\x0a\xfe"
706 "\xce\xd3\x77\x92\x56\x8c\x58\xf9\x37\x60\x2d\xe6\x8b\x66\xa3\xdd"
707 "\xd2\xf0\xf8\xda\x1b\x20\xbc\x9c\xec\x29\x5d\xd1\x8f\xcc\x37\xd1"
708 "\x3b\x8d\xb7\xc1\xe0\xb8\x3b\xef\x14\x1b\x87\xbc\xc1\x03\x9a\x93",
709 .c_size = 128,
710 .algo = OID_gost2012PKey512,
711 .m =
712 "\x11\x24\x21\x27\xf2\x42\x9f\xce\x5a\xf9\x01\x70\xe0\x07\x2b\x57"
713 "\xfb\x7d\x77\x5e\x74\x66\xe6\xa5\x40\x4c\x1a\x85\x18\xff\xd0\x63"
714 "\xe0\x39\xd3\xd6\xe5\x17\xf8\xc3\x4b\xc6\x1c\x33\x1a\xca\xa6\x66"
715 "\x6d\xf4\xd2\x45\xc2\x83\xa0\x42\x95\x05\x9d\x89\x8e\x0a\xca\xcc",
716 .m_size = 64,
717 .public_key_vec = true,
718 .siggen_sigver_test = true,
719 },
720};
721
722/*
554 * PKCS#1 RSA test vectors. Obtained from CAVS testing. 723 * PKCS#1 RSA test vectors. Obtained from CAVS testing.
555 */ 724 */
556static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = { 725static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = {
@@ -7084,7 +7253,7 @@ static const struct cipher_testvec des_tv_template[] = {
7084 "\xb4\x99\x26\xf7\x1f\xe1\xd4\x90", 7253 "\xb4\x99\x26\xf7\x1f\xe1\xd4\x90",
7085 .len = 24, 7254 .len = 24,
7086 }, { /* Weak key */ 7255 }, { /* Weak key */
7087 .fail = true, 7256 .setkey_error = -EINVAL,
7088 .wk = 1, 7257 .wk = 1,
7089 .key = "\x01\x01\x01\x01\x01\x01\x01\x01", 7258 .key = "\x01\x01\x01\x01\x01\x01\x01\x01",
7090 .klen = 8, 7259 .klen = 8,
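The weak-key hunk directly above shows the conversion pattern in miniature: the old boolean @fail flag becomes a concrete expected errno. For illustration, a hypothetical vector using the new fields might look like the sketch below; the key bytes and length are placeholders, not real test data.

/* Hypothetical illustration only -- placeholder key, not a real vector. */
static const struct cipher_testvec example_badkey_tv[] = {
	{
		.key = "\x01\x02\x03\x04\x05",	/* deliberately invalid length */
		.klen = 5,
		.setkey_error = -EINVAL,	/* setkey() must return this */
	},
};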
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index f8e1d9f9938f..40020f8adc46 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -677,7 +677,7 @@ MODULE_ALIAS_CRYPTO("tgr192");
677MODULE_ALIAS_CRYPTO("tgr160"); 677MODULE_ALIAS_CRYPTO("tgr160");
678MODULE_ALIAS_CRYPTO("tgr128"); 678MODULE_ALIAS_CRYPTO("tgr128");
679 679
680module_init(tgr192_mod_init); 680subsys_initcall(tgr192_mod_init);
681module_exit(tgr192_mod_fini); 681module_exit(tgr192_mod_fini);
682 682
683MODULE_LICENSE("GPL"); 683MODULE_LICENSE("GPL");
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index 07e62433fbfb..dbac6e233285 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -205,7 +205,7 @@ static void __exit twofish_mod_fini(void)
205 crypto_unregister_alg(&alg); 205 crypto_unregister_alg(&alg);
206} 206}
207 207
208module_init(twofish_mod_init); 208subsys_initcall(twofish_mod_init);
209module_exit(twofish_mod_fini); 209module_exit(twofish_mod_fini);
210 210
211MODULE_LICENSE("GPL"); 211MODULE_LICENSE("GPL");
diff --git a/crypto/vmac.c b/crypto/vmac.c
index 5f436dfdfc61..f50a85060b39 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -690,7 +690,7 @@ static void __exit vmac_module_exit(void)
690 crypto_unregister_template(&vmac64_tmpl); 690 crypto_unregister_template(&vmac64_tmpl);
691} 691}
692 692
693module_init(vmac_module_init); 693subsys_initcall(vmac_module_init);
694module_exit(vmac_module_exit); 694module_exit(vmac_module_exit);
695 695
696MODULE_LICENSE("GPL"); 696MODULE_LICENSE("GPL");
diff --git a/crypto/wp512.c b/crypto/wp512.c
index 149e577fb772..1b8e502d999f 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -1168,7 +1168,7 @@ MODULE_ALIAS_CRYPTO("wp512");
1168MODULE_ALIAS_CRYPTO("wp384"); 1168MODULE_ALIAS_CRYPTO("wp384");
1169MODULE_ALIAS_CRYPTO("wp256"); 1169MODULE_ALIAS_CRYPTO("wp256");
1170 1170
1171module_init(wp512_mod_init); 1171subsys_initcall(wp512_mod_init);
1172module_exit(wp512_mod_fini); 1172module_exit(wp512_mod_fini);
1173 1173
1174MODULE_LICENSE("GPL"); 1174MODULE_LICENSE("GPL");
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index c055f57fab11..94ca694ef091 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -282,7 +282,7 @@ static void __exit crypto_xcbc_module_exit(void)
282 crypto_unregister_template(&crypto_xcbc_tmpl); 282 crypto_unregister_template(&crypto_xcbc_tmpl);
283} 283}
284 284
285module_init(crypto_xcbc_module_init); 285subsys_initcall(crypto_xcbc_module_init);
286module_exit(crypto_xcbc_module_exit); 286module_exit(crypto_xcbc_module_exit);
287 287
288MODULE_LICENSE("GPL"); 288MODULE_LICENSE("GPL");
diff --git a/crypto/xts.c b/crypto/xts.c
index 2f948328cabb..33cf726df4ac 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -363,7 +363,7 @@ static void __exit crypto_module_exit(void)
363 crypto_unregister_template(&crypto_tmpl); 363 crypto_unregister_template(&crypto_tmpl);
364} 364}
365 365
366module_init(crypto_module_init); 366subsys_initcall(crypto_module_init);
367module_exit(crypto_module_exit); 367module_exit(crypto_module_exit);
368 368
369MODULE_LICENSE("GPL"); 369MODULE_LICENSE("GPL");
diff --git a/crypto/zstd.c b/crypto/zstd.c
index 9a76b3ed8b8b..2c04055e407f 100644
--- a/crypto/zstd.c
+++ b/crypto/zstd.c
@@ -257,7 +257,7 @@ static void __exit zstd_mod_fini(void)
257 crypto_unregister_scomp(&scomp); 257 crypto_unregister_scomp(&scomp);
258} 258}
259 259
260module_init(zstd_mod_init); 260subsys_initcall(zstd_mod_init);
261module_exit(zstd_mod_fini); 261module_exit(zstd_mod_fini);
262 262
263MODULE_LICENSE("GPL"); 263MODULE_LICENSE("GPL");
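The long run of module_init() to subsys_initcall() conversions above follows one pattern: register the generic software algorithms at an earlier initcall level so they already exist when device drivers, and the self-tests that instantiate generic templates such as "xts(ecb(aes-generic))", come up. For loadable modules nothing changes, since subsys_initcall() is defined as module_init() in that case. The shape of each conversion, sketched generically:

/* Built-in: runs at subsys_initcall time, before device initcalls.
 * Modular: subsys_initcall() degrades to module_init(), so load
 * order for .ko builds is unaffected. */
subsys_initcall(example_mod_init);
module_exit(example_mod_fini);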
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c7ad88d91a09..843a9b9b3d74 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -5443,7 +5443,6 @@ static int drbd_do_auth(struct drbd_connection *connection)
5443 rcu_read_unlock(); 5443 rcu_read_unlock();
5444 5444
5445 desc->tfm = connection->cram_hmac_tfm; 5445 desc->tfm = connection->cram_hmac_tfm;
5446 desc->flags = 0;
5447 5446
5448 rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len); 5447 rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
5449 if (rv) { 5448 if (rv) {
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 268ef0c5d4ab..6781bcf3ec26 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -304,7 +304,6 @@ void drbd_csum_ee(struct crypto_shash *tfm, struct drbd_peer_request *peer_req,
304 void *src; 304 void *src;
305 305
306 desc->tfm = tfm; 306 desc->tfm = tfm;
307 desc->flags = 0;
308 307
309 crypto_shash_init(desc); 308 crypto_shash_init(desc);
310 309
@@ -332,7 +331,6 @@ void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
332 struct bvec_iter iter; 331 struct bvec_iter iter;
333 332
334 desc->tfm = tfm; 333 desc->tfm = tfm;
335 desc->flags = 0;
336 334
337 crypto_shash_init(desc); 335 crypto_shash_init(desc);
338 336
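The drbd hunks above belong to a tree-wide cleanup: shash_desc lost its ->flags member (the sleep hint it carried was effectively unused by shash implementations), so callers now only set the tfm. A generic sketch of the resulting pattern, with placeholder names:

#include <crypto/hash.h>

static int example_digest(struct crypto_shash *tfm, const u8 *data,
			  unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);

	desc->tfm = tfm;	/* desc->flags no longer exists */
	return crypto_shash_digest(desc, data, len, out);
}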
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index b65ff6962899..e9b6ac61fb7f 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -443,6 +443,7 @@ static int omap_rng_probe(struct platform_device *pdev)
443 priv->rng.read = omap_rng_do_read; 443 priv->rng.read = omap_rng_do_read;
444 priv->rng.init = omap_rng_init; 444 priv->rng.init = omap_rng_init;
445 priv->rng.cleanup = omap_rng_cleanup; 445 priv->rng.cleanup = omap_rng_cleanup;
446 priv->rng.quality = 900;
446 447
447 priv->rng.priv = (unsigned long)priv; 448 priv->rng.priv = (unsigned long)priv;
448 platform_set_drvdata(pdev, priv); 449 platform_set_drvdata(pdev, priv);
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 042860d97b15..0ef5b6a3f560 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -161,6 +161,7 @@ static int stm32_rng_probe(struct platform_device *ofdev)
161#endif 161#endif
162 priv->rng.read = stm32_rng_read, 162 priv->rng.read = stm32_rng_read,
163 priv->rng.priv = (unsigned long) dev; 163 priv->rng.priv = (unsigned long) dev;
164 priv->rng.quality = 900;
164 165
165 pm_runtime_set_autosuspend_delay(dev, 100); 166 pm_runtime_set_autosuspend_delay(dev, 100);
166 pm_runtime_use_autosuspend(dev); 167 pm_runtime_use_autosuspend(dev);
@@ -169,6 +170,13 @@ static int stm32_rng_probe(struct platform_device *ofdev)
169 return devm_hwrng_register(dev, &priv->rng); 170 return devm_hwrng_register(dev, &priv->rng);
170} 171}
171 172
173static int stm32_rng_remove(struct platform_device *ofdev)
174{
175 pm_runtime_disable(&ofdev->dev);
176
177 return 0;
178}
179
172#ifdef CONFIG_PM 180#ifdef CONFIG_PM
173static int stm32_rng_runtime_suspend(struct device *dev) 181static int stm32_rng_runtime_suspend(struct device *dev)
174{ 182{
@@ -210,6 +218,7 @@ static struct platform_driver stm32_rng_driver = {
210 .of_match_table = stm32_rng_match, 218 .of_match_table = stm32_rng_match,
211 }, 219 },
212 .probe = stm32_rng_probe, 220 .probe = stm32_rng_probe,
221 .remove = stm32_rng_remove,
213}; 222};
214 223
215module_platform_driver(stm32_rng_driver); 224module_platform_driver(stm32_rng_driver);
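Both hwrng probes above start advertising a non-default entropy estimate. The quality field counts bits of entropy per 1024 bits of output, so 900 claims roughly 88% entropy and lets the kernel credit these TRNGs as entropy sources without a rng_core.default_quality override. A sketch of the registration shape; all names here are illustrative:

#include <linux/hw_random.h>

/* Hypothetical read callback, stubbed for illustration. */
static int example_rng_read(struct hwrng *rng, void *buf, size_t max,
			    bool wait)
{
	return 0;
}

static struct hwrng example_rng = {
	.name    = "example-trng",
	.read    = example_rng_read,
	.quality = 900,	/* entropy estimate per 1024 bits of output */
};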
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 0be55fcc19ba..177b7713bd2d 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -404,15 +404,6 @@ config CRYPTO_DEV_SAHARA
404 This option enables support for the SAHARA HW crypto accelerator 404 This option enables support for the SAHARA HW crypto accelerator
405 found in some Freescale i.MX chips. 405 found in some Freescale i.MX chips.
406 406
407config CRYPTO_DEV_MXC_SCC
408 tristate "Support for Freescale Security Controller (SCC)"
409 depends on ARCH_MXC && OF
410 select CRYPTO_BLKCIPHER
411 select CRYPTO_DES
412 help
413 This option enables support for the Security Controller (SCC)
414 found in Freescale i.MX25 chips.
415
416config CRYPTO_DEV_EXYNOS_RNG 407config CRYPTO_DEV_EXYNOS_RNG
417 tristate "EXYNOS HW pseudo random number generator support" 408 tristate "EXYNOS HW pseudo random number generator support"
418 depends on ARCH_EXYNOS || COMPILE_TEST 409 depends on ARCH_EXYNOS || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 8e7e225d2446..a23a7197fcd7 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
18obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/ 18obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/
19obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/ 19obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
20obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o 20obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
21obj-$(CONFIG_CRYPTO_DEV_MXC_SCC) += mxc-scc.o
22obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o 21obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
23n2_crypto-y := n2_core.o n2_asm.o 22n2_crypto-y := n2_core.o n2_asm.o
24obj-$(CONFIG_CRYPTO_DEV_NX) += nx/ 23obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index 4092c2aad8e2..307f5cfa9ba4 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -141,9 +141,10 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
141 /* Setup SA */ 141 /* Setup SA */
142 sa = ctx->sa_in; 142 sa = ctx->sa_in;
143 143
144 set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ? 144 set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_ECB ?
145 SA_SAVE_IV : SA_NOT_SAVE_IV), 145 SA_NOT_SAVE_IV : SA_SAVE_IV),
146 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE, 146 SA_NOT_LOAD_HASH, (cm == CRYPTO_MODE_ECB ?
147 SA_LOAD_IV_FROM_SA : SA_LOAD_IV_FROM_STATE),
147 SA_NO_HEADER_PROC, SA_HASH_ALG_NULL, 148 SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
148 SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO, 149 SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
149 SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT, 150 SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT,
@@ -162,6 +163,11 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
162 memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4); 163 memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
163 sa = ctx->sa_out; 164 sa = ctx->sa_out;
164 sa->sa_command_0.bf.dir = DIR_OUTBOUND; 165 sa->sa_command_0.bf.dir = DIR_OUTBOUND;
166 /*
 167 * SA_OPCODE_ENCRYPT is the same value as SA_OPCODE_DECRYPT;
 168 * it's the DIR_(IN|OUT)BOUND that matters.
169 */
170 sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT;
165 171
166 return 0; 172 return 0;
167} 173}
@@ -258,10 +264,10 @@ crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
 258 * overflow. 264 */
259 */ 265 */
260 if (counter + nblks < counter) { 266 if (counter + nblks < counter) {
261 struct skcipher_request *subreq = skcipher_request_ctx(req); 267 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher.cipher);
262 int ret; 268 int ret;
263 269
264 skcipher_request_set_tfm(subreq, ctx->sw_cipher.cipher); 270 skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher.cipher);
265 skcipher_request_set_callback(subreq, req->base.flags, 271 skcipher_request_set_callback(subreq, req->base.flags,
266 NULL, NULL); 272 NULL, NULL);
267 skcipher_request_set_crypt(subreq, req->src, req->dst, 273 skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -283,14 +289,14 @@ static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
283{ 289{
284 int rc; 290 int rc;
285 291
286 crypto_skcipher_clear_flags(ctx->sw_cipher.cipher, 292 crypto_sync_skcipher_clear_flags(ctx->sw_cipher.cipher,
287 CRYPTO_TFM_REQ_MASK); 293 CRYPTO_TFM_REQ_MASK);
288 crypto_skcipher_set_flags(ctx->sw_cipher.cipher, 294 crypto_sync_skcipher_set_flags(ctx->sw_cipher.cipher,
289 crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK); 295 crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
290 rc = crypto_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen); 296 rc = crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
291 crypto_skcipher_clear_flags(cipher, CRYPTO_TFM_RES_MASK); 297 crypto_skcipher_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
292 crypto_skcipher_set_flags(cipher, 298 crypto_skcipher_set_flags(cipher,
293 crypto_skcipher_get_flags(ctx->sw_cipher.cipher) & 299 crypto_sync_skcipher_get_flags(ctx->sw_cipher.cipher) &
294 CRYPTO_TFM_RES_MASK); 300 CRYPTO_TFM_RES_MASK);
295 301
296 return rc; 302 return rc;
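The crypto4xx fallback changes above move from a hand-managed async skcipher (with explicit request-size accounting) to the sync-skcipher helpers, whose request lives on the caller's stack. A generic sketch of that pattern, under illustrative names:

#include <crypto/skcipher.h>

static int example_fallback_encrypt(struct crypto_sync_skcipher *fallback,
				    struct scatterlist *src,
				    struct scatterlist *dst,
				    unsigned int nbytes, u8 *iv)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fallback);
	int err;

	skcipher_request_set_sync_tfm(subreq, fallback);
	skcipher_request_set_callback(subreq, 0, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst, nbytes, iv);
	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);	/* wipe the on-stack request */

	return err;
}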
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 06574a884715..3934c2523762 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -539,7 +539,7 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
539 539
540 req = skcipher_request_cast(pd_uinfo->async_req); 540 req = skcipher_request_cast(pd_uinfo->async_req);
541 541
542 if (pd_uinfo->using_sd) { 542 if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
543 crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, 543 crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
544 req->cryptlen, req->dst); 544 req->cryptlen, req->dst);
545 } else { 545 } else {
@@ -593,7 +593,7 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
593 u32 icv[AES_BLOCK_SIZE]; 593 u32 icv[AES_BLOCK_SIZE];
594 int err = 0; 594 int err = 0;
595 595
596 if (pd_uinfo->using_sd) { 596 if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
597 crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, 597 crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
598 pd->pd_ctl_len.bf.pkt_len, 598 pd->pd_ctl_len.bf.pkt_len,
599 dst); 599 dst);
@@ -714,7 +714,23 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
714 size_t offset_to_sr_ptr; 714 size_t offset_to_sr_ptr;
715 u32 gd_idx = 0; 715 u32 gd_idx = 0;
716 int tmp; 716 int tmp;
717 bool is_busy; 717 bool is_busy, force_sd;
718
719 /*
 720 * There's a very subtle, disguised "bug" in the hardware that
721 * gets indirectly mentioned in 18.1.3.5 Encryption/Decryption
722 * of the hardware spec:
723 * *drum roll* the AES/(T)DES OFB and CFB modes are listed as
724 * operation modes for >>> "Block ciphers" <<<.
725 *
 726 * To work around this issue and stop the hardware from causing
 727 * "overran dst buffer" errors on ciphertexts that are not a multiple
728 * of 16 (AES_BLOCK_SIZE), we force the driver to use the
729 * scatter buffers.
730 */
731 force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB
732 || req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB)
733 && (datalen % AES_BLOCK_SIZE);
718 734
719 /* figure how many gd are needed */ 735 /* figure how many gd are needed */
720 tmp = sg_nents_for_len(src, assoclen + datalen); 736 tmp = sg_nents_for_len(src, assoclen + datalen);
@@ -732,7 +748,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
732 } 748 }
733 749
734 /* figure how many sd are needed */ 750 /* figure how many sd are needed */
735 if (sg_is_last(dst)) { 751 if (sg_is_last(dst) && force_sd == false) {
736 num_sd = 0; 752 num_sd = 0;
737 } else { 753 } else {
738 if (datalen > PPC4XX_SD_BUFFER_SIZE) { 754 if (datalen > PPC4XX_SD_BUFFER_SIZE) {
@@ -807,9 +823,10 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
807 pd->sa_len = sa_len; 823 pd->sa_len = sa_len;
808 824
809 pd_uinfo = &dev->pdr_uinfo[pd_entry]; 825 pd_uinfo = &dev->pdr_uinfo[pd_entry];
810 pd_uinfo->async_req = req;
811 pd_uinfo->num_gd = num_gd; 826 pd_uinfo->num_gd = num_gd;
812 pd_uinfo->num_sd = num_sd; 827 pd_uinfo->num_sd = num_sd;
828 pd_uinfo->dest_va = dst;
829 pd_uinfo->async_req = req;
813 830
814 if (iv_len) 831 if (iv_len)
815 memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len); 832 memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
@@ -828,7 +845,6 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
828 /* get first gd we are going to use */ 845 /* get first gd we are going to use */
829 gd_idx = fst_gd; 846 gd_idx = fst_gd;
830 pd_uinfo->first_gd = fst_gd; 847 pd_uinfo->first_gd = fst_gd;
831 pd_uinfo->num_gd = num_gd;
832 gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx); 848 gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
833 pd->src = gd_dma; 849 pd->src = gd_dma;
834 /* enable gather */ 850 /* enable gather */
@@ -865,17 +881,13 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
865 * Indicate gather array is not used 881 * Indicate gather array is not used
866 */ 882 */
867 pd_uinfo->first_gd = 0xffffffff; 883 pd_uinfo->first_gd = 0xffffffff;
868 pd_uinfo->num_gd = 0;
869 } 884 }
870 if (sg_is_last(dst)) { 885 if (!num_sd) {
871 /* 886 /*
 872 * we know the application gave us dst as one whole piece of memory, 887 * we know the application gave us dst as one whole piece of memory,
 873 * so there is no need to use the scatter ring. 888 * so there is no need to use the scatter ring.
874 */ 889 */
875 pd_uinfo->using_sd = 0;
876 pd_uinfo->first_sd = 0xffffffff; 890 pd_uinfo->first_sd = 0xffffffff;
877 pd_uinfo->num_sd = 0;
878 pd_uinfo->dest_va = dst;
879 sa->sa_command_0.bf.scatter = 0; 891 sa->sa_command_0.bf.scatter = 0;
880 pd->dest = (u32)dma_map_page(dev->core_dev->device, 892 pd->dest = (u32)dma_map_page(dev->core_dev->device,
881 sg_page(dst), dst->offset, 893 sg_page(dst), dst->offset,
@@ -888,10 +900,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
888 u32 sd_idx = fst_sd; 900 u32 sd_idx = fst_sd;
889 nbytes = datalen; 901 nbytes = datalen;
890 sa->sa_command_0.bf.scatter = 1; 902 sa->sa_command_0.bf.scatter = 1;
891 pd_uinfo->using_sd = 1;
892 pd_uinfo->dest_va = dst;
893 pd_uinfo->first_sd = fst_sd; 903 pd_uinfo->first_sd = fst_sd;
894 pd_uinfo->num_sd = num_sd;
895 sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx); 904 sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
896 pd->dest = sd_dma; 905 pd->dest = sd_dma;
897 /* setup scatter descriptor */ 906 /* setup scatter descriptor */
@@ -954,15 +963,10 @@ static int crypto4xx_sk_init(struct crypto_skcipher *sk)
954 963
955 if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) { 964 if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
956 ctx->sw_cipher.cipher = 965 ctx->sw_cipher.cipher =
957 crypto_alloc_skcipher(alg->base.cra_name, 0, 966 crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
958 CRYPTO_ALG_NEED_FALLBACK | 967 CRYPTO_ALG_NEED_FALLBACK);
959 CRYPTO_ALG_ASYNC);
960 if (IS_ERR(ctx->sw_cipher.cipher)) 968 if (IS_ERR(ctx->sw_cipher.cipher))
961 return PTR_ERR(ctx->sw_cipher.cipher); 969 return PTR_ERR(ctx->sw_cipher.cipher);
962
963 crypto_skcipher_set_reqsize(sk,
964 sizeof(struct skcipher_request) + 32 +
965 crypto_skcipher_reqsize(ctx->sw_cipher.cipher));
966 } 970 }
967 971
968 amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher); 972 amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
@@ -981,7 +985,7 @@ static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
981 985
982 crypto4xx_common_exit(ctx); 986 crypto4xx_common_exit(ctx);
983 if (ctx->sw_cipher.cipher) 987 if (ctx->sw_cipher.cipher)
984 crypto_free_skcipher(ctx->sw_cipher.cipher); 988 crypto_free_sync_skcipher(ctx->sw_cipher.cipher);
985} 989}
986 990
987static int crypto4xx_aead_init(struct crypto_aead *tfm) 991static int crypto4xx_aead_init(struct crypto_aead *tfm)
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 18df695ca6b1..c624f8cd3d2e 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -64,7 +64,6 @@ union shadow_sa_buf {
64struct pd_uinfo { 64struct pd_uinfo {
65 struct crypto4xx_device *dev; 65 struct crypto4xx_device *dev;
66 u32 state; 66 u32 state;
67 u32 using_sd;
 68 u32 first_gd; /* first gather descriptor 67 u32 first_gd; /* first gather descriptor
 69 used by this packet */ 68 used by this packet */
 70 u32 num_gd; /* number of gather descriptors 69 u32 num_gd; /* number of gather descriptors
@@ -131,7 +130,7 @@ struct crypto4xx_ctx {
131 __le32 iv_nonce; 130 __le32 iv_nonce;
132 u32 sa_len; 131 u32 sa_len;
133 union { 132 union {
134 struct crypto_skcipher *cipher; 133 struct crypto_sync_skcipher *cipher;
135 struct crypto_aead *aead; 134 struct crypto_aead *aead;
136 } sw_cipher; 135 } sw_cipher;
137}; 136};
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 65bf1a299562..fa76620281e8 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -800,20 +800,14 @@ static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
800 unsigned int keylen) 800 unsigned int keylen)
801{ 801{
802 struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm); 802 struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
803 const char *alg_name; 803 u32 flags;
804 804 int err;
805 alg_name = crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm));
806 805
807 /* 806 flags = crypto_ablkcipher_get_flags(tfm);
808 * HW bug in cfb 3-keys mode. 807 err = __des3_verify_key(&flags, key);
809 */ 808 if (unlikely(err)) {
810 if (!ctx->dd->caps.has_cfb_3keys && strstr(alg_name, "cfb") 809 crypto_ablkcipher_set_flags(tfm, flags);
811 && (keylen != 2*DES_KEY_SIZE)) { 810 return err;
812 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
813 return -EINVAL;
814 } else if ((keylen != 2*DES_KEY_SIZE) && (keylen != 3*DES_KEY_SIZE)) {
815 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
816 return -EINVAL;
817 } 811 }
818 812
819 memcpy(ctx->key, key, keylen); 813 memcpy(ctx->key, key, keylen);
@@ -1060,7 +1054,7 @@ static struct crypto_alg tdes_algs[] = {
1060 .cra_module = THIS_MODULE, 1054 .cra_module = THIS_MODULE,
1061 .cra_init = atmel_tdes_cra_init, 1055 .cra_init = atmel_tdes_cra_init,
1062 .cra_u.ablkcipher = { 1056 .cra_u.ablkcipher = {
1063 .min_keysize = 2 * DES_KEY_SIZE, 1057 .min_keysize = 3 * DES_KEY_SIZE,
1064 .max_keysize = 3 * DES_KEY_SIZE, 1058 .max_keysize = 3 * DES_KEY_SIZE,
1065 .setkey = atmel_tdes_setkey, 1059 .setkey = atmel_tdes_setkey,
1066 .encrypt = atmel_tdes_ecb_encrypt, 1060 .encrypt = atmel_tdes_ecb_encrypt,
@@ -1079,7 +1073,7 @@ static struct crypto_alg tdes_algs[] = {
1079 .cra_module = THIS_MODULE, 1073 .cra_module = THIS_MODULE,
1080 .cra_init = atmel_tdes_cra_init, 1074 .cra_init = atmel_tdes_cra_init,
1081 .cra_u.ablkcipher = { 1075 .cra_u.ablkcipher = {
1082 .min_keysize = 2*DES_KEY_SIZE, 1076 .min_keysize = 3*DES_KEY_SIZE,
1083 .max_keysize = 3*DES_KEY_SIZE, 1077 .max_keysize = 3*DES_KEY_SIZE,
1084 .ivsize = DES_BLOCK_SIZE, 1078 .ivsize = DES_BLOCK_SIZE,
1085 .setkey = atmel_tdes_setkey, 1079 .setkey = atmel_tdes_setkey,
@@ -1088,86 +1082,6 @@ static struct crypto_alg tdes_algs[] = {
1088 } 1082 }
1089}, 1083},
1090{ 1084{
1091 .cra_name = "cfb(des3_ede)",
1092 .cra_driver_name = "atmel-cfb-tdes",
1093 .cra_priority = 100,
1094 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1095 .cra_blocksize = DES_BLOCK_SIZE,
1096 .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
1097 .cra_alignmask = 0x7,
1098 .cra_type = &crypto_ablkcipher_type,
1099 .cra_module = THIS_MODULE,
1100 .cra_init = atmel_tdes_cra_init,
1101 .cra_u.ablkcipher = {
1102 .min_keysize = 2*DES_KEY_SIZE,
1103 .max_keysize = 2*DES_KEY_SIZE,
1104 .ivsize = DES_BLOCK_SIZE,
1105 .setkey = atmel_tdes_setkey,
1106 .encrypt = atmel_tdes_cfb_encrypt,
1107 .decrypt = atmel_tdes_cfb_decrypt,
1108 }
1109},
1110{
1111 .cra_name = "cfb8(des3_ede)",
1112 .cra_driver_name = "atmel-cfb8-tdes",
1113 .cra_priority = 100,
1114 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1115 .cra_blocksize = CFB8_BLOCK_SIZE,
1116 .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
1117 .cra_alignmask = 0,
1118 .cra_type = &crypto_ablkcipher_type,
1119 .cra_module = THIS_MODULE,
1120 .cra_init = atmel_tdes_cra_init,
1121 .cra_u.ablkcipher = {
1122 .min_keysize = 2*DES_KEY_SIZE,
1123 .max_keysize = 2*DES_KEY_SIZE,
1124 .ivsize = DES_BLOCK_SIZE,
1125 .setkey = atmel_tdes_setkey,
1126 .encrypt = atmel_tdes_cfb8_encrypt,
1127 .decrypt = atmel_tdes_cfb8_decrypt,
1128 }
1129},
1130{
1131 .cra_name = "cfb16(des3_ede)",
1132 .cra_driver_name = "atmel-cfb16-tdes",
1133 .cra_priority = 100,
1134 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1135 .cra_blocksize = CFB16_BLOCK_SIZE,
1136 .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
1137 .cra_alignmask = 0x1,
1138 .cra_type = &crypto_ablkcipher_type,
1139 .cra_module = THIS_MODULE,
1140 .cra_init = atmel_tdes_cra_init,
1141 .cra_u.ablkcipher = {
1142 .min_keysize = 2*DES_KEY_SIZE,
1143 .max_keysize = 2*DES_KEY_SIZE,
1144 .ivsize = DES_BLOCK_SIZE,
1145 .setkey = atmel_tdes_setkey,
1146 .encrypt = atmel_tdes_cfb16_encrypt,
1147 .decrypt = atmel_tdes_cfb16_decrypt,
1148 }
1149},
1150{
1151 .cra_name = "cfb32(des3_ede)",
1152 .cra_driver_name = "atmel-cfb32-tdes",
1153 .cra_priority = 100,
1154 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1155 .cra_blocksize = CFB32_BLOCK_SIZE,
1156 .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
1157 .cra_alignmask = 0x3,
1158 .cra_type = &crypto_ablkcipher_type,
1159 .cra_module = THIS_MODULE,
1160 .cra_init = atmel_tdes_cra_init,
1161 .cra_u.ablkcipher = {
1162 .min_keysize = 2*DES_KEY_SIZE,
1163 .max_keysize = 2*DES_KEY_SIZE,
1164 .ivsize = DES_BLOCK_SIZE,
1165 .setkey = atmel_tdes_setkey,
1166 .encrypt = atmel_tdes_cfb32_encrypt,
1167 .decrypt = atmel_tdes_cfb32_decrypt,
1168 }
1169},
1170{
1171 .cra_name = "ofb(des3_ede)", 1085 .cra_name = "ofb(des3_ede)",
1172 .cra_driver_name = "atmel-ofb-tdes", 1086 .cra_driver_name = "atmel-ofb-tdes",
1173 .cra_priority = 100, 1087 .cra_priority = 100,
@@ -1179,7 +1093,7 @@ static struct crypto_alg tdes_algs[] = {
1179 .cra_module = THIS_MODULE, 1093 .cra_module = THIS_MODULE,
1180 .cra_init = atmel_tdes_cra_init, 1094 .cra_init = atmel_tdes_cra_init,
1181 .cra_u.ablkcipher = { 1095 .cra_u.ablkcipher = {
1182 .min_keysize = 2*DES_KEY_SIZE, 1096 .min_keysize = 3*DES_KEY_SIZE,
1183 .max_keysize = 3*DES_KEY_SIZE, 1097 .max_keysize = 3*DES_KEY_SIZE,
1184 .ivsize = DES_BLOCK_SIZE, 1098 .ivsize = DES_BLOCK_SIZE,
1185 .setkey = atmel_tdes_setkey, 1099 .setkey = atmel_tdes_setkey,
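The atmel-tdes conversion above, and the bcm and caam ones that follow, all funnel 3DES keys through the same helper: __des3_verify_key() rejects key schedules whose halves repeat (effectively 2-key 3DES), unconditionally in FIPS mode and whenever the tfm forbids weak keys. The recurring setkey shape, sketched generically with the hardware-programming step elided:

#include <crypto/des.h>
#include <linux/crypto.h>

static int example_des3_setkey(struct crypto_ablkcipher *tfm,
			       const u8 *key, unsigned int keylen)
{
	u32 flags = crypto_ablkcipher_get_flags(tfm);
	int err = __des3_verify_key(&flags, key);

	if (unlikely(err)) {
		crypto_ablkcipher_set_flags(tfm, flags);
		return err;
	}

	/* ...store/program the 24-byte key into the hardware context... */
	return 0;
}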
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 57e5dca3253f..d2fb72811442 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2247,8 +2247,6 @@ artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
2247 SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash); 2247 SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);
2248 2248
2249 hdesc->tfm = tfm_ctx->child_hash; 2249 hdesc->tfm = tfm_ctx->child_hash;
2250 hdesc->flags = crypto_ahash_get_flags(tfm) &
2251 CRYPTO_TFM_REQ_MAY_SLEEP;
2252 2250
2253 tfm_ctx->hmac_key_length = blocksize; 2251 tfm_ctx->hmac_key_length = blocksize;
2254 ret = crypto_shash_digest(hdesc, key, keylen, 2252 ret = crypto_shash_digest(hdesc, key, keylen,
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 28f592f7e1b7..25f8d3913ceb 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1840,13 +1840,14 @@ static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1840 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher); 1840 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1841 1841
1842 if (keylen == (DES_KEY_SIZE * 3)) { 1842 if (keylen == (DES_KEY_SIZE * 3)) {
1843 const u32 *K = (const u32 *)key; 1843 u32 flags;
1844 u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; 1844 int ret;
1845 1845
1846 if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || 1846 flags = crypto_ablkcipher_get_flags(cipher);
1847 !((K[2] ^ K[4]) | (K[3] ^ K[5]))) { 1847 ret = __des3_verify_key(&flags, key);
1848 if (unlikely(ret)) {
1848 crypto_ablkcipher_set_flags(cipher, flags); 1849 crypto_ablkcipher_set_flags(cipher, flags);
1849 return -EINVAL; 1850 return ret;
1850 } 1851 }
1851 1852
1852 ctx->cipher_type = CIPHER_TYPE_3DES; 1853 ctx->cipher_type = CIPHER_TYPE_3DES;
@@ -2139,7 +2140,6 @@ static int ahash_init(struct ahash_request *req)
2139 goto err_hash; 2140 goto err_hash;
2140 } 2141 }
2141 ctx->shash->tfm = hash; 2142 ctx->shash->tfm = hash;
2142 ctx->shash->flags = 0;
2143 2143
2144 /* Set the key using data we already have from setkey */ 2144 /* Set the key using data we already have from setkey */
2145 if (ctx->authkeylen > 0) { 2145 if (ctx->authkeylen > 0) {
@@ -2885,13 +2885,13 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
2885 break; 2885 break;
2886 case CIPHER_ALG_3DES: 2886 case CIPHER_ALG_3DES:
2887 if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { 2887 if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
2888 const u32 *K = (const u32 *)keys.enckey; 2888 u32 flags;
2889 u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
2890 2889
2891 if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || 2890 flags = crypto_aead_get_flags(cipher);
2892 !((K[2] ^ K[4]) | (K[3] ^ K[5]))) { 2891 ret = __des3_verify_key(&flags, keys.enckey);
2892 if (unlikely(ret)) {
2893 crypto_aead_set_flags(cipher, flags); 2893 crypto_aead_set_flags(cipher, flags);
2894 return -EINVAL; 2894 return ret;
2895 } 2895 }
2896 2896
2897 ctx->cipher_type = CIPHER_TYPE_3DES; 2897 ctx->cipher_type = CIPHER_TYPE_3DES;
diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c
index dbb5c03dde49..2baf6d7f2c1d 100644
--- a/drivers/crypto/bcm/spu.c
+++ b/drivers/crypto/bcm/spu.c
@@ -22,9 +22,6 @@
22#include "spum.h" 22#include "spum.h"
23#include "cipher.h" 23#include "cipher.h"
24 24
25/* This array is based on the hash algo type supported in spu.h */
26char *tag_to_hash_idx[] = { "none", "md5", "sha1", "sha224", "sha256" };
27
28char *hash_alg_name[] = { "None", "md5", "sha1", "sha224", "sha256", "aes", 25char *hash_alg_name[] = { "None", "md5", "sha1", "sha224", "sha256", "aes",
29 "sha384", "sha512", "sha3_224", "sha3_256", "sha3_384", "sha3_512" }; 26 "sha384", "sha512", "sha3_224", "sha3_256", "sha3_384", "sha3_512" };
30 27
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index d8cda5fb75ad..91ec56399d84 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -242,7 +242,6 @@ int do_shash(unsigned char *name, unsigned char *result,
242 goto do_shash_err; 242 goto do_shash_err;
243 } 243 }
244 sdesc->shash.tfm = hash; 244 sdesc->shash.tfm = hash;
245 sdesc->shash.flags = 0x0;
246 245
247 if (key_len > 0) { 246 if (key_len > 0) {
248 rc = crypto_shash_setkey(hash, key, key_len); 247 rc = crypto_shash_setkey(hash, key, key_len);
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 579578498deb..3e23d4b2cce2 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -638,6 +638,39 @@ badkey:
638 return -EINVAL; 638 return -EINVAL;
639} 639}
640 640
641static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
642 unsigned int keylen)
643{
644 struct crypto_authenc_keys keys;
645 u32 flags;
646 int err;
647
648 err = crypto_authenc_extractkeys(&keys, key, keylen);
649 if (unlikely(err))
650 goto badkey;
651
652 err = -EINVAL;
653 if (keys.enckeylen != DES3_EDE_KEY_SIZE)
654 goto badkey;
655
656 flags = crypto_aead_get_flags(aead);
657 err = __des3_verify_key(&flags, keys.enckey);
658 if (unlikely(err)) {
659 crypto_aead_set_flags(aead, flags);
660 goto out;
661 }
662
663 err = aead_setkey(aead, key, keylen);
664
665out:
666 memzero_explicit(&keys, sizeof(keys));
667 return err;
668
669badkey:
670 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
671 goto out;
672}
673
641static int gcm_setkey(struct crypto_aead *aead, 674static int gcm_setkey(struct crypto_aead *aead,
642 const u8 *key, unsigned int keylen) 675 const u8 *key, unsigned int keylen)
643{ 676{
@@ -2457,7 +2490,7 @@ static struct caam_aead_alg driver_aeads[] = {
2457 "cbc-des3_ede-caam", 2490 "cbc-des3_ede-caam",
2458 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2491 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2459 }, 2492 },
2460 .setkey = aead_setkey, 2493 .setkey = des3_aead_setkey,
2461 .setauthsize = aead_setauthsize, 2494 .setauthsize = aead_setauthsize,
2462 .encrypt = aead_encrypt, 2495 .encrypt = aead_encrypt,
2463 .decrypt = aead_decrypt, 2496 .decrypt = aead_decrypt,
@@ -2479,7 +2512,7 @@ static struct caam_aead_alg driver_aeads[] = {
2479 "cbc-des3_ede-caam", 2512 "cbc-des3_ede-caam",
2480 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2513 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2481 }, 2514 },
2482 .setkey = aead_setkey, 2515 .setkey = des3_aead_setkey,
2483 .setauthsize = aead_setauthsize, 2516 .setauthsize = aead_setauthsize,
2484 .encrypt = aead_encrypt, 2517 .encrypt = aead_encrypt,
2485 .decrypt = aead_decrypt, 2518 .decrypt = aead_decrypt,
@@ -2502,7 +2535,7 @@ static struct caam_aead_alg driver_aeads[] = {
2502 "cbc-des3_ede-caam", 2535 "cbc-des3_ede-caam",
2503 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2536 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2504 }, 2537 },
2505 .setkey = aead_setkey, 2538 .setkey = des3_aead_setkey,
2506 .setauthsize = aead_setauthsize, 2539 .setauthsize = aead_setauthsize,
2507 .encrypt = aead_encrypt, 2540 .encrypt = aead_encrypt,
2508 .decrypt = aead_decrypt, 2541 .decrypt = aead_decrypt,
@@ -2525,7 +2558,7 @@ static struct caam_aead_alg driver_aeads[] = {
2525 "cbc-des3_ede-caam", 2558 "cbc-des3_ede-caam",
2526 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2559 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2527 }, 2560 },
2528 .setkey = aead_setkey, 2561 .setkey = des3_aead_setkey,
2529 .setauthsize = aead_setauthsize, 2562 .setauthsize = aead_setauthsize,
2530 .encrypt = aead_encrypt, 2563 .encrypt = aead_encrypt,
2531 .decrypt = aead_decrypt, 2564 .decrypt = aead_decrypt,
@@ -2548,7 +2581,7 @@ static struct caam_aead_alg driver_aeads[] = {
2548 "cbc-des3_ede-caam", 2581 "cbc-des3_ede-caam",
2549 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2582 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2550 }, 2583 },
2551 .setkey = aead_setkey, 2584 .setkey = des3_aead_setkey,
2552 .setauthsize = aead_setauthsize, 2585 .setauthsize = aead_setauthsize,
2553 .encrypt = aead_encrypt, 2586 .encrypt = aead_encrypt,
2554 .decrypt = aead_decrypt, 2587 .decrypt = aead_decrypt,
@@ -2571,7 +2604,7 @@ static struct caam_aead_alg driver_aeads[] = {
2571 "cbc-des3_ede-caam", 2604 "cbc-des3_ede-caam",
2572 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2605 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2573 }, 2606 },
2574 .setkey = aead_setkey, 2607 .setkey = des3_aead_setkey,
2575 .setauthsize = aead_setauthsize, 2608 .setauthsize = aead_setauthsize,
2576 .encrypt = aead_encrypt, 2609 .encrypt = aead_encrypt,
2577 .decrypt = aead_decrypt, 2610 .decrypt = aead_decrypt,
@@ -2594,7 +2627,7 @@ static struct caam_aead_alg driver_aeads[] = {
2594 "cbc-des3_ede-caam", 2627 "cbc-des3_ede-caam",
2595 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2628 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2596 }, 2629 },
2597 .setkey = aead_setkey, 2630 .setkey = des3_aead_setkey,
2598 .setauthsize = aead_setauthsize, 2631 .setauthsize = aead_setauthsize,
2599 .encrypt = aead_encrypt, 2632 .encrypt = aead_encrypt,
2600 .decrypt = aead_decrypt, 2633 .decrypt = aead_decrypt,
@@ -2617,7 +2650,7 @@ static struct caam_aead_alg driver_aeads[] = {
2617 "cbc-des3_ede-caam", 2650 "cbc-des3_ede-caam",
2618 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2651 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2619 }, 2652 },
2620 .setkey = aead_setkey, 2653 .setkey = des3_aead_setkey,
2621 .setauthsize = aead_setauthsize, 2654 .setauthsize = aead_setauthsize,
2622 .encrypt = aead_encrypt, 2655 .encrypt = aead_encrypt,
2623 .decrypt = aead_decrypt, 2656 .decrypt = aead_decrypt,
@@ -2640,7 +2673,7 @@ static struct caam_aead_alg driver_aeads[] = {
2640 "cbc-des3_ede-caam", 2673 "cbc-des3_ede-caam",
2641 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2674 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2642 }, 2675 },
2643 .setkey = aead_setkey, 2676 .setkey = des3_aead_setkey,
2644 .setauthsize = aead_setauthsize, 2677 .setauthsize = aead_setauthsize,
2645 .encrypt = aead_encrypt, 2678 .encrypt = aead_encrypt,
2646 .decrypt = aead_decrypt, 2679 .decrypt = aead_decrypt,
@@ -2663,7 +2696,7 @@ static struct caam_aead_alg driver_aeads[] = {
2663 "cbc-des3_ede-caam", 2696 "cbc-des3_ede-caam",
2664 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2697 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2665 }, 2698 },
2666 .setkey = aead_setkey, 2699 .setkey = des3_aead_setkey,
2667 .setauthsize = aead_setauthsize, 2700 .setauthsize = aead_setauthsize,
2668 .encrypt = aead_encrypt, 2701 .encrypt = aead_encrypt,
2669 .decrypt = aead_decrypt, 2702 .decrypt = aead_decrypt,
@@ -2686,7 +2719,7 @@ static struct caam_aead_alg driver_aeads[] = {
2686 "cbc-des3_ede-caam", 2719 "cbc-des3_ede-caam",
2687 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2720 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2688 }, 2721 },
2689 .setkey = aead_setkey, 2722 .setkey = des3_aead_setkey,
2690 .setauthsize = aead_setauthsize, 2723 .setauthsize = aead_setauthsize,
2691 .encrypt = aead_encrypt, 2724 .encrypt = aead_encrypt,
2692 .decrypt = aead_decrypt, 2725 .decrypt = aead_decrypt,
@@ -2709,7 +2742,7 @@ static struct caam_aead_alg driver_aeads[] = {
2709 "cbc-des3_ede-caam", 2742 "cbc-des3_ede-caam",
2710 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2743 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2711 }, 2744 },
2712 .setkey = aead_setkey, 2745 .setkey = des3_aead_setkey,
2713 .setauthsize = aead_setauthsize, 2746 .setauthsize = aead_setauthsize,
2714 .encrypt = aead_encrypt, 2747 .encrypt = aead_encrypt,
2715 .decrypt = aead_decrypt, 2748 .decrypt = aead_decrypt,
@@ -3460,7 +3493,7 @@ static int __init caam_algapi_init(void)
3460 u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst; 3493 u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
3461 u32 arc4_inst; 3494 u32 arc4_inst;
3462 unsigned int md_limit = SHA512_DIGEST_SIZE; 3495 unsigned int md_limit = SHA512_DIGEST_SIZE;
3463 bool registered = false; 3496 bool registered = false, gcm_support;
3464 3497
3465 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 3498 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
3466 if (!dev_node) { 3499 if (!dev_node) {
@@ -3493,7 +3526,7 @@ static int __init caam_algapi_init(void)
3493 * First, detect presence and attributes of DES, AES, and MD blocks. 3526 * First, detect presence and attributes of DES, AES, and MD blocks.
3494 */ 3527 */
3495 if (priv->era < 10) { 3528 if (priv->era < 10) {
3496 u32 cha_vid, cha_inst; 3529 u32 cha_vid, cha_inst, aes_rn;
3497 3530
3498 cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); 3531 cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
3499 aes_vid = cha_vid & CHA_ID_LS_AES_MASK; 3532 aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
@@ -3508,6 +3541,10 @@ static int __init caam_algapi_init(void)
3508 CHA_ID_LS_ARC4_SHIFT; 3541 CHA_ID_LS_ARC4_SHIFT;
3509 ccha_inst = 0; 3542 ccha_inst = 0;
3510 ptha_inst = 0; 3543 ptha_inst = 0;
3544
3545 aes_rn = rd_reg32(&priv->ctrl->perfmon.cha_rev_ls) &
3546 CHA_ID_LS_AES_MASK;
3547 gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8);
3511 } else { 3548 } else {
3512 u32 aesa, mdha; 3549 u32 aesa, mdha;
3513 3550
@@ -3523,6 +3560,8 @@ static int __init caam_algapi_init(void)
3523 ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK; 3560 ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
3524 ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK; 3561 ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
3525 arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK; 3562 arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK;
3563
3564 gcm_support = aesa & CHA_VER_MISC_AES_GCM;
3526 } 3565 }
3527 3566
3528 /* If MD is present, limit digest size based on LP256 */ 3567 /* If MD is present, limit digest size based on LP256 */
@@ -3595,11 +3634,9 @@ static int __init caam_algapi_init(void)
3595 if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst) 3634 if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
3596 continue; 3635 continue;
3597 3636
3598 /* 3637 /* Skip GCM algorithms if not supported by device */
3599 * Check support for AES algorithms not available 3638 if (c1_alg_sel == OP_ALG_ALGSEL_AES &&
3600 * on LP devices. 3639 alg_aai == OP_ALG_AAI_GCM && !gcm_support)
3601 */
3602 if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
3603 continue; 3640 continue;
3604 3641
3605 /* 3642 /*
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index c61921d32489..70af211d2d01 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -292,6 +292,39 @@ badkey:
292 return -EINVAL; 292 return -EINVAL;
293} 293}
294 294
295static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
296 unsigned int keylen)
297{
298 struct crypto_authenc_keys keys;
299 u32 flags;
300 int err;
301
302 err = crypto_authenc_extractkeys(&keys, key, keylen);
303 if (unlikely(err))
304 goto badkey;
305
306 err = -EINVAL;
307 if (keys.enckeylen != DES3_EDE_KEY_SIZE)
308 goto badkey;
309
310 flags = crypto_aead_get_flags(aead);
311 err = __des3_verify_key(&flags, keys.enckey);
312 if (unlikely(err)) {
313 crypto_aead_set_flags(aead, flags);
314 goto out;
315 }
316
317 err = aead_setkey(aead, key, keylen);
318
319out:
320 memzero_explicit(&keys, sizeof(keys));
321 return err;
322
323badkey:
324 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
325 goto out;
326}
327
295static int gcm_set_sh_desc(struct crypto_aead *aead) 328static int gcm_set_sh_desc(struct crypto_aead *aead)
296{ 329{
297 struct caam_ctx *ctx = crypto_aead_ctx(aead); 330 struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -667,6 +700,13 @@ badkey:
667 return -EINVAL; 700 return -EINVAL;
668} 701}
669 702
703static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
704 const u8 *key, unsigned int keylen)
705{
706 return unlikely(des3_verify_key(skcipher, key)) ?:
707 skcipher_setkey(skcipher, key, keylen);
708}
709
670static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, 710static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
671 unsigned int keylen) 711 unsigned int keylen)
672{ 712{
@@ -1382,7 +1422,7 @@ static struct caam_skcipher_alg driver_algs[] = {
1382 .cra_driver_name = "cbc-3des-caam-qi", 1422 .cra_driver_name = "cbc-3des-caam-qi",
1383 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1423 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1384 }, 1424 },
1385 .setkey = skcipher_setkey, 1425 .setkey = des3_skcipher_setkey,
1386 .encrypt = skcipher_encrypt, 1426 .encrypt = skcipher_encrypt,
1387 .decrypt = skcipher_decrypt, 1427 .decrypt = skcipher_decrypt,
1388 .min_keysize = DES3_EDE_KEY_SIZE, 1428 .min_keysize = DES3_EDE_KEY_SIZE,
@@ -1798,7 +1838,7 @@ static struct caam_aead_alg driver_aeads[] = {
1798 "cbc-des3_ede-caam-qi", 1838 "cbc-des3_ede-caam-qi",
1799 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1839 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1800 }, 1840 },
1801 .setkey = aead_setkey, 1841 .setkey = des3_aead_setkey,
1802 .setauthsize = aead_setauthsize, 1842 .setauthsize = aead_setauthsize,
1803 .encrypt = aead_encrypt, 1843 .encrypt = aead_encrypt,
1804 .decrypt = aead_decrypt, 1844 .decrypt = aead_decrypt,
@@ -1820,7 +1860,7 @@ static struct caam_aead_alg driver_aeads[] = {
1820 "cbc-des3_ede-caam-qi", 1860 "cbc-des3_ede-caam-qi",
1821 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1861 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1822 }, 1862 },
1823 .setkey = aead_setkey, 1863 .setkey = des3_aead_setkey,
1824 .setauthsize = aead_setauthsize, 1864 .setauthsize = aead_setauthsize,
1825 .encrypt = aead_encrypt, 1865 .encrypt = aead_encrypt,
1826 .decrypt = aead_decrypt, 1866 .decrypt = aead_decrypt,
@@ -1843,7 +1883,7 @@ static struct caam_aead_alg driver_aeads[] = {
1843 "cbc-des3_ede-caam-qi", 1883 "cbc-des3_ede-caam-qi",
1844 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1884 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1845 }, 1885 },
1846 .setkey = aead_setkey, 1886 .setkey = des3_aead_setkey,
1847 .setauthsize = aead_setauthsize, 1887 .setauthsize = aead_setauthsize,
1848 .encrypt = aead_encrypt, 1888 .encrypt = aead_encrypt,
1849 .decrypt = aead_decrypt, 1889 .decrypt = aead_decrypt,
@@ -1866,7 +1906,7 @@ static struct caam_aead_alg driver_aeads[] = {
1866 "cbc-des3_ede-caam-qi", 1906 "cbc-des3_ede-caam-qi",
1867 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1907 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1868 }, 1908 },
1869 .setkey = aead_setkey, 1909 .setkey = des3_aead_setkey,
1870 .setauthsize = aead_setauthsize, 1910 .setauthsize = aead_setauthsize,
1871 .encrypt = aead_encrypt, 1911 .encrypt = aead_encrypt,
1872 .decrypt = aead_decrypt, 1912 .decrypt = aead_decrypt,
@@ -1889,7 +1929,7 @@ static struct caam_aead_alg driver_aeads[] = {
1889 "cbc-des3_ede-caam-qi", 1929 "cbc-des3_ede-caam-qi",
1890 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1930 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1891 }, 1931 },
1892 .setkey = aead_setkey, 1932 .setkey = des3_aead_setkey,
1893 .setauthsize = aead_setauthsize, 1933 .setauthsize = aead_setauthsize,
1894 .encrypt = aead_encrypt, 1934 .encrypt = aead_encrypt,
1895 .decrypt = aead_decrypt, 1935 .decrypt = aead_decrypt,
@@ -1912,7 +1952,7 @@ static struct caam_aead_alg driver_aeads[] = {
1912 "cbc-des3_ede-caam-qi", 1952 "cbc-des3_ede-caam-qi",
1913 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1953 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1914 }, 1954 },
1915 .setkey = aead_setkey, 1955 .setkey = des3_aead_setkey,
1916 .setauthsize = aead_setauthsize, 1956 .setauthsize = aead_setauthsize,
1917 .encrypt = aead_encrypt, 1957 .encrypt = aead_encrypt,
1918 .decrypt = aead_decrypt, 1958 .decrypt = aead_decrypt,
@@ -1935,7 +1975,7 @@ static struct caam_aead_alg driver_aeads[] = {
1935 "cbc-des3_ede-caam-qi", 1975 "cbc-des3_ede-caam-qi",
1936 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1976 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1937 }, 1977 },
1938 .setkey = aead_setkey, 1978 .setkey = des3_aead_setkey,
1939 .setauthsize = aead_setauthsize, 1979 .setauthsize = aead_setauthsize,
1940 .encrypt = aead_encrypt, 1980 .encrypt = aead_encrypt,
1941 .decrypt = aead_decrypt, 1981 .decrypt = aead_decrypt,
@@ -1958,7 +1998,7 @@ static struct caam_aead_alg driver_aeads[] = {
1958 "cbc-des3_ede-caam-qi", 1998 "cbc-des3_ede-caam-qi",
1959 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1999 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1960 }, 2000 },
1961 .setkey = aead_setkey, 2001 .setkey = des3_aead_setkey,
1962 .setauthsize = aead_setauthsize, 2002 .setauthsize = aead_setauthsize,
1963 .encrypt = aead_encrypt, 2003 .encrypt = aead_encrypt,
1964 .decrypt = aead_decrypt, 2004 .decrypt = aead_decrypt,
@@ -1981,7 +2021,7 @@ static struct caam_aead_alg driver_aeads[] = {
1981 "cbc-des3_ede-caam-qi", 2021 "cbc-des3_ede-caam-qi",
1982 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2022 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1983 }, 2023 },
1984 .setkey = aead_setkey, 2024 .setkey = des3_aead_setkey,
1985 .setauthsize = aead_setauthsize, 2025 .setauthsize = aead_setauthsize,
1986 .encrypt = aead_encrypt, 2026 .encrypt = aead_encrypt,
1987 .decrypt = aead_decrypt, 2027 .decrypt = aead_decrypt,
@@ -2004,7 +2044,7 @@ static struct caam_aead_alg driver_aeads[] = {
2004 "cbc-des3_ede-caam-qi", 2044 "cbc-des3_ede-caam-qi",
2005 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2045 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2006 }, 2046 },
2007 .setkey = aead_setkey, 2047 .setkey = des3_aead_setkey,
2008 .setauthsize = aead_setauthsize, 2048 .setauthsize = aead_setauthsize,
2009 .encrypt = aead_encrypt, 2049 .encrypt = aead_encrypt,
2010 .decrypt = aead_decrypt, 2050 .decrypt = aead_decrypt,
@@ -2027,7 +2067,7 @@ static struct caam_aead_alg driver_aeads[] = {
2027 "cbc-des3_ede-caam-qi", 2067 "cbc-des3_ede-caam-qi",
2028 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2068 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2029 }, 2069 },
2030 .setkey = aead_setkey, 2070 .setkey = des3_aead_setkey,
2031 .setauthsize = aead_setauthsize, 2071 .setauthsize = aead_setauthsize,
2032 .encrypt = aead_encrypt, 2072 .encrypt = aead_encrypt,
2033 .decrypt = aead_decrypt, 2073 .decrypt = aead_decrypt,
@@ -2050,7 +2090,7 @@ static struct caam_aead_alg driver_aeads[] = {
2050 "cbc-des3_ede-caam-qi", 2090 "cbc-des3_ede-caam-qi",
2051 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2091 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2052 }, 2092 },
2053 .setkey = aead_setkey, 2093 .setkey = des3_aead_setkey,
2054 .setauthsize = aead_setauthsize, 2094 .setauthsize = aead_setauthsize,
2055 .encrypt = aead_encrypt, 2095 .encrypt = aead_encrypt,
2056 .decrypt = aead_decrypt, 2096 .decrypt = aead_decrypt,
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index c2c1abc68f81..33a4df6b81de 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -323,6 +323,39 @@ badkey:
323 return -EINVAL; 323 return -EINVAL;
324} 324}
325 325
326static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
327 unsigned int keylen)
328{
329 struct crypto_authenc_keys keys;
330 u32 flags;
331 int err;
332
333 err = crypto_authenc_extractkeys(&keys, key, keylen);
334 if (unlikely(err))
335 goto badkey;
336
337 err = -EINVAL;
338 if (keys.enckeylen != DES3_EDE_KEY_SIZE)
339 goto badkey;
340
341 flags = crypto_aead_get_flags(aead);
342 err = __des3_verify_key(&flags, keys.enckey);
343 if (unlikely(err)) {
344 crypto_aead_set_flags(aead, flags);
345 goto out;
346 }
347
348 err = aead_setkey(aead, key, keylen);
349
350out:
351 memzero_explicit(&keys, sizeof(keys));
352 return err;
353
354badkey:
355 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
356 goto out;
357}
358
326static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, 359static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
327 bool encrypt) 360 bool encrypt)
328{ 361{
@@ -938,6 +971,13 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
938 return 0; 971 return 0;
939} 972}
940 973
974static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
975 const u8 *key, unsigned int keylen)
976{
977 return unlikely(des3_verify_key(skcipher, key)) ?:
978 skcipher_setkey(skcipher, key, keylen);
979}
980
941static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, 981static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
942 unsigned int keylen) 982 unsigned int keylen)
943{ 983{
@@ -1484,7 +1524,7 @@ static struct caam_skcipher_alg driver_algs[] = {
1484 .cra_driver_name = "cbc-3des-caam-qi2", 1524 .cra_driver_name = "cbc-3des-caam-qi2",
1485 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1525 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1486 }, 1526 },
1487 .setkey = skcipher_setkey, 1527 .setkey = des3_skcipher_setkey,
1488 .encrypt = skcipher_encrypt, 1528 .encrypt = skcipher_encrypt,
1489 .decrypt = skcipher_decrypt, 1529 .decrypt = skcipher_decrypt,
1490 .min_keysize = DES3_EDE_KEY_SIZE, 1530 .min_keysize = DES3_EDE_KEY_SIZE,
@@ -1916,7 +1956,7 @@ static struct caam_aead_alg driver_aeads[] = {
1916 "cbc-des3_ede-caam-qi2", 1956 "cbc-des3_ede-caam-qi2",
1917 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1957 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1918 }, 1958 },
1919 .setkey = aead_setkey, 1959 .setkey = des3_aead_setkey,
1920 .setauthsize = aead_setauthsize, 1960 .setauthsize = aead_setauthsize,
1921 .encrypt = aead_encrypt, 1961 .encrypt = aead_encrypt,
1922 .decrypt = aead_decrypt, 1962 .decrypt = aead_decrypt,
@@ -1938,7 +1978,7 @@ static struct caam_aead_alg driver_aeads[] = {
1938 "cbc-des3_ede-caam-qi2", 1978 "cbc-des3_ede-caam-qi2",
1939 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1979 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1940 }, 1980 },
1941 .setkey = aead_setkey, 1981 .setkey = des3_aead_setkey,
1942 .setauthsize = aead_setauthsize, 1982 .setauthsize = aead_setauthsize,
1943 .encrypt = aead_encrypt, 1983 .encrypt = aead_encrypt,
1944 .decrypt = aead_decrypt, 1984 .decrypt = aead_decrypt,
@@ -1961,7 +2001,7 @@ static struct caam_aead_alg driver_aeads[] = {
1961 "cbc-des3_ede-caam-qi2", 2001 "cbc-des3_ede-caam-qi2",
1962 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2002 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1963 }, 2003 },
1964 .setkey = aead_setkey, 2004 .setkey = des3_aead_setkey,
1965 .setauthsize = aead_setauthsize, 2005 .setauthsize = aead_setauthsize,
1966 .encrypt = aead_encrypt, 2006 .encrypt = aead_encrypt,
1967 .decrypt = aead_decrypt, 2007 .decrypt = aead_decrypt,
@@ -1984,7 +2024,7 @@ static struct caam_aead_alg driver_aeads[] = {
1984 "cbc-des3_ede-caam-qi2", 2024 "cbc-des3_ede-caam-qi2",
1985 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2025 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1986 }, 2026 },
1987 .setkey = aead_setkey, 2027 .setkey = des3_aead_setkey,
1988 .setauthsize = aead_setauthsize, 2028 .setauthsize = aead_setauthsize,
1989 .encrypt = aead_encrypt, 2029 .encrypt = aead_encrypt,
1990 .decrypt = aead_decrypt, 2030 .decrypt = aead_decrypt,
@@ -2007,7 +2047,7 @@ static struct caam_aead_alg driver_aeads[] = {
2007 "cbc-des3_ede-caam-qi2", 2047 "cbc-des3_ede-caam-qi2",
2008 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2048 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2009 }, 2049 },
2010 .setkey = aead_setkey, 2050 .setkey = des3_aead_setkey,
2011 .setauthsize = aead_setauthsize, 2051 .setauthsize = aead_setauthsize,
2012 .encrypt = aead_encrypt, 2052 .encrypt = aead_encrypt,
2013 .decrypt = aead_decrypt, 2053 .decrypt = aead_decrypt,
@@ -2030,7 +2070,7 @@ static struct caam_aead_alg driver_aeads[] = {
2030 "cbc-des3_ede-caam-qi2", 2070 "cbc-des3_ede-caam-qi2",
2031 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2071 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2032 }, 2072 },
2033 .setkey = aead_setkey, 2073 .setkey = des3_aead_setkey,
2034 .setauthsize = aead_setauthsize, 2074 .setauthsize = aead_setauthsize,
2035 .encrypt = aead_encrypt, 2075 .encrypt = aead_encrypt,
2036 .decrypt = aead_decrypt, 2076 .decrypt = aead_decrypt,
@@ -2053,7 +2093,7 @@ static struct caam_aead_alg driver_aeads[] = {
2053 "cbc-des3_ede-caam-qi2", 2093 "cbc-des3_ede-caam-qi2",
2054 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2094 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2055 }, 2095 },
2056 .setkey = aead_setkey, 2096 .setkey = des3_aead_setkey,
2057 .setauthsize = aead_setauthsize, 2097 .setauthsize = aead_setauthsize,
2058 .encrypt = aead_encrypt, 2098 .encrypt = aead_encrypt,
2059 .decrypt = aead_decrypt, 2099 .decrypt = aead_decrypt,
@@ -2076,7 +2116,7 @@ static struct caam_aead_alg driver_aeads[] = {
2076 "cbc-des3_ede-caam-qi2", 2116 "cbc-des3_ede-caam-qi2",
2077 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2117 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2078 }, 2118 },
2079 .setkey = aead_setkey, 2119 .setkey = des3_aead_setkey,
2080 .setauthsize = aead_setauthsize, 2120 .setauthsize = aead_setauthsize,
2081 .encrypt = aead_encrypt, 2121 .encrypt = aead_encrypt,
2082 .decrypt = aead_decrypt, 2122 .decrypt = aead_decrypt,
@@ -2099,7 +2139,7 @@ static struct caam_aead_alg driver_aeads[] = {
2099 "cbc-des3_ede-caam-qi2", 2139 "cbc-des3_ede-caam-qi2",
2100 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2140 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2101 }, 2141 },
2102 .setkey = aead_setkey, 2142 .setkey = des3_aead_setkey,
2103 .setauthsize = aead_setauthsize, 2143 .setauthsize = aead_setauthsize,
2104 .encrypt = aead_encrypt, 2144 .encrypt = aead_encrypt,
2105 .decrypt = aead_decrypt, 2145 .decrypt = aead_decrypt,
@@ -2122,7 +2162,7 @@ static struct caam_aead_alg driver_aeads[] = {
2122 "cbc-des3_ede-caam-qi2", 2162 "cbc-des3_ede-caam-qi2",
2123 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2163 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2124 }, 2164 },
2125 .setkey = aead_setkey, 2165 .setkey = des3_aead_setkey,
2126 .setauthsize = aead_setauthsize, 2166 .setauthsize = aead_setauthsize,
2127 .encrypt = aead_encrypt, 2167 .encrypt = aead_encrypt,
2128 .decrypt = aead_decrypt, 2168 .decrypt = aead_decrypt,
@@ -2145,7 +2185,7 @@ static struct caam_aead_alg driver_aeads[] = {
2145 "cbc-des3_ede-caam-qi2", 2185 "cbc-des3_ede-caam-qi2",
2146 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2186 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2147 }, 2187 },
2148 .setkey = aead_setkey, 2188 .setkey = des3_aead_setkey,
2149 .setauthsize = aead_setauthsize, 2189 .setauthsize = aead_setauthsize,
2150 .encrypt = aead_encrypt, 2190 .encrypt = aead_encrypt,
2151 .decrypt = aead_decrypt, 2191 .decrypt = aead_decrypt,
@@ -2168,7 +2208,7 @@ static struct caam_aead_alg driver_aeads[] = {
2168 "cbc-des3_ede-caam-qi2", 2208 "cbc-des3_ede-caam-qi2",
2169 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2209 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2170 }, 2210 },
2171 .setkey = aead_setkey, 2211 .setkey = des3_aead_setkey,
2172 .setauthsize = aead_setauthsize, 2212 .setauthsize = aead_setauthsize,
2173 .encrypt = aead_encrypt, 2213 .encrypt = aead_encrypt,
2174 .decrypt = aead_decrypt, 2214 .decrypt = aead_decrypt,
@@ -2854,6 +2894,7 @@ struct caam_hash_state {
2854 struct caam_request caam_req; 2894 struct caam_request caam_req;
2855 dma_addr_t buf_dma; 2895 dma_addr_t buf_dma;
2856 dma_addr_t ctx_dma; 2896 dma_addr_t ctx_dma;
2897 int ctx_dma_len;
2857 u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; 2898 u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2858 int buflen_0; 2899 int buflen_0;
2859 u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; 2900 u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
@@ -2927,6 +2968,7 @@ static inline int ctx_map_to_qm_sg(struct device *dev,
2927 struct caam_hash_state *state, int ctx_len, 2968 struct caam_hash_state *state, int ctx_len,
2928 struct dpaa2_sg_entry *qm_sg, u32 flag) 2969 struct dpaa2_sg_entry *qm_sg, u32 flag)
2929{ 2970{
2971 state->ctx_dma_len = ctx_len;
2930 state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag); 2972 state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
2931 if (dma_mapping_error(dev, state->ctx_dma)) { 2973 if (dma_mapping_error(dev, state->ctx_dma)) {
2932 dev_err(dev, "unable to map ctx\n"); 2974 dev_err(dev, "unable to map ctx\n");
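Why ctx_dma_len earns a field of its own: the DMA API requires dma_unmap_single() to be called with the same size that was passed to dma_map_single(), and caamalg_qi2 maps caam_ctx sometimes for ctx->ctx_len and sometimes for digestsize. Recording the length at map time lets every unmap site reuse it instead of guessing. A sketch of the pairing this series enforces (helper names are hypothetical):

        static int caam_ctx_map(struct device *dev, struct caam_hash_state *state,
                                int len, u32 dir)
        {
                state->ctx_dma_len = len;       /* remember what was mapped */
                state->ctx_dma = dma_map_single(dev, state->caam_ctx, len, dir);
                if (dma_mapping_error(dev, state->ctx_dma)) {
                        state->ctx_dma = 0;
                        return -ENOMEM;
                }
                return 0;
        }

        static void caam_ctx_unmap(struct device *dev,
                                   struct caam_hash_state *state, u32 dir)
        {
                if (state->ctx_dma) {
                        /* same length as at map time, never a guessed one */
                        dma_unmap_single(dev, state->ctx_dma,
                                         state->ctx_dma_len, dir);
                        state->ctx_dma = 0;
                }
        }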
@@ -3018,13 +3060,13 @@ static void split_key_sh_done(void *cbk_ctx, u32 err)
3018} 3060}
3019 3061
3020/* Digest hash size if it is too large */ 3062/* Digest hash size if it is too large */
3021static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, 3063static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3022 u32 *keylen, u8 *key_out, u32 digestsize) 3064 u32 digestsize)
3023{ 3065{
3024 struct caam_request *req_ctx; 3066 struct caam_request *req_ctx;
3025 u32 *desc; 3067 u32 *desc;
3026 struct split_key_sh_result result; 3068 struct split_key_sh_result result;
3027 dma_addr_t src_dma, dst_dma; 3069 dma_addr_t key_dma;
3028 struct caam_flc *flc; 3070 struct caam_flc *flc;
3029 dma_addr_t flc_dma; 3071 dma_addr_t flc_dma;
3030 int ret = -ENOMEM; 3072 int ret = -ENOMEM;
@@ -3041,17 +3083,10 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
3041 if (!flc) 3083 if (!flc)
3042 goto err_flc; 3084 goto err_flc;
3043 3085
3044 src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen, 3086 key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3045 DMA_TO_DEVICE); 3087 if (dma_mapping_error(ctx->dev, key_dma)) {
3046 if (dma_mapping_error(ctx->dev, src_dma)) { 3088 dev_err(ctx->dev, "unable to map key memory\n");
3047 dev_err(ctx->dev, "unable to map key input memory\n"); 3089 goto err_key_dma;
3048 goto err_src_dma;
3049 }
3050 dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
3051 DMA_FROM_DEVICE);
3052 if (dma_mapping_error(ctx->dev, dst_dma)) {
3053 dev_err(ctx->dev, "unable to map key output memory\n");
3054 goto err_dst_dma;
3055 } 3090 }
3056 3091
3057 desc = flc->sh_desc; 3092 desc = flc->sh_desc;
@@ -3076,14 +3111,14 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
3076 3111
3077 dpaa2_fl_set_final(in_fle, true); 3112 dpaa2_fl_set_final(in_fle, true);
3078 dpaa2_fl_set_format(in_fle, dpaa2_fl_single); 3113 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3079 dpaa2_fl_set_addr(in_fle, src_dma); 3114 dpaa2_fl_set_addr(in_fle, key_dma);
3080 dpaa2_fl_set_len(in_fle, *keylen); 3115 dpaa2_fl_set_len(in_fle, *keylen);
3081 dpaa2_fl_set_format(out_fle, dpaa2_fl_single); 3116 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3082 dpaa2_fl_set_addr(out_fle, dst_dma); 3117 dpaa2_fl_set_addr(out_fle, key_dma);
3083 dpaa2_fl_set_len(out_fle, digestsize); 3118 dpaa2_fl_set_len(out_fle, digestsize);
3084 3119
3085 print_hex_dump_debug("key_in@" __stringify(__LINE__)": ", 3120 print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3086 DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1); 3121 DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3087 print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ", 3122 print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3088 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 3123 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3089 1); 3124 1);
@@ -3103,17 +3138,15 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
3103 wait_for_completion(&result.completion); 3138 wait_for_completion(&result.completion);
3104 ret = result.err; 3139 ret = result.err;
3105 print_hex_dump_debug("digested key@" __stringify(__LINE__)": ", 3140 print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
3106 DUMP_PREFIX_ADDRESS, 16, 4, key_in, 3141 DUMP_PREFIX_ADDRESS, 16, 4, key,
3107 digestsize, 1); 3142 digestsize, 1);
3108 } 3143 }
3109 3144
3110 dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc), 3145 dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3111 DMA_TO_DEVICE); 3146 DMA_TO_DEVICE);
3112err_flc_dma: 3147err_flc_dma:
3113 dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE); 3148 dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3114err_dst_dma: 3149err_key_dma:
3115 dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
3116err_src_dma:
3117 kfree(flc); 3150 kfree(flc);
3118err_flc: 3151err_flc:
3119 kfree(req_ctx); 3152 kfree(req_ctx);
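The hash_digest_key() rework trades two mappings (key in via DMA_TO_DEVICE, digest out via DMA_FROM_DEVICE) for a single DMA_BIDIRECTIONAL mapping of one kernel-owned buffer: the engine reads *keylen bytes from key_dma and writes digestsize bytes back through the same address. In frame-list terms:

        key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
        /* ... */
        dpaa2_fl_set_addr(in_fle, key_dma);     /* input: the raw key */
        dpaa2_fl_set_len(in_fle, *keylen);
        dpaa2_fl_set_addr(out_fle, key_dma);    /* output: digest, in place */
        dpaa2_fl_set_len(out_fle, digestsize);

The in-place write cannot overrun: this path only runs for keys longer than the block size, and for the MD/SHA algorithms here digestsize <= blocksize < *keylen.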
@@ -3135,12 +3168,10 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3135 dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize); 3168 dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3136 3169
3137 if (keylen > blocksize) { 3170 if (keylen > blocksize) {
3138 hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key), 3171 hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
3139 GFP_KERNEL | GFP_DMA);
3140 if (!hashed_key) 3172 if (!hashed_key)
3141 return -ENOMEM; 3173 return -ENOMEM;
3142 ret = hash_digest_key(ctx, key, &keylen, hashed_key, 3174 ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3143 digestsize);
3144 if (ret) 3175 if (ret)
3145 goto bad_free_key; 3176 goto bad_free_key;
3146 key = hashed_key; 3177 key = hashed_key;
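ahash_setkey() follows the standard HMAC rule (RFC 2104): keys longer than the block size are first digested down. The switch to kmemdup() matters for DMA correctness, since the caller's key buffer is const and may not be safe to map; the driver now works on a writable GFP_DMA copy and lets hash_digest_key() overwrite it in place:

        if (keylen > blocksize) {
                /* DMA-safe, writable copy of the caller's key */
                hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
                if (!hashed_key)
                        return -ENOMEM;
                /* digest lands in the first digestsize bytes of the copy */
                ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
                if (ret)
                        goto bad_free_key;
                key = hashed_key;
        }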
@@ -3165,14 +3196,12 @@ bad_free_key:
3165} 3196}
3166 3197
3167static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc, 3198static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3168 struct ahash_request *req, int dst_len) 3199 struct ahash_request *req)
3169{ 3200{
3170 struct caam_hash_state *state = ahash_request_ctx(req); 3201 struct caam_hash_state *state = ahash_request_ctx(req);
3171 3202
3172 if (edesc->src_nents) 3203 if (edesc->src_nents)
3173 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); 3204 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3174 if (edesc->dst_dma)
3175 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
3176 3205
3177 if (edesc->qm_sg_bytes) 3206 if (edesc->qm_sg_bytes)
3178 dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes, 3207 dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
@@ -3187,18 +3216,15 @@ static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3187 3216
3188static inline void ahash_unmap_ctx(struct device *dev, 3217static inline void ahash_unmap_ctx(struct device *dev,
3189 struct ahash_edesc *edesc, 3218 struct ahash_edesc *edesc,
3190 struct ahash_request *req, int dst_len, 3219 struct ahash_request *req, u32 flag)
3191 u32 flag)
3192{ 3220{
3193 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3194 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3195 struct caam_hash_state *state = ahash_request_ctx(req); 3221 struct caam_hash_state *state = ahash_request_ctx(req);
3196 3222
3197 if (state->ctx_dma) { 3223 if (state->ctx_dma) {
3198 dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag); 3224 dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3199 state->ctx_dma = 0; 3225 state->ctx_dma = 0;
3200 } 3226 }
3201 ahash_unmap(dev, edesc, req, dst_len); 3227 ahash_unmap(dev, edesc, req);
3202} 3228}
3203 3229
3204static void ahash_done(void *cbk_ctx, u32 status) 3230static void ahash_done(void *cbk_ctx, u32 status)
@@ -3219,16 +3245,13 @@ static void ahash_done(void *cbk_ctx, u32 status)
3219 ecode = -EIO; 3245 ecode = -EIO;
3220 } 3246 }
3221 3247
3222 ahash_unmap(ctx->dev, edesc, req, digestsize); 3248 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3249 memcpy(req->result, state->caam_ctx, digestsize);
3223 qi_cache_free(edesc); 3250 qi_cache_free(edesc);
3224 3251
3225 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ", 3252 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3226 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 3253 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3227 ctx->ctx_len, 1); 3254 ctx->ctx_len, 1);
3228 if (req->result)
3229 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3230 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3231 digestsize, 1);
3232 3255
3233 req->base.complete(&req->base, ecode); 3256 req->base.complete(&req->base, ecode);
3234} 3257}
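The completion-path change above repeats in ahash_done_ctx_src(): the digest is produced into the driver-owned, DMA-mapped state->caam_ctx and only copied into req->result after unmapping. The likely motivation is that req->result is caller memory and may live on the stack or in vmalloc space, which must never be handed to dma_map_single(). The shape of the fixed callback:

        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
        /* caam_ctx is coherent again after unmap; copy to caller memory */
        memcpy(req->result, state->caam_ctx, digestsize);
        qi_cache_free(edesc);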
@@ -3250,7 +3273,7 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
3250 ecode = -EIO; 3273 ecode = -EIO;
3251 } 3274 }
3252 3275
3253 ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); 3276 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3254 switch_buf(state); 3277 switch_buf(state);
3255 qi_cache_free(edesc); 3278 qi_cache_free(edesc);
3256 3279
@@ -3283,16 +3306,13 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3283 ecode = -EIO; 3306 ecode = -EIO;
3284 } 3307 }
3285 3308
3286 ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE); 3309 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3310 memcpy(req->result, state->caam_ctx, digestsize);
3287 qi_cache_free(edesc); 3311 qi_cache_free(edesc);
3288 3312
3289 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ", 3313 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3290 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 3314 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3291 ctx->ctx_len, 1); 3315 ctx->ctx_len, 1);
3292 if (req->result)
3293 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3294 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3295 digestsize, 1);
3296 3316
3297 req->base.complete(&req->base, ecode); 3317 req->base.complete(&req->base, ecode);
3298} 3318}
@@ -3314,7 +3334,7 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3314 ecode = -EIO; 3334 ecode = -EIO;
3315 } 3335 }
3316 3336
3317 ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); 3337 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3318 switch_buf(state); 3338 switch_buf(state);
3319 qi_cache_free(edesc); 3339 qi_cache_free(edesc);
3320 3340
@@ -3452,7 +3472,7 @@ static int ahash_update_ctx(struct ahash_request *req)
3452 3472
3453 return ret; 3473 return ret;
3454unmap_ctx: 3474unmap_ctx:
3455 ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); 3475 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3456 qi_cache_free(edesc); 3476 qi_cache_free(edesc);
3457 return ret; 3477 return ret;
3458} 3478}
@@ -3484,7 +3504,7 @@ static int ahash_final_ctx(struct ahash_request *req)
3484 sg_table = &edesc->sgt[0]; 3504 sg_table = &edesc->sgt[0];
3485 3505
3486 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, 3506 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3487 DMA_TO_DEVICE); 3507 DMA_BIDIRECTIONAL);
3488 if (ret) 3508 if (ret)
3489 goto unmap_ctx; 3509 goto unmap_ctx;
3490 3510
@@ -3503,22 +3523,13 @@ static int ahash_final_ctx(struct ahash_request *req)
3503 } 3523 }
3504 edesc->qm_sg_bytes = qm_sg_bytes; 3524 edesc->qm_sg_bytes = qm_sg_bytes;
3505 3525
3506 edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3507 DMA_FROM_DEVICE);
3508 if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3509 dev_err(ctx->dev, "unable to map dst\n");
3510 edesc->dst_dma = 0;
3511 ret = -ENOMEM;
3512 goto unmap_ctx;
3513 }
3514
3515 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); 3526 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3516 dpaa2_fl_set_final(in_fle, true); 3527 dpaa2_fl_set_final(in_fle, true);
3517 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); 3528 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3518 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); 3529 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3519 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen); 3530 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3520 dpaa2_fl_set_format(out_fle, dpaa2_fl_single); 3531 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3521 dpaa2_fl_set_addr(out_fle, edesc->dst_dma); 3532 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3522 dpaa2_fl_set_len(out_fle, digestsize); 3533 dpaa2_fl_set_len(out_fle, digestsize);
3523 3534
3524 req_ctx->flc = &ctx->flc[FINALIZE]; 3535 req_ctx->flc = &ctx->flc[FINALIZE];
@@ -3533,7 +3544,7 @@ static int ahash_final_ctx(struct ahash_request *req)
3533 return ret; 3544 return ret;
3534 3545
3535unmap_ctx: 3546unmap_ctx:
3536 ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE); 3547 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3537 qi_cache_free(edesc); 3548 qi_cache_free(edesc);
3538 return ret; 3549 return ret;
3539} 3550}
@@ -3586,7 +3597,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
3586 sg_table = &edesc->sgt[0]; 3597 sg_table = &edesc->sgt[0];
3587 3598
3588 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, 3599 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3589 DMA_TO_DEVICE); 3600 DMA_BIDIRECTIONAL);
3590 if (ret) 3601 if (ret)
3591 goto unmap_ctx; 3602 goto unmap_ctx;
3592 3603
@@ -3605,22 +3616,13 @@ static int ahash_finup_ctx(struct ahash_request *req)
3605 } 3616 }
3606 edesc->qm_sg_bytes = qm_sg_bytes; 3617 edesc->qm_sg_bytes = qm_sg_bytes;
3607 3618
3608 edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
3609 DMA_FROM_DEVICE);
3610 if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
3611 dev_err(ctx->dev, "unable to map dst\n");
3612 edesc->dst_dma = 0;
3613 ret = -ENOMEM;
3614 goto unmap_ctx;
3615 }
3616
3617 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); 3619 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3618 dpaa2_fl_set_final(in_fle, true); 3620 dpaa2_fl_set_final(in_fle, true);
3619 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); 3621 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3620 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); 3622 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3621 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes); 3623 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3622 dpaa2_fl_set_format(out_fle, dpaa2_fl_single); 3624 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3623 dpaa2_fl_set_addr(out_fle, edesc->dst_dma); 3625 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3624 dpaa2_fl_set_len(out_fle, digestsize); 3626 dpaa2_fl_set_len(out_fle, digestsize);
3625 3627
3626 req_ctx->flc = &ctx->flc[FINALIZE]; 3628 req_ctx->flc = &ctx->flc[FINALIZE];
@@ -3635,7 +3637,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
3635 return ret; 3637 return ret;
3636 3638
3637unmap_ctx: 3639unmap_ctx:
3638 ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE); 3640 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3639 qi_cache_free(edesc); 3641 qi_cache_free(edesc);
3640 return ret; 3642 return ret;
3641} 3643}
@@ -3704,18 +3706,19 @@ static int ahash_digest(struct ahash_request *req)
3704 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src)); 3706 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3705 } 3707 }
3706 3708
3707 edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize, 3709 state->ctx_dma_len = digestsize;
3710 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3708 DMA_FROM_DEVICE); 3711 DMA_FROM_DEVICE);
3709 if (dma_mapping_error(ctx->dev, edesc->dst_dma)) { 3712 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3710 dev_err(ctx->dev, "unable to map dst\n"); 3713 dev_err(ctx->dev, "unable to map ctx\n");
3711 edesc->dst_dma = 0; 3714 state->ctx_dma = 0;
3712 goto unmap; 3715 goto unmap;
3713 } 3716 }
3714 3717
3715 dpaa2_fl_set_final(in_fle, true); 3718 dpaa2_fl_set_final(in_fle, true);
3716 dpaa2_fl_set_len(in_fle, req->nbytes); 3719 dpaa2_fl_set_len(in_fle, req->nbytes);
3717 dpaa2_fl_set_format(out_fle, dpaa2_fl_single); 3720 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3718 dpaa2_fl_set_addr(out_fle, edesc->dst_dma); 3721 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3719 dpaa2_fl_set_len(out_fle, digestsize); 3722 dpaa2_fl_set_len(out_fle, digestsize);
3720 3723
3721 req_ctx->flc = &ctx->flc[DIGEST]; 3724 req_ctx->flc = &ctx->flc[DIGEST];
@@ -3729,7 +3732,7 @@ static int ahash_digest(struct ahash_request *req)
3729 return ret; 3732 return ret;
3730 3733
3731unmap: 3734unmap:
3732 ahash_unmap(ctx->dev, edesc, req, digestsize); 3735 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3733 qi_cache_free(edesc); 3736 qi_cache_free(edesc);
3734 return ret; 3737 return ret;
3735} 3738}
@@ -3755,27 +3758,39 @@ static int ahash_final_no_ctx(struct ahash_request *req)
3755 if (!edesc) 3758 if (!edesc)
3756 return ret; 3759 return ret;
3757 3760
3758 state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE); 3761 if (buflen) {
3759 if (dma_mapping_error(ctx->dev, state->buf_dma)) { 3762 state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
3760 dev_err(ctx->dev, "unable to map src\n"); 3763 DMA_TO_DEVICE);
3761 goto unmap; 3764 if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3765 dev_err(ctx->dev, "unable to map src\n");
3766 goto unmap;
3767 }
3762 } 3768 }
3763 3769
3764 edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize, 3770 state->ctx_dma_len = digestsize;
3771 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3765 DMA_FROM_DEVICE); 3772 DMA_FROM_DEVICE);
3766 if (dma_mapping_error(ctx->dev, edesc->dst_dma)) { 3773 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3767 dev_err(ctx->dev, "unable to map dst\n"); 3774 dev_err(ctx->dev, "unable to map ctx\n");
3768 edesc->dst_dma = 0; 3775 state->ctx_dma = 0;
3769 goto unmap; 3776 goto unmap;
3770 } 3777 }
3771 3778
3772 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); 3779 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3773 dpaa2_fl_set_final(in_fle, true); 3780 dpaa2_fl_set_final(in_fle, true);
3774 dpaa2_fl_set_format(in_fle, dpaa2_fl_single); 3781 /*
3775 dpaa2_fl_set_addr(in_fle, state->buf_dma); 3782 * crypto engine requires the input entry to be present when
3776 dpaa2_fl_set_len(in_fle, buflen); 3783 * "frame list" FD is used.
3784 * Since engine does not support FMT=2'b11 (unused entry type), leaving
3785 * in_fle zeroized (except for "Final" flag) is the best option.
3786 */
3787 if (buflen) {
3788 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3789 dpaa2_fl_set_addr(in_fle, state->buf_dma);
3790 dpaa2_fl_set_len(in_fle, buflen);
3791 }
3777 dpaa2_fl_set_format(out_fle, dpaa2_fl_single); 3792 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3778 dpaa2_fl_set_addr(out_fle, edesc->dst_dma); 3793 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3779 dpaa2_fl_set_len(out_fle, digestsize); 3794 dpaa2_fl_set_len(out_fle, digestsize);
3780 3795
3781 req_ctx->flc = &ctx->flc[DIGEST]; 3796 req_ctx->flc = &ctx->flc[DIGEST];
@@ -3790,7 +3805,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
3790 return ret; 3805 return ret;
3791 3806
3792unmap: 3807unmap:
3793 ahash_unmap(ctx->dev, edesc, req, digestsize); 3808 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3794 qi_cache_free(edesc); 3809 qi_cache_free(edesc);
3795 return ret; 3810 return ret;
3796} 3811}
@@ -3870,6 +3885,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
3870 } 3885 }
3871 edesc->qm_sg_bytes = qm_sg_bytes; 3886 edesc->qm_sg_bytes = qm_sg_bytes;
3872 3887
3888 state->ctx_dma_len = ctx->ctx_len;
3873 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, 3889 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
3874 ctx->ctx_len, DMA_FROM_DEVICE); 3890 ctx->ctx_len, DMA_FROM_DEVICE);
3875 if (dma_mapping_error(ctx->dev, state->ctx_dma)) { 3891 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
@@ -3918,7 +3934,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
3918 3934
3919 return ret; 3935 return ret;
3920unmap_ctx: 3936unmap_ctx:
3921 ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); 3937 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
3922 qi_cache_free(edesc); 3938 qi_cache_free(edesc);
3923 return ret; 3939 return ret;
3924} 3940}
@@ -3983,11 +3999,12 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
3983 } 3999 }
3984 edesc->qm_sg_bytes = qm_sg_bytes; 4000 edesc->qm_sg_bytes = qm_sg_bytes;
3985 4001
3986 edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize, 4002 state->ctx_dma_len = digestsize;
4003 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3987 DMA_FROM_DEVICE); 4004 DMA_FROM_DEVICE);
3988 if (dma_mapping_error(ctx->dev, edesc->dst_dma)) { 4005 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3989 dev_err(ctx->dev, "unable to map dst\n"); 4006 dev_err(ctx->dev, "unable to map ctx\n");
3990 edesc->dst_dma = 0; 4007 state->ctx_dma = 0;
3991 ret = -ENOMEM; 4008 ret = -ENOMEM;
3992 goto unmap; 4009 goto unmap;
3993 } 4010 }
@@ -3998,7 +4015,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
3998 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); 4015 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3999 dpaa2_fl_set_len(in_fle, buflen + req->nbytes); 4016 dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4000 dpaa2_fl_set_format(out_fle, dpaa2_fl_single); 4017 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4001 dpaa2_fl_set_addr(out_fle, edesc->dst_dma); 4018 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4002 dpaa2_fl_set_len(out_fle, digestsize); 4019 dpaa2_fl_set_len(out_fle, digestsize);
4003 4020
4004 req_ctx->flc = &ctx->flc[DIGEST]; 4021 req_ctx->flc = &ctx->flc[DIGEST];
@@ -4013,7 +4030,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
4013 4030
4014 return ret; 4031 return ret;
4015unmap: 4032unmap:
4016 ahash_unmap(ctx->dev, edesc, req, digestsize); 4033 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
4017 qi_cache_free(edesc); 4034 qi_cache_free(edesc);
4018 return -ENOMEM; 4035 return -ENOMEM;
4019} 4036}
@@ -4100,6 +4117,7 @@ static int ahash_update_first(struct ahash_request *req)
4100 scatterwalk_map_and_copy(next_buf, req->src, to_hash, 4117 scatterwalk_map_and_copy(next_buf, req->src, to_hash,
4101 *next_buflen, 0); 4118 *next_buflen, 0);
4102 4119
4120 state->ctx_dma_len = ctx->ctx_len;
4103 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, 4121 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4104 ctx->ctx_len, DMA_FROM_DEVICE); 4122 ctx->ctx_len, DMA_FROM_DEVICE);
4105 if (dma_mapping_error(ctx->dev, state->ctx_dma)) { 4123 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
@@ -4143,7 +4161,7 @@ static int ahash_update_first(struct ahash_request *req)
4143 4161
4144 return ret; 4162 return ret;
4145unmap_ctx: 4163unmap_ctx:
4146 ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); 4164 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4147 qi_cache_free(edesc); 4165 qi_cache_free(edesc);
4148 return ret; 4166 return ret;
4149} 4167}
@@ -4162,6 +4180,7 @@ static int ahash_init(struct ahash_request *req)
4162 state->final = ahash_final_no_ctx; 4180 state->final = ahash_final_no_ctx;
4163 4181
4164 state->ctx_dma = 0; 4182 state->ctx_dma = 0;
4183 state->ctx_dma_len = 0;
4165 state->current_buf = 0; 4184 state->current_buf = 0;
4166 state->buf_dma = 0; 4185 state->buf_dma = 0;
4167 state->buflen_0 = 0; 4186 state->buflen_0 = 0;
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index 20890780fb82..be5085451053 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -162,14 +162,12 @@ struct skcipher_edesc {
162 162
163/* 163/*
164 * ahash_edesc - s/w-extended ahash descriptor 164 * ahash_edesc - s/w-extended ahash descriptor
165 * @dst_dma: I/O virtual address of req->result
166 * @qm_sg_dma: I/O virtual address of h/w link table 165 * @qm_sg_dma: I/O virtual address of h/w link table
167 * @src_nents: number of segments in input scatterlist 166 * @src_nents: number of segments in input scatterlist
168 * @qm_sg_bytes: length of dma mapped qm_sg space 167 * @qm_sg_bytes: length of dma mapped qm_sg space
169 * @sgt: pointer to h/w link table 168 * @sgt: pointer to h/w link table
170 */ 169 */
171struct ahash_edesc { 170struct ahash_edesc {
172 dma_addr_t dst_dma;
173 dma_addr_t qm_sg_dma; 171 dma_addr_t qm_sg_dma;
174 int src_nents; 172 int src_nents;
175 int qm_sg_bytes; 173 int qm_sg_bytes;
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 58285642306e..fe24485274e1 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -994,8 +994,6 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
994static struct akcipher_alg caam_rsa = { 994static struct akcipher_alg caam_rsa = {
995 .encrypt = caam_rsa_enc, 995 .encrypt = caam_rsa_enc,
996 .decrypt = caam_rsa_dec, 996 .decrypt = caam_rsa_dec,
997 .sign = caam_rsa_dec,
998 .verify = caam_rsa_enc,
999 .set_pub_key = caam_rsa_set_pub_key, 997 .set_pub_key = caam_rsa_set_pub_key,
1000 .set_priv_key = caam_rsa_set_priv_key, 998 .set_priv_key = caam_rsa_set_priv_key,
1001 .max_size = caam_rsa_max_size, 999 .max_size = caam_rsa_max_size,
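Dropping .sign/.verify from caam_rsa matches the akcipher verify() rework elsewhere in this series: verify() now receives the signature together with the expected digest and performs the comparison inside the API, so wiring the raw modexp primitives in as callbacks (sign == decrypt, verify == encrypt, as the removed lines had it) is no longer a valid implementation. The slimmed registration keeps only the raw cipher operations:

        static struct akcipher_alg caam_rsa = {
                .encrypt = caam_rsa_enc,
                .decrypt = caam_rsa_dec,
                /* no .sign/.verify: raw RSA modexp is not a signature
                 * scheme under the reworked verify() API */
                .set_pub_key = caam_rsa_set_pub_key,
                .set_priv_key = caam_rsa_set_priv_key,
                .max_size = caam_rsa_max_size,
        };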
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 858bdc9ab4a3..e2ba3d202da5 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -468,6 +468,24 @@ static int caam_get_era(struct caam_ctrl __iomem *ctrl)
468 return caam_get_era_from_hw(ctrl); 468 return caam_get_era_from_hw(ctrl);
469} 469}
470 470
471/*
472 * ERRATA: imx6 devices (imx6D, imx6Q, imx6DL, imx6S, imx6DP and imx6DQ)
473 * have an issue wherein AXI bus transactions may not occur in the correct
474 * order. This isn't a problem running single descriptors, but can be if
475 * running multiple concurrent descriptors. Reworking the driver to throttle
476 * to single requests is impractical, thus the workaround is to limit the AXI
477 * pipeline to a depth of 1 (from its default of 4) to preclude this situation
478 * from occurring.
479 */
480static void handle_imx6_err005766(u32 *mcr)
481{
482 if (of_machine_is_compatible("fsl,imx6q") ||
483 of_machine_is_compatible("fsl,imx6dl") ||
484 of_machine_is_compatible("fsl,imx6qp"))
485 clrsetbits_32(mcr, MCFGR_AXIPIPE_MASK,
486 1 << MCFGR_AXIPIPE_SHIFT);
487}
488
471static const struct of_device_id caam_match[] = { 489static const struct of_device_id caam_match[] = {
472 { 490 {
473 .compatible = "fsl,sec-v4.0", 491 .compatible = "fsl,sec-v4.0",
@@ -640,6 +658,8 @@ static int caam_probe(struct platform_device *pdev)
640 (sizeof(dma_addr_t) == sizeof(u64) ? 658 (sizeof(dma_addr_t) == sizeof(u64) ?
641 MCFGR_LONG_PTR : 0)); 659 MCFGR_LONG_PTR : 0));
642 660
661 handle_imx6_err005766(&ctrl->mcr);
662
643 /* 663 /*
644 * Read the Compile Time parameters and SCFGR to determine 664
645 * if Virtualization is enabled for this platform 665 * if Virtualization is enabled for this platform
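handle_imx6_err005766() is a read-modify-write on the master configuration register. Assuming the usual CAAM definition of clrsetbits_32() (it is not shown in this diff), the helper behaves as:

        static inline void clrsetbits_32(void __iomem *reg, u32 clear, u32 set)
        {
                wr_reg32(reg, (rd_reg32(reg) & ~clear) | set);
        }

so the workaround clears the whole MCFGR_AXIPIPE field and programs a depth of 1, leaving every other MCR bit untouched. Note that it keys off the device-tree root compatible via of_machine_is_compatible(), so only the affected i.MX6 variants pay the throughput cost.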
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 21a70fd32f5d..a4129a35a330 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -138,7 +138,7 @@ static const struct {
138 { 0x46, "Annotation length exceeds offset (reuse mode)"}, 138 { 0x46, "Annotation length exceeds offset (reuse mode)"},
139 { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"}, 139 { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
140 { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"}, 140 { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
141 { 0x4B, "Annotation output enabled but ASA cannote be expanded (frame list)"}, 141 { 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
142 { 0x51, "Unsupported IF reuse mode"}, 142 { 0x51, "Unsupported IF reuse mode"},
143 { 0x52, "Unsupported FL use mode"}, 143 { 0x52, "Unsupported FL use mode"},
144 { 0x53, "Unsupported RJD use mode"}, 144 { 0x53, "Unsupported RJD use mode"},
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 5869ad58d497..3392615dc91b 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -49,13 +49,11 @@ struct caam_drv_private_jr {
49 atomic_t tfm_count ____cacheline_aligned; 49 atomic_t tfm_count ____cacheline_aligned;
50 50
51 /* Job ring info */ 51 /* Job ring info */
52 int ringsize; /* Size of rings (assume input = output) */
53 struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */ 52 struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */
54 spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */ 53 spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */
55 int inp_ring_write_index; /* Input index "tail" */ 54 u32 inpring_avail; /* Number of free entries in input ring */
56 int head; /* entinfo (s/w ring) head index */ 55 int head; /* entinfo (s/w ring) head index */
57 dma_addr_t *inpring; /* Base of input ring, alloc DMA-safe */ 56 dma_addr_t *inpring; /* Base of input ring, alloc DMA-safe */
58 spinlock_t outlock ____cacheline_aligned; /* Output ring index lock */
59 int out_ring_read_index; /* Output index "tail" */ 57 int out_ring_read_index; /* Output index "tail" */
60 int tail; /* entinfo (s/w ring) tail index */ 58 int tail; /* entinfo (s/w ring) tail index */
61 struct jr_outentry *outring; /* Base of output ring, DMA-safe */ 59 struct jr_outentry *outring; /* Base of output ring, DMA-safe */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index d50085a03597..044a69b526f7 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -170,13 +170,13 @@ static void caam_jr_dequeue(unsigned long devarg)
170 void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg); 170 void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
171 u32 *userdesc, userstatus; 171 u32 *userdesc, userstatus;
172 void *userarg; 172 void *userarg;
173 u32 outring_used = 0;
173 174
174 while (rd_reg32(&jrp->rregs->outring_used)) { 175 while (outring_used ||
176 (outring_used = rd_reg32(&jrp->rregs->outring_used))) {
175 177
176 head = READ_ONCE(jrp->head); 178 head = READ_ONCE(jrp->head);
177 179
178 spin_lock(&jrp->outlock);
179
180 sw_idx = tail = jrp->tail; 180 sw_idx = tail = jrp->tail;
181 hw_idx = jrp->out_ring_read_index; 181 hw_idx = jrp->out_ring_read_index;
182 182
@@ -199,7 +199,7 @@ static void caam_jr_dequeue(unsigned long devarg)
199 /* mark completed, avoid matching on a recycled desc addr */ 199 /* mark completed, avoid matching on a recycled desc addr */
200 jrp->entinfo[sw_idx].desc_addr_dma = 0; 200 jrp->entinfo[sw_idx].desc_addr_dma = 0;
201 201
202 /* Stash callback params for use outside of lock */ 202 /* Stash callback params */
203 usercall = jrp->entinfo[sw_idx].callbk; 203 usercall = jrp->entinfo[sw_idx].callbk;
204 userarg = jrp->entinfo[sw_idx].cbkarg; 204 userarg = jrp->entinfo[sw_idx].cbkarg;
205 userdesc = jrp->entinfo[sw_idx].desc_addr_virt; 205 userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
@@ -213,7 +213,7 @@ static void caam_jr_dequeue(unsigned long devarg)
213 mb(); 213 mb();
214 214
215 /* set done */ 215 /* set done */
216 wr_reg32(&jrp->rregs->outring_rmvd, 1); 216 wr_reg32_relaxed(&jrp->rregs->outring_rmvd, 1);
217 217
218 jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) & 218 jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
219 (JOBR_DEPTH - 1); 219 (JOBR_DEPTH - 1);
@@ -232,10 +232,9 @@ static void caam_jr_dequeue(unsigned long devarg)
232 jrp->tail = tail; 232 jrp->tail = tail;
233 } 233 }
234 234
235 spin_unlock(&jrp->outlock);
236
237 /* Finally, execute user's callback */ 235 /* Finally, execute user's callback */
238 usercall(dev, userdesc, userstatus, userarg); 236 usercall(dev, userdesc, userstatus, userarg);
237 outring_used--;
239 } 238 }
240 239
241 /* reenable / unmask IRQs */ 240 /* reenable / unmask IRQs */
@@ -345,7 +344,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
345 head = jrp->head; 344 head = jrp->head;
346 tail = READ_ONCE(jrp->tail); 345 tail = READ_ONCE(jrp->tail);
347 346
348 if (!rd_reg32(&jrp->rregs->inpring_avail) || 347 if (!jrp->inpring_avail ||
349 CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { 348 CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
350 spin_unlock_bh(&jrp->inplock); 349 spin_unlock_bh(&jrp->inplock);
351 dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE); 350 dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
@@ -359,7 +358,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
359 head_entry->cbkarg = areq; 358 head_entry->cbkarg = areq;
360 head_entry->desc_addr_dma = desc_dma; 359 head_entry->desc_addr_dma = desc_dma;
361 360
362 jrp->inpring[jrp->inp_ring_write_index] = cpu_to_caam_dma(desc_dma); 361 jrp->inpring[head] = cpu_to_caam_dma(desc_dma);
363 362
364 /* 363 /*
365 * Guarantee that the descriptor's DMA address has been written to 364 * Guarantee that the descriptor's DMA address has been written to
@@ -368,18 +367,22 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
368 */ 367 */
369 smp_wmb(); 368 smp_wmb();
370 369
371 jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
372 (JOBR_DEPTH - 1);
373 jrp->head = (head + 1) & (JOBR_DEPTH - 1); 370 jrp->head = (head + 1) & (JOBR_DEPTH - 1);
374 371
375 /* 372 /*
376 * Ensure that all job information has been written before 373 * Ensure that all job information has been written before
377 * notifying CAAM that a new job was added to the input ring. 374 * notifying CAAM that a new job was added to the input ring
375 * using a memory barrier. wr_reg32() uses the iowrite32() API
376 * to do the register write, and iowrite32() issues a memory barrier
377 * before the write operation.
378 */ 378 */
379 wmb();
380 379
381 wr_reg32(&jrp->rregs->inpring_jobadd, 1); 380 wr_reg32(&jrp->rregs->inpring_jobadd, 1);
382 381
382 jrp->inpring_avail--;
383 if (!jrp->inpring_avail)
384 jrp->inpring_avail = rd_reg32(&jrp->rregs->inpring_avail);
385
383 spin_unlock_bh(&jrp->inplock); 386 spin_unlock_bh(&jrp->inplock);
384 387
385 return 0; 388 return 0;
@@ -431,7 +434,6 @@ static int caam_jr_init(struct device *dev)
431 jrp->entinfo[i].desc_addr_dma = !0; 434 jrp->entinfo[i].desc_addr_dma = !0;
432 435
433 /* Setup rings */ 436 /* Setup rings */
434 jrp->inp_ring_write_index = 0;
435 jrp->out_ring_read_index = 0; 437 jrp->out_ring_read_index = 0;
436 jrp->head = 0; 438 jrp->head = 0;
437 jrp->tail = 0; 439 jrp->tail = 0;
@@ -441,10 +443,9 @@ static int caam_jr_init(struct device *dev)
441 wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH); 443 wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
442 wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH); 444 wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
443 445
444 jrp->ringsize = JOBR_DEPTH; 446 jrp->inpring_avail = JOBR_DEPTH;
445 447
446 spin_lock_init(&jrp->inplock); 448 spin_lock_init(&jrp->inplock);
447 spin_lock_init(&jrp->outlock);
448 449
449 /* Select interrupt coalescing parameters */ 450 /* Select interrupt coalescing parameters */
450 clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC | 451 clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
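Two independent costs leave the job-ring hot path in jr.c. First, outlock goes away, presumably because caam_jr_dequeue() runs in a single tasklet and is therefore the only consumer of the output-ring indices. Second, both rings now cache hardware counters so an MMIO read happens once per batch rather than once per job. The cached value is safe because it is a lower bound: hardware only ever frees input slots (and only ever adds output entries), never the reverse. The enqueue side:

        jrp->inpring_avail--;
        if (!jrp->inpring_avail)        /* only now pay for a register read */
                jrp->inpring_avail = rd_reg32(&jrp->rregs->inpring_avail);

The dequeue loop applies the same idea to outring_used, re-reading the register only when the local count is exhausted.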
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 7cb8b1755e57..9f08f84cca59 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -318,7 +318,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
318 /* Create a new req FQ in parked state */ 318 /* Create a new req FQ in parked state */
319 new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq, 319 new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
320 drv_ctx->context_a, 0); 320 drv_ctx->context_a, 0);
321 if (IS_ERR_OR_NULL(new_fq)) { 321 if (IS_ERR(new_fq)) {
322 dev_err(qidev, "FQ allocation for shdesc update failed\n"); 322 dev_err(qidev, "FQ allocation for shdesc update failed\n");
323 return PTR_ERR(new_fq); 323 return PTR_ERR(new_fq);
324 } 324 }
@@ -431,7 +431,7 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
431 /* Attach request FQ */ 431 /* Attach request FQ */
432 drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc, 432 drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
433 QMAN_INITFQ_FLAG_SCHED); 433 QMAN_INITFQ_FLAG_SCHED);
434 if (IS_ERR_OR_NULL(drv_ctx->req_fq)) { 434 if (IS_ERR(drv_ctx->req_fq)) {
435 dev_err(qidev, "create_caam_req_fq failed\n"); 435 dev_err(qidev, "create_caam_req_fq failed\n");
436 dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL); 436 dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
437 kfree(drv_ctx); 437 kfree(drv_ctx);
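The IS_ERR_OR_NULL() to IS_ERR() changes above are not cosmetic. As the tightened check implies, create_caam_req_fq() never returns NULL, only a valid pointer or an ERR_PTR(); and had it ever returned NULL, the old code would have computed PTR_ERR(NULL), which is 0, turning an allocation failure into an apparent success:

        new_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq,
                                    drv_ctx->context_a, 0);
        if (IS_ERR(new_fq))             /* valid pointer or ERR_PTR() only */
                return PTR_ERR(new_fq); /* a real negative errno */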
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 3cd0822ea819..c1fa1ec701d9 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -96,6 +96,14 @@ cpu_to_caam(16)
96cpu_to_caam(32) 96cpu_to_caam(32)
97cpu_to_caam(64) 97cpu_to_caam(64)
98 98
99static inline void wr_reg32_relaxed(void __iomem *reg, u32 data)
100{
101 if (caam_little_end)
102 writel_relaxed(data, reg);
103 else
104 writel_relaxed(cpu_to_be32(data), reg);
105}
106
99static inline void wr_reg32(void __iomem *reg, u32 data) 107static inline void wr_reg32(void __iomem *reg, u32 data)
100{ 108{
101 if (caam_little_end) 109 if (caam_little_end)
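wr_reg32_relaxed() mirrors wr_reg32() but uses writel_relaxed(), i.e. the store without the memory barrier that writel() implies. In the dequeue path that seems safe: by the time outring_rmvd is written, the driver has already read everything it needs from the output-ring entry being retired, so no DMA ordering hinges on this write being flushed immediately. The relaxed variant should not be used where a register write must order against prior memory accesses:

        /* retire one completed entry; barrier-free on purpose */
        wr_reg32_relaxed(&jrp->rregs->outring_rmvd, 1);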
@@ -253,6 +261,9 @@ struct version_regs {
253#define CHA_VER_VID_SHIFT 24 261#define CHA_VER_VID_SHIFT 24
254#define CHA_VER_VID_MASK (0xffull << CHA_VER_VID_SHIFT) 262#define CHA_VER_VID_MASK (0xffull << CHA_VER_VID_SHIFT)
255 263
264/* CHA Miscellaneous Information - AESA_MISC specific */
265#define CHA_VER_MISC_AES_GCM BIT(1 + CHA_VER_MISC_SHIFT)
266
256/* 267/*
257 * caam_perfmon - Performance Monitor/Secure Memory Status/ 268 * caam_perfmon - Performance Monitor/Secure Memory Status/
258 * CAAM Global Status/Component Version IDs 269 * CAAM Global Status/Component Version IDs
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 600336d169a9..9810ad8ac519 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -10,7 +10,6 @@
10#include <crypto/aes.h> 10#include <crypto/aes.h>
11#include <crypto/algapi.h> 11#include <crypto/algapi.h>
12#include <crypto/authenc.h> 12#include <crypto/authenc.h>
13#include <crypto/cryptd.h>
14#include <crypto/crypto_wq.h> 13#include <crypto/crypto_wq.h>
15#include <crypto/des.h> 14#include <crypto/des.h>
16#include <crypto/xts.h> 15#include <crypto/xts.h>
@@ -327,27 +326,36 @@ static int cvm_cfb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
327static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 326static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
328 u32 keylen) 327 u32 keylen)
329{ 328{
329 u32 flags = crypto_ablkcipher_get_flags(cipher);
330 int err;
331
332 err = __des3_verify_key(&flags, key);
333 if (unlikely(err)) {
334 crypto_ablkcipher_set_flags(cipher, flags);
335 return err;
336 }
337
330 return cvm_setkey(cipher, key, keylen, DES3_CBC); 338 return cvm_setkey(cipher, key, keylen, DES3_CBC);
331} 339}
332 340
333static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 341static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
334 u32 keylen) 342 u32 keylen)
335{ 343{
344 u32 flags = crypto_ablkcipher_get_flags(cipher);
345 int err;
346
347 err = __des3_verify_key(&flags, key);
348 if (unlikely(err)) {
349 crypto_ablkcipher_set_flags(cipher, flags);
350 return err;
351 }
352
336 return cvm_setkey(cipher, key, keylen, DES3_ECB); 353 return cvm_setkey(cipher, key, keylen, DES3_ECB);
337} 354}
338 355
339static int cvm_enc_dec_init(struct crypto_tfm *tfm) 356static int cvm_enc_dec_init(struct crypto_tfm *tfm)
340{ 357{
341 struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm); 358 tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx);
342
343 memset(ctx, 0, sizeof(*ctx));
344 tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx) +
345 sizeof(struct ablkcipher_request);
346 /* Additional memory for ablkcipher_request is
347 * allocated since the cryptd daemon uses
348 * this memory for request_ctx information
349 */
350
351 return 0; 359 return 0;
352} 360}
353 361
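The cvm_enc_dec_init() shrinkage follows from the cryptd removal: with no nested daemon request, reqsize no longer needs to reserve a trailing ablkcipher_request. The deleted memset() appears to have been redundant in any case, since the crypto core zero-allocates the tfm (and its context) at creation time, leaving:

        static int cvm_enc_dec_init(struct crypto_tfm *tfm)
        {
                tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx);
                return 0;
        }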
diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c
index 2ca431ed1db8..88a0166f5477 100644
--- a/drivers/crypto/cavium/cpt/cptvf_main.c
+++ b/drivers/crypto/cavium/cpt/cptvf_main.c
@@ -641,7 +641,7 @@ static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
641 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u); 641 cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
642} 642}
643 643
644void cptvf_device_init(struct cpt_vf *cptvf) 644static void cptvf_device_init(struct cpt_vf *cptvf)
645{ 645{
646 u64 base_addr = 0; 646 u64 base_addr = 0;
647 647
diff --git a/drivers/crypto/cavium/cpt/cptvf_mbox.c b/drivers/crypto/cavium/cpt/cptvf_mbox.c
index d5ec3b8a9e61..4f438eceb506 100644
--- a/drivers/crypto/cavium/cpt/cptvf_mbox.c
+++ b/drivers/crypto/cavium/cpt/cptvf_mbox.c
@@ -17,23 +17,6 @@ static void cptvf_send_msg_to_pf(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
17 mbx->data); 17 mbx->data);
18} 18}
19 19
20/* ACKs PF's mailbox message
21 */
22void cptvf_mbox_send_ack(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
23{
24 mbx->msg = CPT_MBOX_MSG_TYPE_ACK;
25 cptvf_send_msg_to_pf(cptvf, mbx);
26}
27
28/* NACKs PF's mailbox message that VF is not able to
29 * complete the action
30 */
31void cptvf_mbox_send_nack(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
32{
33 mbx->msg = CPT_MBOX_MSG_TYPE_NACK;
34 cptvf_send_msg_to_pf(cptvf, mbx);
35}
36
37/* Interrupt handler to handle mailbox messages from VFs */ 20/* Interrupt handler to handle mailbox messages from VFs */
38void cptvf_handle_mbox_intr(struct cpt_vf *cptvf) 21void cptvf_handle_mbox_intr(struct cpt_vf *cptvf)
39{ 22{
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index ca549c5dc08e..f16f61504241 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -223,7 +223,7 @@ scatter_gather_clean:
223 return ret; 223 return ret;
224} 224}
225 225
226int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd, 226static int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
227 u32 qno) 227 u32 qno)
228{ 228{
229 struct pci_dev *pdev = cptvf->pdev; 229 struct pci_dev *pdev = cptvf->pdev;
@@ -270,7 +270,7 @@ int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
270 return ret; 270 return ret;
271} 271}
272 272
273void do_request_cleanup(struct cpt_vf *cptvf, 273static void do_request_cleanup(struct cpt_vf *cptvf,
274 struct cpt_info_buffer *info) 274 struct cpt_info_buffer *info)
275{ 275{
276 int i; 276 int i;
@@ -316,7 +316,7 @@ void do_request_cleanup(struct cpt_vf *cptvf,
316 kzfree(info); 316 kzfree(info);
317} 317}
318 318
319void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info) 319static void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info)
320{ 320{
321 struct pci_dev *pdev = cptvf->pdev; 321 struct pci_dev *pdev = cptvf->pdev;
322 322
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
index 4f43eacd2557..e4841eb2a09f 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_aead.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -18,26 +18,6 @@
18 18
19#define GCM_AES_SALT_SIZE 4 19#define GCM_AES_SALT_SIZE 4
20 20
21/**
22 * struct nitrox_crypt_params - Params to set nitrox crypto request.
23 * @cryptlen: Encryption/Decryption data length
24 * @authlen: Assoc data length + Cryptlen
25 * @srclen: Input buffer length
26 * @dstlen: Output buffer length
27 * @iv: IV data
28 * @ivsize: IV data length
29 * @ctrl_arg: Identifies the request type (ENCRYPT/DECRYPT)
30 */
31struct nitrox_crypt_params {
32 unsigned int cryptlen;
33 unsigned int authlen;
34 unsigned int srclen;
35 unsigned int dstlen;
36 u8 *iv;
37 int ivsize;
38 u8 ctrl_arg;
39};
40
41union gph_p3 { 21union gph_p3 {
42 struct { 22 struct {
43#ifdef __BIG_ENDIAN_BITFIELD 23#ifdef __BIG_ENDIAN_BITFIELD
@@ -94,36 +74,40 @@ static int nitrox_aead_setauthsize(struct crypto_aead *aead,
94 return 0; 74 return 0;
95} 75}
96 76
97static int alloc_src_sglist(struct aead_request *areq, char *iv, int ivsize, 77static int alloc_src_sglist(struct nitrox_kcrypt_request *nkreq,
78 struct scatterlist *src, char *iv, int ivsize,
98 int buflen) 79 int buflen)
99{ 80{
100 struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); 81 int nents = sg_nents_for_len(src, buflen);
101 int nents = sg_nents_for_len(areq->src, buflen) + 1;
102 int ret; 82 int ret;
103 83
104 if (nents < 0) 84 if (nents < 0)
105 return nents; 85 return nents;
106 86
87 /* IV entry */
88 nents += 1;
107 /* Allocate buffer to hold IV and input scatterlist array */ 89 /* Allocate buffer to hold IV and input scatterlist array */
108 ret = alloc_src_req_buf(nkreq, nents, ivsize); 90 ret = alloc_src_req_buf(nkreq, nents, ivsize);
109 if (ret) 91 if (ret)
110 return ret; 92 return ret;
111 93
112 nitrox_creq_copy_iv(nkreq->src, iv, ivsize); 94 nitrox_creq_copy_iv(nkreq->src, iv, ivsize);
113 nitrox_creq_set_src_sg(nkreq, nents, ivsize, areq->src, buflen); 95 nitrox_creq_set_src_sg(nkreq, nents, ivsize, src, buflen);
114 96
115 return 0; 97 return 0;
116} 98}
117 99
118static int alloc_dst_sglist(struct aead_request *areq, int ivsize, int buflen) 100static int alloc_dst_sglist(struct nitrox_kcrypt_request *nkreq,
101 struct scatterlist *dst, int ivsize, int buflen)
119{ 102{
120 struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); 103 int nents = sg_nents_for_len(dst, buflen);
121 int nents = sg_nents_for_len(areq->dst, buflen) + 3;
122 int ret; 104 int ret;
123 105
124 if (nents < 0) 106 if (nents < 0)
125 return nents; 107 return nents;
126 108
109 /* IV, ORH, COMPLETION entries */
110 nents += 3;
127 /* Allocate buffer to hold ORH, COMPLETION and output scatterlist 111 /* Allocate buffer to hold ORH, COMPLETION and output scatterlist
128 * array 112 * array
129 */ 113 */
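The refactor above also makes the slot accounting explicit: sg_nents_for_len() counts only the entries covering the data, and the fixed extras — one IV slot on the source side, three (IV, ORH, COMPLETION) on the destination side — are added separately. A hypothetical helper showing the source-side arithmetic:

#include <linux/scatterlist.h>

static int example_count_src_entries(struct scatterlist *src, int buflen)
{
        int nents = sg_nents_for_len(src, buflen);

        if (nents < 0)          /* list shorter than buflen: -EINVAL */
                return nents;

        return nents + 1;       /* one extra slot for the inline IV */
}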
@@ -133,61 +117,54 @@ static int alloc_dst_sglist(struct aead_request *areq, int ivsize, int buflen)
133 117
134 nitrox_creq_set_orh(nkreq); 118 nitrox_creq_set_orh(nkreq);
135 nitrox_creq_set_comp(nkreq); 119 nitrox_creq_set_comp(nkreq);
136 nitrox_creq_set_dst_sg(nkreq, nents, ivsize, areq->dst, buflen); 120 nitrox_creq_set_dst_sg(nkreq, nents, ivsize, dst, buflen);
137 121
138 return 0; 122 return 0;
139} 123}
140 124
141static void free_src_sglist(struct aead_request *areq) 125static void free_src_sglist(struct nitrox_kcrypt_request *nkreq)
142{ 126{
143 struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
144
145 kfree(nkreq->src); 127 kfree(nkreq->src);
146} 128}
147 129
148static void free_dst_sglist(struct aead_request *areq) 130static void free_dst_sglist(struct nitrox_kcrypt_request *nkreq)
149{ 131{
150 struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
151
152 kfree(nkreq->dst); 132 kfree(nkreq->dst);
153} 133}
154 134
155static int nitrox_set_creq(struct aead_request *areq, 135static int nitrox_set_creq(struct nitrox_aead_rctx *rctx)
156 struct nitrox_crypt_params *params)
157{ 136{
158 struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); 137 struct se_crypto_request *creq = &rctx->nkreq.creq;
159 struct se_crypto_request *creq = &nkreq->creq;
160 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
161 union gph_p3 param3; 138 union gph_p3 param3;
162 struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
163 int ret; 139 int ret;
164 140
165 creq->flags = areq->base.flags; 141 creq->flags = rctx->flags;
166 creq->gfp = (areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 142 creq->gfp = (rctx->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
167 GFP_KERNEL : GFP_ATOMIC; 143 GFP_ATOMIC;
168 144
169 creq->ctrl.value = 0; 145 creq->ctrl.value = 0;
170 creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC; 146 creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
171 creq->ctrl.s.arg = params->ctrl_arg; 147 creq->ctrl.s.arg = rctx->ctrl_arg;
172 148
173 creq->gph.param0 = cpu_to_be16(params->cryptlen); 149 creq->gph.param0 = cpu_to_be16(rctx->cryptlen);
174 creq->gph.param1 = cpu_to_be16(params->authlen); 150 creq->gph.param1 = cpu_to_be16(rctx->cryptlen + rctx->assoclen);
175 creq->gph.param2 = cpu_to_be16(params->ivsize + areq->assoclen); 151 creq->gph.param2 = cpu_to_be16(rctx->ivsize + rctx->assoclen);
176 param3.iv_offset = 0; 152 param3.iv_offset = 0;
177 param3.auth_offset = params->ivsize; 153 param3.auth_offset = rctx->ivsize;
178 creq->gph.param3 = cpu_to_be16(param3.param); 154 creq->gph.param3 = cpu_to_be16(param3.param);
179 155
180 creq->ctx_handle = nctx->u.ctx_handle; 156 creq->ctx_handle = rctx->ctx_handle;
181 creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context); 157 creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context);
182 158
183 ret = alloc_src_sglist(areq, params->iv, params->ivsize, 159 ret = alloc_src_sglist(&rctx->nkreq, rctx->src, rctx->iv, rctx->ivsize,
184 params->srclen); 160 rctx->srclen);
185 if (ret) 161 if (ret)
186 return ret; 162 return ret;
187 163
188 ret = alloc_dst_sglist(areq, params->ivsize, params->dstlen); 164 ret = alloc_dst_sglist(&rctx->nkreq, rctx->dst, rctx->ivsize,
165 rctx->dstlen);
189 if (ret) { 166 if (ret) {
190 free_src_sglist(areq); 167 free_src_sglist(&rctx->nkreq);
191 return ret; 168 return ret;
192 } 169 }
193 170
@@ -197,9 +174,10 @@ static int nitrox_set_creq(struct aead_request *areq,
197static void nitrox_aead_callback(void *arg, int err) 174static void nitrox_aead_callback(void *arg, int err)
198{ 175{
199 struct aead_request *areq = arg; 176 struct aead_request *areq = arg;
177 struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);
200 178
201 free_src_sglist(areq); 179 free_src_sglist(&rctx->nkreq);
202 free_dst_sglist(areq); 180 free_dst_sglist(&rctx->nkreq);
203 if (err) { 181 if (err) {
204 pr_err_ratelimited("request failed status 0x%0x\n", err); 182 pr_err_ratelimited("request failed status 0x%0x\n", err);
205 err = -EINVAL; 183 err = -EINVAL;
@@ -212,23 +190,25 @@ static int nitrox_aes_gcm_enc(struct aead_request *areq)
212{ 190{
213 struct crypto_aead *aead = crypto_aead_reqtfm(areq); 191 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
214 struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); 192 struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
215 struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); 193 struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);
216 struct se_crypto_request *creq = &nkreq->creq; 194 struct se_crypto_request *creq = &rctx->nkreq.creq;
217 struct flexi_crypto_context *fctx = nctx->u.fctx; 195 struct flexi_crypto_context *fctx = nctx->u.fctx;
218 struct nitrox_crypt_params params;
219 int ret; 196 int ret;
220 197
221 memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE); 198 memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
222 199
223 memset(&params, 0, sizeof(params)); 200 rctx->cryptlen = areq->cryptlen;
224 params.cryptlen = areq->cryptlen; 201 rctx->assoclen = areq->assoclen;
225 params.authlen = areq->assoclen + params.cryptlen; 202 rctx->srclen = areq->assoclen + areq->cryptlen;
226 params.srclen = params.authlen; 203 rctx->dstlen = rctx->srclen + aead->authsize;
227 params.dstlen = params.srclen + aead->authsize; 204 rctx->iv = &areq->iv[GCM_AES_SALT_SIZE];
228 params.iv = &areq->iv[GCM_AES_SALT_SIZE]; 205 rctx->ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
229 params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE; 206 rctx->flags = areq->base.flags;
230 params.ctrl_arg = ENCRYPT; 207 rctx->ctx_handle = nctx->u.ctx_handle;
231 ret = nitrox_set_creq(areq, &params); 208 rctx->src = areq->src;
209 rctx->dst = areq->dst;
210 rctx->ctrl_arg = ENCRYPT;
211 ret = nitrox_set_creq(rctx);
232 if (ret) 212 if (ret)
233 return ret; 213 return ret;
234 214
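Worked numbers for the length fields filled in above, with 16 bytes of AAD, a 64-byte plaintext, and a 16-byte tag (illustrative values only):

#include <stdio.h>

int main(void)
{
        unsigned int assoclen = 16, ptlen = 64, authsize = 16;
        unsigned int ctlen = ptlen + authsize;  /* decrypt input includes tag */

        /* encrypt: output grows by the tag */
        printf("enc src=%u dst=%u\n",
               assoclen + ptlen,                /* srclen = 80 */
               assoclen + ptlen + authsize);    /* dstlen = 96 */

        /* decrypt: areq->cryptlen already counts the tag, output drops it */
        printf("dec src=%u dst=%u\n",
               assoclen + ctlen,                /* srclen = 96 */
               assoclen + ctlen - authsize);    /* dstlen = 80 */
        return 0;
}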
@@ -241,23 +221,25 @@ static int nitrox_aes_gcm_dec(struct aead_request *areq)
241{ 221{
242 struct crypto_aead *aead = crypto_aead_reqtfm(areq); 222 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
243 struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); 223 struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
244 struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); 224 struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);
245 struct se_crypto_request *creq = &nkreq->creq; 225 struct se_crypto_request *creq = &rctx->nkreq.creq;
246 struct flexi_crypto_context *fctx = nctx->u.fctx; 226 struct flexi_crypto_context *fctx = nctx->u.fctx;
247 struct nitrox_crypt_params params;
248 int ret; 227 int ret;
249 228
250 memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE); 229 memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
251 230
252 memset(&params, 0, sizeof(params)); 231 rctx->cryptlen = areq->cryptlen - aead->authsize;
253 params.cryptlen = areq->cryptlen - aead->authsize; 232 rctx->assoclen = areq->assoclen;
254 params.authlen = areq->assoclen + params.cryptlen; 233 rctx->srclen = areq->cryptlen + areq->assoclen;
255 params.srclen = areq->cryptlen + areq->assoclen; 234 rctx->dstlen = rctx->srclen - aead->authsize;
256 params.dstlen = params.srclen - aead->authsize; 235 rctx->iv = &areq->iv[GCM_AES_SALT_SIZE];
257 params.iv = &areq->iv[GCM_AES_SALT_SIZE]; 236 rctx->ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
258 params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE; 237 rctx->flags = areq->base.flags;
259 params.ctrl_arg = DECRYPT; 238 rctx->ctx_handle = nctx->u.ctx_handle;
260 ret = nitrox_set_creq(areq, &params); 239 rctx->src = areq->src;
240 rctx->dst = areq->dst;
241 rctx->ctrl_arg = DECRYPT;
242 ret = nitrox_set_creq(rctx);
261 if (ret) 243 if (ret)
262 return ret; 244 return ret;
263 245
@@ -290,7 +272,7 @@ static int nitrox_aead_init(struct crypto_aead *aead)
290 return 0; 272 return 0;
291} 273}
292 274
293static int nitrox_aes_gcm_init(struct crypto_aead *aead) 275static int nitrox_gcm_common_init(struct crypto_aead *aead)
294{ 276{
295 int ret; 277 int ret;
296 struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); 278 struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
@@ -308,8 +290,20 @@ static int nitrox_aes_gcm_init(struct crypto_aead *aead)
308 flags->w0.auth_input_type = 1; 290 flags->w0.auth_input_type = 1;
309 flags->f = be64_to_cpu(flags->f); 291 flags->f = be64_to_cpu(flags->f);
310 292
311 crypto_aead_set_reqsize(aead, sizeof(struct aead_request) + 293 return 0;
312 sizeof(struct nitrox_kcrypt_request)); 294}
295
296static int nitrox_aes_gcm_init(struct crypto_aead *aead)
297{
298 int ret;
299
300 ret = nitrox_gcm_common_init(aead);
301 if (ret)
302 return ret;
303
304 crypto_aead_set_reqsize(aead,
305 sizeof(struct aead_request) +
306 sizeof(struct nitrox_aead_rctx));
313 307
314 return 0; 308 return 0;
315} 309}
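Splitting out nitrox_gcm_common_init() lets each algorithm advertise its own request-context size; the AEAD core then allocates that many bytes behind every struct aead_request, reachable via aead_request_ctx(). A sketch mirroring the hunk (example_* names are hypothetical):

#include <crypto/internal/aead.h>

struct example_rctx {
        unsigned int cryptlen;  /* per-request scratch state */
};

static int example_aead_init(struct crypto_aead *aead)
{
        /* aead_request_ctx() will point at a buffer this large */
        crypto_aead_set_reqsize(aead, sizeof(struct aead_request) +
                                      sizeof(struct example_rctx));
        return 0;
}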
@@ -332,6 +326,166 @@ static void nitrox_aead_exit(struct crypto_aead *aead)
332 nctx->ndev = NULL; 326 nctx->ndev = NULL;
333} 327}
334 328
329static int nitrox_rfc4106_setkey(struct crypto_aead *aead, const u8 *key,
330 unsigned int keylen)
331{
332 struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
333 struct flexi_crypto_context *fctx = nctx->u.fctx;
334 int ret;
335
336 if (keylen < GCM_AES_SALT_SIZE)
337 return -EINVAL;
338
339 keylen -= GCM_AES_SALT_SIZE;
340 ret = nitrox_aes_gcm_setkey(aead, key, keylen);
341 if (ret)
342 return ret;
343
344 memcpy(fctx->crypto.iv, key + keylen, GCM_AES_SALT_SIZE);
345 return 0;
346}
347
348static int nitrox_rfc4106_setauthsize(struct crypto_aead *aead,
349 unsigned int authsize)
350{
351 switch (authsize) {
352 case 8:
353 case 12:
354 case 16:
355 break;
356 default:
357 return -EINVAL;
358 }
359
360 return nitrox_aead_setauthsize(aead, authsize);
361}
362
363static int nitrox_rfc4106_set_aead_rctx_sglist(struct aead_request *areq)
364{
365 struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
366 struct nitrox_aead_rctx *aead_rctx = &rctx->base;
367 unsigned int assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
368 struct scatterlist *sg;
369
370 if (areq->assoclen != 16 && areq->assoclen != 20)
371 return -EINVAL;
372
373 scatterwalk_map_and_copy(rctx->assoc, areq->src, 0, assoclen, 0);
374 sg_init_table(rctx->src, 3);
375 sg_set_buf(rctx->src, rctx->assoc, assoclen);
376 sg = scatterwalk_ffwd(rctx->src + 1, areq->src, areq->assoclen);
377 if (sg != rctx->src + 1)
378 sg_chain(rctx->src, 2, sg);
379
380 if (areq->src != areq->dst) {
381 sg_init_table(rctx->dst, 3);
382 sg_set_buf(rctx->dst, rctx->assoc, assoclen);
383 sg = scatterwalk_ffwd(rctx->dst + 1, areq->dst, areq->assoclen);
384 if (sg != rctx->dst + 1)
385 sg_chain(rctx->dst, 2, sg);
386 }
387
388 aead_rctx->src = rctx->src;
389 aead_rctx->dst = (areq->src == areq->dst) ? rctx->src : rctx->dst;
390
391 return 0;
392}
393
394static void nitrox_rfc4106_callback(void *arg, int err)
395{
396 struct aead_request *areq = arg;
397 struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
398 struct nitrox_kcrypt_request *nkreq = &rctx->base.nkreq;
399
400 free_src_sglist(nkreq);
401 free_dst_sglist(nkreq);
402 if (err) {
403 pr_err_ratelimited("request failed status 0x%0x\n", err);
404 err = -EINVAL;
405 }
406
407 areq->base.complete(&areq->base, err);
408}
409
410static int nitrox_rfc4106_enc(struct aead_request *areq)
411{
412 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
413 struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
414 struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
415 struct nitrox_aead_rctx *aead_rctx = &rctx->base;
416 struct se_crypto_request *creq = &aead_rctx->nkreq.creq;
417 int ret;
418
419 aead_rctx->cryptlen = areq->cryptlen;
420 aead_rctx->assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
421 aead_rctx->srclen = aead_rctx->assoclen + aead_rctx->cryptlen;
422 aead_rctx->dstlen = aead_rctx->srclen + aead->authsize;
423 aead_rctx->iv = areq->iv;
424 aead_rctx->ivsize = GCM_RFC4106_IV_SIZE;
425 aead_rctx->flags = areq->base.flags;
426 aead_rctx->ctx_handle = nctx->u.ctx_handle;
427 aead_rctx->ctrl_arg = ENCRYPT;
428
429 ret = nitrox_rfc4106_set_aead_rctx_sglist(areq);
430 if (ret)
431 return ret;
432
433 ret = nitrox_set_creq(aead_rctx);
434 if (ret)
435 return ret;
436
437 /* send the crypto request */
438 return nitrox_process_se_request(nctx->ndev, creq,
439 nitrox_rfc4106_callback, areq);
440}
441
442static int nitrox_rfc4106_dec(struct aead_request *areq)
443{
444 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
445 struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
446 struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
447 struct nitrox_aead_rctx *aead_rctx = &rctx->base;
448 struct se_crypto_request *creq = &aead_rctx->nkreq.creq;
449 int ret;
450
451 aead_rctx->cryptlen = areq->cryptlen - aead->authsize;
452 aead_rctx->assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
453 aead_rctx->srclen =
454 areq->cryptlen - GCM_RFC4106_IV_SIZE + areq->assoclen;
455 aead_rctx->dstlen = aead_rctx->srclen - aead->authsize;
456 aead_rctx->iv = areq->iv;
457 aead_rctx->ivsize = GCM_RFC4106_IV_SIZE;
458 aead_rctx->flags = areq->base.flags;
459 aead_rctx->ctx_handle = nctx->u.ctx_handle;
460 aead_rctx->ctrl_arg = DECRYPT;
461
462 ret = nitrox_rfc4106_set_aead_rctx_sglist(areq);
463 if (ret)
464 return ret;
465
466 ret = nitrox_set_creq(aead_rctx);
467 if (ret)
468 return ret;
469
470 /* send the crypto request */
471 return nitrox_process_se_request(nctx->ndev, creq,
472 nitrox_rfc4106_callback, areq);
473}
474
475static int nitrox_rfc4106_init(struct crypto_aead *aead)
476{
477 int ret;
478
479 ret = nitrox_gcm_common_init(aead);
480 if (ret)
481 return ret;
482
483 crypto_aead_set_reqsize(aead, sizeof(struct aead_request) +
484 sizeof(struct nitrox_rfc4106_rctx));
485
486 return 0;
487}
488
335static struct aead_alg nitrox_aeads[] = { { 489static struct aead_alg nitrox_aeads[] = { {
336 .base = { 490 .base = {
337 .cra_name = "gcm(aes)", 491 .cra_name = "gcm(aes)",
@@ -351,6 +505,25 @@ static struct aead_alg nitrox_aeads[] = { {
351 .exit = nitrox_aead_exit, 505 .exit = nitrox_aead_exit,
352 .ivsize = GCM_AES_IV_SIZE, 506 .ivsize = GCM_AES_IV_SIZE,
353 .maxauthsize = AES_BLOCK_SIZE, 507 .maxauthsize = AES_BLOCK_SIZE,
508}, {
509 .base = {
510 .cra_name = "rfc4106(gcm(aes))",
511 .cra_driver_name = "n5_rfc4106",
512 .cra_priority = PRIO,
513 .cra_flags = CRYPTO_ALG_ASYNC,
514 .cra_blocksize = AES_BLOCK_SIZE,
515 .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
516 .cra_alignmask = 0,
517 .cra_module = THIS_MODULE,
518 },
519 .setkey = nitrox_rfc4106_setkey,
520 .setauthsize = nitrox_rfc4106_setauthsize,
521 .encrypt = nitrox_rfc4106_enc,
522 .decrypt = nitrox_rfc4106_dec,
523 .init = nitrox_rfc4106_init,
524 .exit = nitrox_aead_exit,
525 .ivsize = GCM_RFC4106_IV_SIZE,
526 .maxauthsize = AES_BLOCK_SIZE,
354} }; 527} };
355 528
356int nitrox_register_aeads(void) 529int nitrox_register_aeads(void)
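The new rfc4106 setkey assumes the RFC 4106 key blob layout: the AES key followed by a 4-byte salt, where the salt becomes the fixed first part of the 12-byte GCM nonce and the remaining 8 nonce bytes travel in each request's IV. A portable sketch of the split (hypothetical helper, not driver code):

#include <string.h>

#define EX_GCM_AES_SALT_SIZE 4

static int example_split_rfc4106_key(const unsigned char *blob,
                                     unsigned int bloblen,
                                     unsigned int *aeskeylen,
                                     unsigned char salt[EX_GCM_AES_SALT_SIZE])
{
        if (bloblen < EX_GCM_AES_SALT_SIZE)
                return -1;      /* too short to carry a salt */

        *aeskeylen = bloblen - EX_GCM_AES_SALT_SIZE;
        memcpy(salt, blob + *aeskeylen, EX_GCM_AES_SALT_SIZE);
        return 0;
}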
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c
index c08d9f33a3b1..3f0df60267a9 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c
@@ -437,6 +437,45 @@ void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode)
437 nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value); 437 nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value);
438} 438}
439 439
440static const char *get_core_option(u8 se_cores, u8 ae_cores)
441{
442 const char *option = "";
443
444 if (ae_cores == AE_MAX_CORES) {
445 switch (se_cores) {
446 case SE_MAX_CORES:
447 option = "60";
448 break;
449 case 40:
450 option = "60s";
451 break;
452 }
453 } else if (ae_cores == (AE_MAX_CORES / 2)) {
454 option = "30";
455 } else {
456 option = "60i";
457 }
458
459 return option;
460}
461
462static const char *get_feature_option(u8 zip_cores, int core_freq)
463{
464 if (zip_cores == 0)
465 return "";
466 else if (zip_cores < ZIP_MAX_CORES)
467 return "-C15";
468
469 if (core_freq >= 850)
470 return "-C45";
471 else if (core_freq >= 750)
472 return "-C35";
473 else if (core_freq >= 550)
474 return "-C25";
475
476 return "";
477}
478
440void nitrox_get_hwinfo(struct nitrox_device *ndev) 479void nitrox_get_hwinfo(struct nitrox_device *ndev)
441{ 480{
442 union emu_fuse_map emu_fuse; 481 union emu_fuse_map emu_fuse;
@@ -469,24 +508,14 @@ void nitrox_get_hwinfo(struct nitrox_device *ndev)
469 ndev->hw.zip_cores = ZIP_MAX_CORES - dead_cores; 508 ndev->hw.zip_cores = ZIP_MAX_CORES - dead_cores;
470 } 509 }
471 510
472 /* determine the partname CNN55<cores>-<freq><pincount>-<rev>*/ 511 /* determine the partname
473 if (ndev->hw.ae_cores == AE_MAX_CORES) { 512 * CNN55<core option>-<freq><pincount>-<feature option>-<rev>
474 switch (ndev->hw.se_cores) { 513 */
475 case SE_MAX_CORES: 514 snprintf(name, sizeof(name), "CNN55%s-%3dBG676%s-1.%u",
476 i = snprintf(name, sizeof(name), "CNN5560"); 515 get_core_option(ndev->hw.se_cores, ndev->hw.ae_cores),
477 break; 516 ndev->hw.freq,
478 case 40: 517 get_feature_option(ndev->hw.zip_cores, ndev->hw.freq),
479 i = snprintf(name, sizeof(name), "CNN5560s"); 518 ndev->hw.revision_id);
480 break;
481 }
482 } else if (ndev->hw.ae_cores == (AE_MAX_CORES / 2)) {
483 i = snprintf(name, sizeof(name), "CNN5530");
484 } else {
485 i = snprintf(name, sizeof(name), "CNN5560i");
486 }
487
488 snprintf(name + i, sizeof(name) - i, "-%3dBG676-1.%u",
489 ndev->hw.freq, ndev->hw.revision_id);
490 519
491 /* copy partname */ 520 /* copy partname */
492 strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname)); 521 strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
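With the helpers factored out, the partname collapses into one format string. A runnable example with illustrative fuse values (850 MHz, full core complement, revision 0):

#include <stdio.h>

int main(void)
{
        char name[64];
        const char *core = "60";        /* full SE + AE core complement */
        const char *feat = "-C45";      /* all ZIP cores at >= 850 MHz */

        snprintf(name, sizeof(name), "CNN55%s-%3dBG676%s-1.%u",
                 core, 850, feat, 0u);
        printf("%s\n", name);           /* CNN5560-850BG676-C45-1.0 */
        return 0;
}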
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
index 76c0f0be7233..efdbd0fc3e3b 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
@@ -212,6 +212,50 @@ struct nitrox_kcrypt_request {
212}; 212};
213 213
214/** 214/**
215 * struct nitrox_aead_rctx - AEAD request context
216 * @nkreq: Base request context
217 * @cryptlen: Encryption/Decryption data length
218 * @assoclen: AAD length
219 * @srclen: Input buffer length
220 * @dstlen: Output buffer length
221 * @iv: IV data
222 * @ivsize: IV data length
223 * @flags: AEAD req flags
224 * @ctx_handle: Device context handle
225 * @src: Source sglist
226 * @dst: Destination sglist
227 * @ctrl_arg: Identifies the request type (ENCRYPT/DECRYPT)
228 */
229struct nitrox_aead_rctx {
230 struct nitrox_kcrypt_request nkreq;
231 unsigned int cryptlen;
232 unsigned int assoclen;
233 unsigned int srclen;
234 unsigned int dstlen;
235 u8 *iv;
236 int ivsize;
237 u32 flags;
238 u64 ctx_handle;
239 struct scatterlist *src;
240 struct scatterlist *dst;
241 u8 ctrl_arg;
242};
243
244/**
245 * struct nitrox_rfc4106_rctx - rfc4106 cipher request context
246 * @base: AEAD request context
247 * @src: Source sglist
248 * @dst: Destination sglist
249 * @assoc: AAD
250 */
251struct nitrox_rfc4106_rctx {
252 struct nitrox_aead_rctx base;
253 struct scatterlist src[3];
254 struct scatterlist dst[3];
255 u8 assoc[20];
256};
257
258/**
215 * struct pkt_instr_hdr - Packet Instruction Header 259 * struct pkt_instr_hdr - Packet Instruction Header
216 * @g: Gather used 260 * @g: Gather used
217 * When [G] is set and [GSZ] != 0, the instruction is 261 * When [G] is set and [GSZ] != 0, the instruction is
@@ -512,7 +556,7 @@ static inline struct scatterlist *create_multi_sg(struct scatterlist *to_sg,
512 struct scatterlist *sg = to_sg; 556 struct scatterlist *sg = to_sg;
513 unsigned int sglen; 557 unsigned int sglen;
514 558
515 for (; buflen; buflen -= sglen) { 559 for (; buflen && from_sg; buflen -= sglen) {
516 sglen = from_sg->length; 560 sglen = from_sg->length;
517 if (sglen > buflen) 561 if (sglen > buflen)
518 sglen = buflen; 562 sglen = buflen;
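The create_multi_sg() fix above adds from_sg to the loop condition, so a source list that runs out before buflen does terminates cleanly instead of dereferencing NULL. The same guard in a portable, self-contained analogue:

#include <stdio.h>

struct node { unsigned int length; struct node *next; };

static unsigned int example_walk(struct node *from, unsigned int buflen)
{
        unsigned int copied = 0, sglen;

        /* stop on exhausted budget OR exhausted list */
        for (; buflen && from; buflen -= sglen) {
                sglen = from->length > buflen ? buflen : from->length;
                copied += sglen;
                from = from->next;
        }
        return copied;
}

int main(void)
{
        struct node b = { 32, NULL }, a = { 16, &b };

        printf("%u\n", example_walk(&a, 100));  /* 48, no crash */
        return 0;
}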
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index d4935d6cefdd..7e4a5e69085e 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -257,12 +257,8 @@ static int nitrox_aes_decrypt(struct skcipher_request *skreq)
257static int nitrox_3des_setkey(struct crypto_skcipher *cipher, 257static int nitrox_3des_setkey(struct crypto_skcipher *cipher,
258 const u8 *key, unsigned int keylen) 258 const u8 *key, unsigned int keylen)
259{ 259{
260 if (keylen != DES3_EDE_KEY_SIZE) { 260 return unlikely(des3_verify_key(cipher, key)) ?:
261 crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 261 nitrox_skcipher_setkey(cipher, 0, key, keylen);
262 return -EINVAL;
263 }
264
265 return nitrox_skcipher_setkey(cipher, 0, key, keylen);
266} 262}
267 263
268static int nitrox_3des_encrypt(struct skcipher_request *skreq) 264static int nitrox_3des_encrypt(struct skcipher_request *skreq)
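The rewritten nitrox_3des_setkey() leans on GCC's a ?: b extension: a is evaluated once and returned if nonzero, otherwise b is evaluated — here, return the verification error if any, else the real setkey's result. A tiny standalone illustration:

#include <stdio.h>

static int verify(int bad) { return bad ? -22 /* -EINVAL */ : 0; }
static int do_setkey(void) { return 0; }

static int setkey(int bad)
{
        return verify(bad) ?: do_setkey();      /* GNU "elvis" operator */
}

int main(void)
{
        printf("%d %d\n", setkey(0), setkey(1));        /* 0 -22 */
        return 0;
}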
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
index b92b6e7e100f..4985bc812b0e 100644
--- a/drivers/crypto/cavium/zip/zip_crypto.c
+++ b/drivers/crypto/cavium/zip/zip_crypto.c
@@ -69,7 +69,7 @@ static void zip_static_init_zip_ops(struct zip_operation *zip_ops,
69 zip_ops->csum = 1; /* Adler checksum desired */ 69 zip_ops->csum = 1; /* Adler checksum desired */
70} 70}
71 71
72int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag) 72static int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
73{ 73{
74 struct zip_operation *comp_ctx = &zip_ctx->zip_comp; 74 struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
75 struct zip_operation *decomp_ctx = &zip_ctx->zip_decomp; 75 struct zip_operation *decomp_ctx = &zip_ctx->zip_decomp;
@@ -107,7 +107,7 @@ err_comp_input:
107 return -ENOMEM; 107 return -ENOMEM;
108} 108}
109 109
110void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx) 110static void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
111{ 111{
112 struct zip_operation *comp_ctx = &zip_ctx->zip_comp; 112 struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
113 struct zip_operation *dec_ctx = &zip_ctx->zip_decomp; 113 struct zip_operation *dec_ctx = &zip_ctx->zip_decomp;
@@ -119,7 +119,7 @@ void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
119 zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE); 119 zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
120} 120}
121 121
122int zip_compress(const u8 *src, unsigned int slen, 122static int zip_compress(const u8 *src, unsigned int slen,
123 u8 *dst, unsigned int *dlen, 123 u8 *dst, unsigned int *dlen,
124 struct zip_kernel_ctx *zip_ctx) 124 struct zip_kernel_ctx *zip_ctx)
125{ 125{
@@ -155,7 +155,7 @@ int zip_compress(const u8 *src, unsigned int slen,
155 return ret; 155 return ret;
156} 156}
157 157
158int zip_decompress(const u8 *src, unsigned int slen, 158static int zip_decompress(const u8 *src, unsigned int slen,
159 u8 *dst, unsigned int *dlen, 159 u8 *dst, unsigned int *dlen,
160 struct zip_kernel_ctx *zip_ctx) 160 struct zip_kernel_ctx *zip_ctx)
161{ 161{
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index c2ff551d215b..91482ffcac59 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -43,24 +43,11 @@ static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
43 struct ccp_crypto_ablkcipher_alg *alg = 43 struct ccp_crypto_ablkcipher_alg *alg =
44 ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm)); 44 ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
45 u32 *flags = &tfm->base.crt_flags; 45 u32 *flags = &tfm->base.crt_flags;
46 int err;
46 47
47 48 err = __des3_verify_key(flags, key);
48 /* From des_generic.c: 49 if (unlikely(err))
49 * 50 return err;
50 * RFC2451:
51 * If the first two or last two independent 64-bit keys are
52 * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
53 * same as DES. Implementers MUST reject keys that exhibit this
54 * property.
55 */
56 const u32 *K = (const u32 *)key;
57
58 if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
59 !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
60 (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
61 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
62 return -EINVAL;
63 }
64 51
65 /* It's not clear that there is any support for a keysize of 112. 52 /* It's not clear that there is any support for a keysize of 112.
66 * If needed, the caller should make K1 == K3 53 * If needed, the caller should make K1 == K3
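For reference, the check this ccp hunk deletes (now centralized in __des3_verify_key()) reduces to two XOR comparisons: if K1 == K2 or K2 == K3, 3DES degenerates to single DES, and RFC 2451 requires rejecting the key. A standalone version of that test:

#include <stdint.h>
#include <string.h>

/* returns nonzero if the 24-byte 3DES key is RFC 2451-weak */
static int example_des3_weak_key(const uint8_t *key)
{
        uint32_t K[6];

        memcpy(K, key, sizeof(K));      /* avoids misaligned access */
        return !((K[0] ^ K[2]) | (K[1] ^ K[3])) ||      /* K1 == K2 */
               !((K[2] ^ K[4]) | (K[3] ^ K[5]));        /* K2 == K3 */
}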
diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
index 05850dfd7940..a2570c0c8cdc 100644
--- a/drivers/crypto/ccp/ccp-crypto-rsa.c
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -37,10 +37,9 @@ static inline int ccp_copy_and_save_keypart(u8 **kpbuf, unsigned int *kplen,
37 if (buf[nskip]) 37 if (buf[nskip])
38 break; 38 break;
39 *kplen = sz - nskip; 39 *kplen = sz - nskip;
40 *kpbuf = kzalloc(*kplen, GFP_KERNEL); 40 *kpbuf = kmemdup(buf + nskip, *kplen, GFP_KERNEL);
41 if (!*kpbuf) 41 if (!*kpbuf)
42 return -ENOMEM; 42 return -ENOMEM;
43 memcpy(*kpbuf, buf + nskip, *kplen);
44 43
45 return 0; 44 return 0;
46} 45}
@@ -214,8 +213,6 @@ static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm)
214static struct akcipher_alg ccp_rsa_defaults = { 213static struct akcipher_alg ccp_rsa_defaults = {
215 .encrypt = ccp_rsa_encrypt, 214 .encrypt = ccp_rsa_encrypt,
216 .decrypt = ccp_rsa_decrypt, 215 .decrypt = ccp_rsa_decrypt,
217 .sign = ccp_rsa_decrypt,
218 .verify = ccp_rsa_encrypt,
219 .set_pub_key = ccp_rsa_setpubkey, 216 .set_pub_key = ccp_rsa_setpubkey,
220 .set_priv_key = ccp_rsa_setprivkey, 217 .set_priv_key = ccp_rsa_setprivkey,
221 .max_size = ccp_rsa_maxsize, 218 .max_size = ccp_rsa_maxsize,
@@ -248,7 +245,8 @@ static struct ccp_rsa_def rsa_algs[] = {
248 } 245 }
249}; 246};
250 247
251int ccp_register_rsa_alg(struct list_head *head, const struct ccp_rsa_def *def) 248static int ccp_register_rsa_alg(struct list_head *head,
249 const struct ccp_rsa_def *def)
252{ 250{
253 struct ccp_crypto_akcipher_alg *ccp_alg; 251 struct ccp_crypto_akcipher_alg *ccp_alg;
254 struct akcipher_alg *alg; 252 struct akcipher_alg *alg;
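The keypart change above is the standard kzalloc()+memcpy() to kmemdup() collapse: one call, same GFP semantics, NULL on allocation failure, and no wasted zeroing of bytes that get overwritten anyway. A sketch, assuming kernel context:

#include <linux/slab.h>
#include <linux/string.h>

static u8 *example_dup_keypart(const u8 *src, size_t len)
{
        return kmemdup(src, len, GFP_KERNEL);   /* caller kfree()s */
}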
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 10a61cd54fce..3e10573f589e 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -293,8 +293,6 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
293 if (key_len > block_size) { 293 if (key_len > block_size) {
294 /* Must hash the input key */ 294 /* Must hash the input key */
295 sdesc->tfm = shash; 295 sdesc->tfm = shash;
296 sdesc->flags = crypto_ahash_get_flags(tfm) &
297 CRYPTO_TFM_REQ_MAY_SLEEP;
298 296
299 ret = crypto_shash_digest(sdesc, key, key_len, 297 ret = crypto_shash_digest(sdesc, key, key_len,
300 ctx->u.sha.key); 298 ctx->u.sha.key);
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index fadf859a14b8..656838433f2f 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -583,6 +583,69 @@ e_free:
583 return ret; 583 return ret;
584} 584}
585 585
586static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
587{
588 struct sev_user_data_get_id2 input;
589 struct sev_data_get_id *data;
590 void *id_blob = NULL;
591 int ret;
592
593 /* SEV GET_ID is available from SEV API v0.16 and up */
594 if (!SEV_VERSION_GREATER_OR_EQUAL(0, 16))
595 return -ENOTSUPP;
596
597 if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
598 return -EFAULT;
599
600 /* Check if we have write access to the userspace buffer */
601 if (input.address &&
602 input.length &&
603 !access_ok(input.address, input.length))
604 return -EFAULT;
605
606 data = kzalloc(sizeof(*data), GFP_KERNEL);
607 if (!data)
608 return -ENOMEM;
609
610 if (input.address && input.length) {
611 id_blob = kmalloc(input.length, GFP_KERNEL);
612 if (!id_blob) {
613 kfree(data);
614 return -ENOMEM;
615 }
616
617 data->address = __psp_pa(id_blob);
618 data->len = input.length;
619 }
620
621 ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
622
623 /*
624 * Firmware will return the length of the ID value (either the minimum
625 * required length or the actual length written); return it to the user.
626 */
627 input.length = data->len;
628
629 if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
630 ret = -EFAULT;
631 goto e_free;
632 }
633
634 if (id_blob) {
635 if (copy_to_user((void __user *)input.address,
636 id_blob, data->len)) {
637 ret = -EFAULT;
638 goto e_free;
639 }
640 }
641
642e_free:
643 kfree(id_blob);
644 kfree(data);
645
646 return ret;
647}
648
586static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp) 649static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
587{ 650{
588 struct sev_data_get_id *data; 651 struct sev_data_get_id *data;
@@ -761,8 +824,12 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
761 ret = sev_ioctl_do_pdh_export(&input); 824 ret = sev_ioctl_do_pdh_export(&input);
762 break; 825 break;
763 case SEV_GET_ID: 826 case SEV_GET_ID:
827 pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
764 ret = sev_ioctl_do_get_id(&input); 828 ret = sev_ioctl_do_get_id(&input);
765 break; 829 break;
830 case SEV_GET_ID2:
831 ret = sev_ioctl_do_get_id2(&input);
832 break;
766 default: 833 default:
767 ret = -EINVAL; 834 ret = -EINVAL;
768 goto out; 835 goto out;
@@ -997,7 +1064,7 @@ void psp_pci_init(void)
997 rc = sev_platform_init(&error); 1064 rc = sev_platform_init(&error);
998 if (rc) { 1065 if (rc) {
999 dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error); 1066 dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
1000 goto err; 1067 return;
1001 } 1068 }
1002 1069
1003 dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major, 1070 dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major,
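Because the firmware reports the needed length back through input.length, userspace can size the buffer in two passes. A hedged sketch of that flow, assuming the post-patch uapi in <linux/psp-sev.h> (error handling trimmed for brevity):

#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/psp-sev.h>

int main(void)
{
        struct sev_user_data_get_id2 id2 = { 0 };
        struct sev_issue_cmd cmd = {
                .cmd  = SEV_GET_ID2,
                .data = (uint64_t)(uintptr_t)&id2,
        };
        int fd = open("/dev/sev", O_RDWR);
        void *buf;

        ioctl(fd, SEV_ISSUE_CMD, &cmd); /* pass 1: learn id2.length */
        buf = malloc(id2.length);
        id2.address = (uint64_t)(uintptr_t)buf;
        ioctl(fd, SEV_ISSUE_CMD, &cmd); /* pass 2: fetch the ID blob */

        free(buf);
        close(fd);
        return 0;
}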
diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile
index bdc27970f95f..145e50bdbf16 100644
--- a/drivers/crypto/ccree/Makefile
+++ b/drivers/crypto/ccree/Makefile
@@ -1,4 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2# Copyright (C) 2012-2019 ARM Limited (or its affiliates).
2 3
3obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o 4obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
4ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o 5ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index a3527c00b29a..7aa4cbe19a86 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -1,5 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <linux/module.h> 5#include <linux/module.h>
@@ -23,12 +23,8 @@
23#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE) 23#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
24#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE) 24#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
25 25
26#define AES_CCM_RFC4309_NONCE_SIZE 3
27#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE 26#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
28 27
29/* Value of each ICV_CMP byte (of 8) in case of success */
30#define ICV_VERIF_OK 0x01
31
32struct cc_aead_handle { 28struct cc_aead_handle {
33 cc_sram_addr_t sram_workspace_addr; 29 cc_sram_addr_t sram_workspace_addr;
34 struct list_head aead_list; 30 struct list_head aead_list;
@@ -220,6 +216,10 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
220 struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req); 216 struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
221 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 217 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
222 218
219 /* BACKLOG notification */
220 if (err == -EINPROGRESS)
221 goto done;
222
223 cc_unmap_aead_request(dev, areq); 223 cc_unmap_aead_request(dev, areq);
224 224
225 /* Restore ordinary iv pointer */ 225 /* Restore ordinary iv pointer */
@@ -424,7 +424,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
424/* This function prepares the user key so it can pass to the hmac processing 424/* This function prepares the user key so it can pass to the hmac processing
425 * (copy to internal buffer or hash in case of key longer than block) 425 * (copy to internal buffer or hash in case of key longer than block)
426 */ 426 */
427static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, 427static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
428 unsigned int keylen) 428 unsigned int keylen)
429{ 429{
430 dma_addr_t key_dma_addr = 0; 430 dma_addr_t key_dma_addr = 0;
@@ -437,6 +437,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
437 unsigned int hashmode; 437 unsigned int hashmode;
438 unsigned int idx = 0; 438 unsigned int idx = 0;
439 int rc = 0; 439 int rc = 0;
440 u8 *key = NULL;
440 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; 441 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
441 dma_addr_t padded_authkey_dma_addr = 442 dma_addr_t padded_authkey_dma_addr =
442 ctx->auth_state.hmac.padded_authkey_dma_addr; 443 ctx->auth_state.hmac.padded_authkey_dma_addr;
@@ -455,11 +456,17 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
455 } 456 }
456 457
457 if (keylen != 0) { 458 if (keylen != 0) {
459
460 key = kmemdup(authkey, keylen, GFP_KERNEL);
461 if (!key)
462 return -ENOMEM;
463
458 key_dma_addr = dma_map_single(dev, (void *)key, keylen, 464 key_dma_addr = dma_map_single(dev, (void *)key, keylen,
459 DMA_TO_DEVICE); 465 DMA_TO_DEVICE);
460 if (dma_mapping_error(dev, key_dma_addr)) { 466 if (dma_mapping_error(dev, key_dma_addr)) {
461 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", 467 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
462 key, keylen); 468 key, keylen);
469 kzfree(key);
463 return -ENOMEM; 470 return -ENOMEM;
464 } 471 }
465 if (keylen > blocksize) { 472 if (keylen > blocksize) {
@@ -542,6 +549,8 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
542 if (key_dma_addr) 549 if (key_dma_addr)
543 dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE); 550 dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
544 551
552 kzfree(key);
553
545 return rc; 554 return rc;
546} 555}
547 556
@@ -650,6 +659,39 @@ setkey_error:
650 return rc; 659 return rc;
651} 660}
652 661
662static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
663 unsigned int keylen)
664{
665 struct crypto_authenc_keys keys;
666 u32 flags;
667 int err;
668
669 err = crypto_authenc_extractkeys(&keys, key, keylen);
670 if (unlikely(err))
671 goto badkey;
672
673 err = -EINVAL;
674 if (keys.enckeylen != DES3_EDE_KEY_SIZE)
675 goto badkey;
676
677 flags = crypto_aead_get_flags(aead);
678 err = __des3_verify_key(&flags, keys.enckey);
679 if (unlikely(err)) {
680 crypto_aead_set_flags(aead, flags);
681 goto out;
682 }
683
684 err = cc_aead_setkey(aead, key, keylen);
685
686out:
687 memzero_explicit(&keys, sizeof(keys));
688 return err;
689
690badkey:
691 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
692 goto out;
693}
694
653static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, 695static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
654 unsigned int keylen) 696 unsigned int keylen)
655{ 697{
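cc_des3_aead_setkey() above validates only the cipher half of an authenc() key blob: crypto_authenc_extractkeys() splits the blob into auth and enc parts, then the 3DES portion is length- and weakness-checked before the generic setkey runs. The core of that sequence, as a sketch (the example_* name is hypothetical):

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <linux/string.h>

static int example_check_authenc_des3(struct crypto_aead *aead,
                                      const u8 *key, unsigned int keylen)
{
        struct crypto_authenc_keys keys;
        u32 flags = crypto_aead_get_flags(aead);
        int err;

        err = crypto_authenc_extractkeys(&keys, key, keylen);
        if (err)
                return err;
        if (keys.enckeylen != DES3_EDE_KEY_SIZE)
                return -EINVAL;

        err = __des3_verify_key(&flags, keys.enckey);
        memzero_explicit(&keys, sizeof(keys));  /* drop key pointers */
        return err;
}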
@@ -731,7 +773,7 @@ static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
731 dev_dbg(dev, "ASSOC buffer type DLLI\n"); 773 dev_dbg(dev, "ASSOC buffer type DLLI\n");
732 hw_desc_init(&desc[idx]); 774 hw_desc_init(&desc[idx]);
733 set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src), 775 set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
734 areq->assoclen, NS_BIT); 776 areq_ctx->assoclen, NS_BIT);
735 set_flow_mode(&desc[idx], flow_mode); 777 set_flow_mode(&desc[idx], flow_mode);
736 if (ctx->auth_mode == DRV_HASH_XCBC_MAC && 778 if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
737 areq_ctx->cryptlen > 0) 779 areq_ctx->cryptlen > 0)
@@ -1080,9 +1122,11 @@ static void cc_proc_header_desc(struct aead_request *req,
1080 struct cc_hw_desc desc[], 1122 struct cc_hw_desc desc[],
1081 unsigned int *seq_size) 1123 unsigned int *seq_size)
1082{ 1124{
1125 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1083 unsigned int idx = *seq_size; 1126 unsigned int idx = *seq_size;
1127
1084 /* Hash associated data */ 1128 /* Hash associated data */
1085 if (req->assoclen > 0) 1129 if (areq_ctx->assoclen > 0)
1086 cc_set_assoc_desc(req, DIN_HASH, desc, &idx); 1130 cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1087 1131
1088 /* Hash IV */ 1132 /* Hash IV */
@@ -1159,9 +1203,9 @@ static void cc_mlli_to_sram(struct aead_request *req,
1159 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 1203 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1160 struct device *dev = drvdata_to_dev(ctx->drvdata); 1204 struct device *dev = drvdata_to_dev(ctx->drvdata);
1161 1205
1162 if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI || 1206 if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1163 req_ctx->data_buff_type == CC_DMA_BUF_MLLI || 1207 req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
1164 !req_ctx->is_single_pass) { 1208 !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
1165 dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n", 1209 dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
1166 (unsigned int)ctx->drvdata->mlli_sram_addr, 1210 (unsigned int)ctx->drvdata->mlli_sram_addr,
1167 req_ctx->mlli_params.mlli_len); 1211 req_ctx->mlli_params.mlli_len);
@@ -1310,7 +1354,7 @@ static int validate_data_size(struct cc_aead_ctx *ctx,
1310{ 1354{
1311 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 1355 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1312 struct device *dev = drvdata_to_dev(ctx->drvdata); 1356 struct device *dev = drvdata_to_dev(ctx->drvdata);
1313 unsigned int assoclen = req->assoclen; 1357 unsigned int assoclen = areq_ctx->assoclen;
1314 unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ? 1358 unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1315 (req->cryptlen - ctx->authsize) : req->cryptlen; 1359 (req->cryptlen - ctx->authsize) : req->cryptlen;
1316 1360
@@ -1469,7 +1513,7 @@ static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
1469 idx++; 1513 idx++;
1470 1514
1471 /* process assoc data */ 1515 /* process assoc data */
1472 if (req->assoclen > 0) { 1516 if (req_ctx->assoclen > 0) {
1473 cc_set_assoc_desc(req, DIN_HASH, desc, &idx); 1517 cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1474 } else { 1518 } else {
1475 hw_desc_init(&desc[idx]); 1519 hw_desc_init(&desc[idx]);
@@ -1561,7 +1605,7 @@ static int config_ccm_adata(struct aead_request *req)
1561 * NIST Special Publication 800-38C 1605 * NIST Special Publication 800-38C
1562 */ 1606 */
1563 *b0 |= (8 * ((m - 2) / 2)); 1607 *b0 |= (8 * ((m - 2) / 2));
1564 if (req->assoclen > 0) 1608 if (req_ctx->assoclen > 0)
1565 *b0 |= 64; /* Enable bit 6 if Adata exists. */ 1609 *b0 |= 64; /* Enable bit 6 if Adata exists. */
1566 1610
1567 rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */ 1611 rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
@@ -1572,7 +1616,7 @@ static int config_ccm_adata(struct aead_request *req)
1572 /* END of "taken from crypto/ccm.c" */ 1616 /* END of "taken from crypto/ccm.c" */
1573 1617
1574 /* l(a) - size of associated data. */ 1618 /* l(a) - size of associated data. */
1575 req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen); 1619 req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
1576 1620
1577 memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1); 1621 memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
1578 req->iv[15] = 1; 1622 req->iv[15] = 1;
@@ -1604,7 +1648,7 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req)
1604 memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv, 1648 memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
1605 CCM_BLOCK_IV_SIZE); 1649 CCM_BLOCK_IV_SIZE);
1606 req->iv = areq_ctx->ctr_iv; 1650 req->iv = areq_ctx->ctr_iv;
1607 req->assoclen -= CCM_BLOCK_IV_SIZE; 1651 areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
1608} 1652}
1609 1653
1610static void cc_set_ghash_desc(struct aead_request *req, 1654static void cc_set_ghash_desc(struct aead_request *req,
@@ -1812,7 +1856,7 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
1812 // for gcm and rfc4106. 1856 // for gcm and rfc4106.
1813 cc_set_ghash_desc(req, desc, seq_size); 1857 cc_set_ghash_desc(req, desc, seq_size);
1814 /* process(ghash) assoc data */ 1858 /* process(ghash) assoc data */
1815 if (req->assoclen > 0) 1859 if (req_ctx->assoclen > 0)
1816 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size); 1860 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1817 cc_set_gctr_desc(req, desc, seq_size); 1861 cc_set_gctr_desc(req, desc, seq_size);
1818 /* process(gctr+ghash) */ 1862 /* process(gctr+ghash) */
@@ -1836,8 +1880,8 @@ static int config_gcm_context(struct aead_request *req)
1836 (req->cryptlen - ctx->authsize); 1880 (req->cryptlen - ctx->authsize);
1837 __be32 counter = cpu_to_be32(2); 1881 __be32 counter = cpu_to_be32(2);
1838 1882
1839 dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n", 1883 dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
1840 __func__, cryptlen, req->assoclen, ctx->authsize); 1884 __func__, cryptlen, req_ctx->assoclen, ctx->authsize);
1841 1885
1842 memset(req_ctx->hkey, 0, AES_BLOCK_SIZE); 1886 memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1843 1887
@@ -1853,7 +1897,7 @@ static int config_gcm_context(struct aead_request *req)
1853 if (!req_ctx->plaintext_authenticate_only) { 1897 if (!req_ctx->plaintext_authenticate_only) {
1854 __be64 temp64; 1898 __be64 temp64;
1855 1899
1856 temp64 = cpu_to_be64(req->assoclen * 8); 1900 temp64 = cpu_to_be64(req_ctx->assoclen * 8);
1857 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64)); 1901 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1858 temp64 = cpu_to_be64(cryptlen * 8); 1902 temp64 = cpu_to_be64(cryptlen * 8);
1859 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8); 1903 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1863,8 +1907,8 @@ static int config_gcm_context(struct aead_request *req)
1863 */ 1907 */
1864 __be64 temp64; 1908 __be64 temp64;
1865 1909
1866 temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE + 1910 temp64 = cpu_to_be64((req_ctx->assoclen +
1867 cryptlen) * 8); 1911 GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
1868 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64)); 1912 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1869 temp64 = 0; 1913 temp64 = 0;
1870 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8); 1914 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1884,7 +1928,7 @@ static void cc_proc_rfc4_gcm(struct aead_request *req)
1884 memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv, 1928 memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
1885 GCM_BLOCK_RFC4_IV_SIZE); 1929 GCM_BLOCK_RFC4_IV_SIZE);
1886 req->iv = areq_ctx->ctr_iv; 1930 req->iv = areq_ctx->ctr_iv;
1887 req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE; 1931 areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
1888} 1932}
1889 1933
1890static int cc_proc_aead(struct aead_request *req, 1934static int cc_proc_aead(struct aead_request *req,
@@ -1909,7 +1953,7 @@ static int cc_proc_aead(struct aead_request *req,
1909 /* Check data length according to mode */ 1953 /* Check data length according to mode */
1910 if (validate_data_size(ctx, direct, req)) { 1954 if (validate_data_size(ctx, direct, req)) {
1911 dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n", 1955 dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
1912 req->cryptlen, req->assoclen); 1956 req->cryptlen, areq_ctx->assoclen);
1913 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN); 1957 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
1914 return -EINVAL; 1958 return -EINVAL;
1915 } 1959 }
@@ -2058,8 +2102,11 @@ static int cc_aead_encrypt(struct aead_request *req)
2058 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 2102 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2059 int rc; 2103 int rc;
2060 2104
2105 memset(areq_ctx, 0, sizeof(*areq_ctx));
2106
2061 /* No generated IV required */ 2107 /* No generated IV required */
2062 areq_ctx->backup_iv = req->iv; 2108 areq_ctx->backup_iv = req->iv;
2109 areq_ctx->assoclen = req->assoclen;
2063 areq_ctx->backup_giv = NULL; 2110 areq_ctx->backup_giv = NULL;
2064 areq_ctx->is_gcm4543 = false; 2111 areq_ctx->is_gcm4543 = false;
2065 2112
@@ -2087,8 +2134,11 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
2087 goto out; 2134 goto out;
2088 } 2135 }
2089 2136
2137 memset(areq_ctx, 0, sizeof(*areq_ctx));
2138
2090 /* No generated IV required */ 2139 /* No generated IV required */
2091 areq_ctx->backup_iv = req->iv; 2140 areq_ctx->backup_iv = req->iv;
2141 areq_ctx->assoclen = req->assoclen;
2092 areq_ctx->backup_giv = NULL; 2142 areq_ctx->backup_giv = NULL;
2093 areq_ctx->is_gcm4543 = true; 2143 areq_ctx->is_gcm4543 = true;
2094 2144
@@ -2106,8 +2156,11 @@ static int cc_aead_decrypt(struct aead_request *req)
2106 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 2156 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2107 int rc; 2157 int rc;
2108 2158
2159 memset(areq_ctx, 0, sizeof(*areq_ctx));
2160
2109 /* No generated IV required */ 2161 /* No generated IV required */
2110 areq_ctx->backup_iv = req->iv; 2162 areq_ctx->backup_iv = req->iv;
2163 areq_ctx->assoclen = req->assoclen;
2111 areq_ctx->backup_giv = NULL; 2164 areq_ctx->backup_giv = NULL;
2112 areq_ctx->is_gcm4543 = false; 2165 areq_ctx->is_gcm4543 = false;
2113 2166
@@ -2133,8 +2186,11 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
2133 goto out; 2186 goto out;
2134 } 2187 }
2135 2188
2189 memset(areq_ctx, 0, sizeof(*areq_ctx));
2190
2136 /* No generated IV required */ 2191 /* No generated IV required */
2137 areq_ctx->backup_iv = req->iv; 2192 areq_ctx->backup_iv = req->iv;
2193 areq_ctx->assoclen = req->assoclen;
2138 areq_ctx->backup_giv = NULL; 2194 areq_ctx->backup_giv = NULL;
2139 2195
2140 areq_ctx->is_gcm4543 = true; 2196 areq_ctx->is_gcm4543 = true;
@@ -2250,8 +2306,11 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
2250 goto out; 2306 goto out;
2251 } 2307 }
2252 2308
2309 memset(areq_ctx, 0, sizeof(*areq_ctx));
2310
2253 /* No generated IV required */ 2311 /* No generated IV required */
2254 areq_ctx->backup_iv = req->iv; 2312 areq_ctx->backup_iv = req->iv;
2313 areq_ctx->assoclen = req->assoclen;
2255 areq_ctx->backup_giv = NULL; 2314 areq_ctx->backup_giv = NULL;
2256 2315
2257 areq_ctx->plaintext_authenticate_only = false; 2316 areq_ctx->plaintext_authenticate_only = false;
@@ -2273,11 +2332,14 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
2273 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 2332 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2274 int rc; 2333 int rc;
2275 2334
2335 memset(areq_ctx, 0, sizeof(*areq_ctx));
2336
2276 //plaintext is not encrypted with rfc4543 2337 //plaintext is not encrypted with rfc4543
2277 areq_ctx->plaintext_authenticate_only = true; 2338 areq_ctx->plaintext_authenticate_only = true;
2278 2339
2279 /* No generated IV required */ 2340 /* No generated IV required */
2280 areq_ctx->backup_iv = req->iv; 2341 areq_ctx->backup_iv = req->iv;
2342 areq_ctx->assoclen = req->assoclen;
2281 areq_ctx->backup_giv = NULL; 2343 areq_ctx->backup_giv = NULL;
2282 2344
2283 cc_proc_rfc4_gcm(req); 2345 cc_proc_rfc4_gcm(req);
@@ -2305,8 +2367,11 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
2305 goto out; 2367 goto out;
2306 } 2368 }
2307 2369
2370 memset(areq_ctx, 0, sizeof(*areq_ctx));
2371
2308 /* No generated IV required */ 2372 /* No generated IV required */
2309 areq_ctx->backup_iv = req->iv; 2373 areq_ctx->backup_iv = req->iv;
2374 areq_ctx->assoclen = req->assoclen;
2310 areq_ctx->backup_giv = NULL; 2375 areq_ctx->backup_giv = NULL;
2311 2376
2312 areq_ctx->plaintext_authenticate_only = false; 2377 areq_ctx->plaintext_authenticate_only = false;
@@ -2328,11 +2393,14 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
2328 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 2393 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2329 int rc; 2394 int rc;
2330 2395
2396 memset(areq_ctx, 0, sizeof(*areq_ctx));
2397
2331 //plaintext is not decrypted with rfc4543 2398 //plaintext is not decrypted with rfc4543
2332 areq_ctx->plaintext_authenticate_only = true; 2399 areq_ctx->plaintext_authenticate_only = true;
2333 2400
2334 /* No generated IV required */ 2401 /* No generated IV required */
2335 areq_ctx->backup_iv = req->iv; 2402 areq_ctx->backup_iv = req->iv;
2403 areq_ctx->assoclen = req->assoclen;
2336 areq_ctx->backup_giv = NULL; 2404 areq_ctx->backup_giv = NULL;
2337 2405
2338 cc_proc_rfc4_gcm(req); 2406 cc_proc_rfc4_gcm(req);
@@ -2372,7 +2440,7 @@ static struct cc_alg_template aead_algs[] = {
2372 .driver_name = "authenc-hmac-sha1-cbc-des3-ccree", 2440 .driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
2373 .blocksize = DES3_EDE_BLOCK_SIZE, 2441 .blocksize = DES3_EDE_BLOCK_SIZE,
2374 .template_aead = { 2442 .template_aead = {
2375 .setkey = cc_aead_setkey, 2443 .setkey = cc_des3_aead_setkey,
2376 .setauthsize = cc_aead_setauthsize, 2444 .setauthsize = cc_aead_setauthsize,
2377 .encrypt = cc_aead_encrypt, 2445 .encrypt = cc_aead_encrypt,
2378 .decrypt = cc_aead_decrypt, 2446 .decrypt = cc_aead_decrypt,
@@ -2412,7 +2480,7 @@ static struct cc_alg_template aead_algs[] = {
2412 .driver_name = "authenc-hmac-sha256-cbc-des3-ccree", 2480 .driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
2413 .blocksize = DES3_EDE_BLOCK_SIZE, 2481 .blocksize = DES3_EDE_BLOCK_SIZE,
2414 .template_aead = { 2482 .template_aead = {
2415 .setkey = cc_aead_setkey, 2483 .setkey = cc_des3_aead_setkey,
2416 .setauthsize = cc_aead_setauthsize, 2484 .setauthsize = cc_aead_setauthsize,
2417 .encrypt = cc_aead_encrypt, 2485 .encrypt = cc_aead_encrypt,
2418 .decrypt = cc_aead_decrypt, 2486 .decrypt = cc_aead_decrypt,
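One hunk above is easy to miss: cc_get_plain_hmac_key() now kmemdup()s the caller's authkey before dma_map_single(). The caller's buffer may live on the stack or in read-only data, neither of which is safe to DMA-map, so the driver maps a private copy and kzfree()s it on every exit path. The pattern in isolation (cleanup of the success path elided):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static int example_map_authkey(struct device *dev, const u8 *authkey,
                               unsigned int keylen, dma_addr_t *dma)
{
        u8 *key = kmemdup(authkey, keylen, GFP_KERNEL);

        if (!key)
                return -ENOMEM;

        *dma = dma_map_single(dev, key, keylen, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *dma)) {
                kzfree(key);    /* zeroes the copy before freeing */
                return -ENOMEM;
        }

        /* ... use, then dma_unmap_single() and kzfree(key) ... */
        return 0;
}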
diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h
index 5edf3b351fa4..e51724b96c56 100644
--- a/drivers/crypto/ccree/cc_aead.h
+++ b/drivers/crypto/ccree/cc_aead.h
@@ -1,5 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4/* \file cc_aead.h 4/* \file cc_aead.h
5 * ARM CryptoCell AEAD Crypto API 5 * ARM CryptoCell AEAD Crypto API
@@ -67,6 +67,7 @@ struct aead_req_ctx {
67 u8 backup_mac[MAX_MAC_SIZE]; 67 u8 backup_mac[MAX_MAC_SIZE];
68 u8 *backup_iv; /*store iv for generated IV flow*/ 68 u8 *backup_iv; /*store iv for generated IV flow*/
69 u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/ 69 u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
70 u32 assoclen; /* internal assoclen */
70 dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */ 71 dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
71 /* buffer for internal ccm configurations */ 72 /* buffer for internal ccm configurations */
72 dma_addr_t ccm_iv0_dma_addr; 73 dma_addr_t ccm_iv0_dma_addr;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 0ee1c52da0a4..c81ad33f9115 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -1,5 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4#include <crypto/internal/aead.h> 4#include <crypto/internal/aead.h>
5#include <crypto/authenc.h> 5#include <crypto/authenc.h>
@@ -65,7 +65,7 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
65{ 65{
66 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 66 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
67 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 67 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
68 u32 skip = req->assoclen + req->cryptlen; 68 u32 skip = areq_ctx->assoclen + req->cryptlen;
69 69
70 if (areq_ctx->is_gcm4543) 70 if (areq_ctx->is_gcm4543)
71 skip += crypto_aead_ivsize(tfm); 71 skip += crypto_aead_ivsize(tfm);
@@ -83,24 +83,17 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
  */
 static unsigned int cc_get_sgl_nents(struct device *dev,
                                      struct scatterlist *sg_list,
-                                     unsigned int nbytes, u32 *lbytes,
-                                     bool *is_chained)
+                                     unsigned int nbytes, u32 *lbytes)
 {
     unsigned int nents = 0;
 
     while (nbytes && sg_list) {
-        if (sg_list->length) {
-            nents++;
-            /* get the number of bytes in the last entry */
-            *lbytes = nbytes;
-            nbytes -= (sg_list->length > nbytes) ?
-                    nbytes : sg_list->length;
-            sg_list = sg_next(sg_list);
-        } else {
-            sg_list = (struct scatterlist *)sg_page(sg_list);
-            if (is_chained)
-                *is_chained = true;
-        }
+        nents++;
+        /* get the number of bytes in the last entry */
+        *lbytes = nbytes;
+        nbytes -= (sg_list->length > nbytes) ?
+                nbytes : sg_list->length;
+        sg_list = sg_next(sg_list);
     }
     dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
     return nents;
@@ -140,9 +133,9 @@ void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
 void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
                         u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
 {
-    u32 nents, lbytes;
+    u32 nents;
 
-    nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
+    nents = sg_nents_for_len(sg, end);
     sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
                    (direct == CC_SG_TO_BUF));
 }
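
cc_copy_sg_portion now defers entry counting to the generic sg_nents_for_len(). The reason the driver-local is_chained bookkeeping could be dropped everywhere in this patch is that sg_next() already follows chained scatterlists transparently. For reference, a paraphrase of the generic helper's logic (this is a sketch, not the driver's code):

    /*
     * Roughly what lib/scatterlist.c's sg_nents_for_len() does: count
     * entries until 'len' bytes are covered, following chain entries
     * via sg_next().
     */
    int sg_nents_for_len_sketch(struct scatterlist *sg, u64 len)
    {
        int nents;
        u64 total;

        if (!len)
            return 0;

        for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
            nents++;
            total += sg->length;
            if (total >= len)
                return nents;
        }
        return -EINVAL; /* list shorter than the requested length */
    }

One caveat worth noting: sg_nents_for_len() returns a negative errno when the list is too short, and this call site stores the result into an unsigned variable without checking it.
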
@@ -314,40 +307,10 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
     sgl_data->num_of_buffers++;
 }
 
-static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
-                         enum dma_data_direction direction)
-{
-    u32 i, j;
-    struct scatterlist *l_sg = sg;
-
-    for (i = 0; i < nents; i++) {
-        if (!l_sg)
-            break;
-        if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
-            dev_err(dev, "dma_map_page() sg buffer failed\n");
-            goto err;
-        }
-        l_sg = sg_next(l_sg);
-    }
-    return nents;
-
-err:
-    /* Restore mapped parts */
-    for (j = 0; j < i; j++) {
-        if (!sg)
-            break;
-        dma_unmap_sg(dev, sg, 1, direction);
-        sg = sg_next(sg);
-    }
-    return 0;
-}
-
 static int cc_map_sg(struct device *dev, struct scatterlist *sg,
                      unsigned int nbytes, int direction, u32 *nents,
                      u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 {
-    bool is_chained = false;
-
     if (sg_is_last(sg)) {
         /* One entry only case -set to DLLI */
         if (dma_map_sg(dev, sg, 1, direction) != 1) {
@@ -361,35 +324,21 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
         *nents = 1;
         *mapped_nents = 1;
     } else {  /*sg_is_last*/
-        *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
-                                  &is_chained);
+        *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
         if (*nents > max_sg_nents) {
             *nents = 0;
             dev_err(dev, "Too many fragments. current %d max %d\n",
                     *nents, max_sg_nents);
             return -ENOMEM;
         }
-        if (!is_chained) {
-            /* In case of mmu the number of mapped nents might
-             * be changed from the original sgl nents
-             */
-            *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
-            if (*mapped_nents == 0) {
-                *nents = 0;
-                dev_err(dev, "dma_map_sg() sg buffer failed\n");
-                return -ENOMEM;
-            }
-        } else {
-            /*In this case the driver maps entry by entry so it
-             * must have the same nents before and after map
-             */
-            *mapped_nents = cc_dma_map_sg(dev, sg, *nents,
-                                          direction);
-            if (*mapped_nents != *nents) {
-                *nents = *mapped_nents;
-                dev_err(dev, "dma_map_sg() sg buffer failed\n");
-                return -ENOMEM;
-            }
-        }
+        /* In case of mmu the number of mapped nents might
+         * be changed from the original sgl nents
+         */
+        *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
+        if (*mapped_nents == 0) {
+            *nents = 0;
+            dev_err(dev, "dma_map_sg() sg buffer failed\n");
+            return -ENOMEM;
+        }
     }
 
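
With the per-entry mapping path gone, cc_map_sg() leans entirely on the standard dma_map_sg() contract. A minimal sketch of that contract, using only core DMA-API calls (the function name here is made up):

    /*
     * dma_map_sg() may return fewer segments than it was given -- an
     * IOMMU is free to coalesce -- but dma_unmap_sg() must always be
     * called with the ORIGINAL nents, never the mapped count.
     */
    static int map_then_unmap(struct device *dev, struct scatterlist *sgl,
                              int nents, enum dma_data_direction dir)
    {
        int mapped = dma_map_sg(dev, sgl, nents, dir);

        if (!mapped)
            return -ENOMEM; /* zero means nothing was mapped */

        /* ... hand 'mapped' DMA segments to the engine ... */

        dma_unmap_sg(dev, sgl, nents, dir); /* original nents */
        return 0;
    }

The removed cc_dma_map_sg() mapped one entry at a time, which both defeated IOMMU coalescing and made error unwinding fragile; it existed only to serve the now-dead chained-sg special case.
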
@@ -457,7 +406,7 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
         dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
                 &req_ctx->gen_ctx.iv_dma_addr, ivsize);
         dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
-                         ivsize, DMA_TO_DEVICE);
+                         ivsize, DMA_BIDIRECTIONAL);
     }
     /* Release pool */
     if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
@@ -499,7 +448,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
         dump_byte_array("iv", (u8 *)info, ivsize);
         req_ctx->gen_ctx.iv_dma_addr =
             dma_map_single(dev, (void *)info,
-                           ivsize, DMA_TO_DEVICE);
+                           ivsize, DMA_BIDIRECTIONAL);
         if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
             dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
                     ivsize, info);
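
The mapping direction flips from DMA_TO_DEVICE to DMA_BIDIRECTIONAL because, with this series, the engine writes the next IV back into the same buffer it read the input IV from. A hedged sketch of that round trip (names are illustrative):

    /*
     * With a TO_DEVICE mapping the CPU could see stale bytes on
     * non-coherent systems after the engine writes the output IV;
     * BIDIRECTIONAL makes both transfers legal.
     */
    static int iv_round_trip(struct device *dev, u8 *iv, unsigned int ivsize)
    {
        dma_addr_t dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);

        if (dma_mapping_error(dev, dma))
            return -ENOMEM;

        /* ... queue descriptors that read and then overwrite the IV ... */

        dma_unmap_single(dev, dma, ivsize, DMA_BIDIRECTIONAL);
        /* iv[] now holds the engine-produced next IV */
        return 0;
    }
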
@@ -568,11 +517,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 {
     struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
     unsigned int hw_iv_size = areq_ctx->hw_iv_size;
-    struct crypto_aead *tfm = crypto_aead_reqtfm(req);
     struct cc_drvdata *drvdata = dev_get_drvdata(dev);
-    u32 dummy;
-    bool chained;
-    u32 size_to_unmap = 0;
 
     if (areq_ctx->mac_buf_dma_addr) {
         dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
@@ -612,6 +557,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
     if (areq_ctx->gen_ctx.iv_dma_addr) {
         dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
                          hw_iv_size, DMA_BIDIRECTIONAL);
+        kzfree(areq_ctx->gen_ctx.iv);
     }
 
     /* Release pool */
@@ -628,23 +574,13 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 
     dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
             sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
-            req->assoclen, req->cryptlen);
-    size_to_unmap = req->assoclen + req->cryptlen;
-    if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
-        size_to_unmap += areq_ctx->req_authsize;
-    if (areq_ctx->is_gcm4543)
-        size_to_unmap += crypto_aead_ivsize(tfm);
+            areq_ctx->assoclen, req->cryptlen);
 
-    dma_unmap_sg(dev, req->src,
-                 cc_get_sgl_nents(dev, req->src, size_to_unmap,
-                                  &dummy, &chained),
-                 DMA_BIDIRECTIONAL);
+    dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
     if (req->src != req->dst) {
         dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
                 sg_virt(req->dst));
-        dma_unmap_sg(dev, req->dst,
-                     cc_get_sgl_nents(dev, req->dst, size_to_unmap,
-                                      &dummy, &chained),
-                     DMA_BIDIRECTIONAL);
+        dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
+                     DMA_BIDIRECTIONAL);
     }
     if (drvdata->coherent &&
@@ -658,55 +594,10 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
     }
 }
 
-static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
-                                 unsigned int sgl_nents, unsigned int authsize,
-                                 u32 last_entry_data_size,
-                                 bool *is_icv_fragmented)
+static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
+                           u32 last_entry_data_size)
 {
-    unsigned int icv_max_size = 0;
-    unsigned int icv_required_size = authsize > last_entry_data_size ?
-                    (authsize - last_entry_data_size) :
-                    authsize;
-    unsigned int nents;
-    unsigned int i;
-
-    if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
-        *is_icv_fragmented = false;
-        return 0;
-    }
-
-    for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
-        if (!sgl)
-            break;
-        sgl = sg_next(sgl);
-    }
-
-    if (sgl)
-        icv_max_size = sgl->length;
-
-    if (last_entry_data_size > authsize) {
-        /* ICV attached to data in last entry (not fragmented!) */
-        nents = 0;
-        *is_icv_fragmented = false;
-    } else if (last_entry_data_size == authsize) {
-        /* ICV placed in whole last entry (not fragmented!) */
-        nents = 1;
-        *is_icv_fragmented = false;
-    } else if (icv_max_size > icv_required_size) {
-        nents = 1;
-        *is_icv_fragmented = true;
-    } else if (icv_max_size == icv_required_size) {
-        nents = 2;
-        *is_icv_fragmented = true;
-    } else {
-        dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
-                MAX_ICV_NENTS_SUPPORTED);
-        nents = -1; /*unsupported*/
-    }
-    dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
-            (*is_icv_fragmented ? "true" : "false"), nents);
-
-    return nents;
+    return ((sgl_nents > 1) && (last_entry_data_size < authsize));
 }
 
 static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
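
The 45-line cc_get_aead_icv_nents() collapses into a single predicate: the ICV is fragmented exactly when the list has more than one entry and its last entry cannot hold the whole tag. A few worked cases, as a throwaway sketch:

    static void icv_frag_examples(void)
    {
        bool a = cc_is_icv_frag(3, 16, 10); /* true:  10 < 16 and nents > 1 */
        bool b = cc_is_icv_frag(1, 16, 10); /* false: a single entry never fragments */
        bool c = cc_is_icv_frag(3, 16, 16); /* false: the tag fits in the tail */

        (void)a; (void)b; (void)c;
    }

All the old helper's case analysis (tag attached to data, tag filling the last entry, tag split across one or two entries) only ever fed this one boolean; the caller never used the returned entry count.
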
@@ -717,19 +608,27 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
     struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
     unsigned int hw_iv_size = areq_ctx->hw_iv_size;
     struct device *dev = drvdata_to_dev(drvdata);
+    gfp_t flags = cc_gfp_flags(&req->base);
     int rc = 0;
 
     if (!req->iv) {
         areq_ctx->gen_ctx.iv_dma_addr = 0;
+        areq_ctx->gen_ctx.iv = NULL;
         goto chain_iv_exit;
     }
 
-    areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
-                                                   hw_iv_size,
-                                                   DMA_BIDIRECTIONAL);
+    areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
+    if (!areq_ctx->gen_ctx.iv)
+        return -ENOMEM;
+
+    areq_ctx->gen_ctx.iv_dma_addr =
+        dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
+                       DMA_BIDIRECTIONAL);
     if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
         dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
                 hw_iv_size, req->iv);
+        kzfree(areq_ctx->gen_ctx.iv);
+        areq_ctx->gen_ctx.iv = NULL;
         rc = -ENOMEM;
         goto chain_iv_exit;
     }
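
This is the heart of the "don't map the IV on the stack" fix: req->iv may point at stack or vmalloc memory, which must never be handed to dma_map_single(). The pattern in isolation, as a sketch (the helper name is made up; the driver open-codes it inside cc_aead_chain_iv()):

    /*
     * Duplicate a caller-provided buffer into kmalloc'd memory before
     * DMA-mapping it; kzfree() on the error/teardown path also scrubs
     * the potentially sensitive bytes.
     */
    static u8 *dma_safe_iv(struct device *dev, const u8 *iv, unsigned int len,
                           dma_addr_t *dma, gfp_t flags)
    {
        u8 *copy = kmemdup(iv, len, flags);

        if (!copy)
            return NULL;

        *dma = dma_map_single(dev, copy, len, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *dma)) {
            kzfree(copy); /* zeroes before freeing */
            return NULL;
        }
        return copy;
    }

The matching kzfree() of gen_ctx.iv added to cc_unmap_aead_request() above closes the lifetime.
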
@@ -760,11 +659,9 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
 {
     struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
     int rc = 0;
-    u32 mapped_nents = 0;
-    struct scatterlist *current_sg = req->src;
+    int mapped_nents = 0;
     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-    unsigned int sg_index = 0;
-    u32 size_of_assoc = req->assoclen;
+    unsigned int size_of_assoc = areq_ctx->assoclen;
     struct device *dev = drvdata_to_dev(drvdata);
 
     if (areq_ctx->is_gcm4543)
@@ -775,7 +672,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
         goto chain_assoc_exit;
     }
 
-    if (req->assoclen == 0) {
+    if (areq_ctx->assoclen == 0) {
         areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
         areq_ctx->assoc.nents = 0;
         areq_ctx->assoc.mlli_nents = 0;
@@ -785,26 +682,10 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
         goto chain_assoc_exit;
     }
 
-    //iterate over the sgl to see how many entries are for associated data
-    //it is assumed that if we reach here , the sgl is already mapped
-    sg_index = current_sg->length;
-    //the first entry in the scatter list contains all the associated data
-    if (sg_index > size_of_assoc) {
-        mapped_nents++;
-    } else {
-        while (sg_index <= size_of_assoc) {
-            current_sg = sg_next(current_sg);
-            /* if have reached the end of the sgl, then this is
-             * unexpected
-             */
-            if (!current_sg) {
-                dev_err(dev, "reached end of sg list. unexpected\n");
-                return -EINVAL;
-            }
-            sg_index += current_sg->length;
-            mapped_nents++;
-        }
-    }
+    mapped_nents = sg_nents_for_len(req->src, size_of_assoc);
+    if (mapped_nents < 0)
+        return mapped_nents;
+
     if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
         dev_err(dev, "Too many fragments. current %d max %d\n",
                 mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
@@ -835,7 +716,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
               cc_dma_buf_type(areq_ctx->assoc_buff_type),
               areq_ctx->assoc.nents);
         cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
-                        req->assoclen, 0, is_last,
+                        areq_ctx->assoclen, 0, is_last,
                         &areq_ctx->assoc.mlli_nents);
         areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
     }
@@ -850,39 +731,32 @@ static void cc_prepare_aead_data_dlli(struct aead_request *req,
     struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
     enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
     unsigned int authsize = areq_ctx->req_authsize;
+    struct scatterlist *sg;
+    ssize_t offset;
 
     areq_ctx->is_icv_fragmented = false;
-    if (req->src == req->dst) {
-        /*INPLACE*/
-        areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
-            (*src_last_bytes - authsize);
-        areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
-            (*src_last_bytes - authsize);
-    } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
-        /*NON-INPLACE and DECRYPT*/
-        areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
-            (*src_last_bytes - authsize);
-        areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
-            (*src_last_bytes - authsize);
+
+    if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+        sg = areq_ctx->src_sgl;
+        offset = *src_last_bytes - authsize;
     } else {
-        /*NON-INPLACE and ENCRYPT*/
-        areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
-            (*dst_last_bytes - authsize);
-        areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
-            (*dst_last_bytes - authsize);
+        sg = areq_ctx->dst_sgl;
+        offset = *dst_last_bytes - authsize;
     }
+
+    areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
+    areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
 }
 
-static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
-                                     struct aead_request *req,
-                                     struct buffer_array *sg_data,
-                                     u32 *src_last_bytes, u32 *dst_last_bytes,
-                                     bool is_last_table)
+static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
+                                      struct aead_request *req,
+                                      struct buffer_array *sg_data,
+                                      u32 *src_last_bytes, u32 *dst_last_bytes,
+                                      bool is_last_table)
 {
     struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
     enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
     unsigned int authsize = areq_ctx->req_authsize;
-    int rc = 0, icv_nents;
     struct device *dev = drvdata_to_dev(drvdata);
     struct scatterlist *sg;
 
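
The three near-identical address computations above reduce to one rule: the ICV sits at the tail of whichever scatterlist holds it — src for in-place operations and for every decrypt, dst only for out-of-place encrypt. Stated as a tiny helper (illustrative, not driver code):

    static void icv_tail(struct scatterlist *sg, u32 last_bytes,
                         unsigned int authsize, dma_addr_t *dma, u8 **virt)
    {
        ssize_t offset = last_bytes - authsize;

        *dma = sg_dma_address(sg) + offset;
        *virt = (u8 *)sg_virt(sg) + offset;
    }
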
@@ -893,14 +767,9 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
                         areq_ctx->src_offset, is_last_table,
                         &areq_ctx->src.mlli_nents);
 
-        icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
-                                          areq_ctx->src.nents,
-                                          authsize, *src_last_bytes,
-                                          &areq_ctx->is_icv_fragmented);
-        if (icv_nents < 0) {
-            rc = -ENOTSUPP;
-            goto prepare_data_mlli_exit;
-        }
+        areq_ctx->is_icv_fragmented =
+            cc_is_icv_frag(areq_ctx->src.nents, authsize,
+                           *src_last_bytes);
 
         if (areq_ctx->is_icv_fragmented) {
             /* Backup happens only when ICV is fragmented, ICV
@@ -942,16 +811,11 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
                         areq_ctx->dst_offset, is_last_table,
                         &areq_ctx->dst.mlli_nents);
 
-        icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
-                                          areq_ctx->src.nents,
-                                          authsize, *src_last_bytes,
-                                          &areq_ctx->is_icv_fragmented);
-        if (icv_nents < 0) {
-            rc = -ENOTSUPP;
-            goto prepare_data_mlli_exit;
-        }
-
+        areq_ctx->is_icv_fragmented =
+            cc_is_icv_frag(areq_ctx->src.nents, authsize,
+                           *src_last_bytes);
         /* Backup happens only when ICV is fragmented, ICV
+
          * verification is made by CPU compare in order to simplify
          * MAC verification upon request completion
          */
@@ -979,14 +843,9 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
                         areq_ctx->src_offset, is_last_table,
                         &areq_ctx->src.mlli_nents);
 
-        icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
-                                          areq_ctx->dst.nents,
-                                          authsize, *dst_last_bytes,
-                                          &areq_ctx->is_icv_fragmented);
-        if (icv_nents < 0) {
-            rc = -ENOTSUPP;
-            goto prepare_data_mlli_exit;
-        }
+        areq_ctx->is_icv_fragmented =
+            cc_is_icv_frag(areq_ctx->dst.nents, authsize,
+                           *dst_last_bytes);
 
         if (!areq_ctx->is_icv_fragmented) {
             sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
@@ -1000,9 +859,6 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
             areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
         }
     }
-
-prepare_data_mlli_exit:
-    return rc;
 }
 
 static int cc_aead_chain_data(struct cc_drvdata *drvdata,
@@ -1019,12 +875,12 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
     u32 src_mapped_nents = 0, dst_mapped_nents = 0;
     u32 offset = 0;
     /* non-inplace mode */
-    unsigned int size_for_map = req->assoclen + req->cryptlen;
+    unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
     u32 sg_index = 0;
-    bool chained = false;
     bool is_gcm4543 = areq_ctx->is_gcm4543;
-    u32 size_to_skip = req->assoclen;
+    u32 size_to_skip = areq_ctx->assoclen;
+    struct scatterlist *sgl;
 
     if (is_gcm4543)
         size_to_skip += crypto_aead_ivsize(tfm);
@@ -1043,19 +899,17 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
     size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                     authsize : 0;
     src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
-                                        &src_last_bytes, &chained);
+                                        &src_last_bytes);
     sg_index = areq_ctx->src_sgl->length;
     //check where the data starts
     while (sg_index <= size_to_skip) {
+        src_mapped_nents--;
         offset -= areq_ctx->src_sgl->length;
-        areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
-        //if have reached the end of the sgl, then this is unexpected
-        if (!areq_ctx->src_sgl) {
-            dev_err(dev, "reached end of sg list. unexpected\n");
-            return -EINVAL;
-        }
+        sgl = sg_next(areq_ctx->src_sgl);
+        if (!sgl)
+            break;
+        areq_ctx->src_sgl = sgl;
         sg_index += areq_ctx->src_sgl->length;
-        src_mapped_nents--;
     }
     if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
         dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -1068,7 +922,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
     areq_ctx->src_offset = offset;
 
     if (req->src != req->dst) {
-        size_for_map = req->assoclen + req->cryptlen;
+        size_for_map = areq_ctx->assoclen + req->cryptlen;
         size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                         authsize : 0;
         if (is_gcm4543)
@@ -1083,21 +937,19 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
     }
 
     dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
-                                        &dst_last_bytes, &chained);
+                                        &dst_last_bytes);
     sg_index = areq_ctx->dst_sgl->length;
     offset = size_to_skip;
 
     //check where the data starts
     while (sg_index <= size_to_skip) {
+        dst_mapped_nents--;
         offset -= areq_ctx->dst_sgl->length;
-        areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
-        //if have reached the end of the sgl, then this is unexpected
-        if (!areq_ctx->dst_sgl) {
-            dev_err(dev, "reached end of sg list. unexpected\n");
-            return -EINVAL;
-        }
+        sgl = sg_next(areq_ctx->dst_sgl);
+        if (!sgl)
+            break;
+        areq_ctx->dst_sgl = sgl;
         sg_index += areq_ctx->dst_sgl->length;
-        dst_mapped_nents--;
     }
     if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
         dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -1110,9 +962,9 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
         dst_mapped_nents > 1 ||
         do_chain) {
         areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
-        rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
-                                       &src_last_bytes,
-                                       &dst_last_bytes, is_last_table);
+        cc_prepare_aead_data_mlli(drvdata, req, sg_data,
+                                  &src_last_bytes, &dst_last_bytes,
+                                  is_last_table);
     } else {
         areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
         cc_prepare_aead_data_dlli(req, &src_last_bytes,
@@ -1234,7 +1086,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
         areq_ctx->ccm_iv0_dma_addr = dma_addr;
 
         rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
-                                  &sg_data, req->assoclen);
+                                  &sg_data, areq_ctx->assoclen);
         if (rc)
             goto aead_map_failure;
     }
@@ -1285,7 +1137,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
         areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
     }
 
-    size_to_map = req->cryptlen + req->assoclen;
+    size_to_map = req->cryptlen + areq_ctx->assoclen;
     if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
         size_to_map += authsize;
 
@@ -1483,8 +1335,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
     if (total_in_len < block_size) {
         dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
                 curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
-        areq_ctx->in_nents =
-            cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
+        areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
         sg_copy_to_buffer(src, areq_ctx->in_nents,
                           &curr_buff[*curr_buff_cnt], nbytes);
         *curr_buff_cnt += nbytes;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
index 3ec4b4db5247..a726016bdbc1 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.h
+++ b/drivers/crypto/ccree/cc_buffer_mgr.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 /* \file cc_buffer_mgr.h
  * Buffer Manager
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index d9c17078517b..5b58226ea24d 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -34,6 +34,18 @@ struct cc_hw_key_info {
     enum cc_hw_crypto_key key2_slot;
 };
 
+struct cc_cpp_key_info {
+    u8 slot;
+    enum cc_cpp_alg alg;
+};
+
+enum cc_key_type {
+    CC_UNPROTECTED_KEY,        /* User key */
+    CC_HW_PROTECTED_KEY,       /* HW (FDE) key */
+    CC_POLICY_PROTECTED_KEY,   /* CPP key */
+    CC_INVALID_PROTECTED_KEY   /* Invalid key */
+};
+
 struct cc_cipher_ctx {
     struct cc_drvdata *drvdata;
     int keylen;
@@ -41,19 +53,22 @@ struct cc_cipher_ctx {
     int cipher_mode;
     int flow_mode;
     unsigned int flags;
-    bool hw_key;
+    enum cc_key_type key_type;
     struct cc_user_key_info user;
-    struct cc_hw_key_info hw;
+    union {
+        struct cc_hw_key_info hw;
+        struct cc_cpp_key_info cpp;
+    };
     struct crypto_shash *shash_tfm;
 };
 
 static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
 
-static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+static inline enum cc_key_type cc_key_type(struct crypto_tfm *tfm)
 {
     struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
 
-    return ctx_p->hw_key;
+    return ctx_p->key_type;
 }
 
 static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
@@ -232,7 +247,7 @@ struct tdes_keys {
     u8 key3[DES_KEY_SIZE];
 };
 
-static enum cc_hw_crypto_key cc_slot_to_hw_key(int slot_num)
+static enum cc_hw_crypto_key cc_slot_to_hw_key(u8 slot_num)
 {
     switch (slot_num) {
     case 0:
@@ -247,6 +262,22 @@ static enum cc_hw_crypto_key cc_slot_to_hw_key(int slot_num)
     return END_OF_KEYS;
 }
 
+static u8 cc_slot_to_cpp_key(u8 slot_num)
+{
+    return (slot_num - CC_FIRST_CPP_KEY_SLOT);
+}
+
+static inline enum cc_key_type cc_slot_to_key_type(u8 slot_num)
+{
+    if (slot_num >= CC_FIRST_HW_KEY_SLOT && slot_num <= CC_LAST_HW_KEY_SLOT)
+        return CC_HW_PROTECTED_KEY;
+    else if (slot_num >= CC_FIRST_CPP_KEY_SLOT &&
+             slot_num <= CC_LAST_CPP_KEY_SLOT)
+        return CC_POLICY_PROTECTED_KEY;
+    else
+        return CC_INVALID_PROTECTED_KEY;
+}
+
 static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
                              unsigned int keylen)
 {
@@ -261,18 +292,13 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
 
     /* STAT_PHASE_0: Init and sanity checks */
 
-    /* This check the size of the hardware key token */
+    /* This checks the size of the protected key token */
     if (keylen != sizeof(hki)) {
-        dev_err(dev, "Unsupported HW key size %d.\n", keylen);
+        dev_err(dev, "Unsupported protected key size %d.\n", keylen);
         crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
         return -EINVAL;
     }
 
-    if (ctx_p->flow_mode != S_DIN_to_AES) {
-        dev_err(dev, "HW key not supported for non-AES flows\n");
-        return -EINVAL;
-    }
-
     memcpy(&hki, key, keylen);
 
     /* The real key len for crypto op is the size of the HW key
@@ -286,31 +312,70 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
         return -EINVAL;
     }
 
-    ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1);
-    if (ctx_p->hw.key1_slot == END_OF_KEYS) {
-        dev_err(dev, "Unsupported hw key1 number (%d)\n", hki.hw_key1);
-        return -EINVAL;
-    }
-
-    if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
-        ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
-        ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
-        if (hki.hw_key1 == hki.hw_key2) {
-            dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
-                    hki.hw_key1, hki.hw_key2);
-            return -EINVAL;
-        }
-        ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2);
-        if (ctx_p->hw.key2_slot == END_OF_KEYS) {
-            dev_err(dev, "Unsupported hw key2 number (%d)\n",
-                    hki.hw_key2);
-            return -EINVAL;
-        }
-    }
-
-    ctx_p->keylen = keylen;
-    ctx_p->hw_key = true;
-    dev_dbg(dev, "cc_is_hw_key ret 0");
+    ctx_p->keylen = keylen;
+
+    switch (cc_slot_to_key_type(hki.hw_key1)) {
+    case CC_HW_PROTECTED_KEY:
+        if (ctx_p->flow_mode == S_DIN_to_SM4) {
+            dev_err(dev, "Only AES HW protected keys are supported\n");
+            return -EINVAL;
+        }
+
+        ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1);
+        if (ctx_p->hw.key1_slot == END_OF_KEYS) {
+            dev_err(dev, "Unsupported hw key1 number (%d)\n",
+                    hki.hw_key1);
+            return -EINVAL;
+        }
+
+        if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+            ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+            ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
+            if (hki.hw_key1 == hki.hw_key2) {
+                dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
+                        hki.hw_key1, hki.hw_key2);
+                return -EINVAL;
+            }
+
+            ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2);
+            if (ctx_p->hw.key2_slot == END_OF_KEYS) {
+                dev_err(dev, "Unsupported hw key2 number (%d)\n",
+                        hki.hw_key2);
+                return -EINVAL;
+            }
+        }
+
+        ctx_p->key_type = CC_HW_PROTECTED_KEY;
+        dev_dbg(dev, "HW protected key %d/%d set.\n",
+                ctx_p->hw.key1_slot, ctx_p->hw.key2_slot);
+        break;
+
+    case CC_POLICY_PROTECTED_KEY:
+        if (ctx_p->drvdata->hw_rev < CC_HW_REV_713) {
+            dev_err(dev, "CPP keys not supported in this hardware revision.\n");
+            return -EINVAL;
+        }
+
+        if (ctx_p->cipher_mode != DRV_CIPHER_CBC &&
+            ctx_p->cipher_mode != DRV_CIPHER_CTR) {
+            dev_err(dev, "CPP keys only supported in CBC or CTR modes.\n");
+            return -EINVAL;
+        }
+
+        ctx_p->cpp.slot = cc_slot_to_cpp_key(hki.hw_key1);
+        if (ctx_p->flow_mode == S_DIN_to_AES)
+            ctx_p->cpp.alg = CC_CPP_AES;
+        else /* Must be SM4, due to the sethkey registration */
+            ctx_p->cpp.alg = CC_CPP_SM4;
+        ctx_p->key_type = CC_POLICY_PROTECTED_KEY;
+        dev_dbg(dev, "policy protected key alg: %d slot: %d.\n",
+                ctx_p->cpp.alg, ctx_p->cpp.slot);
+        break;
+
+    default:
+        dev_err(dev, "Unsupported protected key (%d)\n", hki.hw_key1);
+        return -EINVAL;
+    }
 
     return 0;
 }
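
For context, the caller's side of the protected-key ("paes"/"psm4") API: the blob passed to setkey() is not key material but a token naming key slots. The token layout sketched here — a length word plus two slot bytes, per the driver's cc_hkey_info — is an assumption, since that struct is defined outside this excerpt:

    /* Assumed token layout; see cc_cipher.c for the authoritative one */
    struct cc_hkey_info {
        u16 keylen;  /* size the protected key stands in for, e.g. 32 */
        u8 hw_key1;  /* slot number of the (first) protected key */
        u8 hw_key2;  /* second slot, used by XTS/ESSIV/bitlocker */
    } __packed;

    static int set_protected_key(struct crypto_skcipher *tfm)
    {
        struct cc_hkey_info hki = {
            .keylen = 32, .hw_key1 = 0, .hw_key2 = 1,
        };

        /* the length passed to setkey is sizeof(hki), not .keylen */
        return crypto_skcipher_setkey(tfm, (const u8 *)&hki, sizeof(hki));
    }

Whether the token selects an HW (FDE) key or a CPP policy key is decided purely by which slot window hw_key1 falls into, as cc_slot_to_key_type() above shows.
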
@@ -321,7 +386,6 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
     struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
     struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
     struct device *dev = drvdata_to_dev(ctx_p->drvdata);
-    u32 tmp[DES3_EDE_EXPKEY_WORDS];
     struct cc_crypto_alg *cc_alg =
         container_of(tfm->__crt_alg, struct cc_crypto_alg,
                      skcipher_alg.base);
@@ -339,7 +403,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
         return -EINVAL;
     }
 
-    ctx_p->hw_key = false;
+    ctx_p->key_type = CC_UNPROTECTED_KEY;
 
     /*
      * Verify DES weak keys
@@ -347,6 +411,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
      * HW does the expansion on its own.
      */
     if (ctx_p->flow_mode == S_DIN_to_DES) {
+        u32 tmp[DES3_EDE_EXPKEY_WORDS];
         if (keylen == DES3_EDE_KEY_SIZE &&
             __des3_ede_setkey(tmp, &tfm->crt_flags, key,
                               DES3_EDE_KEY_SIZE)) {
@@ -399,7 +464,77 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
     return 0;
 }
 
-static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
+static int cc_out_setup_mode(struct cc_cipher_ctx *ctx_p)
+{
+    switch (ctx_p->flow_mode) {
+    case S_DIN_to_AES:
+        return S_AES_to_DOUT;
+    case S_DIN_to_DES:
+        return S_DES_to_DOUT;
+    case S_DIN_to_SM4:
+        return S_SM4_to_DOUT;
+    default:
+        return ctx_p->flow_mode;
+    }
+}
+
+static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
+                                 struct cipher_req_ctx *req_ctx,
+                                 unsigned int ivsize, struct cc_hw_desc desc[],
+                                 unsigned int *seq_size)
+{
+    struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+    struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+    int cipher_mode = ctx_p->cipher_mode;
+    int flow_mode = cc_out_setup_mode(ctx_p);
+    int direction = req_ctx->gen_ctx.op_type;
+    dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+
+    if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY)
+        return;
+
+    switch (cipher_mode) {
+    case DRV_CIPHER_ECB:
+        break;
+    case DRV_CIPHER_CBC:
+    case DRV_CIPHER_CBC_CTS:
+    case DRV_CIPHER_CTR:
+    case DRV_CIPHER_OFB:
+        /* Read next IV */
+        hw_desc_init(&desc[*seq_size]);
+        set_dout_dlli(&desc[*seq_size], iv_dma_addr, ivsize, NS_BIT, 1);
+        set_cipher_config0(&desc[*seq_size], direction);
+        set_flow_mode(&desc[*seq_size], flow_mode);
+        set_cipher_mode(&desc[*seq_size], cipher_mode);
+        if (cipher_mode == DRV_CIPHER_CTR ||
+            cipher_mode == DRV_CIPHER_OFB) {
+            set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
+        } else {
+            set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE0);
+        }
+        set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+        (*seq_size)++;
+        break;
+    case DRV_CIPHER_XTS:
+    case DRV_CIPHER_ESSIV:
+    case DRV_CIPHER_BITLOCKER:
+        /* IV */
+        hw_desc_init(&desc[*seq_size]);
+        set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
+        set_cipher_mode(&desc[*seq_size], cipher_mode);
+        set_cipher_config0(&desc[*seq_size], direction);
+        set_flow_mode(&desc[*seq_size], flow_mode);
+        set_dout_dlli(&desc[*seq_size], iv_dma_addr, CC_AES_BLOCK_SIZE,
+                      NS_BIT, 1);
+        set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+        (*seq_size)++;
+        break;
+    default:
+        dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+    }
+}
+
+static void cc_setup_state_desc(struct crypto_tfm *tfm,
                                 struct cipher_req_ctx *req_ctx,
                                 unsigned int ivsize, unsigned int nbytes,
                                 struct cc_hw_desc desc[],
@@ -423,11 +558,13 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
         du_size = cc_alg->data_unit;
 
     switch (cipher_mode) {
+    case DRV_CIPHER_ECB:
+        break;
     case DRV_CIPHER_CBC:
     case DRV_CIPHER_CBC_CTS:
     case DRV_CIPHER_CTR:
     case DRV_CIPHER_OFB:
-        /* Load cipher state */
+        /* Load IV */
         hw_desc_init(&desc[*seq_size]);
         set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize,
                      NS_BIT);
@@ -441,57 +578,15 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
             set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
         }
         (*seq_size)++;
-        /*FALLTHROUGH*/
-    case DRV_CIPHER_ECB:
-        /* Load key */
-        hw_desc_init(&desc[*seq_size]);
-        set_cipher_mode(&desc[*seq_size], cipher_mode);
-        set_cipher_config0(&desc[*seq_size], direction);
-        if (flow_mode == S_DIN_to_AES) {
-            if (cc_is_hw_key(tfm)) {
-                set_hw_crypto_key(&desc[*seq_size],
-                                  ctx_p->hw.key1_slot);
-            } else {
-                set_din_type(&desc[*seq_size], DMA_DLLI,
-                             key_dma_addr, ((key_len == 24) ?
-                                            AES_MAX_KEY_SIZE :
-                                            key_len), NS_BIT);
-            }
-            set_key_size_aes(&desc[*seq_size], key_len);
-        } else {
-            /*des*/
-            set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
-                         key_len, NS_BIT);
-            set_key_size_des(&desc[*seq_size], key_len);
-        }
-        set_flow_mode(&desc[*seq_size], flow_mode);
-        set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
-        (*seq_size)++;
         break;
     case DRV_CIPHER_XTS:
     case DRV_CIPHER_ESSIV:
     case DRV_CIPHER_BITLOCKER:
-        /* Load AES key */
-        hw_desc_init(&desc[*seq_size]);
-        set_cipher_mode(&desc[*seq_size], cipher_mode);
-        set_cipher_config0(&desc[*seq_size], direction);
-        if (cc_is_hw_key(tfm)) {
-            set_hw_crypto_key(&desc[*seq_size],
-                              ctx_p->hw.key1_slot);
-        } else {
-            set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
-                         (key_len / 2), NS_BIT);
-        }
-        set_key_size_aes(&desc[*seq_size], (key_len / 2));
-        set_flow_mode(&desc[*seq_size], flow_mode);
-        set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
-        (*seq_size)++;
-
         /* load XEX key */
         hw_desc_init(&desc[*seq_size]);
         set_cipher_mode(&desc[*seq_size], cipher_mode);
         set_cipher_config0(&desc[*seq_size], direction);
-        if (cc_is_hw_key(tfm)) {
+        if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
             set_hw_crypto_key(&desc[*seq_size],
                               ctx_p->hw.key2_slot);
         } else {
@@ -505,7 +600,7 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
         set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
         (*seq_size)++;
 
-        /* Set state */
+        /* Load IV */
         hw_desc_init(&desc[*seq_size]);
         set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
         set_cipher_mode(&desc[*seq_size], cipher_mode);
@@ -521,48 +616,113 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
     }
 }
 
-static void cc_setup_cipher_data(struct crypto_tfm *tfm,
-                                 struct cipher_req_ctx *req_ctx,
-                                 struct scatterlist *dst,
-                                 struct scatterlist *src, unsigned int nbytes,
-                                 void *areq, struct cc_hw_desc desc[],
-                                 unsigned int *seq_size)
+static int cc_out_flow_mode(struct cc_cipher_ctx *ctx_p)
 {
-    struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
-    struct device *dev = drvdata_to_dev(ctx_p->drvdata);
-    unsigned int flow_mode = ctx_p->flow_mode;
-
     switch (ctx_p->flow_mode) {
     case S_DIN_to_AES:
-        flow_mode = DIN_AES_DOUT;
-        break;
+        return DIN_AES_DOUT;
     case S_DIN_to_DES:
-        flow_mode = DIN_DES_DOUT;
-        break;
+        return DIN_DES_DOUT;
     case S_DIN_to_SM4:
-        flow_mode = DIN_SM4_DOUT;
-        break;
+        return DIN_SM4_DOUT;
     default:
-        dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
-        return;
+        return ctx_p->flow_mode;
     }
-    /* Process */
-    if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
-        dev_dbg(dev, " data params addr %pad length 0x%X\n",
-                &sg_dma_address(src), nbytes);
-        dev_dbg(dev, " data params addr %pad length 0x%X\n",
-                &sg_dma_address(dst), nbytes);
+}
+
+static void cc_setup_key_desc(struct crypto_tfm *tfm,
+                              struct cipher_req_ctx *req_ctx,
+                              unsigned int nbytes, struct cc_hw_desc desc[],
+                              unsigned int *seq_size)
+{
+    struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+    struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+    int cipher_mode = ctx_p->cipher_mode;
+    int flow_mode = ctx_p->flow_mode;
+    int direction = req_ctx->gen_ctx.op_type;
+    dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+    unsigned int key_len = ctx_p->keylen;
+    unsigned int din_size;
+
+    switch (cipher_mode) {
+    case DRV_CIPHER_CBC:
+    case DRV_CIPHER_CBC_CTS:
+    case DRV_CIPHER_CTR:
+    case DRV_CIPHER_OFB:
+    case DRV_CIPHER_ECB:
+        /* Load key */
         hw_desc_init(&desc[*seq_size]);
-        set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
-                     nbytes, NS_BIT);
-        set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
-                      nbytes, NS_BIT, (!areq ? 0 : 1));
-        if (areq)
-            set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+        set_cipher_mode(&desc[*seq_size], cipher_mode);
+        set_cipher_config0(&desc[*seq_size], direction);
 
+        if (cc_key_type(tfm) == CC_POLICY_PROTECTED_KEY) {
+            /* We use the AES key size coding for all CPP algs */
+            set_key_size_aes(&desc[*seq_size], key_len);
+            set_cpp_crypto_key(&desc[*seq_size], ctx_p->cpp.slot);
+            flow_mode = cc_out_flow_mode(ctx_p);
+        } else {
+            if (flow_mode == S_DIN_to_AES) {
+                if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
+                    set_hw_crypto_key(&desc[*seq_size],
+                                      ctx_p->hw.key1_slot);
+                } else {
+                    /* CC_POLICY_UNPROTECTED_KEY
+                     * Invalid keys are filtered out in
+                     * sethkey()
+                     */
+                    din_size = (key_len == 24) ?
+                        AES_MAX_KEY_SIZE : key_len;
+
+                    set_din_type(&desc[*seq_size], DMA_DLLI,
+                                 key_dma_addr, din_size,
+                                 NS_BIT);
+                }
+                set_key_size_aes(&desc[*seq_size], key_len);
+            } else {
+                /*des*/
+                set_din_type(&desc[*seq_size], DMA_DLLI,
+                             key_dma_addr, key_len, NS_BIT);
+                set_key_size_des(&desc[*seq_size], key_len);
+            }
+            set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+        }
         set_flow_mode(&desc[*seq_size], flow_mode);
         (*seq_size)++;
-    } else {
+        break;
+    case DRV_CIPHER_XTS:
+    case DRV_CIPHER_ESSIV:
+    case DRV_CIPHER_BITLOCKER:
+        /* Load AES key */
+        hw_desc_init(&desc[*seq_size]);
+        set_cipher_mode(&desc[*seq_size], cipher_mode);
+        set_cipher_config0(&desc[*seq_size], direction);
+        if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
+            set_hw_crypto_key(&desc[*seq_size],
+                              ctx_p->hw.key1_slot);
+        } else {
+            set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+                         (key_len / 2), NS_BIT);
+        }
+        set_key_size_aes(&desc[*seq_size], (key_len / 2));
+        set_flow_mode(&desc[*seq_size], flow_mode);
+        set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+        (*seq_size)++;
+        break;
+    default:
+        dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+    }
+}
+
+static void cc_setup_mlli_desc(struct crypto_tfm *tfm,
+                               struct cipher_req_ctx *req_ctx,
+                               struct scatterlist *dst, struct scatterlist *src,
+                               unsigned int nbytes, void *areq,
+                               struct cc_hw_desc desc[], unsigned int *seq_size)
+{
+    struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+    struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+
+    if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
         /* bypass */
         dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
                 &req_ctx->mlli_params.mlli_dma_addr,
@@ -577,7 +737,38 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
                 req_ctx->mlli_params.mlli_len);
         set_flow_mode(&desc[*seq_size], BYPASS);
         (*seq_size)++;
+    }
+}
+
+static void cc_setup_flow_desc(struct crypto_tfm *tfm,
+                               struct cipher_req_ctx *req_ctx,
+                               struct scatterlist *dst, struct scatterlist *src,
+                               unsigned int nbytes, struct cc_hw_desc desc[],
+                               unsigned int *seq_size)
+{
+    struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+    struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+    unsigned int flow_mode = cc_out_flow_mode(ctx_p);
+    bool last_desc = (ctx_p->key_type == CC_POLICY_PROTECTED_KEY ||
+                      ctx_p->cipher_mode == DRV_CIPHER_ECB);
 
+    /* Process */
+    if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
+        dev_dbg(dev, " data params addr %pad length 0x%X\n",
+                &sg_dma_address(src), nbytes);
+        dev_dbg(dev, " data params addr %pad length 0x%X\n",
+                &sg_dma_address(dst), nbytes);
+        hw_desc_init(&desc[*seq_size]);
+        set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
+                     nbytes, NS_BIT);
+        set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
+                      nbytes, NS_BIT, (!last_desc ? 0 : 1));
+        if (last_desc)
+            set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+
+        set_flow_mode(&desc[*seq_size], flow_mode);
+        (*seq_size)++;
+    } else {
         hw_desc_init(&desc[*seq_size]);
         set_din_type(&desc[*seq_size], DMA_MLLI,
                      ctx_p->drvdata->mlli_sram_addr,
@@ -589,7 +780,7 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
             set_dout_mlli(&desc[*seq_size],
                           ctx_p->drvdata->mlli_sram_addr,
                           req_ctx->in_mlli_nents, NS_BIT,
-                          (!areq ? 0 : 1));
+                          (!last_desc ? 0 : 1));
         } else {
             dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
                     (unsigned int)ctx_p->drvdata->mlli_sram_addr,
@@ -600,9 +791,9 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
                           (LLI_ENTRY_BYTE_SIZE *
                            req_ctx->in_mlli_nents)),
                           req_ctx->out_mlli_nents, NS_BIT,
-                          (!areq ? 0 : 1));
+                          (!last_desc ? 0 : 1));
         }
-        if (areq)
+        if (last_desc)
             set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
 
         set_flow_mode(&desc[*seq_size], flow_mode);
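
The last_desc logic deserves a note: each sequence must end with exactly one descriptor carrying the queue-last indication. CPP-key and ECB requests get no trailing read-IV descriptor, so the data-flow descriptor has to carry it; in every other case cc_setup_readiv_desc() appends the terminating descriptor. As a predicate (a sketch -- the driver computes this inline):

    static bool flow_is_last_desc(const struct cc_cipher_ctx *ctx_p)
    {
        /* no next-IV readback follows for CPP keys or ECB */
        return ctx_p->key_type == CC_POLICY_PROTECTED_KEY ||
               ctx_p->cipher_mode == DRV_CIPHER_ECB;
    }
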
@@ -610,38 +801,6 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
     }
 }
 
-/*
- * Update a CTR-AES 128 bit counter
- */
-static void cc_update_ctr(u8 *ctr, unsigned int increment)
-{
-    if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
-        IS_ALIGNED((unsigned long)ctr, 8)) {
-
-        __be64 *high_be = (__be64 *)ctr;
-        __be64 *low_be = high_be + 1;
-        u64 orig_low = __be64_to_cpu(*low_be);
-        u64 new_low = orig_low + (u64)increment;
-
-        *low_be = __cpu_to_be64(new_low);
-
-        if (new_low < orig_low)
-            *high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
-    } else {
-        u8 *pos = (ctr + AES_BLOCK_SIZE);
-        u8 val;
-        unsigned int size;
-
-        for (; increment; increment--)
-            for (size = AES_BLOCK_SIZE; size; size--) {
-                val = *--pos + 1;
-                *pos = val;
-                if (val)
-                    break;
-            }
-    }
-}
-
 static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
 {
     struct skcipher_request *req = (struct skcipher_request *)cc_req;
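
For reference, the arithmetic the deleted helper performed, written as a compact sketch (DIV_ROUND_UP is the kernel macro; the function name is made up). Reading the state back from the engine via cc_setup_readiv_desc() replaces this CPU-side bookkeeping entirely:

    /*
     * After processing nbytes, CTR mode has consumed
     * ceil(nbytes / AES_BLOCK_SIZE) blocks, so the 128-bit big-endian
     * counter advances by that many; carries propagate right-to-left.
     */
    static void ctr_advance(u8 ctr[AES_BLOCK_SIZE], unsigned int nbytes)
    {
        u32 carry = DIV_ROUND_UP(nbytes, AES_BLOCK_SIZE);
        int i;

        for (i = AES_BLOCK_SIZE - 1; i >= 0 && carry; i--) {
            carry += ctr[i];
            ctr[i] = carry & 0xff;
            carry >>= 8;
        }
    }
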
@@ -649,44 +808,15 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
     struct scatterlist *src = req->src;
     struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
     struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
-    struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
-    struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
     unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
-    unsigned int len;
 
-    cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
-
-    switch (ctx_p->cipher_mode) {
-    case DRV_CIPHER_CBC:
-        /*
-         * The crypto API expects us to set the req->iv to the last
-         * ciphertext block. For encrypt, simply copy from the result.
-         * For decrypt, we must copy from a saved buffer since this
-         * could be an in-place decryption operation and the src is
-         * lost by this point.
-         */
-        if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
-            memcpy(req->iv, req_ctx->backup_info, ivsize);
-            kzfree(req_ctx->backup_info);
-        } else if (!err) {
-            len = req->cryptlen - ivsize;
-            scatterwalk_map_and_copy(req->iv, req->dst, len,
-                                     ivsize, 0);
-        }
-        break;
-
-    case DRV_CIPHER_CTR:
-        /* Compute the counter of the last block */
-        len = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE;
-        cc_update_ctr((u8 *)req->iv, len);
-        break;
-
-    default:
-        break;
+    if (err != -EINPROGRESS) {
+        /* Not a BACKLOG notification */
+        cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+        memcpy(req->iv, req_ctx->iv, ivsize);
+        kzfree(req_ctx->iv);
     }
 
-    kzfree(req_ctx->iv);
-
     skcipher_request_complete(req, err);
 }
692 822
@@ -741,6 +871,13 @@ static int cc_cipher_process(struct skcipher_request *req,
741 cc_req.user_cb = (void *)cc_cipher_complete; 871 cc_req.user_cb = (void *)cc_cipher_complete;
742 cc_req.user_arg = (void *)req; 872 cc_req.user_arg = (void *)req;
743 873
874 /* Setup CPP operation details */
875 if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY) {
876 cc_req.cpp.is_cpp = true;
877 cc_req.cpp.alg = ctx_p->cpp.alg;
878 cc_req.cpp.slot = ctx_p->cpp.slot;
879 }
880
744 /* Setup request context */ 881 /* Setup request context */
745 req_ctx->gen_ctx.op_type = direction; 882 req_ctx->gen_ctx.op_type = direction;
746 883
@@ -755,11 +892,16 @@ static int cc_cipher_process(struct skcipher_request *req,
 
     /* STAT_PHASE_2: Create sequence */
 
-    /* Setup processing */
-    cc_setup_cipher_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+    /* Setup IV and XEX key used */
+    cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+    /* Setup MLLI line, if needed */
+    cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
+    /* Setup key */
+    cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
     /* Data processing */
-    cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
-                         &seq_len);
+    cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len);
+    /* Read next IV */
+    cc_setup_readiv_desc(tfm, req_ctx, ivsize, desc, &seq_len);
 
     /* STAT_PHASE_3: Lock HW and push sequence */
 
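
The monolithic cc_setup_cipher_desc()/cc_setup_cipher_data() pair is now five single-purpose builders invoked in hardware order. A hypothetical trace for cbc(aes) with an MLLI buffer, to make the ordering concrete:

    /*
     * One descriptor per builder (illustrative trace, not driver output):
     *
     *   cc_setup_state_desc()  -> LOAD_STATE0   (IV in)
     *   cc_setup_mlli_desc()   -> BYPASS        (MLLI table to SRAM)
     *   cc_setup_key_desc()    -> LOAD_KEY0     (user / HW / CPP key)
     *   cc_setup_flow_desc()   -> DIN_AES_DOUT  (payload)
     *   cc_setup_readiv_desc() -> WRITE_STATE0  (next IV out, queue-last)
     *
     * ECB skips the first and last steps; CPP keys skip the readback.
     */
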
@@ -774,7 +916,6 @@ static int cc_cipher_process(struct skcipher_request *req,
 
 exit_process:
     if (rc != -EINPROGRESS && rc != -EBUSY) {
-        kzfree(req_ctx->backup_info);
         kzfree(req_ctx->iv);
     }
 
@@ -792,31 +933,10 @@ static int cc_cipher_encrypt(struct skcipher_request *req)
 
 static int cc_cipher_decrypt(struct skcipher_request *req)
 {
-    struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
-    struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
-    struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
     struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
-    unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
-    gfp_t flags = cc_gfp_flags(&req->base);
-    unsigned int len;
 
     memset(req_ctx, 0, sizeof(*req_ctx));
 
-    if ((ctx_p->cipher_mode == DRV_CIPHER_CBC) &&
-        (req->cryptlen >= ivsize)) {
-
-        /* Allocate and save the last IV sized bytes of the source,
-         * which will be lost in case of in-place decryption.
-         */
-        req_ctx->backup_info = kzalloc(ivsize, flags);
-        if (!req_ctx->backup_info)
-            return -ENOMEM;
-
-        len = req->cryptlen - ivsize;
-        scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len,
-                                 ivsize, 0);
-    }
-
     return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
 }
 
@@ -838,6 +958,7 @@ static const struct cc_alg_template skcipher_algs[] = {
838 .flow_mode = S_DIN_to_AES, 958 .flow_mode = S_DIN_to_AES,
839 .min_hw_rev = CC_HW_REV_630, 959 .min_hw_rev = CC_HW_REV_630,
840 .std_body = CC_STD_NIST, 960 .std_body = CC_STD_NIST,
961 .sec_func = true,
841 }, 962 },
842 { 963 {
843 .name = "xts512(paes)", 964 .name = "xts512(paes)",
@@ -856,6 +977,7 @@ static const struct cc_alg_template skcipher_algs[] = {
856 .data_unit = 512, 977 .data_unit = 512,
857 .min_hw_rev = CC_HW_REV_712, 978 .min_hw_rev = CC_HW_REV_712,
858 .std_body = CC_STD_NIST, 979 .std_body = CC_STD_NIST,
980 .sec_func = true,
859 }, 981 },
860 { 982 {
861 .name = "xts4096(paes)", 983 .name = "xts4096(paes)",
@@ -874,6 +996,7 @@ static const struct cc_alg_template skcipher_algs[] = {
874 .data_unit = 4096, 996 .data_unit = 4096,
875 .min_hw_rev = CC_HW_REV_712, 997 .min_hw_rev = CC_HW_REV_712,
876 .std_body = CC_STD_NIST, 998 .std_body = CC_STD_NIST,
999 .sec_func = true,
877 }, 1000 },
878 { 1001 {
879 .name = "essiv(paes)", 1002 .name = "essiv(paes)",
@@ -891,6 +1014,7 @@ static const struct cc_alg_template skcipher_algs[] = {
891 .flow_mode = S_DIN_to_AES, 1014 .flow_mode = S_DIN_to_AES,
892 .min_hw_rev = CC_HW_REV_712, 1015 .min_hw_rev = CC_HW_REV_712,
893 .std_body = CC_STD_NIST, 1016 .std_body = CC_STD_NIST,
1017 .sec_func = true,
894 }, 1018 },
895 { 1019 {
896 .name = "essiv512(paes)", 1020 .name = "essiv512(paes)",
@@ -909,6 +1033,7 @@ static const struct cc_alg_template skcipher_algs[] = {
909 .data_unit = 512, 1033 .data_unit = 512,
910 .min_hw_rev = CC_HW_REV_712, 1034 .min_hw_rev = CC_HW_REV_712,
911 .std_body = CC_STD_NIST, 1035 .std_body = CC_STD_NIST,
1036 .sec_func = true,
912 }, 1037 },
913 { 1038 {
914 .name = "essiv4096(paes)", 1039 .name = "essiv4096(paes)",
@@ -927,6 +1052,7 @@ static const struct cc_alg_template skcipher_algs[] = {
927 .data_unit = 4096, 1052 .data_unit = 4096,
928 .min_hw_rev = CC_HW_REV_712, 1053 .min_hw_rev = CC_HW_REV_712,
929 .std_body = CC_STD_NIST, 1054 .std_body = CC_STD_NIST,
1055 .sec_func = true,
930 }, 1056 },
931 { 1057 {
932 .name = "bitlocker(paes)", 1058 .name = "bitlocker(paes)",
@@ -944,6 +1070,7 @@ static const struct cc_alg_template skcipher_algs[] = {
944 .flow_mode = S_DIN_to_AES, 1070 .flow_mode = S_DIN_to_AES,
945 .min_hw_rev = CC_HW_REV_712, 1071 .min_hw_rev = CC_HW_REV_712,
946 .std_body = CC_STD_NIST, 1072 .std_body = CC_STD_NIST,
1073 .sec_func = true,
947 }, 1074 },
948 { 1075 {
949 .name = "bitlocker512(paes)", 1076 .name = "bitlocker512(paes)",
@@ -962,6 +1089,7 @@ static const struct cc_alg_template skcipher_algs[] = {
962 .data_unit = 512, 1089 .data_unit = 512,
963 .min_hw_rev = CC_HW_REV_712, 1090 .min_hw_rev = CC_HW_REV_712,
964 .std_body = CC_STD_NIST, 1091 .std_body = CC_STD_NIST,
1092 .sec_func = true,
965 }, 1093 },
966 { 1094 {
967 .name = "bitlocker4096(paes)", 1095 .name = "bitlocker4096(paes)",
@@ -980,6 +1108,7 @@ static const struct cc_alg_template skcipher_algs[] = {
980 .data_unit = 4096, 1108 .data_unit = 4096,
981 .min_hw_rev = CC_HW_REV_712, 1109 .min_hw_rev = CC_HW_REV_712,
982 .std_body = CC_STD_NIST, 1110 .std_body = CC_STD_NIST,
1111 .sec_func = true,
983 }, 1112 },
984 { 1113 {
985 .name = "ecb(paes)", 1114 .name = "ecb(paes)",
@@ -997,6 +1126,7 @@ static const struct cc_alg_template skcipher_algs[] = {
997 .flow_mode = S_DIN_to_AES, 1126 .flow_mode = S_DIN_to_AES,
998 .min_hw_rev = CC_HW_REV_712, 1127 .min_hw_rev = CC_HW_REV_712,
999 .std_body = CC_STD_NIST, 1128 .std_body = CC_STD_NIST,
1129 .sec_func = true,
1000 }, 1130 },
1001 { 1131 {
1002 .name = "cbc(paes)", 1132 .name = "cbc(paes)",
@@ -1014,6 +1144,7 @@ static const struct cc_alg_template skcipher_algs[] = {
1014 .flow_mode = S_DIN_to_AES, 1144 .flow_mode = S_DIN_to_AES,
1015 .min_hw_rev = CC_HW_REV_712, 1145 .min_hw_rev = CC_HW_REV_712,
1016 .std_body = CC_STD_NIST, 1146 .std_body = CC_STD_NIST,
1147 .sec_func = true,
1017 }, 1148 },
1018 { 1149 {
1019 .name = "ofb(paes)", 1150 .name = "ofb(paes)",
@@ -1031,6 +1162,7 @@ static const struct cc_alg_template skcipher_algs[] = {
1031 .flow_mode = S_DIN_to_AES, 1162 .flow_mode = S_DIN_to_AES,
1032 .min_hw_rev = CC_HW_REV_712, 1163 .min_hw_rev = CC_HW_REV_712,
1033 .std_body = CC_STD_NIST, 1164 .std_body = CC_STD_NIST,
1165 .sec_func = true,
1034 }, 1166 },
1035 { 1167 {
1036 .name = "cts(cbc(paes))", 1168 .name = "cts(cbc(paes))",
@@ -1048,6 +1180,7 @@ static const struct cc_alg_template skcipher_algs[] = {
1048 .flow_mode = S_DIN_to_AES, 1180 .flow_mode = S_DIN_to_AES,
1049 .min_hw_rev = CC_HW_REV_712, 1181 .min_hw_rev = CC_HW_REV_712,
1050 .std_body = CC_STD_NIST, 1182 .std_body = CC_STD_NIST,
1183 .sec_func = true,
1051 }, 1184 },
1052 { 1185 {
1053 .name = "ctr(paes)", 1186 .name = "ctr(paes)",
@@ -1065,6 +1198,7 @@ static const struct cc_alg_template skcipher_algs[] = {
1065 .flow_mode = S_DIN_to_AES, 1198 .flow_mode = S_DIN_to_AES,
1066 .min_hw_rev = CC_HW_REV_712, 1199 .min_hw_rev = CC_HW_REV_712,
1067 .std_body = CC_STD_NIST, 1200 .std_body = CC_STD_NIST,
1201 .sec_func = true,
1068 }, 1202 },
1069 { 1203 {
1070 .name = "xts(aes)", 1204 .name = "xts(aes)",
@@ -1429,6 +1563,42 @@ static const struct cc_alg_template skcipher_algs[] = {
1429 .min_hw_rev = CC_HW_REV_713, 1563 .min_hw_rev = CC_HW_REV_713,
1430 .std_body = CC_STD_OSCCA, 1564 .std_body = CC_STD_OSCCA,
1431 }, 1565 },
1566 {
1567 .name = "cbc(psm4)",
1568 .driver_name = "cbc-psm4-ccree",
1569 .blocksize = SM4_BLOCK_SIZE,
1570 .template_skcipher = {
1571 .setkey = cc_cipher_sethkey,
1572 .encrypt = cc_cipher_encrypt,
1573 .decrypt = cc_cipher_decrypt,
1574 .min_keysize = CC_HW_KEY_SIZE,
1575 .max_keysize = CC_HW_KEY_SIZE,
1576 .ivsize = SM4_BLOCK_SIZE,
1577 },
1578 .cipher_mode = DRV_CIPHER_CBC,
1579 .flow_mode = S_DIN_to_SM4,
1580 .min_hw_rev = CC_HW_REV_713,
1581 .std_body = CC_STD_OSCCA,
1582 .sec_func = true,
1583 },
1584 {
1585 .name = "ctr(psm4)",
1586 .driver_name = "ctr-psm4-ccree",
1587 .blocksize = SM4_BLOCK_SIZE,
1588 .template_skcipher = {
1589 .setkey = cc_cipher_sethkey,
1590 .encrypt = cc_cipher_encrypt,
1591 .decrypt = cc_cipher_decrypt,
1592 .min_keysize = CC_HW_KEY_SIZE,
1593 .max_keysize = CC_HW_KEY_SIZE,
1594 .ivsize = SM4_BLOCK_SIZE,
1595 },
1596 .cipher_mode = DRV_CIPHER_CTR,
1597 .flow_mode = S_DIN_to_SM4,
1598 .min_hw_rev = CC_HW_REV_713,
1599 .std_body = CC_STD_OSCCA,
1600 .sec_func = true,
1601 },
1432}; 1602};
1433 1603
1434static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl, 1604static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
@@ -1504,7 +1674,8 @@ int cc_cipher_alloc(struct cc_drvdata *drvdata)
1504 ARRAY_SIZE(skcipher_algs)); 1674 ARRAY_SIZE(skcipher_algs));
1505 for (alg = 0; alg < ARRAY_SIZE(skcipher_algs); alg++) { 1675 for (alg = 0; alg < ARRAY_SIZE(skcipher_algs); alg++) {
1506 if ((skcipher_algs[alg].min_hw_rev > drvdata->hw_rev) || 1676 if ((skcipher_algs[alg].min_hw_rev > drvdata->hw_rev) ||
1507 !(drvdata->std_bodies & skcipher_algs[alg].std_body)) 1677 !(drvdata->std_bodies & skcipher_algs[alg].std_body) ||
1678 (drvdata->sec_disabled && skcipher_algs[alg].sec_func))
1508 continue; 1679 continue;
1509 1680
1510 dev_dbg(dev, "creating %s\n", skcipher_algs[alg].driver_name); 1681 dev_dbg(dev, "creating %s\n", skcipher_algs[alg].driver_name);
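The registration loop now skips an algorithm template on any of three grounds: hardware revision too old, standards body not enabled, or a security function on a security-disabled part. A standalone sketch of the predicate, with the struct fields reduced to what the check needs (names mirror the driver but this is illustrative, not the driver's code):

#include <stdbool.h>
#include <stdint.h>

struct alg_tmpl {
	uint32_t min_hw_rev;
	int std_body;   /* CC_STD_NIST / CC_STD_OSCCA bit */
	bool sec_func;  /* uses protected keys / CPP */
};

struct drv_caps {
	uint32_t hw_rev;
	int std_bodies; /* bitmask of supported standards bodies */
	bool sec_disabled;
};

static bool should_register(const struct drv_caps *drv,
			    const struct alg_tmpl *tmpl)
{
	if (tmpl->min_hw_rev > drv->hw_rev)
		return false;            /* hardware too old */
	if (!(drv->std_bodies & tmpl->std_body))
		return false;            /* standards body not enabled */
	if (drv->sec_disabled && tmpl->sec_func)
		return false;            /* security functions disabled */
	return true;
}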
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
index 4dbc0a1e6d5c..da3a38707fae 100644
--- a/drivers/crypto/ccree/cc_cipher.h
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -1,5 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4/* \file cc_cipher.h 4/* \file cc_cipher.h
5 * ARM CryptoCell Cipher Crypto API 5 * ARM CryptoCell Cipher Crypto API
@@ -20,7 +20,6 @@ struct cipher_req_ctx {
20 u32 in_mlli_nents; 20 u32 in_mlli_nents;
21 u32 out_nents; 21 u32 out_nents;
22 u32 out_mlli_nents; 22 u32 out_mlli_nents;
23 u8 *backup_info; /*store iv for generated IV flow*/
24 u8 *iv; 23 u8 *iv;
25 struct mlli_params mlli_params; 24 struct mlli_params mlli_params;
26}; 25};
diff --git a/drivers/crypto/ccree/cc_crypto_ctx.h b/drivers/crypto/ccree/cc_crypto_ctx.h
index c8dac273c563..ccf960a0d989 100644
--- a/drivers/crypto/ccree/cc_crypto_ctx.h
+++ b/drivers/crypto/ccree/cc_crypto_ctx.h
@@ -1,5 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4#ifndef _CC_CRYPTO_CTX_H_ 4#ifndef _CC_CRYPTO_CTX_H_
5#define _CC_CRYPTO_CTX_H_ 5#define _CC_CRYPTO_CTX_H_
@@ -55,6 +55,14 @@
55 55
56#define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX 56#define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX
57 57
58#define CC_CPP_NUM_SLOTS 8
59#define CC_CPP_NUM_ALGS 2
60
61enum cc_cpp_alg {
62 CC_CPP_SM4 = 1,
63 CC_CPP_AES = 0
64};
65
58enum drv_engine_type { 66enum drv_engine_type {
59 DRV_ENGINE_NULL = 0, 67 DRV_ENGINE_NULL = 0,
60 DRV_ENGINE_AES = 1, 68 DRV_ENGINE_AES = 1,
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
index 5fa05a7bcf36..566999738698 100644
--- a/drivers/crypto/ccree/cc_debugfs.c
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -1,5 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
3 3
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <linux/debugfs.h> 5#include <linux/debugfs.h>
@@ -25,9 +25,24 @@ struct cc_debugfs_ctx {
25 */ 25 */
26static struct dentry *cc_debugfs_dir; 26static struct dentry *cc_debugfs_dir;
27 27
28static struct debugfs_reg32 debug_regs[] = { 28static struct debugfs_reg32 ver_sig_regs[] = {
29 { .name = "SIGNATURE" }, /* Must be 0th */ 29 { .name = "SIGNATURE" }, /* Must be 0th */
30 { .name = "VERSION" }, /* Must be 1st */ 30 { .name = "VERSION" }, /* Must be 1st */
31};
32
33static struct debugfs_reg32 pid_cid_regs[] = {
34 CC_DEBUG_REG(PERIPHERAL_ID_0),
35 CC_DEBUG_REG(PERIPHERAL_ID_1),
36 CC_DEBUG_REG(PERIPHERAL_ID_2),
37 CC_DEBUG_REG(PERIPHERAL_ID_3),
38 CC_DEBUG_REG(PERIPHERAL_ID_4),
39 CC_DEBUG_REG(COMPONENT_ID_0),
40 CC_DEBUG_REG(COMPONENT_ID_1),
41 CC_DEBUG_REG(COMPONENT_ID_2),
42 CC_DEBUG_REG(COMPONENT_ID_3),
43};
44
45static struct debugfs_reg32 debug_regs[] = {
31 CC_DEBUG_REG(HOST_IRR), 46 CC_DEBUG_REG(HOST_IRR),
32 CC_DEBUG_REG(HOST_POWER_DOWN_EN), 47 CC_DEBUG_REG(HOST_POWER_DOWN_EN),
33 CC_DEBUG_REG(AXIM_MON_ERR), 48 CC_DEBUG_REG(AXIM_MON_ERR),
@@ -53,10 +68,7 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
53{ 68{
54 struct device *dev = drvdata_to_dev(drvdata); 69 struct device *dev = drvdata_to_dev(drvdata);
55 struct cc_debugfs_ctx *ctx; 70 struct cc_debugfs_ctx *ctx;
56 struct debugfs_regset32 *regset; 71 struct debugfs_regset32 *regset, *verset;
57
58 debug_regs[0].offset = drvdata->sig_offset;
59 debug_regs[1].offset = drvdata->ver_offset;
60 72
61 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 73 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
62 if (!ctx) 74 if (!ctx)
@@ -75,8 +87,26 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
75 debugfs_create_regset32("regs", 0400, ctx->dir, regset); 87 debugfs_create_regset32("regs", 0400, ctx->dir, regset);
76 debugfs_create_bool("coherent", 0400, ctx->dir, &drvdata->coherent); 88 debugfs_create_bool("coherent", 0400, ctx->dir, &drvdata->coherent);
77 89
78 drvdata->debugfs = ctx; 90 verset = devm_kzalloc(dev, sizeof(*verset), GFP_KERNEL);
91 /* Failing here is not important enough to fail the module load */
92 if (!verset)
93 goto out;
94
95 if (drvdata->hw_rev <= CC_HW_REV_712) {
96 ver_sig_regs[0].offset = drvdata->sig_offset;
97 ver_sig_regs[1].offset = drvdata->ver_offset;
98 verset->regs = ver_sig_regs;
99 verset->nregs = ARRAY_SIZE(ver_sig_regs);
100 } else {
101 verset->regs = pid_cid_regs;
102 verset->nregs = ARRAY_SIZE(pid_cid_regs);
103 }
104 verset->base = drvdata->cc_base;
79 105
106 debugfs_create_regset32("version", 0400, ctx->dir, verset);
107
108out:
109 drvdata->debugfs = ctx;
80 return 0; 110 return 0;
81} 111}
82 112
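The new "version" file relies on the generic regset helper: debugfs_create_regset32() exposes a read-only file that, on read, dumps one name/value line per entry, reading each register from base + offset. A condensed sketch of the pattern, with illustrative offsets rather than the real CryptoCell register map:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/debugfs.h>

static const struct debugfs_reg32 my_regs[] = {
	{ .name = "SIGNATURE", .offset = 0x0A24 }, /* offsets illustrative */
	{ .name = "VERSION",   .offset = 0x0A28 },
};

static int my_debugfs_init(struct device *dev, void __iomem *base,
			   struct dentry *dir)
{
	struct debugfs_regset32 *regset;

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = my_regs;
	regset->nregs = ARRAY_SIZE(my_regs);
	regset->base = base;
	debugfs_create_regset32("version", 0400, dir, regset);
	return 0;
}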
diff --git a/drivers/crypto/ccree/cc_debugfs.h b/drivers/crypto/ccree/cc_debugfs.h
index 01cbd9a95659..664ff402e0e9 100644
--- a/drivers/crypto/ccree/cc_debugfs.h
+++ b/drivers/crypto/ccree/cc_debugfs.h
@@ -1,5 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4#ifndef __CC_DEBUGFS_H__ 4#ifndef __CC_DEBUGFS_H__
5#define __CC_DEBUGFS_H__ 5#define __CC_DEBUGFS_H__
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 3bcc6c76e090..86ac7b443355 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -1,5 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
3 3
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <linux/module.h> 5#include <linux/module.h>
@@ -30,27 +30,47 @@
30bool cc_dump_desc; 30bool cc_dump_desc;
31module_param_named(dump_desc, cc_dump_desc, bool, 0600); 31module_param_named(dump_desc, cc_dump_desc, bool, 0600);
32MODULE_PARM_DESC(cc_dump_desc, "Dump descriptors to kernel log as debugging aid"); 32MODULE_PARM_DESC(cc_dump_desc, "Dump descriptors to kernel log as debugging aid");
33
34bool cc_dump_bytes; 33bool cc_dump_bytes;
35module_param_named(dump_bytes, cc_dump_bytes, bool, 0600); 34module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
36MODULE_PARM_DESC(cc_dump_bytes, "Dump buffers to kernel log as debugging aid"); 35MODULE_PARM_DESC(cc_dump_bytes, "Dump buffers to kernel log as debugging aid");
37 36
37static bool cc_sec_disable;
38module_param_named(sec_disable, cc_sec_disable, bool, 0600);
39MODULE_PARM_DESC(cc_sec_disable, "Disable security functions");
40
38struct cc_hw_data { 41struct cc_hw_data {
39 char *name; 42 char *name;
40 enum cc_hw_rev rev; 43 enum cc_hw_rev rev;
41 u32 sig; 44 u32 sig;
45 u32 cidr_0123;
46 u32 pidr_0124;
42 int std_bodies; 47 int std_bodies;
43}; 48};
44 49
50#define CC_NUM_IDRS 4
51
52/* Note: PIDR3 holds CMOD/Rev so ignored for HW identification purposes */
53static const u32 pidr_0124_offsets[CC_NUM_IDRS] = {
54 CC_REG(PERIPHERAL_ID_0), CC_REG(PERIPHERAL_ID_1),
55 CC_REG(PERIPHERAL_ID_2), CC_REG(PERIPHERAL_ID_4)
56};
57
58static const u32 cidr_0123_offsets[CC_NUM_IDRS] = {
59 CC_REG(COMPONENT_ID_0), CC_REG(COMPONENT_ID_1),
60 CC_REG(COMPONENT_ID_2), CC_REG(COMPONENT_ID_3)
61};
62
45/* Hardware revisions defs. */ 63/* Hardware revisions defs. */
46 64
47/* The 703 is a OSCCA only variant of the 713 */ 65/* The 703 is a OSCCA only variant of the 713 */
48static const struct cc_hw_data cc703_hw = { 66static const struct cc_hw_data cc703_hw = {
49 .name = "703", .rev = CC_HW_REV_713, .std_bodies = CC_STD_OSCCA 67 .name = "703", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
68 .pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_OSCCA
50}; 69};
51 70
52static const struct cc_hw_data cc713_hw = { 71static const struct cc_hw_data cc713_hw = {
53 .name = "713", .rev = CC_HW_REV_713, .std_bodies = CC_STD_ALL 72 .name = "713", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
73 .pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_ALL
54}; 74};
55 75
56static const struct cc_hw_data cc712_hw = { 76static const struct cc_hw_data cc712_hw = {
@@ -78,6 +98,20 @@ static const struct of_device_id arm_ccree_dev_of_match[] = {
78}; 98};
79MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match); 99MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);
80 100
101static u32 cc_read_idr(struct cc_drvdata *drvdata, const u32 *idr_offsets)
102{
103 int i;
104 union {
105 u8 regs[CC_NUM_IDRS];
106 __le32 val;
107 } idr;
108
109 for (i = 0; i < CC_NUM_IDRS; ++i)
110 idr.regs[i] = cc_ioread(drvdata, idr_offsets[i]);
111
112 return le32_to_cpu(idr.val);
113}
114
81void __dump_byte_array(const char *name, const u8 *buf, size_t len) 115void __dump_byte_array(const char *name, const u8 *buf, size_t len)
82{ 116{
83 char prefix[64]; 117 char prefix[64];
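cc_read_idr() gathers the low byte of each of four 8-bit ID registers and assembles them, lowest register first, into a single little-endian 32-bit value that can be compared against the expected PIDR/CIDR constants. A standalone demonstration of the byte composition, using the standard ARM PrimeCell-style component ID bytes:

#include <stdint.h>
#include <stdio.h>

/* Compose four 8-bit ID register reads, lowest register into the least
 * significant byte, matching the union-based trick in cc_read_idr(). */
static uint32_t compose_idr(const uint8_t regs[4])
{
	return (uint32_t)regs[0] |
	       (uint32_t)regs[1] << 8 |
	       (uint32_t)regs[2] << 16 |
	       (uint32_t)regs[3] << 24;
}

int main(void)
{
	/* CIDR0..CIDR3 of an ARM PrimeCell-style component */
	const uint8_t cidr[4] = { 0x0D, 0xF0, 0x05, 0xB1 };

	printf("CIDR0123 = 0x%08X\n", (unsigned)compose_idr(cidr)); /* 0xB105F00D */
	return 0;
}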
@@ -114,12 +148,12 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
114 148
115 drvdata->irq = irr; 149 drvdata->irq = irr;
116 /* Completion interrupt - most probable */ 150 /* Completion interrupt - most probable */
117 if (irr & CC_COMP_IRQ_MASK) { 151 if (irr & drvdata->comp_mask) {
118 /* Mask AXI completion interrupt - will be unmasked in 152 /* Mask all completion interrupts - will be unmasked in
119 * Deferred service handler 153 * deferred service handler
120 */ 154 */
121 cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_COMP_IRQ_MASK); 155 cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | drvdata->comp_mask);
122 irr &= ~CC_COMP_IRQ_MASK; 156 irr &= ~drvdata->comp_mask;
123 complete_request(drvdata); 157 complete_request(drvdata);
124 } 158 }
125#ifdef CONFIG_CRYPTO_FIPS 159#ifdef CONFIG_CRYPTO_FIPS
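The fixed CC_COMP_IRQ_MASK test becomes a per-device comp_mask so the same ISR path also treats the CPP "operation aborted" interrupts as completions. Conceptually the mask is built once at probe time; a standalone sketch of the composition, with bit positions taken from the IMR definitions later in this patch:

#include <stdbool.h>

#define BIT(n) (1UL << (n))

#define COMP_IRQ_MASK      BIT(23) /* AXIM_COMP_INT */
#define CPP_AES_ABORT_MASK (BIT(1) | BIT(3) | BIT(4) | BIT(5) | \
			    BIT(6) | BIT(7) | BIT(9) | BIT(10))
#define CPP_SM4_ABORT_MASK (BIT(12) | BIT(13) | BIT(14) | BIT(15) | \
			    BIT(16) | BIT(17) | BIT(18) | BIT(20))

static unsigned long build_comp_mask(bool sec_disabled, bool has_nist)
{
	unsigned long mask = COMP_IRQ_MASK;

	if (!sec_disabled) {
		mask |= CPP_SM4_ABORT_MASK;
		if (has_nist)
			mask |= CPP_AES_ABORT_MASK;
	}
	return mask;
}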
@@ -159,11 +193,14 @@ int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
159 unsigned int val, cache_params; 193 unsigned int val, cache_params;
160 struct device *dev = drvdata_to_dev(drvdata); 194 struct device *dev = drvdata_to_dev(drvdata);
161 195
 162 /* Unmask all AXI interrupt sources in the AXI_CFG1 register */ 196 /* Unmask all AXI interrupt sources in the AXI_CFG1 register */
 163 val = cc_ioread(drvdata, CC_REG(AXIM_CFG)); 197 /* AXI interrupt config is obsolete starting at cc7x3 */
164 cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK); 198 if (drvdata->hw_rev <= CC_HW_REV_712) {
165 dev_dbg(dev, "AXIM_CFG=0x%08X\n", 199 val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
166 cc_ioread(drvdata, CC_REG(AXIM_CFG))); 200 cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
201 dev_dbg(dev, "AXIM_CFG=0x%08X\n",
202 cc_ioread(drvdata, CC_REG(AXIM_CFG)));
203 }
167 204
168 /* Clear all pending interrupts */ 205 /* Clear all pending interrupts */
169 val = cc_ioread(drvdata, CC_REG(HOST_IRR)); 206 val = cc_ioread(drvdata, CC_REG(HOST_IRR));
@@ -171,7 +208,7 @@ int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
171 cc_iowrite(drvdata, CC_REG(HOST_ICR), val); 208 cc_iowrite(drvdata, CC_REG(HOST_ICR), val);
172 209
173 /* Unmask relevant interrupt cause */ 210 /* Unmask relevant interrupt cause */
174 val = CC_COMP_IRQ_MASK | CC_AXI_ERR_IRQ_MASK; 211 val = drvdata->comp_mask | CC_AXI_ERR_IRQ_MASK;
175 212
176 if (drvdata->hw_rev >= CC_HW_REV_712) 213 if (drvdata->hw_rev >= CC_HW_REV_712)
177 val |= CC_GPR0_IRQ_MASK; 214 val |= CC_GPR0_IRQ_MASK;
@@ -201,7 +238,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
201 struct cc_drvdata *new_drvdata; 238 struct cc_drvdata *new_drvdata;
202 struct device *dev = &plat_dev->dev; 239 struct device *dev = &plat_dev->dev;
203 struct device_node *np = dev->of_node; 240 struct device_node *np = dev->of_node;
204 u32 signature_val; 241 u32 val, hw_rev_pidr, sig_cidr;
205 u64 dma_mask; 242 u64 dma_mask;
206 const struct cc_hw_data *hw_rev; 243 const struct cc_hw_data *hw_rev;
207 const struct of_device_id *dev_id; 244 const struct of_device_id *dev_id;
@@ -231,6 +268,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
231 new_drvdata->ver_offset = CC_REG(HOST_VERSION_630); 268 new_drvdata->ver_offset = CC_REG(HOST_VERSION_630);
232 } 269 }
233 270
271 new_drvdata->comp_mask = CC_COMP_IRQ_MASK;
272
234 platform_set_drvdata(plat_dev, new_drvdata); 273 platform_set_drvdata(plat_dev, new_drvdata);
235 new_drvdata->plat_dev = plat_dev; 274 new_drvdata->plat_dev = plat_dev;
236 275
@@ -311,22 +350,57 @@ static int init_cc_resources(struct platform_device *plat_dev)
311 return rc; 350 return rc;
312 } 351 }
313 352
353 new_drvdata->sec_disabled = cc_sec_disable;
354
314 if (hw_rev->rev <= CC_HW_REV_712) { 355 if (hw_rev->rev <= CC_HW_REV_712) {
315 /* Verify correct mapping */ 356 /* Verify correct mapping */
316 signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset); 357 val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
317 if (signature_val != hw_rev->sig) { 358 if (val != hw_rev->sig) {
318 dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n", 359 dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
319 signature_val, hw_rev->sig); 360 val, hw_rev->sig);
361 rc = -EINVAL;
362 goto post_clk_err;
363 }
364 sig_cidr = val;
365 hw_rev_pidr = cc_ioread(new_drvdata, new_drvdata->ver_offset);
366 } else {
367 /* Verify correct mapping */
368 val = cc_read_idr(new_drvdata, pidr_0124_offsets);
369 if (val != hw_rev->pidr_0124) {
370 dev_err(dev, "Invalid CC PIDR: PIDR0124=0x%08X != expected=0x%08X\n",
371 val, hw_rev->pidr_0124);
372 rc = -EINVAL;
373 goto post_clk_err;
374 }
375 hw_rev_pidr = val;
376
377 val = cc_read_idr(new_drvdata, cidr_0123_offsets);
378 if (val != hw_rev->cidr_0123) {
379 dev_err(dev, "Invalid CC CIDR: CIDR0123=0x%08X != expected=0x%08X\n",
380 val, hw_rev->cidr_0123);
320 rc = -EINVAL; 381 rc = -EINVAL;
321 goto post_clk_err; 382 goto post_clk_err;
322 } 383 }
323 dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val); 384 sig_cidr = val;
385
386 /* Check security disable state */
387 val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED));
388 val &= CC_SECURITY_DISABLED_MASK;
389 new_drvdata->sec_disabled |= !!val;
390
391 if (!new_drvdata->sec_disabled) {
392 new_drvdata->comp_mask |= CC_CPP_SM4_ABORT_MASK;
393 if (new_drvdata->std_bodies & CC_STD_NIST)
394 new_drvdata->comp_mask |= CC_CPP_AES_ABORT_MASK;
395 }
324 } 396 }
325 397
398 if (new_drvdata->sec_disabled)
399 dev_info(dev, "Security Disabled mode is in effect. Security functions disabled.\n");
400
326 /* Display HW versions */ 401 /* Display HW versions */
 327 dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n", 402 dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%08X, Driver version %s\n",

328 hw_rev->name, cc_ioread(new_drvdata, new_drvdata->ver_offset), 403 hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
329 DRV_MODULE_VERSION);
330 404
331 rc = init_cc_regs(new_drvdata, true); 405 rc = init_cc_regs(new_drvdata, true);
332 if (rc) { 406 if (rc) {
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index 33dbf3e6d15d..b76181335c08 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -1,5 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4/* \file cc_driver.h 4/* \file cc_driver.h
5 * ARM CryptoCell Linux Crypto Driver 5 * ARM CryptoCell Linux Crypto Driver
@@ -65,10 +65,32 @@ enum cc_std_body {
65 65
66#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT) 66#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
67 67
68#define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT)
69
68#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \ 70#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
69 CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \ 71 CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
70 CC_AXIM_MON_COMP_VALUE_BIT_SHIFT) 72 CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
71 73
74#define CC_CPP_AES_ABORT_MASK ( \
75 BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT) | \
76 BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT) | \
77 BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT) | \
78 BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT) | \
79 BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT) | \
80 BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT) | \
81 BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT) | \
82 BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT))
83
84#define CC_CPP_SM4_ABORT_MASK ( \
85 BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT) | \
86 BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT) | \
87 BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT) | \
88 BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT) | \
89 BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT) | \
90 BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT) | \
91 BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT) | \
92 BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT))
93
72/* Register name mangling macro */ 94/* Register name mangling macro */
73#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET 95#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET
74 96
@@ -81,7 +103,6 @@ enum cc_std_body {
81 103
82#define MAX_REQUEST_QUEUE_SIZE 4096 104#define MAX_REQUEST_QUEUE_SIZE 4096
83#define MAX_MLLI_BUFF_SIZE 2080 105#define MAX_MLLI_BUFF_SIZE 2080
84#define MAX_ICV_NENTS_SUPPORTED 2
85 106
86/* Definitions for HW descriptors DIN/DOUT fields */ 107/* Definitions for HW descriptors DIN/DOUT fields */
87#define NS_BIT 1 108#define NS_BIT 1
@@ -90,6 +111,12 @@ enum cc_std_body {
90 * field in the HW descriptor. The DMA engine +8 that value. 111 * field in the HW descriptor. The DMA engine +8 that value.
91 */ 112 */
92 113
114struct cc_cpp_req {
115 bool is_cpp;
116 enum cc_cpp_alg alg;
117 u8 slot;
118};
119
93#define CC_MAX_IVGEN_DMA_ADDRESSES 3 120#define CC_MAX_IVGEN_DMA_ADDRESSES 3
94struct cc_crypto_req { 121struct cc_crypto_req {
95 void (*user_cb)(struct device *dev, void *req, int err); 122 void (*user_cb)(struct device *dev, void *req, int err);
@@ -104,6 +131,7 @@ struct cc_crypto_req {
104 /* The generated IV size required, 8/16 B allowed. */ 131 /* The generated IV size required, 8/16 B allowed. */
105 unsigned int ivgen_size; 132 unsigned int ivgen_size;
106 struct completion seq_compl; /* request completion */ 133 struct completion seq_compl; /* request completion */
134 struct cc_cpp_req cpp;
107}; 135};
108 136
109/** 137/**
@@ -136,6 +164,8 @@ struct cc_drvdata {
136 u32 sig_offset; 164 u32 sig_offset;
137 u32 ver_offset; 165 u32 ver_offset;
138 int std_bodies; 166 int std_bodies;
167 bool sec_disabled;
168 u32 comp_mask;
139}; 169};
140 170
141struct cc_crypto_alg { 171struct cc_crypto_alg {
@@ -162,12 +192,14 @@ struct cc_alg_template {
162 int auth_mode; 192 int auth_mode;
163 u32 min_hw_rev; 193 u32 min_hw_rev;
164 enum cc_std_body std_body; 194 enum cc_std_body std_body;
195 bool sec_func;
165 unsigned int data_unit; 196 unsigned int data_unit;
166 struct cc_drvdata *drvdata; 197 struct cc_drvdata *drvdata;
167}; 198};
168 199
169struct async_gen_req_ctx { 200struct async_gen_req_ctx {
170 dma_addr_t iv_dma_addr; 201 dma_addr_t iv_dma_addr;
202 u8 *iv;
171 enum drv_crypto_direction op_type; 203 enum drv_crypto_direction op_type;
172}; 204};
173 205
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
index b4d0a6d983e0..5ad3ffb7acaa 100644
--- a/drivers/crypto/ccree/cc_fips.c
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -1,5 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <linux/fips.h> 5#include <linux/fips.h>
@@ -49,8 +49,6 @@ void cc_fips_fini(struct cc_drvdata *drvdata)
49 49
50 /* Kill tasklet */ 50 /* Kill tasklet */
51 tasklet_kill(&fips_h->tasklet); 51 tasklet_kill(&fips_h->tasklet);
52
53 kfree(fips_h);
54 drvdata->fips_handle = NULL; 52 drvdata->fips_handle = NULL;
55} 53}
56 54
@@ -72,20 +70,28 @@ static inline void tee_fips_error(struct device *dev)
72 dev_err(dev, "TEE reported error!\n"); 70 dev_err(dev, "TEE reported error!\n");
73} 71}
74 72
73/*
 74 * This function checks if a CryptoCell TEE FIPS error occurred
 75 * and, if so, triggers a system error
76 */
77void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata)
78{
79 struct device *dev = drvdata_to_dev(p_drvdata);
80
81 if (!cc_get_tee_fips_status(p_drvdata))
82 tee_fips_error(dev);
83}
84
75/* Deferred service handler, run as interrupt-fired tasklet */ 85/* Deferred service handler, run as interrupt-fired tasklet */
76static void fips_dsr(unsigned long devarg) 86static void fips_dsr(unsigned long devarg)
77{ 87{
78 struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg; 88 struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
79 struct device *dev = drvdata_to_dev(drvdata); 89 u32 irq, val;
80 u32 irq, state, val;
81 90
82 irq = (drvdata->irq & (CC_GPR0_IRQ_MASK)); 91 irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));
83 92
84 if (irq) { 93 if (irq) {
85 state = cc_ioread(drvdata, CC_REG(GPR_HOST)); 94 cc_tee_handle_fips_error(drvdata);
86
87 if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
88 tee_fips_error(dev);
89 } 95 }
90 96
 91 /* after verifying that there is nothing to do, 97
@@ -104,7 +110,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata)
104 if (p_drvdata->hw_rev < CC_HW_REV_712) 110 if (p_drvdata->hw_rev < CC_HW_REV_712)
105 return 0; 111 return 0;
106 112
107 fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL); 113 fips_h = devm_kzalloc(dev, sizeof(*fips_h), GFP_KERNEL);
108 if (!fips_h) 114 if (!fips_h)
109 return -ENOMEM; 115 return -ENOMEM;
110 116
@@ -113,8 +119,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata)
113 dev_dbg(dev, "Initializing fips tasklet\n"); 119 dev_dbg(dev, "Initializing fips tasklet\n");
114 tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata); 120 tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
115 121
116 if (!cc_get_tee_fips_status(p_drvdata)) 122 cc_tee_handle_fips_error(p_drvdata);
117 tee_fips_error(dev);
118 123
119 return 0; 124 return 0;
120} 125}
diff --git a/drivers/crypto/ccree/cc_fips.h b/drivers/crypto/ccree/cc_fips.h
index 645e096a7a82..fc33eeb4d566 100644
--- a/drivers/crypto/ccree/cc_fips.h
+++ b/drivers/crypto/ccree/cc_fips.h
@@ -1,5 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4#ifndef __CC_FIPS_H__ 4#ifndef __CC_FIPS_H__
5#define __CC_FIPS_H__ 5#define __CC_FIPS_H__
@@ -18,6 +18,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata);
18void cc_fips_fini(struct cc_drvdata *drvdata); 18void cc_fips_fini(struct cc_drvdata *drvdata);
19void fips_handler(struct cc_drvdata *drvdata); 19void fips_handler(struct cc_drvdata *drvdata);
20void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok); 20void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);
21void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata);
21 22
22#else /* CONFIG_CRYPTO_FIPS */ 23#else /* CONFIG_CRYPTO_FIPS */
23 24
@@ -30,6 +31,7 @@ static inline void cc_fips_fini(struct cc_drvdata *drvdata) {}
30static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata, 31static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
31 bool ok) {} 32 bool ok) {}
32static inline void fips_handler(struct cc_drvdata *drvdata) {} 33static inline void fips_handler(struct cc_drvdata *drvdata) {}
34static inline void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata) {}
33 35
34#endif /* CONFIG_CRYPTO_FIPS */ 36#endif /* CONFIG_CRYPTO_FIPS */
35 37
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index 2c4ddc8fb76b..a6abe4e3bb0e 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -1,5 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <linux/module.h> 5#include <linux/module.h>
@@ -69,6 +69,7 @@ struct cc_hash_alg {
69struct hash_key_req_ctx { 69struct hash_key_req_ctx {
70 u32 keylen; 70 u32 keylen;
71 dma_addr_t key_dma_addr; 71 dma_addr_t key_dma_addr;
72 u8 *key;
72}; 73};
73 74
74/* hash per-session context */ 75/* hash per-session context */
@@ -280,9 +281,13 @@ static void cc_update_complete(struct device *dev, void *cc_req, int err)
280 281
281 dev_dbg(dev, "req=%pK\n", req); 282 dev_dbg(dev, "req=%pK\n", req);
282 283
283 cc_unmap_hash_request(dev, state, req->src, false); 284 if (err != -EINPROGRESS) {
284 cc_unmap_req(dev, state, ctx); 285 /* Not a BACKLOG notification */
285 req->base.complete(&req->base, err); 286 cc_unmap_hash_request(dev, state, req->src, false);
287 cc_unmap_req(dev, state, ctx);
288 }
289
290 ahash_request_complete(req, err);
286} 291}
287 292
288static void cc_digest_complete(struct device *dev, void *cc_req, int err) 293static void cc_digest_complete(struct device *dev, void *cc_req, int err)
@@ -295,10 +300,14 @@ static void cc_digest_complete(struct device *dev, void *cc_req, int err)
295 300
296 dev_dbg(dev, "req=%pK\n", req); 301 dev_dbg(dev, "req=%pK\n", req);
297 302
298 cc_unmap_hash_request(dev, state, req->src, false); 303 if (err != -EINPROGRESS) {
299 cc_unmap_result(dev, state, digestsize, req->result); 304 /* Not a BACKLOG notification */
300 cc_unmap_req(dev, state, ctx); 305 cc_unmap_hash_request(dev, state, req->src, false);
301 req->base.complete(&req->base, err); 306 cc_unmap_result(dev, state, digestsize, req->result);
307 cc_unmap_req(dev, state, ctx);
308 }
309
310 ahash_request_complete(req, err);
302} 311}
303 312
304static void cc_hash_complete(struct device *dev, void *cc_req, int err) 313static void cc_hash_complete(struct device *dev, void *cc_req, int err)
@@ -311,10 +320,14 @@ static void cc_hash_complete(struct device *dev, void *cc_req, int err)
311 320
312 dev_dbg(dev, "req=%pK\n", req); 321 dev_dbg(dev, "req=%pK\n", req);
313 322
314 cc_unmap_hash_request(dev, state, req->src, false); 323 if (err != -EINPROGRESS) {
315 cc_unmap_result(dev, state, digestsize, req->result); 324 /* Not a BACKLOG notification */
316 cc_unmap_req(dev, state, ctx); 325 cc_unmap_hash_request(dev, state, req->src, false);
317 req->base.complete(&req->base, err); 326 cc_unmap_result(dev, state, digestsize, req->result);
327 cc_unmap_req(dev, state, ctx);
328 }
329
330 ahash_request_complete(req, err);
318} 331}
319 332
320static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req, 333static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
@@ -730,13 +743,20 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
730 ctx->key_params.keylen = keylen; 743 ctx->key_params.keylen = keylen;
731 ctx->key_params.key_dma_addr = 0; 744 ctx->key_params.key_dma_addr = 0;
732 ctx->is_hmac = true; 745 ctx->is_hmac = true;
746 ctx->key_params.key = NULL;
733 747
734 if (keylen) { 748 if (keylen) {
749 ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
750 if (!ctx->key_params.key)
751 return -ENOMEM;
752
735 ctx->key_params.key_dma_addr = 753 ctx->key_params.key_dma_addr =
736 dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE); 754 dma_map_single(dev, (void *)ctx->key_params.key, keylen,
755 DMA_TO_DEVICE);
737 if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) { 756 if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
738 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", 757 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
739 key, keylen); 758 ctx->key_params.key, keylen);
759 kzfree(ctx->key_params.key);
740 return -ENOMEM; 760 return -ENOMEM;
741 } 761 }
742 dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n", 762 dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -887,6 +907,9 @@ out:
887 dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n", 907 dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
888 &ctx->key_params.key_dma_addr, ctx->key_params.keylen); 908 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
889 } 909 }
910
911 kzfree(ctx->key_params.key);
912
890 return rc; 913 return rc;
891} 914}
892 915
@@ -913,11 +936,16 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
913 936
914 ctx->key_params.keylen = keylen; 937 ctx->key_params.keylen = keylen;
915 938
939 ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
940 if (!ctx->key_params.key)
941 return -ENOMEM;
942
916 ctx->key_params.key_dma_addr = 943 ctx->key_params.key_dma_addr =
917 dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE); 944 dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
918 if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) { 945 if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
919 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", 946 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
920 key, keylen); 947 key, keylen);
948 kzfree(ctx->key_params.key);
921 return -ENOMEM; 949 return -ENOMEM;
922 } 950 }
923 dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n", 951 dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -969,6 +997,8 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
969 dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n", 997 dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
970 &ctx->key_params.key_dma_addr, ctx->key_params.keylen); 998 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
971 999
1000 kzfree(ctx->key_params.key);
1001
972 return rc; 1002 return rc;
973} 1003}
974 1004
@@ -1621,7 +1651,7 @@ static struct cc_hash_template driver_hash[] = {
1621 .setkey = cc_hash_setkey, 1651 .setkey = cc_hash_setkey,
1622 .halg = { 1652 .halg = {
1623 .digestsize = SHA224_DIGEST_SIZE, 1653 .digestsize = SHA224_DIGEST_SIZE,
1624 .statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE), 1654 .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
1625 }, 1655 },
1626 }, 1656 },
1627 .hash_mode = DRV_HASH_SHA224, 1657 .hash_mode = DRV_HASH_SHA224,
@@ -1648,7 +1678,7 @@ static struct cc_hash_template driver_hash[] = {
1648 .setkey = cc_hash_setkey, 1678 .setkey = cc_hash_setkey,
1649 .halg = { 1679 .halg = {
1650 .digestsize = SHA384_DIGEST_SIZE, 1680 .digestsize = SHA384_DIGEST_SIZE,
1651 .statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE), 1681 .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1652 }, 1682 },
1653 }, 1683 },
1654 .hash_mode = DRV_HASH_SHA384, 1684 .hash_mode = DRV_HASH_SHA384,
diff --git a/drivers/crypto/ccree/cc_hash.h b/drivers/crypto/ccree/cc_hash.h
index 2e5bf8b0bbb6..0d6dc61484d7 100644
--- a/drivers/crypto/ccree/cc_hash.h
+++ b/drivers/crypto/ccree/cc_hash.h
@@ -1,5 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4/* \file cc_hash.h 4/* \file cc_hash.h
5 * ARM CryptoCell Hash Crypto API 5 * ARM CryptoCell Hash Crypto API
diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h
index 616b2e1c41ba..d0764147573f 100644
--- a/drivers/crypto/ccree/cc_host_regs.h
+++ b/drivers/crypto/ccree/cc_host_regs.h
@@ -1,5 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
3 3
4#ifndef __CC_HOST_H__ 4#ifndef __CC_HOST_H__
5#define __CC_HOST_H__ 5#define __CC_HOST_H__
@@ -7,33 +7,102 @@
7// -------------------------------------- 7// --------------------------------------
8// BLOCK: HOST_P 8// BLOCK: HOST_P
9// -------------------------------------- 9// --------------------------------------
10
11
12/* IRR */
10#define CC_HOST_IRR_REG_OFFSET 0xA00UL 13#define CC_HOST_IRR_REG_OFFSET 0xA00UL
14#define CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT 0x1UL
15#define CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SIZE 0x1UL
11#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 0x2UL 16#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 0x2UL
12#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 0x1UL 17#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 0x1UL
18#define CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT 0x3UL
19#define CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SIZE 0x1UL
20#define CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT 0x4UL
21#define CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SIZE 0x1UL
22#define CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT 0x5UL
23#define CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SIZE 0x1UL
24#define CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT 0x6UL
25#define CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SIZE 0x1UL
26#define CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT 0x7UL
27#define CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SIZE 0x1UL
13#define CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL 28#define CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL
14#define CC_HOST_IRR_AXI_ERR_INT_BIT_SIZE 0x1UL 29#define CC_HOST_IRR_AXI_ERR_INT_BIT_SIZE 0x1UL
30#define CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT 0x9UL
31#define CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SIZE 0x1UL
32#define CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT 0xAUL
33#define CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SIZE 0x1UL
15#define CC_HOST_IRR_GPR0_BIT_SHIFT 0xBUL 34#define CC_HOST_IRR_GPR0_BIT_SHIFT 0xBUL
16#define CC_HOST_IRR_GPR0_BIT_SIZE 0x1UL 35#define CC_HOST_IRR_GPR0_BIT_SIZE 0x1UL
36#define CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT 0xCUL
37#define CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SIZE 0x1UL
38#define CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT 0xDUL
39#define CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SIZE 0x1UL
40#define CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT 0xEUL
41#define CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SIZE 0x1UL
42#define CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT 0xFUL
43#define CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SIZE 0x1UL
44#define CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT 0x10UL
45#define CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SIZE 0x1UL
46#define CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT 0x11UL
47#define CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SIZE 0x1UL
48#define CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT 0x12UL
49#define CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SIZE 0x1UL
17#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT 0x13UL 50#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT 0x13UL
18#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 0x1UL 51#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 0x1UL
52#define CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT 0x14UL
53#define CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SIZE 0x1UL
19#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 0x17UL 54#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 0x17UL
20#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 0x1UL 55#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 0x1UL
21#define CC_HOST_SEP_SRAM_THRESHOLD_REG_OFFSET 0xA10UL 56#define CC_HOST_SEP_SRAM_THRESHOLD_REG_OFFSET 0xA10UL
22#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SHIFT 0x0UL 57#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SHIFT 0x0UL
23#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SIZE 0xCUL 58#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SIZE 0xCUL
24#define CC_HOST_IMR_REG_OFFSET 0xA04UL 59
25#define CC_HOST_IMR_NOT_USED_MASK_BIT_SHIFT 0x1UL 60/* IMR */
26#define CC_HOST_IMR_NOT_USED_MASK_BIT_SIZE 0x1UL 61#define CC_HOST_IMR_REG_OFFSET 0x0A04UL
62#define CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT 0x1UL
63#define CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SIZE 0x1UL
27#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 0x2UL 64#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 0x2UL
28#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE 0x1UL 65#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE 0x1UL
66#define CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT 0x3UL
67#define CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SIZE 0x1UL
68#define CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT 0x4UL
69#define CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SIZE 0x1UL
70#define CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT 0x5UL
71#define CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SIZE 0x1UL
72#define CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT 0x6UL
73#define CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SIZE 0x1UL
74#define CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT 0x7UL
75#define CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SIZE 0x1UL
29#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT 0x8UL 76#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT 0x8UL
30#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SIZE 0x1UL 77#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SIZE 0x1UL
78#define CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT 0x9UL
79#define CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SIZE 0x1UL
80#define CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT 0xAUL
81#define CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SIZE 0x1UL
31#define CC_HOST_IMR_GPR0_BIT_SHIFT 0xBUL 82#define CC_HOST_IMR_GPR0_BIT_SHIFT 0xBUL
32#define CC_HOST_IMR_GPR0_BIT_SIZE 0x1UL 83#define CC_HOST_IMR_GPR0_BIT_SIZE 0x1UL
84#define CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT 0xCUL
85#define CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SIZE 0x1UL
86#define CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT 0xDUL
87#define CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SIZE 0x1UL
88#define CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT 0xEUL
89#define CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SIZE 0x1UL
90#define CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT 0xFUL
91#define CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SIZE 0x1UL
92#define CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT 0x10UL
93#define CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SIZE 0x1UL
94#define CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT 0x11UL
95#define CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SIZE 0x1UL
96#define CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT 0x12UL
97#define CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SIZE 0x1UL
33#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT 0x13UL 98#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT 0x13UL
34#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 0x1UL 99#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 0x1UL
100#define CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT 0x14UL
101#define CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SIZE 0x1UL
35#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 0x17UL 102#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 0x17UL
36#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 0x1UL 103#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 0x1UL
104
105/* ICR */
37#define CC_HOST_ICR_REG_OFFSET 0xA08UL 106#define CC_HOST_ICR_REG_OFFSET 0xA08UL
38#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 0x2UL 107#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 0x2UL
39#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 0x1UL 108#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 0x1UL
@@ -45,6 +114,9 @@
45#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL 114#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
46#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL 115#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
47#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL 116#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
117#define CC_SECURITY_DISABLED_REG_OFFSET 0x0A1CUL
118#define CC_SECURITY_DISABLED_VALUE_BIT_SHIFT 0x0UL
119#define CC_SECURITY_DISABLED_VALUE_BIT_SIZE 0x1UL
48#define CC_HOST_SIGNATURE_712_REG_OFFSET 0xA24UL 120#define CC_HOST_SIGNATURE_712_REG_OFFSET 0xA24UL
49#define CC_HOST_SIGNATURE_630_REG_OFFSET 0xAC8UL 121#define CC_HOST_SIGNATURE_630_REG_OFFSET 0xAC8UL
50#define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL 122#define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL
@@ -132,6 +204,49 @@
132#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL 204#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
133#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL 205#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
134// -------------------------------------- 206// --------------------------------------
207// BLOCK: ID_REGISTERS
208// --------------------------------------
209#define CC_PERIPHERAL_ID_4_REG_OFFSET 0x0FD0UL
210#define CC_PERIPHERAL_ID_4_VALUE_BIT_SHIFT 0x0UL
211#define CC_PERIPHERAL_ID_4_VALUE_BIT_SIZE 0x4UL
212#define CC_PIDRESERVED0_REG_OFFSET 0x0FD4UL
213#define CC_PIDRESERVED1_REG_OFFSET 0x0FD8UL
214#define CC_PIDRESERVED2_REG_OFFSET 0x0FDCUL
215#define CC_PERIPHERAL_ID_0_REG_OFFSET 0x0FE0UL
216#define CC_PERIPHERAL_ID_0_VALUE_BIT_SHIFT 0x0UL
217#define CC_PERIPHERAL_ID_0_VALUE_BIT_SIZE 0x8UL
218#define CC_PERIPHERAL_ID_1_REG_OFFSET 0x0FE4UL
219#define CC_PERIPHERAL_ID_1_PART_1_BIT_SHIFT 0x0UL
220#define CC_PERIPHERAL_ID_1_PART_1_BIT_SIZE 0x4UL
221#define CC_PERIPHERAL_ID_1_DES_0_JEP106_BIT_SHIFT 0x4UL
222#define CC_PERIPHERAL_ID_1_DES_0_JEP106_BIT_SIZE 0x4UL
223#define CC_PERIPHERAL_ID_2_REG_OFFSET 0x0FE8UL
224#define CC_PERIPHERAL_ID_2_DES_1_JEP106_BIT_SHIFT 0x0UL
225#define CC_PERIPHERAL_ID_2_DES_1_JEP106_BIT_SIZE 0x3UL
226#define CC_PERIPHERAL_ID_2_JEDEC_BIT_SHIFT 0x3UL
227#define CC_PERIPHERAL_ID_2_JEDEC_BIT_SIZE 0x1UL
228#define CC_PERIPHERAL_ID_2_REVISION_BIT_SHIFT 0x4UL
229#define CC_PERIPHERAL_ID_2_REVISION_BIT_SIZE 0x4UL
230#define CC_PERIPHERAL_ID_3_REG_OFFSET 0x0FECUL
231#define CC_PERIPHERAL_ID_3_CMOD_BIT_SHIFT 0x0UL
232#define CC_PERIPHERAL_ID_3_CMOD_BIT_SIZE 0x4UL
233#define CC_PERIPHERAL_ID_3_REVAND_BIT_SHIFT 0x4UL
234#define CC_PERIPHERAL_ID_3_REVAND_BIT_SIZE 0x4UL
235#define CC_COMPONENT_ID_0_REG_OFFSET 0x0FF0UL
236#define CC_COMPONENT_ID_0_VALUE_BIT_SHIFT 0x0UL
237#define CC_COMPONENT_ID_0_VALUE_BIT_SIZE 0x8UL
238#define CC_COMPONENT_ID_1_REG_OFFSET 0x0FF4UL
239#define CC_COMPONENT_ID_1_PRMBL_1_BIT_SHIFT 0x0UL
240#define CC_COMPONENT_ID_1_PRMBL_1_BIT_SIZE 0x4UL
241#define CC_COMPONENT_ID_1_CLASS_BIT_SHIFT 0x4UL
242#define CC_COMPONENT_ID_1_CLASS_BIT_SIZE 0x4UL
243#define CC_COMPONENT_ID_2_REG_OFFSET 0x0FF8UL
244#define CC_COMPONENT_ID_2_VALUE_BIT_SHIFT 0x0UL
245#define CC_COMPONENT_ID_2_VALUE_BIT_SIZE 0x8UL
246#define CC_COMPONENT_ID_3_REG_OFFSET 0x0FFCUL
247#define CC_COMPONENT_ID_3_VALUE_BIT_SHIFT 0x0UL
248#define CC_COMPONENT_ID_3_VALUE_BIT_SIZE 0x8UL
249// --------------------------------------
135// BLOCK: HOST_SRAM 250// BLOCK: HOST_SRAM
136// -------------------------------------- 251// --------------------------------------
137#define CC_SRAM_DATA_REG_OFFSET 0xF00UL 252#define CC_SRAM_DATA_REG_OFFSET 0xF00UL
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
index 7a9b90db7db7..9f4db9956e91 100644
--- a/drivers/crypto/ccree/cc_hw_queue_defs.h
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -1,5 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4#ifndef __CC_HW_QUEUE_DEFS_H__ 4#ifndef __CC_HW_QUEUE_DEFS_H__
5#define __CC_HW_QUEUE_DEFS_H__ 5#define __CC_HW_QUEUE_DEFS_H__
@@ -28,11 +28,13 @@
28 GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name)) 28 GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name))
29 29
30#define WORD0_VALUE CC_GENMASK(0, VALUE) 30#define WORD0_VALUE CC_GENMASK(0, VALUE)
31#define WORD0_CPP_CIPHER_MODE CC_GENMASK(0, CPP_CIPHER_MODE)
31#define WORD1_DIN_CONST_VALUE CC_GENMASK(1, DIN_CONST_VALUE) 32#define WORD1_DIN_CONST_VALUE CC_GENMASK(1, DIN_CONST_VALUE)
32#define WORD1_DIN_DMA_MODE CC_GENMASK(1, DIN_DMA_MODE) 33#define WORD1_DIN_DMA_MODE CC_GENMASK(1, DIN_DMA_MODE)
33#define WORD1_DIN_SIZE CC_GENMASK(1, DIN_SIZE) 34#define WORD1_DIN_SIZE CC_GENMASK(1, DIN_SIZE)
34#define WORD1_NOT_LAST CC_GENMASK(1, NOT_LAST) 35#define WORD1_NOT_LAST CC_GENMASK(1, NOT_LAST)
35#define WORD1_NS_BIT CC_GENMASK(1, NS_BIT) 36#define WORD1_NS_BIT CC_GENMASK(1, NS_BIT)
37#define WORD1_LOCK_QUEUE CC_GENMASK(1, LOCK_QUEUE)
36#define WORD2_VALUE CC_GENMASK(2, VALUE) 38#define WORD2_VALUE CC_GENMASK(2, VALUE)
37#define WORD3_DOUT_DMA_MODE CC_GENMASK(3, DOUT_DMA_MODE) 39#define WORD3_DOUT_DMA_MODE CC_GENMASK(3, DOUT_DMA_MODE)
38#define WORD3_DOUT_LAST_IND CC_GENMASK(3, DOUT_LAST_IND) 40#define WORD3_DOUT_LAST_IND CC_GENMASK(3, DOUT_LAST_IND)
@@ -176,6 +178,15 @@ enum cc_hw_crypto_key {
176 END_OF_KEYS = S32_MAX, 178 END_OF_KEYS = S32_MAX,
177}; 179};
178 180
181#define CC_NUM_HW_KEY_SLOTS 4
182#define CC_FIRST_HW_KEY_SLOT 0
183#define CC_LAST_HW_KEY_SLOT (CC_FIRST_HW_KEY_SLOT + CC_NUM_HW_KEY_SLOTS - 1)
184
185#define CC_NUM_CPP_KEY_SLOTS 8
186#define CC_FIRST_CPP_KEY_SLOT 16
187#define CC_LAST_CPP_KEY_SLOT (CC_FIRST_CPP_KEY_SLOT + \
188 CC_NUM_CPP_KEY_SLOTS - 1)
189
179enum cc_hw_aes_key_size { 190enum cc_hw_aes_key_size {
180 AES_128_KEY = 0, 191 AES_128_KEY = 0,
181 AES_192_KEY = 1, 192 AES_192_KEY = 1,
@@ -189,6 +200,9 @@ enum cc_hash_cipher_pad {
189 HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX, 200 HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
190}; 201};
191 202
203#define CC_CPP_DIN_ADDR 0xFF00FF00UL
204#define CC_CPP_DIN_SIZE 0xFF00FFUL
205
192/*****************************/ 206/*****************************/
193/* Descriptor packing macros */ 207/* Descriptor packing macros */
194/*****************************/ 208/*****************************/
@@ -249,6 +263,25 @@ static inline void set_din_no_dma(struct cc_hw_desc *pdesc, u32 addr, u32 size)
249} 263}
250 264
251/* 265/*
266 * Setup the special CPP descriptor
267 *
 268 * @pdesc: pointer to HW descriptor struct
 269 * @slot: CPP key slot number
273 */
274static inline void set_cpp_crypto_key(struct cc_hw_desc *pdesc, u8 slot)
275{
276 pdesc->word[0] |= CC_CPP_DIN_ADDR;
277
278 pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, CC_CPP_DIN_SIZE);
279 pdesc->word[1] |= FIELD_PREP(WORD1_LOCK_QUEUE, 1);
280
281 pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, slot);
282}
283
284/*
252 * Set the DIN field of a HW descriptors to SRAM mode. 285 * Set the DIN field of a HW descriptors to SRAM mode.
253 * Note: No need to check SRAM alignment since host requests do not use SRAM and 286 * Note: No need to check SRAM alignment since host requests do not use SRAM and
254 * adaptor will enforce alignment check. 287 * adaptor will enforce alignment check.
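set_cpp_crypto_key() packs the magic CPP DIN address/size, the queue-lock bit and the slot number into the descriptor words with FIELD_PREP(). For readers unfamiliar with the bitfield helpers, here is a standalone equivalent of FIELD_PREP over a mask; the mask below is illustrative, since the real WORD4_SETUP_OPERATION layout is not part of this patch:

#include <stdint.h>

/* Minimal stand-ins for GENMASK()/FIELD_PREP() from linux/bits.h and
 * linux/bitfield.h: shift the value up to the mask's lowest set bit. */
#define GENMASK32(h, l) (((~0U) << (l)) & (~0U >> (31 - (h))))

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	unsigned int shift = 0;

	while (!((mask >> shift) & 1))
		shift++;
	return (val << shift) & mask;
}

int main(void)
{
	/* Illustrative only: pack a 5-bit "setup operation" field at
	 * bits 24..28 of word 4, the way WORD4_SETUP_OPERATION would. */
	uint32_t word4 = 0;

	word4 |= field_prep(GENMASK32(28, 24), 7 /* slot */);
	return word4 == (7U << 24) ? 0 : 1;
}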
diff --git a/drivers/crypto/ccree/cc_ivgen.c b/drivers/crypto/ccree/cc_ivgen.c
index 769458323394..99dc69383e20 100644
--- a/drivers/crypto/ccree/cc_ivgen.c
+++ b/drivers/crypto/ccree/cc_ivgen.c
@@ -1,5 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4#include <crypto/ctr.h> 4#include <crypto/ctr.h>
5#include "cc_driver.h" 5#include "cc_driver.h"
@@ -154,9 +154,6 @@ void cc_ivgen_fini(struct cc_drvdata *drvdata)
154 } 154 }
155 155
156 ivgen_ctx->pool = NULL_SRAM_ADDR; 156 ivgen_ctx->pool = NULL_SRAM_ADDR;
157
158 /* release "this" context */
159 kfree(ivgen_ctx);
160} 157}
161 158
162/*! 159/*!
@@ -174,10 +171,12 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
174 int rc; 171 int rc;
175 172
176 /* Allocate "this" context */ 173 /* Allocate "this" context */
177 ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL); 174 ivgen_ctx = devm_kzalloc(device, sizeof(*ivgen_ctx), GFP_KERNEL);
178 if (!ivgen_ctx) 175 if (!ivgen_ctx)
179 return -ENOMEM; 176 return -ENOMEM;
180 177
178 drvdata->ivgen_handle = ivgen_ctx;
179
181 /* Allocate pool's header for initial enc. key/IV */ 180 /* Allocate pool's header for initial enc. key/IV */
182 ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE, 181 ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
183 &ivgen_ctx->pool_meta_dma, 182 &ivgen_ctx->pool_meta_dma,
@@ -196,8 +195,6 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
196 goto out; 195 goto out;
197 } 196 }
198 197
199 drvdata->ivgen_handle = ivgen_ctx;
200
201 return cc_init_iv_sram(drvdata); 198 return cc_init_iv_sram(drvdata);
202 199
203out: 200out:
diff --git a/drivers/crypto/ccree/cc_ivgen.h b/drivers/crypto/ccree/cc_ivgen.h
index b6ac16903dda..a9f5e8bba4f1 100644
--- a/drivers/crypto/ccree/cc_ivgen.h
+++ b/drivers/crypto/ccree/cc_ivgen.h
@@ -1,5 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4#ifndef __CC_IVGEN_H__ 4#ifndef __CC_IVGEN_H__
5#define __CC_IVGEN_H__ 5#define __CC_IVGEN_H__
diff --git a/drivers/crypto/ccree/cc_kernel_regs.h b/drivers/crypto/ccree/cc_kernel_regs.h
index 8d7262a35156..582bae450596 100644
--- a/drivers/crypto/ccree/cc_kernel_regs.h
+++ b/drivers/crypto/ccree/cc_kernel_regs.h
@@ -1,5 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4#ifndef __CC_CRYS_KERNEL_H__ 4#ifndef __CC_CRYS_KERNEL_H__
5#define __CC_CRYS_KERNEL_H__ 5#define __CC_CRYS_KERNEL_H__
diff --git a/drivers/crypto/ccree/cc_lli_defs.h b/drivers/crypto/ccree/cc_lli_defs.h
index 64b15ac9f1d3..f891ab813f41 100644
--- a/drivers/crypto/ccree/cc_lli_defs.h
+++ b/drivers/crypto/ccree/cc_lli_defs.h
@@ -1,5 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4#ifndef _CC_LLI_DEFS_H_ 4#ifndef _CC_LLI_DEFS_H_
5#define _CC_LLI_DEFS_H_ 5#define _CC_LLI_DEFS_H_
@@ -14,7 +14,7 @@
14#define CC_MAX_MLLI_ENTRY_SIZE 0xFFFF 14#define CC_MAX_MLLI_ENTRY_SIZE 0xFFFF
15 15
16#define LLI_MAX_NUM_OF_DATA_ENTRIES 128 16#define LLI_MAX_NUM_OF_DATA_ENTRIES 128
17#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4 17#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 8
18#define MLLI_TABLE_MIN_ALIGNMENT 4 /* 32 bit alignment */ 18#define MLLI_TABLE_MIN_ALIGNMENT 4 /* 32 bit alignment */
19#define MAX_NUM_OF_BUFFERS_IN_MLLI 4 19#define MAX_NUM_OF_BUFFERS_IN_MLLI 4
20#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES \ 20#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES \
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 6ff7e75ad90e..2dad9c9543c6 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -1,5 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <linux/interrupt.h> 5#include <linux/interrupt.h>
@@ -11,6 +11,7 @@
11#include "cc_ivgen.h" 11#include "cc_ivgen.h"
12#include "cc_hash.h" 12#include "cc_hash.h"
13#include "cc_pm.h" 13#include "cc_pm.h"
14#include "cc_fips.h"
14 15
15#define POWER_DOWN_ENABLE 0x01 16#define POWER_DOWN_ENABLE 0x01
16#define POWER_DOWN_DISABLE 0x00 17#define POWER_DOWN_DISABLE 0x00
@@ -25,13 +26,13 @@ int cc_pm_suspend(struct device *dev)
25 int rc; 26 int rc;
26 27
27 dev_dbg(dev, "set HOST_POWER_DOWN_EN\n"); 28 dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
28 cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
29 rc = cc_suspend_req_queue(drvdata); 29 rc = cc_suspend_req_queue(drvdata);
30 if (rc) { 30 if (rc) {
31 dev_err(dev, "cc_suspend_req_queue (%x)\n", rc); 31 dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
32 return rc; 32 return rc;
33 } 33 }
34 fini_cc_regs(drvdata); 34 fini_cc_regs(drvdata);
35 cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
35 cc_clk_off(drvdata); 36 cc_clk_off(drvdata);
36 return 0; 37 return 0;
37} 38}
@@ -42,19 +43,21 @@ int cc_pm_resume(struct device *dev)
42 struct cc_drvdata *drvdata = dev_get_drvdata(dev); 43 struct cc_drvdata *drvdata = dev_get_drvdata(dev);
43 44
44 dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n"); 45 dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
45 cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE); 46 /* Enables the device source clk */
46
47 rc = cc_clk_on(drvdata); 47 rc = cc_clk_on(drvdata);
48 if (rc) { 48 if (rc) {
49 dev_err(dev, "failed getting clock back on. We're toast.\n"); 49 dev_err(dev, "failed getting clock back on. We're toast.\n");
50 return rc; 50 return rc;
51 } 51 }
52 52
53 cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
53 rc = init_cc_regs(drvdata, false); 54 rc = init_cc_regs(drvdata, false);
54 if (rc) { 55 if (rc) {
55 dev_err(dev, "init_cc_regs (%x)\n", rc); 56 dev_err(dev, "init_cc_regs (%x)\n", rc);
56 return rc; 57 return rc;
57 } 58 }
59 /* check if tee fips error occurred during power down */
60 cc_tee_handle_fips_error(drvdata);
58 61
59 rc = cc_resume_req_queue(drvdata); 62 rc = cc_resume_req_queue(drvdata);
60 if (rc) { 63 if (rc) {
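The ordering is the point of this hunk: on suspend the request queue is
quiesced and the registers finalized before HOST_POWER_DOWN_EN is
asserted, and on resume the clock comes back before any register
access, with cc_tee_handle_fips_error() catching a TEE FIPS error that
may have occurred while powered down. Condensed from the patched
cc_pm_suspend():

	rc = cc_suspend_req_queue(drvdata);	/* stop in-flight work first */
	if (rc)
		return rc;
	fini_cc_regs(drvdata);			/* then quiesce the registers */
	cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
	cc_clk_off(drvdata);			/* finally gate the clock */
	return 0;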
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index 907a6db4d6c0..6190cdba5dad 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -1,5 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4/* \file cc_pm.h 4/* \file cc_pm.h
5 */ 5 */
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index 83a8aaae61c7..0bc6ccb0b899 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -1,7 +1,8 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <linux/nospec.h>
5#include "cc_driver.h" 6#include "cc_driver.h"
6#include "cc_buffer_mgr.h" 7#include "cc_buffer_mgr.h"
7#include "cc_request_mgr.h" 8#include "cc_request_mgr.h"
@@ -52,11 +53,38 @@ struct cc_bl_item {
52 bool notif; 53 bool notif;
53}; 54};
54 55
56static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = {
57 { BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT),
58 BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT),
59 BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT),
60 BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT),
61 BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT),
62 BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT),
63 BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT),
64 BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) },
65 { BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT),
66 BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT),
67 BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT),
68 BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT),
69 BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT),
70 BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT),
71 BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT),
72 BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) }
73};
74
55static void comp_handler(unsigned long devarg); 75static void comp_handler(unsigned long devarg);
56#ifdef COMP_IN_WQ 76#ifdef COMP_IN_WQ
57static void comp_work_handler(struct work_struct *work); 77static void comp_work_handler(struct work_struct *work);
58#endif 78#endif
59 79
80static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
81{
82 alg = array_index_nospec(alg, CC_CPP_NUM_ALGS);
83 slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS);
84
85 return cc_cpp_int_masks[alg][slot];
86}
87
60void cc_req_mgr_fini(struct cc_drvdata *drvdata) 88void cc_req_mgr_fini(struct cc_drvdata *drvdata)
61{ 89{
62 struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; 90 struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
@@ -336,10 +364,12 @@ static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
336 struct cc_bl_item *bli) 364 struct cc_bl_item *bli)
337{ 365{
338 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; 366 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
367 struct device *dev = drvdata_to_dev(drvdata);
339 368
340 spin_lock_bh(&mgr->bl_lock); 369 spin_lock_bh(&mgr->bl_lock);
341 list_add_tail(&bli->list, &mgr->backlog); 370 list_add_tail(&bli->list, &mgr->backlog);
342 ++mgr->bl_len; 371 ++mgr->bl_len;
372 dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
343 spin_unlock_bh(&mgr->bl_lock); 373 spin_unlock_bh(&mgr->bl_lock);
344 tasklet_schedule(&mgr->comptask); 374 tasklet_schedule(&mgr->comptask);
345} 375}
@@ -349,7 +379,7 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
349 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; 379 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
350 struct cc_bl_item *bli; 380 struct cc_bl_item *bli;
351 struct cc_crypto_req *creq; 381 struct cc_crypto_req *creq;
352 struct crypto_async_request *req; 382 void *req;
353 bool ivgen; 383 bool ivgen;
354 unsigned int total_len; 384 unsigned int total_len;
355 struct device *dev = drvdata_to_dev(drvdata); 385 struct device *dev = drvdata_to_dev(drvdata);
@@ -359,17 +389,20 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
359 389
360 while (mgr->bl_len) { 390 while (mgr->bl_len) {
361 bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list); 391 bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
392 dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);
393
362 spin_unlock(&mgr->bl_lock); 394 spin_unlock(&mgr->bl_lock);
363 395
396
364 creq = &bli->creq; 397 creq = &bli->creq;
365 req = (struct crypto_async_request *)creq->user_arg; 398 req = creq->user_arg;
366 399
367 /* 400 /*
368 * Notify the request we're moving out of the backlog 401 * Notify the request we're moving out of the backlog
369 * but only if we haven't done so already. 402 * but only if we haven't done so already.
370 */ 403 */
371 if (!bli->notif) { 404 if (!bli->notif) {
372 req->complete(req, -EINPROGRESS); 405 creq->user_cb(dev, req, -EINPROGRESS);
373 bli->notif = true; 406 bli->notif = true;
374 } 407 }
375 408
@@ -579,6 +612,8 @@ static void proc_completions(struct cc_drvdata *drvdata)
579 drvdata->request_mgr_handle; 612 drvdata->request_mgr_handle;
580 unsigned int *tail = &request_mgr_handle->req_queue_tail; 613 unsigned int *tail = &request_mgr_handle->req_queue_tail;
581 unsigned int *head = &request_mgr_handle->req_queue_head; 614 unsigned int *head = &request_mgr_handle->req_queue_head;
615 int rc;
616 u32 mask;
582 617
583 while (request_mgr_handle->axi_completed) { 618 while (request_mgr_handle->axi_completed) {
584 request_mgr_handle->axi_completed--; 619 request_mgr_handle->axi_completed--;
@@ -596,8 +631,22 @@ static void proc_completions(struct cc_drvdata *drvdata)
596 631
597 cc_req = &request_mgr_handle->req_queue[*tail]; 632 cc_req = &request_mgr_handle->req_queue[*tail];
598 633
634 if (cc_req->cpp.is_cpp) {
635
636 dev_dbg(dev, "CPP request completion slot: %d alg:%d\n",
637 cc_req->cpp.slot, cc_req->cpp.alg);
638 mask = cc_cpp_int_mask(cc_req->cpp.alg,
639 cc_req->cpp.slot);
640 rc = (drvdata->irq & mask ? -EPERM : 0);
641 dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask,
642 drvdata->irq, rc);
643 } else {
644			dev_dbg(dev, "Non-CPP request completion\n");
645 rc = 0;
646 }
647
599 if (cc_req->user_cb) 648 if (cc_req->user_cb)
600 cc_req->user_cb(dev, cc_req->user_arg, 0); 649 cc_req->user_cb(dev, cc_req->user_arg, rc);
601 *tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1); 650 *tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
602 dev_dbg(dev, "Dequeue request tail=%u\n", *tail); 651 dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
603 dev_dbg(dev, "Request completed. axi_completed=%d\n", 652 dev_dbg(dev, "Request completed. axi_completed=%d\n",
@@ -618,47 +667,50 @@ static void comp_handler(unsigned long devarg)
618 struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg; 667 struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
619 struct cc_req_mgr_handle *request_mgr_handle = 668 struct cc_req_mgr_handle *request_mgr_handle =
620 drvdata->request_mgr_handle; 669 drvdata->request_mgr_handle;
621 670 struct device *dev = drvdata_to_dev(drvdata);
622 u32 irq; 671 u32 irq;
623 672
624 irq = (drvdata->irq & CC_COMP_IRQ_MASK); 673 dev_dbg(dev, "Completion handler called!\n");
674 irq = (drvdata->irq & drvdata->comp_mask);
625 675
626 if (irq & CC_COMP_IRQ_MASK) { 676 /* To avoid the interrupt from firing as we unmask it,
627 /* To avoid the interrupt from firing as we unmask it, 677 * we clear it now
628 * we clear it now 678 */
629 */ 679 cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
630 cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);
631 680
632 /* Avoid race with above clear: Test completion counter 681 /* Avoid race with above clear: Test completion counter once more */
633 * once more
634 */
635 request_mgr_handle->axi_completed +=
636 cc_axi_comp_count(drvdata);
637
638 while (request_mgr_handle->axi_completed) {
639 do {
640 proc_completions(drvdata);
641 /* At this point (after proc_completions()),
642 * request_mgr_handle->axi_completed is 0.
643 */
644 request_mgr_handle->axi_completed =
645 cc_axi_comp_count(drvdata);
646 } while (request_mgr_handle->axi_completed > 0);
647 682
648 cc_iowrite(drvdata, CC_REG(HOST_ICR), 683 request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
649 CC_COMP_IRQ_MASK); 684
685	dev_dbg(dev, "AXI completion after update: %d\n",
686 request_mgr_handle->axi_completed);
687
688 while (request_mgr_handle->axi_completed) {
689 do {
690 drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR));
691 irq = (drvdata->irq & drvdata->comp_mask);
692 proc_completions(drvdata);
650 693
694 /* At this point (after proc_completions()),
695 * request_mgr_handle->axi_completed is 0.
696 */
651 request_mgr_handle->axi_completed += 697 request_mgr_handle->axi_completed +=
652 cc_axi_comp_count(drvdata); 698 cc_axi_comp_count(drvdata);
653 } 699 } while (request_mgr_handle->axi_completed > 0);
700
701 cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
702
703 request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
654 } 704 }
705
655	/* after verifying that there is nothing to do, 706	 * unmask AXI completion interrupt
656 * unmask AXI completion interrupt 707 * unmask AXI completion interrupt
657 */ 708 */
658 cc_iowrite(drvdata, CC_REG(HOST_IMR), 709 cc_iowrite(drvdata, CC_REG(HOST_IMR),
659 cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq); 710 cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);
660 711
661 cc_proc_backlog(drvdata); 712 cc_proc_backlog(drvdata);
713 dev_dbg(dev, "Comp. handler done.\n");
662} 714}
663 715
664/* 716/*
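cc_cpp_int_mask() runs both indices through array_index_nospec() before
the table lookup, the standard Spectre-v1 mitigation: after a bounds
check the CPU may still execute the load speculatively with an
out-of-range index, so the index is clamped to the array size first.
The idiom in isolation, as a sketch:

	#include <linux/nospec.h>

	static u32 masked_lookup(const u32 *table, size_t nr, size_t idx)
	{
		if (idx >= nr)
			return 0;
		idx = array_index_nospec(idx, nr); /* safe under misprediction */
		return table[idx];
	}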
diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h
index 573cb97af085..f46cf766fe4d 100644
--- a/drivers/crypto/ccree/cc_request_mgr.h
+++ b/drivers/crypto/ccree/cc_request_mgr.h
@@ -1,5 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4/* \file cc_request_mgr.h 4/* \file cc_request_mgr.h
5 * Request Manager 5 * Request Manager
diff --git a/drivers/crypto/ccree/cc_sram_mgr.c b/drivers/crypto/ccree/cc_sram_mgr.c
index c8c276f6dee9..62c885e6e791 100644
--- a/drivers/crypto/ccree/cc_sram_mgr.c
+++ b/drivers/crypto/ccree/cc_sram_mgr.c
@@ -1,5 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4#include "cc_driver.h" 4#include "cc_driver.h"
5#include "cc_sram_mgr.h" 5#include "cc_sram_mgr.h"
@@ -19,8 +19,7 @@ struct cc_sram_ctx {
19 */ 19 */
20void cc_sram_mgr_fini(struct cc_drvdata *drvdata) 20void cc_sram_mgr_fini(struct cc_drvdata *drvdata)
21{ 21{
22 /* Free "this" context */ 22 /* Nothing needed */
23 kfree(drvdata->sram_mgr_handle);
24} 23}
25 24
26/** 25/**
@@ -48,7 +47,7 @@ int cc_sram_mgr_init(struct cc_drvdata *drvdata)
48 } 47 }
49 48
50 /* Allocate "this" context */ 49 /* Allocate "this" context */
51 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 50 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
52 51
53 if (!ctx) 52 if (!ctx)
54 return -ENOMEM; 53 return -ENOMEM;
diff --git a/drivers/crypto/ccree/cc_sram_mgr.h b/drivers/crypto/ccree/cc_sram_mgr.h
index d48649fb3323..1d14de9ee8c3 100644
--- a/drivers/crypto/ccree/cc_sram_mgr.h
+++ b/drivers/crypto/ccree/cc_sram_mgr.h
@@ -1,5 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */ 2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
3 3
4#ifndef __CC_SRAM_MGR_H__ 4#ifndef __CC_SRAM_MGR_H__
5#define __CC_SRAM_MGR_H__ 5#define __CC_SRAM_MGR_H__
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 8d8cf80b9294..8a76fce22943 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -2130,7 +2130,6 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2130 * ipad in hmacctx->ipad and opad in hmacctx->opad location 2130 * ipad in hmacctx->ipad and opad in hmacctx->opad location
2131 */ 2131 */
2132 shash->tfm = hmacctx->base_hash; 2132 shash->tfm = hmacctx->base_hash;
2133 shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
2134 if (keylen > bs) { 2133 if (keylen > bs) {
2135 err = crypto_shash_digest(shash, key, keylen, 2134 err = crypto_shash_digest(shash, key, keylen,
2136 hmacctx->ipad); 2135 hmacctx->ipad);
@@ -3517,7 +3516,6 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3517 SHASH_DESC_ON_STACK(shash, base_hash); 3516 SHASH_DESC_ON_STACK(shash, base_hash);
3518 3517
3519 shash->tfm = base_hash; 3518 shash->tfm = base_hash;
3520 shash->flags = crypto_shash_get_flags(base_hash);
3521 bs = crypto_shash_blocksize(base_hash); 3519 bs = crypto_shash_blocksize(base_hash);
3522 align = KEYCTX_ALIGN_PAD(max_authsize); 3520 align = KEYCTX_ALIGN_PAD(max_authsize);
3523 o_ptr = actx->h_iopad + param.result_size + align; 3521 o_ptr = actx->h_iopad + param.result_size + align;
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index dad212cabe63..d656be0a142b 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -1976,6 +1976,29 @@ static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1976 return 0; 1976 return 0;
1977} 1977}
1978 1978
1979static int hifn_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1980 unsigned int len)
1981{
1982 struct hifn_context *ctx = crypto_ablkcipher_ctx(cipher);
1983 struct hifn_device *dev = ctx->dev;
1984 u32 flags;
1985 int err;
1986
1987 flags = crypto_ablkcipher_get_flags(cipher);
1988 err = __des3_verify_key(&flags, key);
1989 if (unlikely(err)) {
1990 crypto_ablkcipher_set_flags(cipher, flags);
1991 return err;
1992 }
1993
1994 dev->flags &= ~HIFN_FLAG_OLD_KEY;
1995
1996 memcpy(ctx->key, key, len);
1997 ctx->keysize = len;
1998
1999 return 0;
2000}
2001
1979static int hifn_handle_req(struct ablkcipher_request *req) 2002static int hifn_handle_req(struct ablkcipher_request *req)
1980{ 2003{
1981 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm); 2004 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -2240,7 +2263,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2240 .ablkcipher = { 2263 .ablkcipher = {
2241 .min_keysize = HIFN_3DES_KEY_LENGTH, 2264 .min_keysize = HIFN_3DES_KEY_LENGTH,
2242 .max_keysize = HIFN_3DES_KEY_LENGTH, 2265 .max_keysize = HIFN_3DES_KEY_LENGTH,
2243 .setkey = hifn_setkey, 2266 .setkey = hifn_des3_setkey,
2244 .encrypt = hifn_encrypt_3des_cfb, 2267 .encrypt = hifn_encrypt_3des_cfb,
2245 .decrypt = hifn_decrypt_3des_cfb, 2268 .decrypt = hifn_decrypt_3des_cfb,
2246 }, 2269 },
@@ -2250,7 +2273,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2250 .ablkcipher = { 2273 .ablkcipher = {
2251 .min_keysize = HIFN_3DES_KEY_LENGTH, 2274 .min_keysize = HIFN_3DES_KEY_LENGTH,
2252 .max_keysize = HIFN_3DES_KEY_LENGTH, 2275 .max_keysize = HIFN_3DES_KEY_LENGTH,
2253 .setkey = hifn_setkey, 2276 .setkey = hifn_des3_setkey,
2254 .encrypt = hifn_encrypt_3des_ofb, 2277 .encrypt = hifn_encrypt_3des_ofb,
2255 .decrypt = hifn_decrypt_3des_ofb, 2278 .decrypt = hifn_decrypt_3des_ofb,
2256 }, 2279 },
@@ -2261,7 +2284,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2261 .ivsize = HIFN_IV_LENGTH, 2284 .ivsize = HIFN_IV_LENGTH,
2262 .min_keysize = HIFN_3DES_KEY_LENGTH, 2285 .min_keysize = HIFN_3DES_KEY_LENGTH,
2263 .max_keysize = HIFN_3DES_KEY_LENGTH, 2286 .max_keysize = HIFN_3DES_KEY_LENGTH,
2264 .setkey = hifn_setkey, 2287 .setkey = hifn_des3_setkey,
2265 .encrypt = hifn_encrypt_3des_cbc, 2288 .encrypt = hifn_encrypt_3des_cbc,
2266 .decrypt = hifn_decrypt_3des_cbc, 2289 .decrypt = hifn_decrypt_3des_cbc,
2267 }, 2290 },
@@ -2271,7 +2294,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
2271 .ablkcipher = { 2294 .ablkcipher = {
2272 .min_keysize = HIFN_3DES_KEY_LENGTH, 2295 .min_keysize = HIFN_3DES_KEY_LENGTH,
2273 .max_keysize = HIFN_3DES_KEY_LENGTH, 2296 .max_keysize = HIFN_3DES_KEY_LENGTH,
2274 .setkey = hifn_setkey, 2297 .setkey = hifn_des3_setkey,
2275 .encrypt = hifn_encrypt_3des_ecb, 2298 .encrypt = hifn_encrypt_3des_ecb,
2276 .decrypt = hifn_decrypt_3des_ecb, 2299 .decrypt = hifn_decrypt_3des_ecb,
2277 }, 2300 },
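hifn_des3_setkey() is one instance of the pattern this series applies
across drivers: __des3_verify_key() rejects 3DES keys whose DES
sub-keys repeat (always in FIPS mode, per the 2-key 3DES ban in the
pull summary), updating the flags word so the caller can report the
reason. The bare prologue, as used here:

	u32 flags = crypto_ablkcipher_get_flags(cipher);
	int err = __des3_verify_key(&flags, key);

	if (unlikely(err)) {
		crypto_ablkcipher_set_flags(cipher, flags);
		return err;
	}
	/* key verified; safe to store and program into the HW */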
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index adc0cd8ae97b..02768af0dccd 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -365,20 +365,16 @@ static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
365static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm, 365static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
366 const u8 *key, unsigned int keylen) 366 const u8 *key, unsigned int keylen)
367{ 367{
368 if (keylen != DES_KEY_SIZE * 3) 368 return unlikely(des3_verify_key(tfm, key)) ?:
369 return -EINVAL; 369 sec_alg_skcipher_setkey(tfm, key, keylen,
370
371 return sec_alg_skcipher_setkey(tfm, key, keylen,
372 SEC_C_3DES_ECB_192_3KEY); 370 SEC_C_3DES_ECB_192_3KEY);
373} 371}
374 372
375static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm, 373static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
376 const u8 *key, unsigned int keylen) 374 const u8 *key, unsigned int keylen)
377{ 375{
378 if (keylen != DES3_EDE_KEY_SIZE) 376 return unlikely(des3_verify_key(tfm, key)) ?:
379 return -EINVAL; 377 sec_alg_skcipher_setkey(tfm, key, keylen,
380
381 return sec_alg_skcipher_setkey(tfm, key, keylen,
382 SEC_C_3DES_CBC_192_3KEY); 378 SEC_C_3DES_CBC_192_3KEY);
383} 379}
384 380
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 7ef30a98cb24..de4be10b172f 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -1039,13 +1039,12 @@ static int safexcel_cbc_des3_ede_decrypt(struct skcipher_request *req)
1039static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm, 1039static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
1040 const u8 *key, unsigned int len) 1040 const u8 *key, unsigned int len)
1041{ 1041{
1042 struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); 1042 struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
1043 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); 1043 int err;
1044 1044
1045 if (len != DES3_EDE_KEY_SIZE) { 1045 err = des3_verify_key(ctfm, key);
1046 crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 1046 if (unlikely(err))
1047 return -EINVAL; 1047 return err;
1048 }
1049 1048
1050	/* if context exists and key changed, need to invalidate it */ 1049	/* if context exists and key changed, need to invalidate it */
1051 if (ctx->base.ctxr_dma) { 1050 if (ctx->base.ctxr_dma) {
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 5c4659b04d70..9bbde2f26cac 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -758,14 +758,6 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
758 return -EINVAL; 758 return -EINVAL;
759 } 759 }
760 cipher_cfg |= keylen_cfg; 760 cipher_cfg |= keylen_cfg;
761 } else if (cipher_cfg & MOD_3DES) {
762 const u32 *K = (const u32 *)key;
763 if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
764 !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
765 {
766 *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
767 return -EINVAL;
768 }
769 } else { 761 } else {
770 u32 tmp[DES_EXPKEY_WORDS]; 762 u32 tmp[DES_EXPKEY_WORDS];
771 if (des_ekey(tmp, key) == 0) { 763 if (des_ekey(tmp, key) == 0) {
@@ -859,6 +851,19 @@ out:
859 return ret; 851 return ret;
860} 852}
861 853
854static int ablk_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
855 unsigned int key_len)
856{
857 u32 flags = crypto_ablkcipher_get_flags(tfm);
858 int err;
859
860 err = __des3_verify_key(&flags, key);
861 if (unlikely(err))
862 crypto_ablkcipher_set_flags(tfm, flags);
863
864 return ablk_setkey(tfm, key, key_len);
865}
866
862static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 867static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
863 unsigned int key_len) 868 unsigned int key_len)
864{ 869{
@@ -1175,6 +1180,43 @@ badkey:
1175 return -EINVAL; 1180 return -EINVAL;
1176} 1181}
1177 1182
1183static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
1184 unsigned int keylen)
1185{
1186 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1187 u32 flags = CRYPTO_TFM_RES_BAD_KEY_LEN;
1188 struct crypto_authenc_keys keys;
1189 int err;
1190
1191 err = crypto_authenc_extractkeys(&keys, key, keylen);
1192 if (unlikely(err))
1193 goto badkey;
1194
1195 err = -EINVAL;
1196 if (keys.authkeylen > sizeof(ctx->authkey))
1197 goto badkey;
1198
1199 if (keys.enckeylen != DES3_EDE_KEY_SIZE)
1200 goto badkey;
1201
1202 flags = crypto_aead_get_flags(tfm);
1203 err = __des3_verify_key(&flags, keys.enckey);
1204 if (unlikely(err))
1205 goto badkey;
1206
1207 memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
1208 memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
1209 ctx->authkey_len = keys.authkeylen;
1210 ctx->enckey_len = keys.enckeylen;
1211
1212 memzero_explicit(&keys, sizeof(keys));
1213 return aead_setup(tfm, crypto_aead_authsize(tfm));
1214badkey:
1215 crypto_aead_set_flags(tfm, flags);
1216 memzero_explicit(&keys, sizeof(keys));
1217 return err;
1218}
1219
1178static int aead_encrypt(struct aead_request *req) 1220static int aead_encrypt(struct aead_request *req)
1179{ 1221{
1180 return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv); 1222 return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
@@ -1220,6 +1262,7 @@ static struct ixp_alg ixp4xx_algos[] = {
1220 .min_keysize = DES3_EDE_KEY_SIZE, 1262 .min_keysize = DES3_EDE_KEY_SIZE,
1221 .max_keysize = DES3_EDE_KEY_SIZE, 1263 .max_keysize = DES3_EDE_KEY_SIZE,
1222 .ivsize = DES3_EDE_BLOCK_SIZE, 1264 .ivsize = DES3_EDE_BLOCK_SIZE,
1265 .setkey = ablk_des3_setkey,
1223 } 1266 }
1224 } 1267 }
1225 }, 1268 },
@@ -1232,6 +1275,7 @@ static struct ixp_alg ixp4xx_algos[] = {
1232 .cra_u = { .ablkcipher = { 1275 .cra_u = { .ablkcipher = {
1233 .min_keysize = DES3_EDE_KEY_SIZE, 1276 .min_keysize = DES3_EDE_KEY_SIZE,
1234 .max_keysize = DES3_EDE_KEY_SIZE, 1277 .max_keysize = DES3_EDE_KEY_SIZE,
1278 .setkey = ablk_des3_setkey,
1235 } 1279 }
1236 } 1280 }
1237 }, 1281 },
@@ -1313,6 +1357,7 @@ static struct ixp_aead_alg ixp4xx_aeads[] = {
1313 }, 1357 },
1314 .ivsize = DES3_EDE_BLOCK_SIZE, 1358 .ivsize = DES3_EDE_BLOCK_SIZE,
1315 .maxauthsize = MD5_DIGEST_SIZE, 1359 .maxauthsize = MD5_DIGEST_SIZE,
1360 .setkey = des3_aead_setkey,
1316 }, 1361 },
1317 .hash = &hash_alg_md5, 1362 .hash = &hash_alg_md5,
1318 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192, 1363 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
@@ -1337,6 +1382,7 @@ static struct ixp_aead_alg ixp4xx_aeads[] = {
1337 }, 1382 },
1338 .ivsize = DES3_EDE_BLOCK_SIZE, 1383 .ivsize = DES3_EDE_BLOCK_SIZE,
1339 .maxauthsize = SHA1_DIGEST_SIZE, 1384 .maxauthsize = SHA1_DIGEST_SIZE,
1385 .setkey = des3_aead_setkey,
1340 }, 1386 },
1341 .hash = &hash_alg_sha1, 1387 .hash = &hash_alg_sha1,
1342 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192, 1388 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
@@ -1443,7 +1489,7 @@ static int __init ixp_module_init(void)
1443 /* authenc */ 1489 /* authenc */
1444 cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | 1490 cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1445 CRYPTO_ALG_ASYNC; 1491 CRYPTO_ALG_ASYNC;
1446 cra->setkey = aead_setkey; 1492 cra->setkey = cra->setkey ?: aead_setkey;
1447 cra->setauthsize = aead_setauthsize; 1493 cra->setauthsize = aead_setauthsize;
1448 cra->encrypt = aead_encrypt; 1494 cra->encrypt = aead_encrypt;
1449 cra->decrypt = aead_decrypt; 1495 cra->decrypt = aead_decrypt;
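Two details in the ixp4xx hunks are worth noting: des3_aead_setkey()
wipes the parsed authenc keys with memzero_explicit() on both the
success and failure paths, and the registration loop uses the GNU
"a ?: b" shorthand so the new 3DES algorithms keep their dedicated
setkey while everything else falls back to the generic one:

	/* "a ?: b" evaluates a once and uses it unless it is NULL/0 */
	cra->setkey = cra->setkey ?: aead_setkey;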
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index fb279b3a1ca1..2fd936b19c6d 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -299,13 +299,12 @@ static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
299static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher, 299static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
300 const u8 *key, unsigned int len) 300 const u8 *key, unsigned int len)
301{ 301{
302 struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); 302 struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
303 struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm); 303 int err;
304 304
305 if (len != DES3_EDE_KEY_SIZE) { 305 err = des3_verify_key(cipher, key);
306 crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 306 if (unlikely(err))
307 return -EINVAL; 307 return err;
308 }
309 308
310 memcpy(ctx->key, key, DES3_EDE_KEY_SIZE); 309 memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);
311 310
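des3_verify_key() is the skcipher-level convenience wrapper around
__des3_verify_key(); a sketch of what it amounts to (the ablkcipher
drivers above open-code the same steps because that API has no such
wrapper):

	static inline int des3_verify_key(struct crypto_skcipher *tfm,
					  const u8 *key)
	{
		u32 flags = crypto_skcipher_get_flags(tfm);
		int err = __des3_verify_key(&flags, key);

		crypto_skcipher_set_flags(tfm, flags);
		return err;
	}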
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 99ff54cc8a15..fd456dd703bf 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -135,11 +135,10 @@ static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
135 135
136static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf) 136static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
137{ 137{
138 unsigned int index, padlen; 138 unsigned int padlen;
139 139
140 buf[0] = 0x80; 140 buf[0] = 0x80;
141 /* Pad out to 56 mod 64 */ 141 /* Pad out to 56 mod 64 */
142 index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
143 padlen = mv_cesa_ahash_pad_len(creq); 142 padlen = mv_cesa_ahash_pad_len(creq);
144 memset(buf + 1, 0, padlen - 1); 143 memset(buf + 1, 0, padlen - 1);
145 144
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c
index 5f4f845adbb8..a0806ba40c68 100644
--- a/drivers/crypto/mediatek/mtk-sha.c
+++ b/drivers/crypto/mediatek/mtk-sha.c
@@ -365,7 +365,6 @@ static int mtk_sha_finish_hmac(struct ahash_request *req)
365 SHASH_DESC_ON_STACK(shash, bctx->shash); 365 SHASH_DESC_ON_STACK(shash, bctx->shash);
366 366
367 shash->tfm = bctx->shash; 367 shash->tfm = bctx->shash;
368 shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
369 368
370 return crypto_shash_init(shash) ?: 369 return crypto_shash_init(shash) ?:
371 crypto_shash_update(shash, bctx->opad, ctx->bs) ?: 370 crypto_shash_update(shash, bctx->opad, ctx->bs) ?:
@@ -810,8 +809,6 @@ static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
810 SHASH_DESC_ON_STACK(shash, bctx->shash); 809 SHASH_DESC_ON_STACK(shash, bctx->shash);
811 810
812 shash->tfm = bctx->shash; 811 shash->tfm = bctx->shash;
813 shash->flags = crypto_shash_get_flags(bctx->shash) &
814 CRYPTO_TFM_REQ_MAY_SLEEP;
815 812
816 if (keylen > bs) { 813 if (keylen > bs) {
817 err = crypto_shash_digest(shash, key, keylen, bctx->ipad); 814 err = crypto_shash_digest(shash, key, keylen, bctx->ipad);
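The shash->flags assignments deleted here (and in chelsio, n2 and
others in this pull) follow from the removal of shash_desc::flags in
this cycle: the field's only meaningful bit was
CRYPTO_TFM_REQ_MAY_SLEEP and no shash implementation acted on it, so a
stack descriptor now only needs its ->tfm set. A one-shot digest
reduces to a sketch like:

	SHASH_DESC_ON_STACK(shash, tfm);	/* tfm: a struct crypto_shash * */

	shash->tfm = tfm;
	err = crypto_shash_digest(shash, data, len, out);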
diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c
deleted file mode 100644
index 519086730791..000000000000
--- a/drivers/crypto/mxc-scc.c
+++ /dev/null
@@ -1,767 +0,0 @@
1/*
2 * Copyright (C) 2016 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
3 *
4 * The driver is based on information gathered from
5 * drivers/mxc/security/mxc_scc.c which can be found in
6 * the Freescale linux-2.6-imx.git in the imx_2.6.35_maintain branch.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 */
18#include <linux/clk.h>
19#include <linux/crypto.h>
20#include <linux/interrupt.h>
21#include <linux/io.h>
22#include <linux/irq.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/mutex.h>
26#include <linux/of.h>
27#include <linux/of_device.h>
28#include <linux/platform_device.h>
29
30#include <crypto/algapi.h>
31#include <crypto/des.h>
32
33/* Secure Memory (SCM) registers */
34#define SCC_SCM_RED_START 0x0000
35#define SCC_SCM_BLACK_START 0x0004
36#define SCC_SCM_LENGTH 0x0008
37#define SCC_SCM_CTRL 0x000C
38#define SCC_SCM_STATUS 0x0010
39#define SCC_SCM_ERROR_STATUS 0x0014
40#define SCC_SCM_INTR_CTRL 0x0018
41#define SCC_SCM_CFG 0x001C
42#define SCC_SCM_INIT_VECTOR_0 0x0020
43#define SCC_SCM_INIT_VECTOR_1 0x0024
44#define SCC_SCM_RED_MEMORY 0x0400
45#define SCC_SCM_BLACK_MEMORY 0x0800
46
47/* Security Monitor (SMN) Registers */
48#define SCC_SMN_STATUS 0x1000
49#define SCC_SMN_COMMAND 0x1004
50#define SCC_SMN_SEQ_START 0x1008
51#define SCC_SMN_SEQ_END 0x100C
52#define SCC_SMN_SEQ_CHECK 0x1010
53#define SCC_SMN_BIT_COUNT 0x1014
54#define SCC_SMN_BITBANK_INC_SIZE 0x1018
55#define SCC_SMN_BITBANK_DECREMENT 0x101C
56#define SCC_SMN_COMPARE_SIZE 0x1020
57#define SCC_SMN_PLAINTEXT_CHECK 0x1024
58#define SCC_SMN_CIPHERTEXT_CHECK 0x1028
59#define SCC_SMN_TIMER_IV 0x102C
60#define SCC_SMN_TIMER_CONTROL 0x1030
61#define SCC_SMN_DEBUG_DETECT_STAT 0x1034
62#define SCC_SMN_TIMER 0x1038
63
64#define SCC_SCM_CTRL_START_CIPHER BIT(2)
65#define SCC_SCM_CTRL_CBC_MODE BIT(1)
66#define SCC_SCM_CTRL_DECRYPT_MODE BIT(0)
67
68#define SCC_SCM_STATUS_LEN_ERR BIT(12)
69#define SCC_SCM_STATUS_SMN_UNBLOCKED BIT(11)
70#define SCC_SCM_STATUS_CIPHERING_DONE BIT(10)
71#define SCC_SCM_STATUS_ZEROIZING_DONE BIT(9)
72#define SCC_SCM_STATUS_INTR_STATUS BIT(8)
73#define SCC_SCM_STATUS_SEC_KEY BIT(7)
74#define SCC_SCM_STATUS_INTERNAL_ERR BIT(6)
75#define SCC_SCM_STATUS_BAD_SEC_KEY BIT(5)
76#define SCC_SCM_STATUS_ZEROIZE_FAIL BIT(4)
77#define SCC_SCM_STATUS_SMN_BLOCKED BIT(3)
78#define SCC_SCM_STATUS_CIPHERING BIT(2)
79#define SCC_SCM_STATUS_ZEROIZING BIT(1)
80#define SCC_SCM_STATUS_BUSY BIT(0)
81
82#define SCC_SMN_STATUS_STATE_MASK 0x0000001F
83#define SCC_SMN_STATE_START 0x0
84/* The SMN is zeroizing its RAM during reset */
85#define SCC_SMN_STATE_ZEROIZE_RAM 0x5
86/* SMN has passed internal checks */
87#define SCC_SMN_STATE_HEALTH_CHECK 0x6
88/* Fatal Security Violation. SMN is locked, SCM is inoperative. */
89#define SCC_SMN_STATE_FAIL 0x9
90/* SCC is in secure state. SCM is using secret key. */
91#define SCC_SMN_STATE_SECURE 0xA
92/* SCC is not secure. SCM is using default key. */
93#define SCC_SMN_STATE_NON_SECURE 0xC
94
95#define SCC_SCM_INTR_CTRL_ZEROIZE_MEM BIT(2)
96#define SCC_SCM_INTR_CTRL_CLR_INTR BIT(1)
97#define SCC_SCM_INTR_CTRL_MASK_INTR BIT(0)
98
99/* Size, in blocks, of Red memory. */
100#define SCC_SCM_CFG_BLACK_SIZE_MASK 0x07fe0000
101#define SCC_SCM_CFG_BLACK_SIZE_SHIFT 17
102/* Size, in blocks, of Black memory. */
103#define SCC_SCM_CFG_RED_SIZE_MASK 0x0001ff80
104#define SCC_SCM_CFG_RED_SIZE_SHIFT 7
105/* Number of bytes per block. */
106#define SCC_SCM_CFG_BLOCK_SIZE_MASK 0x0000007f
107
108#define SCC_SMN_COMMAND_TAMPER_LOCK BIT(4)
109#define SCC_SMN_COMMAND_CLR_INTR BIT(3)
110#define SCC_SMN_COMMAND_CLR_BIT_BANK BIT(2)
111#define SCC_SMN_COMMAND_EN_INTR BIT(1)
112#define SCC_SMN_COMMAND_SET_SOFTWARE_ALARM BIT(0)
113
114#define SCC_KEY_SLOTS 20
115#define SCC_MAX_KEY_SIZE 32
116#define SCC_KEY_SLOT_SIZE 32
117
118#define SCC_CRC_CCITT_START 0xFFFF
119
120/*
121 * Offset into each RAM of the base of the area which is not
122 * used for Stored Keys.
123 */
124#define SCC_NON_RESERVED_OFFSET (SCC_KEY_SLOTS * SCC_KEY_SLOT_SIZE)
125
126/* Fixed padding for appending to plaintext to fill out a block */
127static char scc_block_padding[8] = { 0x80, 0, 0, 0, 0, 0, 0, 0 };
128
129enum mxc_scc_state {
130 SCC_STATE_OK,
131 SCC_STATE_UNIMPLEMENTED,
132 SCC_STATE_FAILED
133};
134
135struct mxc_scc {
136 struct device *dev;
137 void __iomem *base;
138 struct clk *clk;
139 bool hw_busy;
140 spinlock_t lock;
141 struct crypto_queue queue;
142 struct crypto_async_request *req;
143 int block_size_bytes;
144 int black_ram_size_blocks;
145 int memory_size_bytes;
146 int bytes_remaining;
147
148 void __iomem *red_memory;
149 void __iomem *black_memory;
150};
151
152struct mxc_scc_ctx {
153 struct mxc_scc *scc;
154 struct scatterlist *sg_src;
155 size_t src_nents;
156 struct scatterlist *sg_dst;
157 size_t dst_nents;
158 unsigned int offset;
159 unsigned int size;
160 unsigned int ctrl;
161};
162
163struct mxc_scc_crypto_tmpl {
164 struct mxc_scc *scc;
165 struct crypto_alg alg;
166};
167
168static int mxc_scc_get_data(struct mxc_scc_ctx *ctx,
169 struct crypto_async_request *req)
170{
171 struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
172 struct mxc_scc *scc = ctx->scc;
173 size_t len;
174 void __iomem *from;
175
176 if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
177 from = scc->red_memory;
178 else
179 from = scc->black_memory;
180
181 dev_dbg(scc->dev, "pcopy: from 0x%p %zu bytes\n", from,
182 ctx->dst_nents * 8);
183 len = sg_pcopy_from_buffer(ablkreq->dst, ctx->dst_nents,
184 from, ctx->size, ctx->offset);
185 if (!len) {
186 dev_err(scc->dev, "pcopy err from 0x%p (len=%zu)\n", from, len);
187 return -EINVAL;
188 }
189
190#ifdef DEBUG
191 print_hex_dump(KERN_ERR,
192 "red memory@"__stringify(__LINE__)": ",
193 DUMP_PREFIX_ADDRESS, 16, 4,
194 scc->red_memory, ctx->size, 1);
195 print_hex_dump(KERN_ERR,
196 "black memory@"__stringify(__LINE__)": ",
197 DUMP_PREFIX_ADDRESS, 16, 4,
198 scc->black_memory, ctx->size, 1);
199#endif
200
201 ctx->offset += len;
202
203 if (ctx->offset < ablkreq->nbytes)
204 return -EINPROGRESS;
205
206 return 0;
207}
208
209static int mxc_scc_ablkcipher_req_init(struct ablkcipher_request *req,
210 struct mxc_scc_ctx *ctx)
211{
212 struct mxc_scc *scc = ctx->scc;
213 int nents;
214
215 nents = sg_nents_for_len(req->src, req->nbytes);
216 if (nents < 0) {
217 dev_err(scc->dev, "Invalid number of src SC");
218 return nents;
219 }
220 ctx->src_nents = nents;
221
222 nents = sg_nents_for_len(req->dst, req->nbytes);
223 if (nents < 0) {
224 dev_err(scc->dev, "Invalid number of dst SC");
225 return nents;
226 }
227 ctx->dst_nents = nents;
228
229 ctx->size = 0;
230 ctx->offset = 0;
231
232 return 0;
233}
234
235static int mxc_scc_ablkcipher_req_complete(struct crypto_async_request *req,
236 struct mxc_scc_ctx *ctx,
237 int result)
238{
239 struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
240 struct mxc_scc *scc = ctx->scc;
241
242 scc->req = NULL;
243 scc->bytes_remaining = scc->memory_size_bytes;
244
245 if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE)
246 memcpy(ablkreq->info, scc->base + SCC_SCM_INIT_VECTOR_0,
247 scc->block_size_bytes);
248
249 req->complete(req, result);
250 scc->hw_busy = false;
251
252 return 0;
253}
254
255static int mxc_scc_put_data(struct mxc_scc_ctx *ctx,
256 struct ablkcipher_request *req)
257{
258 u8 padding_buffer[sizeof(u16) + sizeof(scc_block_padding)];
259 size_t len = min_t(size_t, req->nbytes - ctx->offset,
260 ctx->scc->bytes_remaining);
261 unsigned int padding_byte_count = 0;
262 struct mxc_scc *scc = ctx->scc;
263 void __iomem *to;
264
265 if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
266 to = scc->black_memory;
267 else
268 to = scc->red_memory;
269
270 if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE && req->info)
271 memcpy(scc->base + SCC_SCM_INIT_VECTOR_0, req->info,
272 scc->block_size_bytes);
273
274 len = sg_pcopy_to_buffer(req->src, ctx->src_nents,
275 to, len, ctx->offset);
276 if (!len) {
277 dev_err(scc->dev, "pcopy err to 0x%p (len=%zu)\n", to, len);
278 return -EINVAL;
279 }
280
281 ctx->size = len;
282
283#ifdef DEBUG
284 dev_dbg(scc->dev, "copied %d bytes to 0x%p\n", len, to);
285 print_hex_dump(KERN_ERR,
286 "init vector0@"__stringify(__LINE__)": ",
287 DUMP_PREFIX_ADDRESS, 16, 4,
288 scc->base + SCC_SCM_INIT_VECTOR_0, scc->block_size_bytes,
289 1);
290 print_hex_dump(KERN_ERR,
291 "red memory@"__stringify(__LINE__)": ",
292 DUMP_PREFIX_ADDRESS, 16, 4,
293 scc->red_memory, ctx->size, 1);
294 print_hex_dump(KERN_ERR,
295 "black memory@"__stringify(__LINE__)": ",
296 DUMP_PREFIX_ADDRESS, 16, 4,
297 scc->black_memory, ctx->size, 1);
298#endif
299
300 scc->bytes_remaining -= len;
301
302 padding_byte_count = len % scc->block_size_bytes;
303
304 if (padding_byte_count) {
305 memcpy(padding_buffer, scc_block_padding, padding_byte_count);
306 memcpy(to + len, padding_buffer, padding_byte_count);
307 ctx->size += padding_byte_count;
308 }
309
310#ifdef DEBUG
311 print_hex_dump(KERN_ERR,
312 "data to encrypt@"__stringify(__LINE__)": ",
313 DUMP_PREFIX_ADDRESS, 16, 4,
314 to, ctx->size, 1);
315#endif
316
317 return 0;
318}
319
320static void mxc_scc_ablkcipher_next(struct mxc_scc_ctx *ctx,
321 struct crypto_async_request *req)
322{
323 struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
324 struct mxc_scc *scc = ctx->scc;
325 int err;
326
327 dev_dbg(scc->dev, "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
328 ablkreq->nbytes, ablkreq->src, ablkreq->dst);
329
330 writel(0, scc->base + SCC_SCM_ERROR_STATUS);
331
332 err = mxc_scc_put_data(ctx, ablkreq);
333 if (err) {
334 mxc_scc_ablkcipher_req_complete(req, ctx, err);
335 return;
336 }
337
338 dev_dbg(scc->dev, "Start encryption (0x%x/0x%x)\n",
339 readl(scc->base + SCC_SCM_RED_START),
340 readl(scc->base + SCC_SCM_BLACK_START));
341
342 /* clear interrupt control registers */
343 writel(SCC_SCM_INTR_CTRL_CLR_INTR,
344 scc->base + SCC_SCM_INTR_CTRL);
345
346 writel((ctx->size / ctx->scc->block_size_bytes) - 1,
347 scc->base + SCC_SCM_LENGTH);
348
349 dev_dbg(scc->dev, "Process %d block(s) in 0x%p\n",
350 ctx->size / ctx->scc->block_size_bytes,
351 (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE) ? scc->black_memory :
352 scc->red_memory);
353
354 writel(ctx->ctrl, scc->base + SCC_SCM_CTRL);
355}
356
357static irqreturn_t mxc_scc_int(int irq, void *priv)
358{
359 struct crypto_async_request *req;
360 struct mxc_scc_ctx *ctx;
361 struct mxc_scc *scc = priv;
362 int status;
363 int ret;
364
365 status = readl(scc->base + SCC_SCM_STATUS);
366
367 /* clear interrupt control registers */
368 writel(SCC_SCM_INTR_CTRL_CLR_INTR, scc->base + SCC_SCM_INTR_CTRL);
369
370 if (status & SCC_SCM_STATUS_BUSY)
371 return IRQ_NONE;
372
373 req = scc->req;
374 if (req) {
375 ctx = crypto_tfm_ctx(req->tfm);
376 ret = mxc_scc_get_data(ctx, req);
377 if (ret != -EINPROGRESS)
378 mxc_scc_ablkcipher_req_complete(req, ctx, ret);
379 else
380 mxc_scc_ablkcipher_next(ctx, req);
381 }
382
383 return IRQ_HANDLED;
384}
385
386static int mxc_scc_cra_init(struct crypto_tfm *tfm)
387{
388 struct mxc_scc_ctx *ctx = crypto_tfm_ctx(tfm);
389 struct crypto_alg *alg = tfm->__crt_alg;
390 struct mxc_scc_crypto_tmpl *algt;
391
392 algt = container_of(alg, struct mxc_scc_crypto_tmpl, alg);
393
394 ctx->scc = algt->scc;
395 return 0;
396}
397
398static void mxc_scc_dequeue_req_unlocked(struct mxc_scc_ctx *ctx)
399{
400 struct crypto_async_request *req, *backlog;
401
402 if (ctx->scc->hw_busy)
403 return;
404
405 spin_lock_bh(&ctx->scc->lock);
406 backlog = crypto_get_backlog(&ctx->scc->queue);
407 req = crypto_dequeue_request(&ctx->scc->queue);
408 ctx->scc->req = req;
409 ctx->scc->hw_busy = true;
410 spin_unlock_bh(&ctx->scc->lock);
411
412 if (!req)
413 return;
414
415 if (backlog)
416 backlog->complete(backlog, -EINPROGRESS);
417
418 mxc_scc_ablkcipher_next(ctx, req);
419}
420
421static int mxc_scc_queue_req(struct mxc_scc_ctx *ctx,
422 struct crypto_async_request *req)
423{
424 int ret;
425
426 spin_lock_bh(&ctx->scc->lock);
427 ret = crypto_enqueue_request(&ctx->scc->queue, req);
428 spin_unlock_bh(&ctx->scc->lock);
429
430 if (ret != -EINPROGRESS)
431 return ret;
432
433 mxc_scc_dequeue_req_unlocked(ctx);
434
435 return -EINPROGRESS;
436}
437
438static int mxc_scc_des3_op(struct mxc_scc_ctx *ctx,
439 struct ablkcipher_request *req)
440{
441 int err;
442
443 err = mxc_scc_ablkcipher_req_init(req, ctx);
444 if (err)
445 return err;
446
447 return mxc_scc_queue_req(ctx, &req->base);
448}
449
450static int mxc_scc_ecb_des_encrypt(struct ablkcipher_request *req)
451{
452 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
453 struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
454
455 ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
456
457 return mxc_scc_des3_op(ctx, req);
458}
459
460static int mxc_scc_ecb_des_decrypt(struct ablkcipher_request *req)
461{
462 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
463 struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
464
465 ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
466 ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;
467
468 return mxc_scc_des3_op(ctx, req);
469}
470
471static int mxc_scc_cbc_des_encrypt(struct ablkcipher_request *req)
472{
473 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
474 struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
475
476 ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
477 ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
478
479 return mxc_scc_des3_op(ctx, req);
480}
481
482static int mxc_scc_cbc_des_decrypt(struct ablkcipher_request *req)
483{
484 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
485 struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
486
487 ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
488 ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
489 ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;
490
491 return mxc_scc_des3_op(ctx, req);
492}
493
494static void mxc_scc_hw_init(struct mxc_scc *scc)
495{
496 int offset;
497
498 offset = SCC_NON_RESERVED_OFFSET / scc->block_size_bytes;
499
500 /* Fill the RED_START register */
501 writel(offset, scc->base + SCC_SCM_RED_START);
502
503 /* Fill the BLACK_START register */
504 writel(offset, scc->base + SCC_SCM_BLACK_START);
505
506 scc->red_memory = scc->base + SCC_SCM_RED_MEMORY +
507 SCC_NON_RESERVED_OFFSET;
508
509 scc->black_memory = scc->base + SCC_SCM_BLACK_MEMORY +
510 SCC_NON_RESERVED_OFFSET;
511
512 scc->bytes_remaining = scc->memory_size_bytes;
513}
514
515static int mxc_scc_get_config(struct mxc_scc *scc)
516{
517 int config;
518
519 config = readl(scc->base + SCC_SCM_CFG);
520
521 scc->block_size_bytes = config & SCC_SCM_CFG_BLOCK_SIZE_MASK;
522
523 scc->black_ram_size_blocks = config & SCC_SCM_CFG_BLACK_SIZE_MASK;
524
525 scc->memory_size_bytes = (scc->block_size_bytes *
526 scc->black_ram_size_blocks) -
527 SCC_NON_RESERVED_OFFSET;
528
529 return 0;
530}
531
532static enum mxc_scc_state mxc_scc_get_state(struct mxc_scc *scc)
533{
534 enum mxc_scc_state state;
535 int status;
536
537 status = readl(scc->base + SCC_SMN_STATUS) &
538 SCC_SMN_STATUS_STATE_MASK;
539
540 /* If in Health Check, try to bringup to secure state */
541 if (status & SCC_SMN_STATE_HEALTH_CHECK) {
542 /*
543 * Write a simple algorithm to the Algorithm Sequence
544 * Checker (ASC)
545 */
546 writel(0xaaaa, scc->base + SCC_SMN_SEQ_START);
547 writel(0x5555, scc->base + SCC_SMN_SEQ_END);
548 writel(0x5555, scc->base + SCC_SMN_SEQ_CHECK);
549
550 status = readl(scc->base + SCC_SMN_STATUS) &
551 SCC_SMN_STATUS_STATE_MASK;
552 }
553
554 switch (status) {
555 case SCC_SMN_STATE_NON_SECURE:
556 case SCC_SMN_STATE_SECURE:
557 state = SCC_STATE_OK;
558 break;
559 case SCC_SMN_STATE_FAIL:
560 state = SCC_STATE_FAILED;
561 break;
562 default:
563 state = SCC_STATE_UNIMPLEMENTED;
564 break;
565 }
566
567 return state;
568}
569
570static struct mxc_scc_crypto_tmpl scc_ecb_des = {
571 .alg = {
572 .cra_name = "ecb(des3_ede)",
573 .cra_driver_name = "ecb-des3-scc",
574 .cra_priority = 300,
575 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
576 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
577 .cra_ctxsize = sizeof(struct mxc_scc_ctx),
578 .cra_alignmask = 0,
579 .cra_type = &crypto_ablkcipher_type,
580 .cra_module = THIS_MODULE,
581 .cra_init = mxc_scc_cra_init,
582 .cra_u.ablkcipher = {
583 .min_keysize = DES3_EDE_KEY_SIZE,
584 .max_keysize = DES3_EDE_KEY_SIZE,
585 .encrypt = mxc_scc_ecb_des_encrypt,
586 .decrypt = mxc_scc_ecb_des_decrypt,
587 }
588 }
589};
590
591static struct mxc_scc_crypto_tmpl scc_cbc_des = {
592 .alg = {
593 .cra_name = "cbc(des3_ede)",
594 .cra_driver_name = "cbc-des3-scc",
595 .cra_priority = 300,
596 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
597 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
598 .cra_ctxsize = sizeof(struct mxc_scc_ctx),
599 .cra_alignmask = 0,
600 .cra_type = &crypto_ablkcipher_type,
601 .cra_module = THIS_MODULE,
602 .cra_init = mxc_scc_cra_init,
603 .cra_u.ablkcipher = {
604 .min_keysize = DES3_EDE_KEY_SIZE,
605 .max_keysize = DES3_EDE_KEY_SIZE,
606 .encrypt = mxc_scc_cbc_des_encrypt,
607 .decrypt = mxc_scc_cbc_des_decrypt,
608 }
609 }
610};
611
612static struct mxc_scc_crypto_tmpl *scc_crypto_algs[] = {
613 &scc_ecb_des,
614 &scc_cbc_des,
615};
616
617static int mxc_scc_crypto_register(struct mxc_scc *scc)
618{
619 int i;
620 int err = 0;
621
622 for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++) {
623 scc_crypto_algs[i]->scc = scc;
624 err = crypto_register_alg(&scc_crypto_algs[i]->alg);
625 if (err)
626 goto err_out;
627 }
628
629 return 0;
630
631err_out:
632 while (--i >= 0)
633 crypto_unregister_alg(&scc_crypto_algs[i]->alg);
634
635 return err;
636}
637
638static void mxc_scc_crypto_unregister(void)
639{
640 unsigned int i;
641
642 for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++)
643 crypto_unregister_alg(&scc_crypto_algs[i]->alg);
644}
645
646static int mxc_scc_probe(struct platform_device *pdev)
647{
648 struct device *dev = &pdev->dev;
649 struct resource *res;
650 struct mxc_scc *scc;
651 enum mxc_scc_state state;
652 int irq;
653 int ret;
654 int i;
655
656 scc = devm_kzalloc(dev, sizeof(*scc), GFP_KERNEL);
657 if (!scc)
658 return -ENOMEM;
659
660 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
661 scc->base = devm_ioremap_resource(dev, res);
662 if (IS_ERR(scc->base))
663 return PTR_ERR(scc->base);
664
665 scc->clk = devm_clk_get(&pdev->dev, "ipg");
666 if (IS_ERR(scc->clk)) {
667 dev_err(dev, "Could not get ipg clock\n");
668 return PTR_ERR(scc->clk);
669 }
670
671 ret = clk_prepare_enable(scc->clk);
672 if (ret)
673 return ret;
674
675 /* clear error status register */
676 writel(0x0, scc->base + SCC_SCM_ERROR_STATUS);
677
678 /* clear interrupt control registers */
679 writel(SCC_SCM_INTR_CTRL_CLR_INTR |
680 SCC_SCM_INTR_CTRL_MASK_INTR,
681 scc->base + SCC_SCM_INTR_CTRL);
682
683 writel(SCC_SMN_COMMAND_CLR_INTR |
684 SCC_SMN_COMMAND_EN_INTR,
685 scc->base + SCC_SMN_COMMAND);
686
687 scc->dev = dev;
688 platform_set_drvdata(pdev, scc);
689
690 ret = mxc_scc_get_config(scc);
691 if (ret)
692 goto err_out;
693
694 state = mxc_scc_get_state(scc);
695
696 if (state != SCC_STATE_OK) {
697 dev_err(dev, "SCC in unusable state %d\n", state);
698 ret = -EINVAL;
699 goto err_out;
700 }
701
702 mxc_scc_hw_init(scc);
703
704 spin_lock_init(&scc->lock);
705 /* FIXME: calculate queue from RAM slots */
706 crypto_init_queue(&scc->queue, 50);
707
708 for (i = 0; i < 2; i++) {
709 irq = platform_get_irq(pdev, i);
710 if (irq < 0) {
711 dev_err(dev, "failed to get irq resource: %d\n", irq);
712 ret = irq;
713 goto err_out;
714 }
715
716 ret = devm_request_threaded_irq(dev, irq, NULL, mxc_scc_int,
717 IRQF_ONESHOT, dev_name(dev), scc);
718 if (ret)
719 goto err_out;
720 }
721
722 ret = mxc_scc_crypto_register(scc);
723 if (ret) {
724 dev_err(dev, "could not register algorithms");
725 goto err_out;
726 }
727
728 dev_info(dev, "registered successfully.\n");
729
730 return 0;
731
732err_out:
733 clk_disable_unprepare(scc->clk);
734
735 return ret;
736}
737
738static int mxc_scc_remove(struct platform_device *pdev)
739{
740 struct mxc_scc *scc = platform_get_drvdata(pdev);
741
742 mxc_scc_crypto_unregister();
743
744 clk_disable_unprepare(scc->clk);
745
746 return 0;
747}
748
749static const struct of_device_id mxc_scc_dt_ids[] = {
750 { .compatible = "fsl,imx25-scc", .data = NULL, },
751 { /* sentinel */ }
752};
753MODULE_DEVICE_TABLE(of, mxc_scc_dt_ids);
754
755static struct platform_driver mxc_scc_driver = {
756 .probe = mxc_scc_probe,
757 .remove = mxc_scc_remove,
758 .driver = {
759 .name = "mxc-scc",
760 .of_match_table = mxc_scc_dt_ids,
761 },
762};
763
764module_platform_driver(mxc_scc_driver);
765MODULE_AUTHOR("Steffen Trumtrar <kernel@pengutronix.de>");
766MODULE_DESCRIPTION("Freescale i.MX25 SCC Crypto driver");
767MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index a2105cf33abb..b4429891e368 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -471,7 +471,7 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
471 471
472 wake_up_process(sdcp->thread[actx->chan]); 472 wake_up_process(sdcp->thread[actx->chan]);
473 473
474 return -EINPROGRESS; 474 return ret;
475} 475}
476 476
477static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req) 477static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
@@ -700,11 +700,7 @@ static int dcp_chan_thread_sha(void *data)
700 700
701 struct crypto_async_request *backlog; 701 struct crypto_async_request *backlog;
702 struct crypto_async_request *arq; 702 struct crypto_async_request *arq;
703 703 int ret;
704 struct dcp_sha_req_ctx *rctx;
705
706 struct ahash_request *req;
707 int ret, fini;
708 704
709 while (!kthread_should_stop()) { 705 while (!kthread_should_stop()) {
710 set_current_state(TASK_INTERRUPTIBLE); 706 set_current_state(TASK_INTERRUPTIBLE);
@@ -725,11 +721,7 @@ static int dcp_chan_thread_sha(void *data)
725 backlog->complete(backlog, -EINPROGRESS); 721 backlog->complete(backlog, -EINPROGRESS);
726 722
727 if (arq) { 723 if (arq) {
728 req = ahash_request_cast(arq);
729 rctx = ahash_request_ctx(req);
730
731 ret = dcp_sha_req_to_buf(arq); 724 ret = dcp_sha_req_to_buf(arq);
732 fini = rctx->fini;
733 arq->complete(arq, ret); 725 arq->complete(arq, ret);
734 } 726 }
735 } 727 }
@@ -797,7 +789,7 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
797 wake_up_process(sdcp->thread[actx->chan]); 789 wake_up_process(sdcp->thread[actx->chan]);
798 mutex_unlock(&actx->mutex); 790 mutex_unlock(&actx->mutex);
799 791
800 return -EINPROGRESS; 792 return ret;
801} 793}
802 794
803static int dcp_sha_update(struct ahash_request *req) 795static int dcp_sha_update(struct ahash_request *req)
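Returning ret instead of a hard-coded -EINPROGRESS propagates the real
verdict of crypto_enqueue_request(): -EINPROGRESS when the request was
queued, -EBUSY when it was only parked on the backlog (a request
without CRYPTO_TFM_REQ_MAY_BACKLOG is refused outright). A sketch of
the corrected enqueue tail (the channel spinlock around the enqueue is
omitted here):

	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	wake_up_process(sdcp->thread[actx->chan]);
	return ret;	/* -EINPROGRESS or -EBUSY, not unconditionally -EINPROGRESS */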
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 9450c41211b2..0d5d3d8eb680 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -469,8 +469,6 @@ static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
469 return err; 469 return err;
470 470
471 shash->tfm = child_shash; 471 shash->tfm = child_shash;
472 shash->flags = crypto_ahash_get_flags(tfm) &
473 CRYPTO_TFM_REQ_MAY_SLEEP;
474 472
475 bs = crypto_shash_blocksize(child_shash); 473 bs = crypto_shash_blocksize(child_shash);
476 ds = crypto_shash_digestsize(child_shash); 474 ds = crypto_shash_digestsize(child_shash);
@@ -788,13 +786,18 @@ static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
788 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 786 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
789 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 787 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
790 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); 788 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
789 u32 flags;
790 int err;
791
792 flags = crypto_ablkcipher_get_flags(cipher);
793 err = __des3_verify_key(&flags, key);
794 if (unlikely(err)) {
795 crypto_ablkcipher_set_flags(cipher, flags);
796 return err;
797 }
791 798
792 ctx->enc_type = n2alg->enc_type; 799 ctx->enc_type = n2alg->enc_type;
793 800
794 if (keylen != (3 * DES_KEY_SIZE)) {
795 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
796 return -EINVAL;
797 }
798 ctx->key_len = keylen; 801 ctx->key_len = keylen;
799 memcpy(ctx->key.des3, key, keylen); 802 memcpy(ctx->key.des3, key, keylen);
800 return 0; 803 return 0;
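
This n2_core hunk is the first of several in this pull that switch 3DES setkey over to the shared __des3_verify_key() helper (the same pattern appears below in omap-des, picoxcell, qce, rockchip, stm32 and talitos). The helper takes over the weak-key policy, including the FIPS-mode rejection of 2-key 3DES noted in the pull summary, and reports result flags back through its first argument; the bare keylen check could be dropped because the core already enforces min_keysize == max_keysize == DES3_EDE_KEY_SIZE at setkey time. A condensed sketch of the pattern, with a hypothetical driver context:

#include <crypto/des.h>
#include <linux/crypto.h>
#include <linux/string.h>

struct example_ctx {
        u8 key[DES3_EDE_KEY_SIZE];
        unsigned int keylen;
};

static int example_des3_setkey(struct crypto_ablkcipher *cipher,
                               const u8 *key, unsigned int keylen)
{
        struct example_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        u32 flags = crypto_ablkcipher_get_flags(cipher);
        int err;

        /* rejects degenerate keys (K1 == K2 or K2 == K3) per policy/FIPS */
        err = __des3_verify_key(&flags, key);
        if (unlikely(err)) {
                /* hand back the CRYPTO_TFM_RES_* bits the helper set */
                crypto_ablkcipher_set_flags(cipher, flags);
                return err;
        }

        memcpy(ctx->key, key, keylen);
        ctx->keylen = keylen;
        return 0;
}
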
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
index 66869976cfa2..57932848361b 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -296,7 +296,7 @@ static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
296 struct nx842_workmem *workmem; 296 struct nx842_workmem *workmem;
297 struct nx842_scatterlist slin, slout; 297 struct nx842_scatterlist slin, slout;
298 struct nx_csbcpb *csbcpb; 298 struct nx_csbcpb *csbcpb;
299 int ret = 0, max_sync_size; 299 int ret = 0;
300 unsigned long inbuf, outbuf; 300 unsigned long inbuf, outbuf;
301 struct vio_pfo_op op = { 301 struct vio_pfo_op op = {
302 .done = NULL, 302 .done = NULL,
@@ -319,7 +319,6 @@ static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
319 rcu_read_unlock(); 319 rcu_read_unlock();
320 return -ENODEV; 320 return -ENODEV;
321 } 321 }
322 max_sync_size = local_devdata->max_sync_size;
323 dev = local_devdata->dev; 322 dev = local_devdata->dev;
324 323
325 /* Init scatterlist */ 324 /* Init scatterlist */
@@ -427,7 +426,7 @@ static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
427 struct nx842_workmem *workmem; 426 struct nx842_workmem *workmem;
428 struct nx842_scatterlist slin, slout; 427 struct nx842_scatterlist slin, slout;
429 struct nx_csbcpb *csbcpb; 428 struct nx_csbcpb *csbcpb;
430 int ret = 0, max_sync_size; 429 int ret = 0;
431 unsigned long inbuf, outbuf; 430 unsigned long inbuf, outbuf;
432 struct vio_pfo_op op = { 431 struct vio_pfo_op op = {
433 .done = NULL, 432 .done = NULL,
@@ -451,7 +450,6 @@ static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
451 rcu_read_unlock(); 450 rcu_read_unlock();
452 return -ENODEV; 451 return -ENODEV;
453 } 452 }
454 max_sync_size = local_devdata->max_sync_size;
455 dev = local_devdata->dev; 453 dev = local_devdata->dev;
456 454
457 workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN); 455 workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index d94e25df503b..f06565df2a12 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -353,7 +353,7 @@ static int decompress(struct nx842_crypto_ctx *ctx,
353 unsigned int adj_slen = slen; 353 unsigned int adj_slen = slen;
354 u8 *src = p->in, *dst = p->out; 354 u8 *src = p->in, *dst = p->out;
355 u16 padding = be16_to_cpu(g->padding); 355 u16 padding = be16_to_cpu(g->padding);
356 int ret, spadding = 0, dpadding = 0; 356 int ret, spadding = 0;
357 ktime_t timeout; 357 ktime_t timeout;
358 358
359 if (!slen || !required_len) 359 if (!slen || !required_len)
@@ -413,7 +413,6 @@ usesw:
413 spadding = 0; 413 spadding = 0;
414 dst = p->out; 414 dst = p->out;
415 dlen = p->oremain; 415 dlen = p->oremain;
416 dpadding = 0;
417 if (dlen < required_len) { /* have ignore bytes */ 416 if (dlen < required_len) { /* have ignore bytes */
418 dst = ctx->dbounce; 417 dst = ctx->dbounce;
419 dlen = BOUNCE_BUFFER_SIZE; 418 dlen = BOUNCE_BUFFER_SIZE;
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index ad3358e74f5c..8f5820b78a83 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -105,8 +105,7 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
105 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); 105 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
106 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); 106 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
107 107
108 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 108 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
109 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
110 if (rc) 109 if (rc)
111 goto out; 110 goto out;
112 atomic_inc(&(nx_ctx->stats->aes_ops)); 111 atomic_inc(&(nx_ctx->stats->aes_ops));
@@ -134,8 +133,7 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
134 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); 133 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
135 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); 134 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
136 135
137 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 136 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
138 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
139 if (rc) 137 if (rc)
140 goto out; 138 goto out;
141 atomic_inc(&(nx_ctx->stats->aes_ops)); 139 atomic_inc(&(nx_ctx->stats->aes_ops));
@@ -279,8 +277,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
279 goto out; 277 goto out;
280 } 278 }
281 279
282 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 280 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
283 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
284 if (rc) 281 if (rc)
285 goto out; 282 goto out;
286 283
@@ -361,8 +358,7 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
361 goto out; 358 goto out;
362 } 359 }
363 360
364 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 361 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
365 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
366 if (rc) 362 if (rc)
367 goto out; 363 goto out;
368 364
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index a6764af83c6d..e06f0431dee5 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -162,8 +162,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
162 goto out; 162 goto out;
163 } 163 }
164 164
165 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 165 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
166 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
167 if (rc) 166 if (rc)
168 goto out; 167 goto out;
169 168
@@ -243,8 +242,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
243 goto out; 242 goto out;
244 } 243 }
245 244
246 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 245 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
247 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
248 if (rc) 246 if (rc)
249 goto out; 247 goto out;
250 248
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 92956bc6e45e..0293b17903d0 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -166,8 +166,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
166 goto out; 166 goto out;
167 } 167 }
168 168
169 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 169 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
170 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
171 if (rc) 170 if (rc)
172 goto out; 171 goto out;
173 172
@@ -249,8 +248,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
249 goto out; 248 goto out;
250 } 249 }
251 250
252 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 251 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
253 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
254 if (rc) 252 if (rc)
255 goto out; 253 goto out;
256 254
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 1ba2633e90d6..3d82d18ff810 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -656,9 +656,6 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
656 struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher); 656 struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
657 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 657 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
658 658
659 if (keylen != DES_KEY_SIZE && keylen != (3*DES_KEY_SIZE))
660 return -EINVAL;
661
662 pr_debug("enter, keylen: %d\n", keylen); 659 pr_debug("enter, keylen: %d\n", keylen);
663 660
664 /* Do we need to test against weak key? */ 661 /* Do we need to test against weak key? */
@@ -678,6 +675,28 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
678 return 0; 675 return 0;
679} 676}
680 677
678static int omap_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
679 unsigned int keylen)
680{
681 struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
682 u32 flags;
683 int err;
684
685 pr_debug("enter, keylen: %d\n", keylen);
686
687 flags = crypto_ablkcipher_get_flags(cipher);
688 err = __des3_verify_key(&flags, key);
689 if (unlikely(err)) {
690 crypto_ablkcipher_set_flags(cipher, flags);
691 return err;
692 }
693
694 memcpy(ctx->key, key, keylen);
695 ctx->keylen = keylen;
696
697 return 0;
698}
699
681static int omap_des_ecb_encrypt(struct ablkcipher_request *req) 700static int omap_des_ecb_encrypt(struct ablkcipher_request *req)
682{ 701{
683 return omap_des_crypt(req, FLAGS_ENCRYPT); 702 return omap_des_crypt(req, FLAGS_ENCRYPT);
@@ -788,7 +807,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
788 .cra_u.ablkcipher = { 807 .cra_u.ablkcipher = {
789 .min_keysize = 3*DES_KEY_SIZE, 808 .min_keysize = 3*DES_KEY_SIZE,
790 .max_keysize = 3*DES_KEY_SIZE, 809 .max_keysize = 3*DES_KEY_SIZE,
791 .setkey = omap_des_setkey, 810 .setkey = omap_des3_setkey,
792 .encrypt = omap_des_ecb_encrypt, 811 .encrypt = omap_des_ecb_encrypt,
793 .decrypt = omap_des_ecb_decrypt, 812 .decrypt = omap_des_ecb_decrypt,
794 } 813 }
@@ -811,7 +830,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
811 .min_keysize = 3*DES_KEY_SIZE, 830 .min_keysize = 3*DES_KEY_SIZE,
812 .max_keysize = 3*DES_KEY_SIZE, 831 .max_keysize = 3*DES_KEY_SIZE,
813 .ivsize = DES_BLOCK_SIZE, 832 .ivsize = DES_BLOCK_SIZE,
814 .setkey = omap_des_setkey, 833 .setkey = omap_des3_setkey,
815 .encrypt = omap_des_cbc_encrypt, 834 .encrypt = omap_des_cbc_encrypt,
816 .decrypt = omap_des_cbc_decrypt, 835 .decrypt = omap_des_cbc_decrypt,
817 } 836 }
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 0641185bd82f..51b20abac464 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1055,7 +1055,6 @@ static int omap_sham_finish_hmac(struct ahash_request *req)
1055 SHASH_DESC_ON_STACK(shash, bctx->shash); 1055 SHASH_DESC_ON_STACK(shash, bctx->shash);
1056 1056
1057 shash->tfm = bctx->shash; 1057 shash->tfm = bctx->shash;
1058 shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
1059 1058
1060 return crypto_shash_init(shash) ?: 1059 return crypto_shash_init(shash) ?:
1061 crypto_shash_update(shash, bctx->opad, bs) ?: 1060 crypto_shash_update(shash, bctx->opad, bs) ?:
@@ -1226,7 +1225,6 @@ static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
1226 SHASH_DESC_ON_STACK(shash, tfm); 1225 SHASH_DESC_ON_STACK(shash, tfm);
1227 1226
1228 shash->tfm = tfm; 1227 shash->tfm = tfm;
1229 shash->flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1230 1228
1231 return crypto_shash_digest(shash, data, len, out); 1229 return crypto_shash_digest(shash, data, len, out);
1232} 1230}
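
The shash->flags removals in this file (and in n2_core, nx, padlock-sha, qat and s5p-sss elsewhere in this series) track an API change: the flags field, whose only meaningful bit in these call sites was CRYPTO_TFM_REQ_MAY_SLEEP, was dropped from struct shash_desc, so a descriptor now only needs its tfm set. A minimal sketch of the post-change pattern, assuming a caller that already holds a crypto_shash handle:

#include <crypto/hash.h>

static int example_digest(struct crypto_shash *tfm, const u8 *data,
                          unsigned int len, u8 *out)
{
        SHASH_DESC_ON_STACK(shash, tfm);

        shash->tfm = tfm;
        /* previously also: shash->flags = ... & CRYPTO_TFM_REQ_MAY_SLEEP; */

        return crypto_shash_digest(shash, data, len, out);
}
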
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 21e5cae0a1e0..e641481a3cd9 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -39,7 +39,6 @@ static int padlock_sha_init(struct shash_desc *desc)
39 struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm); 39 struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
40 40
41 dctx->fallback.tfm = ctx->fallback; 41 dctx->fallback.tfm = ctx->fallback;
42 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
43 return crypto_shash_init(&dctx->fallback); 42 return crypto_shash_init(&dctx->fallback);
44} 43}
45 44
@@ -48,7 +47,6 @@ static int padlock_sha_update(struct shash_desc *desc,
48{ 47{
49 struct padlock_sha_desc *dctx = shash_desc_ctx(desc); 48 struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
50 49
51 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
52 return crypto_shash_update(&dctx->fallback, data, length); 50 return crypto_shash_update(&dctx->fallback, data, length);
53} 51}
54 52
@@ -65,7 +63,6 @@ static int padlock_sha_import(struct shash_desc *desc, const void *in)
65 struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm); 63 struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
66 64
67 dctx->fallback.tfm = ctx->fallback; 65 dctx->fallback.tfm = ctx->fallback;
68 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
69 return crypto_shash_import(&dctx->fallback, in); 66 return crypto_shash_import(&dctx->fallback, in);
70} 67}
71 68
@@ -91,7 +88,6 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
91 unsigned int leftover; 88 unsigned int leftover;
92 int err; 89 int err;
93 90
94 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
95 err = crypto_shash_export(&dctx->fallback, &state); 91 err = crypto_shash_export(&dctx->fallback, &state);
96 if (err) 92 if (err)
97 goto out; 93 goto out;
@@ -153,7 +149,6 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
153 unsigned int leftover; 149 unsigned int leftover;
154 int err; 150 int err;
155 151
156 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
157 err = crypto_shash_export(&dctx->fallback, &state); 152 err = crypto_shash_export(&dctx->fallback, &state);
158 if (err) 153 if (err)
159 goto out; 154 goto out;
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 1b3acdeffede..05b89e703903 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -753,11 +753,6 @@ static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
753 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); 753 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
754 u32 tmp[DES_EXPKEY_WORDS]; 754 u32 tmp[DES_EXPKEY_WORDS];
755 755
756 if (len > DES3_EDE_KEY_SIZE) {
757 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
758 return -EINVAL;
759 }
760
761 if (unlikely(!des_ekey(tmp, key)) && 756 if (unlikely(!des_ekey(tmp, key)) &&
762 (crypto_ablkcipher_get_flags(cipher) & 757 (crypto_ablkcipher_get_flags(cipher) &
763 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { 758 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
@@ -772,6 +767,30 @@ static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
772} 767}
773 768
774/* 769/*
770 * Set the 3DES key for a block cipher transform. This also performs weak key
771 * checking if the transform has requested it.
772 */
773static int spacc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
774 unsigned int len)
775{
776 struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher);
777 u32 flags;
778 int err;
779
780 flags = crypto_ablkcipher_get_flags(cipher);
781 err = __des3_verify_key(&flags, key);
782 if (unlikely(err)) {
783 crypto_ablkcipher_set_flags(cipher, flags);
784 return err;
785 }
786
787 memcpy(ctx->key, key, len);
788 ctx->key_len = len;
789
790 return 0;
791}
792
793/*
775 * Set the key for an AES block cipher. Some key lengths are not supported in 794 * Set the key for an AES block cipher. Some key lengths are not supported in
776 * hardware so this must also check whether a fallback is needed. 795 * hardware so this must also check whether a fallback is needed.
777 */ 796 */
@@ -1196,7 +1215,7 @@ static const struct dev_pm_ops spacc_pm_ops = {
1196 1215
1197static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev) 1216static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
1198{ 1217{
1199 return dev ? platform_get_drvdata(to_platform_device(dev)) : NULL; 1218 return dev ? dev_get_drvdata(dev) : NULL;
1200} 1219}
1201 1220
1202static ssize_t spacc_stat_irq_thresh_show(struct device *dev, 1221static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
@@ -1353,7 +1372,7 @@ static struct spacc_alg ipsec_engine_algs[] = {
1353 .cra_type = &crypto_ablkcipher_type, 1372 .cra_type = &crypto_ablkcipher_type,
1354 .cra_module = THIS_MODULE, 1373 .cra_module = THIS_MODULE,
1355 .cra_ablkcipher = { 1374 .cra_ablkcipher = {
1356 .setkey = spacc_des_setkey, 1375 .setkey = spacc_des3_setkey,
1357 .encrypt = spacc_ablk_encrypt, 1376 .encrypt = spacc_ablk_encrypt,
1358 .decrypt = spacc_ablk_decrypt, 1377 .decrypt = spacc_ablk_decrypt,
1359 .min_keysize = DES3_EDE_KEY_SIZE, 1378 .min_keysize = DES3_EDE_KEY_SIZE,
@@ -1380,7 +1399,7 @@ static struct spacc_alg ipsec_engine_algs[] = {
1380 .cra_type = &crypto_ablkcipher_type, 1399 .cra_type = &crypto_ablkcipher_type,
1381 .cra_module = THIS_MODULE, 1400 .cra_module = THIS_MODULE,
1382 .cra_ablkcipher = { 1401 .cra_ablkcipher = {
1383 .setkey = spacc_des_setkey, 1402 .setkey = spacc_des3_setkey,
1384 .encrypt = spacc_ablk_encrypt, 1403 .encrypt = spacc_ablk_encrypt,
1385 .decrypt = spacc_ablk_decrypt, 1404 .decrypt = spacc_ablk_decrypt,
1386 .min_keysize = DES3_EDE_KEY_SIZE, 1405 .min_keysize = DES3_EDE_KEY_SIZE,
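
Besides the 3DES setkey split, the picoxcell diff replaces platform_get_drvdata(to_platform_device(dev)) with dev_get_drvdata(dev). The two are equivalent when the caller already holds the struct device, since platform_get_drvdata() is itself a thin wrapper; a sketch of that equivalence:

#include <linux/platform_device.h>

/* platform_get_drvdata(pdev) is defined as dev_get_drvdata(&pdev->dev),
 * so bouncing an existing struct device through to_platform_device()
 * and back adds nothing. */
static void *drvdata_of(struct device *dev)
{
        return dev ? dev_get_drvdata(dev) : NULL;
}
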
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 975c75198f56..c8d401646902 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -164,7 +164,6 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
164 memset(ctx->ipad, 0, block_size); 164 memset(ctx->ipad, 0, block_size);
165 memset(ctx->opad, 0, block_size); 165 memset(ctx->opad, 0, block_size);
166 shash->tfm = ctx->hash_tfm; 166 shash->tfm = ctx->hash_tfm;
167 shash->flags = 0x0;
168 167
169 if (auth_keylen > block_size) { 168 if (auth_keylen > block_size) {
170 int ret = crypto_shash_digest(shash, auth_key, 169 int ret = crypto_shash_digest(shash, auth_key,
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index c9f324730d71..692a7aaee749 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -1300,8 +1300,6 @@ static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
1300static struct akcipher_alg rsa = { 1300static struct akcipher_alg rsa = {
1301 .encrypt = qat_rsa_enc, 1301 .encrypt = qat_rsa_enc,
1302 .decrypt = qat_rsa_dec, 1302 .decrypt = qat_rsa_dec,
1303 .sign = qat_rsa_dec,
1304 .verify = qat_rsa_enc,
1305 .set_pub_key = qat_rsa_setpubkey, 1303 .set_pub_key = qat_rsa_setpubkey,
1306 .set_priv_key = qat_rsa_setprivkey, 1304 .set_priv_key = qat_rsa_setprivkey,
1307 .max_size = qat_rsa_max_size, 1305 .max_size = qat_rsa_max_size,
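
Dropping .sign and .verify here follows from the pull's "Change verify API for akcipher": mapping verify onto the raw RSA encrypt primitive no longer fits, because the new verify entry point is expected to perform the comparison against the caller-supplied digest internally rather than hand back the encoded signature payload. A hedged sketch of how a caller uses the new convention, as I read the updated API (the signature-then-digest source layout is an assumption worth double-checking):

#include <crypto/akcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int example_verify(struct crypto_akcipher *tfm,
                          struct scatterlist *sig_then_digest,
                          unsigned int sig_len, unsigned int digest_len)
{
        DECLARE_CRYPTO_WAIT(wait);
        struct akcipher_request *req;
        int err;

        req = akcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        /* src carries the signature followed by the digest; no dst */
        akcipher_request_set_crypt(req, sig_then_digest, NULL,
                                   sig_len, digest_len);
        akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      crypto_req_done, &wait);

        err = crypto_wait_req(crypto_akcipher_verify(req), &wait);

        akcipher_request_free(req);
        return err;     /* 0 only if the signature matches the digest */
}
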
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index 154b6baa124e..8d3493855a70 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -198,6 +198,25 @@ weakkey:
198 return -EINVAL; 198 return -EINVAL;
199} 199}
200 200
201static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
202 unsigned int keylen)
203{
204 struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
205 u32 flags;
206 int err;
207
208 flags = crypto_ablkcipher_get_flags(ablk);
209 err = __des3_verify_key(&flags, key);
210 if (unlikely(err)) {
211 crypto_ablkcipher_set_flags(ablk, flags);
212 return err;
213 }
214
215 ctx->enc_keylen = keylen;
216 memcpy(ctx->enc_key, key, keylen);
217 return 0;
218}
219
201static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt) 220static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
202{ 221{
203 struct crypto_tfm *tfm = 222 struct crypto_tfm *tfm =
@@ -363,7 +382,8 @@ static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
363 alg->cra_ablkcipher.ivsize = def->ivsize; 382 alg->cra_ablkcipher.ivsize = def->ivsize;
364 alg->cra_ablkcipher.min_keysize = def->min_keysize; 383 alg->cra_ablkcipher.min_keysize = def->min_keysize;
365 alg->cra_ablkcipher.max_keysize = def->max_keysize; 384 alg->cra_ablkcipher.max_keysize = def->max_keysize;
366 alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey; 385 alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ?
386 qce_des3_setkey : qce_ablkcipher_setkey;
367 alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt; 387 alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
368 alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt; 388 alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
369 389
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
index 02dac6ae7e53..313759521a0f 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
@@ -46,24 +46,36 @@ static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
46 return 0; 46 return 0;
47} 47}
48 48
49static int rk_tdes_setkey(struct crypto_ablkcipher *cipher, 49static int rk_des_setkey(struct crypto_ablkcipher *cipher,
50 const u8 *key, unsigned int keylen) 50 const u8 *key, unsigned int keylen)
51{ 51{
52 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 52 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
53 struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm); 53 struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
54 u32 tmp[DES_EXPKEY_WORDS]; 54 u32 tmp[DES_EXPKEY_WORDS];
55 55
56 if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) { 56 if (!des_ekey(tmp, key) &&
57 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 57 (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
58 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
58 return -EINVAL; 59 return -EINVAL;
59 } 60 }
60 61
61 if (keylen == DES_KEY_SIZE) { 62 ctx->keylen = keylen;
62 if (!des_ekey(tmp, key) && 63 memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
63 (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { 64 return 0;
64 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; 65}
65 return -EINVAL; 66
66 } 67static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
68 const u8 *key, unsigned int keylen)
69{
70 struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
71 u32 flags;
72 int err;
73
74 flags = crypto_ablkcipher_get_flags(cipher);
75 err = __des3_verify_key(&flags, key);
76 if (unlikely(err)) {
77 crypto_ablkcipher_set_flags(cipher, flags);
78 return err;
67 } 79 }
68 80
69 ctx->keylen = keylen; 81 ctx->keylen = keylen;
@@ -250,9 +262,14 @@ static int rk_set_data_start(struct rk_crypto_info *dev)
250 u8 *src_last_blk = page_address(sg_page(dev->sg_src)) + 262 u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
251 dev->sg_src->offset + dev->sg_src->length - ivsize; 263 dev->sg_src->offset + dev->sg_src->length - ivsize;
252 264
253 /* store the iv that need to be updated in chain mode */ 265 /* Store the IV that needs to be updated in chain mode,
254 if (ctx->mode & RK_CRYPTO_DEC) 266 * and update the IV buffer to contain the next IV for decryption mode.
267 */
268 if (ctx->mode & RK_CRYPTO_DEC) {
255 memcpy(ctx->iv, src_last_blk, ivsize); 269 memcpy(ctx->iv, src_last_blk, ivsize);
270 sg_pcopy_to_buffer(dev->first, dev->src_nents, req->info,
271 ivsize, dev->total - ivsize);
272 }
256 273
257 err = dev->load_data(dev, dev->sg_src, dev->sg_dst); 274 err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
258 if (!err) 275 if (!err)
@@ -288,13 +305,19 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
288 struct ablkcipher_request *req = 305 struct ablkcipher_request *req =
289 ablkcipher_request_cast(dev->async_req); 306 ablkcipher_request_cast(dev->async_req);
290 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 307 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
308 struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
291 u32 ivsize = crypto_ablkcipher_ivsize(tfm); 309 u32 ivsize = crypto_ablkcipher_ivsize(tfm);
292 310
293 if (ivsize == DES_BLOCK_SIZE) 311 /* Update the IV buffer to contain the next IV for encryption mode. */
294 memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0, 312 if (!(ctx->mode & RK_CRYPTO_DEC)) {
295 ivsize); 313 if (dev->aligned) {
296 else if (ivsize == AES_BLOCK_SIZE) 314 memcpy(req->info, sg_virt(dev->sg_dst) +
297 memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize); 315 dev->sg_dst->length - ivsize, ivsize);
316 } else {
317 memcpy(req->info, dev->addr_vir +
318 dev->count - ivsize, ivsize);
319 }
320 }
298} 321}
299 322
300static void rk_update_iv(struct rk_crypto_info *dev) 323static void rk_update_iv(struct rk_crypto_info *dev)
@@ -457,7 +480,7 @@ struct rk_crypto_tmp rk_ecb_des_alg = {
457 .cra_u.ablkcipher = { 480 .cra_u.ablkcipher = {
458 .min_keysize = DES_KEY_SIZE, 481 .min_keysize = DES_KEY_SIZE,
459 .max_keysize = DES_KEY_SIZE, 482 .max_keysize = DES_KEY_SIZE,
460 .setkey = rk_tdes_setkey, 483 .setkey = rk_des_setkey,
461 .encrypt = rk_des_ecb_encrypt, 484 .encrypt = rk_des_ecb_encrypt,
462 .decrypt = rk_des_ecb_decrypt, 485 .decrypt = rk_des_ecb_decrypt,
463 } 486 }
@@ -483,7 +506,7 @@ struct rk_crypto_tmp rk_cbc_des_alg = {
483 .min_keysize = DES_KEY_SIZE, 506 .min_keysize = DES_KEY_SIZE,
484 .max_keysize = DES_KEY_SIZE, 507 .max_keysize = DES_KEY_SIZE,
485 .ivsize = DES_BLOCK_SIZE, 508 .ivsize = DES_BLOCK_SIZE,
486 .setkey = rk_tdes_setkey, 509 .setkey = rk_des_setkey,
487 .encrypt = rk_des_cbc_encrypt, 510 .encrypt = rk_des_cbc_encrypt,
488 .decrypt = rk_des_cbc_decrypt, 511 .decrypt = rk_des_cbc_decrypt,
489 } 512 }
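
The rockchip changes implement the output-IV contract called out in the pull summary ("Set output IV in rockchip"): after a CBC request completes, req->info must hold the IV that chains into the next request. For encryption that is the last ciphertext block just written to the destination; for decryption it is the last ciphertext block of the source, which has to be saved up front in case in-place decryption overwrites it. The rule in isolation, as a sketch:

#include <linux/string.h>
#include <linux/types.h>

/* CBC chaining: the next IV is always the last *ciphertext* block.
 * decrypt: take it from the source before the data is transformed;
 * encrypt: take it from the destination after the data is produced. */
static void cbc_next_iv(bool decrypting, const u8 *src_last_blk,
                        const u8 *dst_last_blk, u8 *iv_out,
                        unsigned int ivsize)
{
        memcpy(iv_out, decrypting ? src_last_blk : dst_last_blk, ivsize);
}
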
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 1afdcb81d8ed..9ef25230c199 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1534,7 +1534,6 @@ static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
1534 SHASH_DESC_ON_STACK(shash, tfm); 1534 SHASH_DESC_ON_STACK(shash, tfm);
1535 1535
1536 shash->tfm = tfm; 1536 shash->tfm = tfm;
1537 shash->flags = flags & ~CRYPTO_TFM_REQ_MAY_SLEEP;
1538 1537
1539 return crypto_shash_digest(shash, data, len, out); 1538 return crypto_shash_digest(shash, data, len, out);
1540} 1539}
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 8c32a3059b4a..fd11162a915e 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -354,7 +354,7 @@ static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
354{ 354{
355 u8 state; 355 u8 state;
356 356
357 if (!IS_ENABLED(DEBUG)) 357 if (!__is_defined(DEBUG))
358 return; 358 return;
359 359
360 state = SAHARA_STATUS_GET_STATE(status); 360 state = SAHARA_STATUS_GET_STATE(status);
@@ -406,7 +406,7 @@ static void sahara_dump_descriptors(struct sahara_dev *dev)
406{ 406{
407 int i; 407 int i;
408 408
409 if (!IS_ENABLED(DEBUG)) 409 if (!__is_defined(DEBUG))
410 return; 410 return;
411 411
412 for (i = 0; i < SAHARA_MAX_HW_DESC; i++) { 412 for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
@@ -427,7 +427,7 @@ static void sahara_dump_links(struct sahara_dev *dev)
427{ 427{
428 int i; 428 int i;
429 429
430 if (!IS_ENABLED(DEBUG)) 430 if (!__is_defined(DEBUG))
431 return; 431 return;
432 432
433 for (i = 0; i < SAHARA_MAX_HW_LINK; i++) { 433 for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
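
The sahara change swaps IS_ENABLED(), which is meant for CONFIG_* symbols (static checkers flag other uses), for the lower-level __is_defined() helper from the same header. As I read the helper, __is_defined(DEBUG) evaluates to 1 when DEBUG is defined to 1, which is what a "ccflags-y += -DDEBUG" build produces. A compile-time sketch:

#include <linux/kconfig.h>
#include <linux/types.h>

static bool debug_build(void)
{
        /* true for -DDEBUG (i.e. DEBUG defined as 1), false otherwise;
         * unlike IS_ENABLED() this does not imply a CONFIG_ namespace */
        return __is_defined(DEBUG);
}
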
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 63aa78c0b12b..4491e2197d9f 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -24,6 +24,7 @@ config CRYPTO_DEV_STM32_CRYP
24 depends on ARCH_STM32 24 depends on ARCH_STM32
25 select CRYPTO_HASH 25 select CRYPTO_HASH
26 select CRYPTO_ENGINE 26 select CRYPTO_ENGINE
27 select CRYPTO_DES
27 help 28 help
28 This enables support for the CRYP (AES/DES/TDES) hw accelerator which 29 This enables support for the CRYP (AES/DES/TDES) hw accelerator which
29 can be found on STMicroelectronics STM32 SOC. 30 can be found on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 23b0b7bd64c7..cddcc97875b2 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -137,7 +137,6 @@ struct stm32_cryp {
137 137
138 struct crypto_engine *engine; 138 struct crypto_engine *engine;
139 139
140 struct mutex lock; /* protects req / areq */
141 struct ablkcipher_request *req; 140 struct ablkcipher_request *req;
142 struct aead_request *areq; 141 struct aead_request *areq;
143 142
@@ -394,6 +393,23 @@ static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv)
394 } 393 }
395} 394}
396 395
396static void stm32_cryp_get_iv(struct stm32_cryp *cryp)
397{
398 struct ablkcipher_request *req = cryp->req;
399 u32 *tmp = req->info;
400
401 if (!tmp)
402 return;
403
404 *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0LR));
405 *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0RR));
406
407 if (is_aes(cryp)) {
408 *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1LR));
409 *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1RR));
410 }
411}
412
397static void stm32_cryp_hw_write_key(struct stm32_cryp *c) 413static void stm32_cryp_hw_write_key(struct stm32_cryp *c)
398{ 414{
399 unsigned int i; 415 unsigned int i;
@@ -623,6 +639,9 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
623 /* Phase 4 : output tag */ 639 /* Phase 4 : output tag */
624 err = stm32_cryp_read_auth_tag(cryp); 640 err = stm32_cryp_read_auth_tag(cryp);
625 641
642 if (!err && (!(is_gcm(cryp) || is_ccm(cryp))))
643 stm32_cryp_get_iv(cryp);
644
626 if (cryp->sgs_copied) { 645 if (cryp->sgs_copied) {
627 void *buf_in, *buf_out; 646 void *buf_in, *buf_out;
628 int pages, len; 647 int pages, len;
@@ -645,18 +664,13 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
645 pm_runtime_mark_last_busy(cryp->dev); 664 pm_runtime_mark_last_busy(cryp->dev);
646 pm_runtime_put_autosuspend(cryp->dev); 665 pm_runtime_put_autosuspend(cryp->dev);
647 666
648 if (is_gcm(cryp) || is_ccm(cryp)) { 667 if (is_gcm(cryp) || is_ccm(cryp))
649 crypto_finalize_aead_request(cryp->engine, cryp->areq, err); 668 crypto_finalize_aead_request(cryp->engine, cryp->areq, err);
650 cryp->areq = NULL; 669 else
651 } else {
652 crypto_finalize_ablkcipher_request(cryp->engine, cryp->req, 670 crypto_finalize_ablkcipher_request(cryp->engine, cryp->req,
653 err); 671 err);
654 cryp->req = NULL;
655 }
656 672
657 memset(cryp->ctx->key, 0, cryp->ctx->keylen); 673 memset(cryp->ctx->key, 0, cryp->ctx->keylen);
658
659 mutex_unlock(&cryp->lock);
660} 674}
661 675
662static int stm32_cryp_cpu_start(struct stm32_cryp *cryp) 676static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
@@ -753,19 +767,35 @@ static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
753static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 767static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
754 unsigned int keylen) 768 unsigned int keylen)
755{ 769{
770 u32 tmp[DES_EXPKEY_WORDS];
771
756 if (keylen != DES_KEY_SIZE) 772 if (keylen != DES_KEY_SIZE)
757 return -EINVAL; 773 return -EINVAL;
758 else 774
759 return stm32_cryp_setkey(tfm, key, keylen); 775 if ((crypto_ablkcipher_get_flags(tfm) &
776 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
777 unlikely(!des_ekey(tmp, key))) {
778 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
779 return -EINVAL;
780 }
781
782 return stm32_cryp_setkey(tfm, key, keylen);
760} 783}
761 784
762static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 785static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
763 unsigned int keylen) 786 unsigned int keylen)
764{ 787{
765 if (keylen != (3 * DES_KEY_SIZE)) 788 u32 flags;
766 return -EINVAL; 789 int err;
767 else 790
768 return stm32_cryp_setkey(tfm, key, keylen); 791 flags = crypto_ablkcipher_get_flags(tfm);
792 err = __des3_verify_key(&flags, key);
793 if (unlikely(err)) {
794 crypto_ablkcipher_set_flags(tfm, flags);
795 return err;
796 }
797
798 return stm32_cryp_setkey(tfm, key, keylen);
769} 799}
770 800
771static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key, 801static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
@@ -917,8 +947,6 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
917 if (!cryp) 947 if (!cryp)
918 return -ENODEV; 948 return -ENODEV;
919 949
920 mutex_lock(&cryp->lock);
921
922 rctx = req ? ablkcipher_request_ctx(req) : aead_request_ctx(areq); 950 rctx = req ? ablkcipher_request_ctx(req) : aead_request_ctx(areq);
923 rctx->mode &= FLG_MODE_MASK; 951 rctx->mode &= FLG_MODE_MASK;
924 952
@@ -930,6 +958,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
930 958
931 if (req) { 959 if (req) {
932 cryp->req = req; 960 cryp->req = req;
961 cryp->areq = NULL;
933 cryp->total_in = req->nbytes; 962 cryp->total_in = req->nbytes;
934 cryp->total_out = cryp->total_in; 963 cryp->total_out = cryp->total_in;
935 } else { 964 } else {
@@ -955,6 +984,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
955 * <---------- total_out -----------------> 984 * <---------- total_out ----------------->
956 */ 985 */
957 cryp->areq = areq; 986 cryp->areq = areq;
987 cryp->req = NULL;
958 cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq)); 988 cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
959 cryp->total_in = areq->assoclen + areq->cryptlen; 989 cryp->total_in = areq->assoclen + areq->cryptlen;
960 if (is_encrypt(cryp)) 990 if (is_encrypt(cryp))
@@ -976,19 +1006,19 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
976 if (cryp->in_sg_len < 0) { 1006 if (cryp->in_sg_len < 0) {
977 dev_err(cryp->dev, "Cannot get in_sg_len\n"); 1007 dev_err(cryp->dev, "Cannot get in_sg_len\n");
978 ret = cryp->in_sg_len; 1008 ret = cryp->in_sg_len;
979 goto out; 1009 return ret;
980 } 1010 }
981 1011
982 cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out); 1012 cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out);
983 if (cryp->out_sg_len < 0) { 1013 if (cryp->out_sg_len < 0) {
984 dev_err(cryp->dev, "Cannot get out_sg_len\n"); 1014 dev_err(cryp->dev, "Cannot get out_sg_len\n");
985 ret = cryp->out_sg_len; 1015 ret = cryp->out_sg_len;
986 goto out; 1016 return ret;
987 } 1017 }
988 1018
989 ret = stm32_cryp_copy_sgs(cryp); 1019 ret = stm32_cryp_copy_sgs(cryp);
990 if (ret) 1020 if (ret)
991 goto out; 1021 return ret;
992 1022
993 scatterwalk_start(&cryp->in_walk, cryp->in_sg); 1023 scatterwalk_start(&cryp->in_walk, cryp->in_sg);
994 scatterwalk_start(&cryp->out_walk, cryp->out_sg); 1024 scatterwalk_start(&cryp->out_walk, cryp->out_sg);
@@ -1000,10 +1030,6 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
1000 } 1030 }
1001 1031
1002 ret = stm32_cryp_hw_init(cryp); 1032 ret = stm32_cryp_hw_init(cryp);
1003out:
1004 if (ret)
1005 mutex_unlock(&cryp->lock);
1006
1007 return ret; 1033 return ret;
1008} 1034}
1009 1035
@@ -1943,8 +1969,6 @@ static int stm32_cryp_probe(struct platform_device *pdev)
1943 1969
1944 cryp->dev = dev; 1970 cryp->dev = dev;
1945 1971
1946 mutex_init(&cryp->lock);
1947
1948 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1972 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1949 cryp->regs = devm_ioremap_resource(dev, res); 1973 cryp->regs = devm_ioremap_resource(dev, res);
1950 if (IS_ERR(cryp->regs)) 1974 if (IS_ERR(cryp->regs))
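
Two independent things happen in the stm32-cryp diff: stm32_cryp_get_iv() copies the final IV registers back into req->info for the plain (non-GCM/CCM) modes, again satisfying the output-IV contract, and the per-device mutex is dropped, apparently on the assumption that crypto_engine already serializes prepare/run/finalize one request at a time (hence the new cross-clearing of cryp->req and cryp->areq). A user-side sketch of why the read-back matters, splitting one CBC stream across two requests; the use of the sync skcipher wrappers here is illustrative, not driver code:

#include <crypto/skcipher.h>

static int chained_cbc_encrypt(struct crypto_sync_skcipher *tfm,
                               struct scatterlist *sg1, unsigned int len1,
                               struct scatterlist *sg2, unsigned int len2,
                               u8 *iv)
{
        SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
        int err;

        skcipher_request_set_sync_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);

        skcipher_request_set_crypt(req, sg1, sg1, len1, iv);
        err = crypto_skcipher_encrypt(req);
        if (err)
                return err;

        /* iv now holds the last ciphertext block of part 1, so part 2
         * continues the chain exactly as one big request would have */
        skcipher_request_set_crypt(req, sg2, sg2, len2, iv);
        return crypto_skcipher_encrypt(req);
}
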
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 4a6cc8a3045d..bfc49e67124b 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -181,8 +181,6 @@ struct stm32_hash_dev {
181 u32 dma_mode; 181 u32 dma_mode;
182 u32 dma_maxburst; 182 u32 dma_maxburst;
183 183
184 spinlock_t lock; /* lock to protect queue */
185
186 struct ahash_request *req; 184 struct ahash_request *req;
187 struct crypto_engine *engine; 185 struct crypto_engine *engine;
188 186
@@ -977,7 +975,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
977 975
978 pm_runtime_get_sync(hdev->dev); 976 pm_runtime_get_sync(hdev->dev);
979 977
980 while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY)) 978 while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
981 cpu_relax(); 979 cpu_relax();
982 980
983 rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER, 981 rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index 54fd714d53ca..b060a0810934 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -41,11 +41,6 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq)
41 if (!areq->cryptlen) 41 if (!areq->cryptlen)
42 return 0; 42 return 0;
43 43
44 if (!areq->iv) {
45 dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
46 return -EINVAL;
47 }
48
49 if (!areq->src || !areq->dst) { 44 if (!areq->src || !areq->dst) {
50 dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n"); 45 dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
51 return -EINVAL; 46 return -EINVAL;
@@ -134,6 +129,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
134 struct scatterlist *out_sg = areq->dst; 129 struct scatterlist *out_sg = areq->dst;
135 unsigned int ivsize = crypto_skcipher_ivsize(tfm); 130 unsigned int ivsize = crypto_skcipher_ivsize(tfm);
136 struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq); 131 struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
132 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
133 struct sun4i_ss_alg_template *algt;
137 u32 mode = ctx->mode; 134 u32 mode = ctx->mode;
138 /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */ 135 /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
139 u32 rx_cnt = SS_RX_DEFAULT; 136 u32 rx_cnt = SS_RX_DEFAULT;
@@ -153,20 +150,20 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
153 unsigned int obo = 0; /* offset in bufo*/ 150 unsigned int obo = 0; /* offset in bufo*/
154 unsigned int obl = 0; /* length of data in bufo */ 151 unsigned int obl = 0; /* length of data in bufo */
155 unsigned long flags; 152 unsigned long flags;
153 bool need_fallback = false;
156 154
157 if (!areq->cryptlen) 155 if (!areq->cryptlen)
158 return 0; 156 return 0;
159 157
160 if (!areq->iv) {
161 dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
162 return -EINVAL;
163 }
164
165 if (!areq->src || !areq->dst) { 158 if (!areq->src || !areq->dst) {
166 dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n"); 159 dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
167 return -EINVAL; 160 return -EINVAL;
168 } 161 }
169 162
163 algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
164 if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
165 need_fallback = true;
166
170 /* 167 /*
171 * if we have only SGs with size multiple of 4, 168 * if we have only SGs with size multiple of 4,
172 * we can use the SS optimized function 169 * we can use the SS optimized function
@@ -182,9 +179,24 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
182 out_sg = sg_next(out_sg); 179 out_sg = sg_next(out_sg);
183 } 180 }
184 181
185 if (no_chunk == 1) 182 if (no_chunk == 1 && !need_fallback)
186 return sun4i_ss_opti_poll(areq); 183 return sun4i_ss_opti_poll(areq);
187 184
185 if (need_fallback) {
186 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
187 skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
188 skcipher_request_set_callback(subreq, areq->base.flags, NULL,
189 NULL);
190 skcipher_request_set_crypt(subreq, areq->src, areq->dst,
191 areq->cryptlen, areq->iv);
192 if (ctx->mode & SS_DECRYPTION)
193 err = crypto_skcipher_decrypt(subreq);
194 else
195 err = crypto_skcipher_encrypt(subreq);
196 skcipher_request_zero(subreq);
197 return err;
198 }
199
188 spin_lock_irqsave(&ss->slock, flags); 200 spin_lock_irqsave(&ss->slock, flags);
189 201
190 for (i = 0; i < op->keylen; i += 4) 202 for (i = 0; i < op->keylen; i += 4)
@@ -458,6 +470,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
458{ 470{
459 struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); 471 struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
460 struct sun4i_ss_alg_template *algt; 472 struct sun4i_ss_alg_template *algt;
473 const char *name = crypto_tfm_alg_name(tfm);
461 474
462 memset(op, 0, sizeof(struct sun4i_tfm_ctx)); 475 memset(op, 0, sizeof(struct sun4i_tfm_ctx));
463 476
@@ -468,9 +481,22 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
468 crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), 481 crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
469 sizeof(struct sun4i_cipher_req_ctx)); 482 sizeof(struct sun4i_cipher_req_ctx));
470 483
484 op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
485 if (IS_ERR(op->fallback_tfm)) {
486 dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
487 name, PTR_ERR(op->fallback_tfm));
488 return PTR_ERR(op->fallback_tfm);
489 }
490
471 return 0; 491 return 0;
472} 492}
473 493
494void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
495{
496 struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
497 crypto_free_sync_skcipher(op->fallback_tfm);
498}
499
474/* check and set the AES key, prepare the mode to be used */ 500/* check and set the AES key, prepare the mode to be used */
475int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, 501int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
476 unsigned int keylen) 502 unsigned int keylen)
@@ -495,7 +521,11 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
495 } 521 }
496 op->keylen = keylen; 522 op->keylen = keylen;
497 memcpy(op->key, key, keylen); 523 memcpy(op->key, key, keylen);
498 return 0; 524
525 crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
526 crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
527
528 return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
499} 529}
500 530
501/* check and set the DES key, prepare the mode to be used */ 531/* check and set the DES key, prepare the mode to be used */
@@ -525,7 +555,11 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
525 555
526 op->keylen = keylen; 556 op->keylen = keylen;
527 memcpy(op->key, key, keylen); 557 memcpy(op->key, key, keylen);
528 return 0; 558
559 crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
560 crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
561
562 return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
529} 563}
530 564
531/* check and set the 3DES key, prepare the mode to be used */ 565/* check and set the 3DES key, prepare the mode to be used */
@@ -533,14 +567,18 @@ int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
533 unsigned int keylen) 567 unsigned int keylen)
534{ 568{
535 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); 569 struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
536 struct sun4i_ss_ctx *ss = op->ss; 570 int err;
571
572 err = des3_verify_key(tfm, key);
573 if (unlikely(err))
574 return err;
537 575
538 if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
539 dev_err(ss->dev, "Invalid keylen %u\n", keylen);
540 crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
541 return -EINVAL;
542 }
543 op->keylen = keylen; 576 op->keylen = keylen;
544 memcpy(op->key, key, keylen); 577 memcpy(op->key, key, keylen);
545 return 0; 578
579 crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
580 crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
581
582 return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
583
546} 584}
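
The sun4i-ss cipher changes wire up the standard software-fallback pattern for requests the engine cannot handle (here, lengths that are not a multiple of the block size): allocate a sync skcipher by algorithm name in .cra_init with CRYPTO_ALG_NEED_FALLBACK, mirror the request flags and key into it at setkey time, free it in .cra_exit, and delegate in the request path as the hunk above shows. A condensed sketch of the lifetime half of that pattern, with hypothetical names:

#include <crypto/internal/skcipher.h>
#include <linux/err.h>

struct fb_ctx {
        struct crypto_sync_skcipher *fallback;
};

static int fb_init(struct crypto_tfm *tfm)
{
        struct fb_ctx *ctx = crypto_tfm_ctx(tfm);

        /* resolves to a software implementation of the same cra_name */
        ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(tfm),
                                                   0, CRYPTO_ALG_NEED_FALLBACK);
        return PTR_ERR_OR_ZERO(ctx->fallback);
}

static void fb_exit(struct crypto_tfm *tfm)
{
        struct fb_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_sync_skcipher(ctx->fallback);
}
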
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index 89adf9e0fed2..05b3d3c32f6d 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -92,11 +92,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
92 .cra_driver_name = "cbc-aes-sun4i-ss", 92 .cra_driver_name = "cbc-aes-sun4i-ss",
93 .cra_priority = 300, 93 .cra_priority = 300,
94 .cra_blocksize = AES_BLOCK_SIZE, 94 .cra_blocksize = AES_BLOCK_SIZE,
95 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY, 95 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
96 .cra_ctxsize = sizeof(struct sun4i_tfm_ctx), 96 .cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
97 .cra_module = THIS_MODULE, 97 .cra_module = THIS_MODULE,
98 .cra_alignmask = 3, 98 .cra_alignmask = 3,
99 .cra_init = sun4i_ss_cipher_init, 99 .cra_init = sun4i_ss_cipher_init,
100 .cra_exit = sun4i_ss_cipher_exit,
100 } 101 }
101 } 102 }
102}, 103},
@@ -107,17 +108,17 @@ static struct sun4i_ss_alg_template ss_algs[] = {
107 .decrypt = sun4i_ss_ecb_aes_decrypt, 108 .decrypt = sun4i_ss_ecb_aes_decrypt,
108 .min_keysize = AES_MIN_KEY_SIZE, 109 .min_keysize = AES_MIN_KEY_SIZE,
109 .max_keysize = AES_MAX_KEY_SIZE, 110 .max_keysize = AES_MAX_KEY_SIZE,
110 .ivsize = AES_BLOCK_SIZE,
111 .base = { 111 .base = {
112 .cra_name = "ecb(aes)", 112 .cra_name = "ecb(aes)",
113 .cra_driver_name = "ecb-aes-sun4i-ss", 113 .cra_driver_name = "ecb-aes-sun4i-ss",
114 .cra_priority = 300, 114 .cra_priority = 300,
115 .cra_blocksize = AES_BLOCK_SIZE, 115 .cra_blocksize = AES_BLOCK_SIZE,
116 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY, 116 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
117 .cra_ctxsize = sizeof(struct sun4i_tfm_ctx), 117 .cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
118 .cra_module = THIS_MODULE, 118 .cra_module = THIS_MODULE,
119 .cra_alignmask = 3, 119 .cra_alignmask = 3,
120 .cra_init = sun4i_ss_cipher_init, 120 .cra_init = sun4i_ss_cipher_init,
121 .cra_exit = sun4i_ss_cipher_exit,
121 } 122 }
122 } 123 }
123}, 124},
@@ -134,11 +135,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
134 .cra_driver_name = "cbc-des-sun4i-ss", 135 .cra_driver_name = "cbc-des-sun4i-ss",
135 .cra_priority = 300, 136 .cra_priority = 300,
136 .cra_blocksize = DES_BLOCK_SIZE, 137 .cra_blocksize = DES_BLOCK_SIZE,
137 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY, 138 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
138 .cra_ctxsize = sizeof(struct sun4i_req_ctx), 139 .cra_ctxsize = sizeof(struct sun4i_req_ctx),
139 .cra_module = THIS_MODULE, 140 .cra_module = THIS_MODULE,
140 .cra_alignmask = 3, 141 .cra_alignmask = 3,
141 .cra_init = sun4i_ss_cipher_init, 142 .cra_init = sun4i_ss_cipher_init,
143 .cra_exit = sun4i_ss_cipher_exit,
142 } 144 }
143 } 145 }
144}, 146},
@@ -154,11 +156,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
154 .cra_driver_name = "ecb-des-sun4i-ss", 156 .cra_driver_name = "ecb-des-sun4i-ss",
155 .cra_priority = 300, 157 .cra_priority = 300,
156 .cra_blocksize = DES_BLOCK_SIZE, 158 .cra_blocksize = DES_BLOCK_SIZE,
157 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY, 159 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
158 .cra_ctxsize = sizeof(struct sun4i_req_ctx), 160 .cra_ctxsize = sizeof(struct sun4i_req_ctx),
159 .cra_module = THIS_MODULE, 161 .cra_module = THIS_MODULE,
160 .cra_alignmask = 3, 162 .cra_alignmask = 3,
161 .cra_init = sun4i_ss_cipher_init, 163 .cra_init = sun4i_ss_cipher_init,
164 .cra_exit = sun4i_ss_cipher_exit,
162 } 165 }
163 } 166 }
164}, 167},
@@ -175,11 +178,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
175 .cra_driver_name = "cbc-des3-sun4i-ss", 178 .cra_driver_name = "cbc-des3-sun4i-ss",
176 .cra_priority = 300, 179 .cra_priority = 300,
177 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 180 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
178 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY, 181 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
179 .cra_ctxsize = sizeof(struct sun4i_req_ctx), 182 .cra_ctxsize = sizeof(struct sun4i_req_ctx),
180 .cra_module = THIS_MODULE, 183 .cra_module = THIS_MODULE,
181 .cra_alignmask = 3, 184 .cra_alignmask = 3,
182 .cra_init = sun4i_ss_cipher_init, 185 .cra_init = sun4i_ss_cipher_init,
186 .cra_exit = sun4i_ss_cipher_exit,
183 } 187 }
184 } 188 }
185}, 189},
@@ -190,16 +194,17 @@ static struct sun4i_ss_alg_template ss_algs[] = {
190 .decrypt = sun4i_ss_ecb_des3_decrypt, 194 .decrypt = sun4i_ss_ecb_des3_decrypt,
191 .min_keysize = DES3_EDE_KEY_SIZE, 195 .min_keysize = DES3_EDE_KEY_SIZE,
192 .max_keysize = DES3_EDE_KEY_SIZE, 196 .max_keysize = DES3_EDE_KEY_SIZE,
193 .ivsize = DES3_EDE_BLOCK_SIZE,
194 .base = { 197 .base = {
195 .cra_name = "ecb(des3_ede)", 198 .cra_name = "ecb(des3_ede)",
196 .cra_driver_name = "ecb-des3-sun4i-ss", 199 .cra_driver_name = "ecb-des3-sun4i-ss",
197 .cra_priority = 300, 200 .cra_priority = 300,
198 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 201 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
202 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
199 .cra_ctxsize = sizeof(struct sun4i_req_ctx), 203 .cra_ctxsize = sizeof(struct sun4i_req_ctx),
200 .cra_module = THIS_MODULE, 204 .cra_module = THIS_MODULE,
201 .cra_alignmask = 3, 205 .cra_alignmask = 3,
202 .cra_init = sun4i_ss_cipher_init, 206 .cra_init = sun4i_ss_cipher_init,
207 .cra_exit = sun4i_ss_cipher_exit,
203 } 208 }
204 } 209 }
205}, 210},
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
index a4b5ff2b72f8..f6936bb3b7be 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
@@ -240,7 +240,10 @@ static int sun4i_hash(struct ahash_request *areq)
240 } 240 }
241 } else { 241 } else {
242 /* Since we have the flag final, we can go up to modulo 4 */ 242 /* Since we have the flag final, we can go up to modulo 4 */
243 end = ((areq->nbytes + op->len) / 4) * 4 - op->len; 243 if (areq->nbytes < 4)
244 end = 0;
245 else
246 end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
244 } 247 }
245 248
246 /* TODO if SGlen % 4 and !op->len then DMA */ 249 /* TODO if SGlen % 4 and !op->len then DMA */
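
The sun4i-ss hash guard avoids an unsigned underflow in the "end" computation when fewer than 4 fresh bytes arrive with the final flag. Taking op->len as the count of bytes already buffered (my reading of the context), areq->nbytes == 1 with op->len == 2 gives ((1 + 2) / 4) * 4 - 2 == 0 - 2, which wraps to a huge unsigned value; clamping end to 0 in that case is safe because nothing 4-byte aligned can be fed to the engine yet. A standalone demonstration of the wrap:

#include <stdio.h>

int main(void)
{
        unsigned int nbytes = 1;        /* areq->nbytes */
        unsigned int buffered = 2;      /* op->len */
        unsigned int end = ((nbytes + buffered) / 4) * 4 - buffered;

        printf("%u\n", end);    /* 4294967294: the wrap the guard prevents */
        return 0;
}
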
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h
index f3ac90692ac6..8c4ec9e93565 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss.h
+++ b/drivers/crypto/sunxi-ss/sun4i-ss.h
@@ -161,6 +161,7 @@ struct sun4i_tfm_ctx {
161 u32 keylen; 161 u32 keylen;
162 u32 keymode; 162 u32 keymode;
163 struct sun4i_ss_ctx *ss; 163 struct sun4i_ss_ctx *ss;
164 struct crypto_sync_skcipher *fallback_tfm;
164}; 165};
165 166
166struct sun4i_cipher_req_ctx { 167struct sun4i_cipher_req_ctx {
@@ -203,6 +204,7 @@ int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq);
203int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq); 204int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq);
204 205
205int sun4i_ss_cipher_init(struct crypto_tfm *tfm); 206int sun4i_ss_cipher_init(struct crypto_tfm *tfm);
207void sun4i_ss_cipher_exit(struct crypto_tfm *tfm);
206int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, 208int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
207 unsigned int keylen); 209 unsigned int keylen);
208int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key, 210int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index de78b54bcfb1..1d429fc073d1 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -913,6 +913,54 @@ badkey:
913 return -EINVAL; 913 return -EINVAL;
914} 914}
915 915
916static int aead_des3_setkey(struct crypto_aead *authenc,
917 const u8 *key, unsigned int keylen)
918{
919 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
920 struct device *dev = ctx->dev;
921 struct crypto_authenc_keys keys;
922 u32 flags;
923 int err;
924
925 err = crypto_authenc_extractkeys(&keys, key, keylen);
926 if (unlikely(err))
927 goto badkey;
928
929 err = -EINVAL;
930 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
931 goto badkey;
932
933 if (keys.enckeylen != DES3_EDE_KEY_SIZE)
934 goto badkey;
935
936 flags = crypto_aead_get_flags(authenc);
937 err = __des3_verify_key(&flags, keys.enckey);
938 if (unlikely(err)) {
939 crypto_aead_set_flags(authenc, flags);
940 goto out;
941 }
942
943 if (ctx->keylen)
944 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
945
946 memcpy(ctx->key, keys.authkey, keys.authkeylen);
947 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
948
949 ctx->keylen = keys.authkeylen + keys.enckeylen;
950 ctx->enckeylen = keys.enckeylen;
951 ctx->authkeylen = keys.authkeylen;
952 ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
953 DMA_TO_DEVICE);
954
955out:
956 memzero_explicit(&keys, sizeof(keys));
957 return err;
958
959badkey:
960 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
961 goto out;
962}
963
916/* 964/*
917 * talitos_edesc - s/w-extended descriptor 965 * talitos_edesc - s/w-extended descriptor
918 * @src_nents: number of segments in input scatterlist 966 * @src_nents: number of segments in input scatterlist
@@ -1527,12 +1575,22 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1527{ 1575{
1528 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); 1576 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1529 struct device *dev = ctx->dev; 1577 struct device *dev = ctx->dev;
1530 u32 tmp[DES_EXPKEY_WORDS];
1531 1578
1532 if (keylen > TALITOS_MAX_KEY_SIZE) { 1579 if (ctx->keylen)
1533 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 1580 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1534 return -EINVAL; 1581
1535 } 1582 memcpy(&ctx->key, key, keylen);
1583 ctx->keylen = keylen;
1584
1585 ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
1586
1587 return 0;
1588}
1589
1590static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
1591 const u8 *key, unsigned int keylen)
1592{
1593 u32 tmp[DES_EXPKEY_WORDS];
1536 1594
1537 if (unlikely(crypto_ablkcipher_get_flags(cipher) & 1595 if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1538 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) && 1596 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
@@ -1541,15 +1599,23 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1541 return -EINVAL; 1599 return -EINVAL;
1542 } 1600 }
1543 1601
1544 if (ctx->keylen) 1602 return ablkcipher_setkey(cipher, key, keylen);
1545 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE); 1603}
1546 1604
1547 memcpy(&ctx->key, key, keylen); 1605static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
1548 ctx->keylen = keylen; 1606 const u8 *key, unsigned int keylen)
1607{
1608 u32 flags;
1609 int err;
1549 1610
1550 ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE); 1611 flags = crypto_ablkcipher_get_flags(cipher);
1612 err = __des3_verify_key(&flags, key);
1613 if (unlikely(err)) {
1614 crypto_ablkcipher_set_flags(cipher, flags);
1615 return err;
1616 }
1551 1617
1552 return 0; 1618 return ablkcipher_setkey(cipher, key, keylen);
1553} 1619}
1554 1620
1555static void common_nonsnoop_unmap(struct device *dev, 1621static void common_nonsnoop_unmap(struct device *dev,
@@ -2313,6 +2379,7 @@ static struct talitos_alg_template driver_algs[] = {
2313 }, 2379 },
2314 .ivsize = DES3_EDE_BLOCK_SIZE, 2380 .ivsize = DES3_EDE_BLOCK_SIZE,
2315 .maxauthsize = SHA1_DIGEST_SIZE, 2381 .maxauthsize = SHA1_DIGEST_SIZE,
2382 .setkey = aead_des3_setkey,
2316 }, 2383 },
2317 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 2384 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2318 DESC_HDR_SEL0_DEU | 2385 DESC_HDR_SEL0_DEU |
@@ -2336,6 +2403,7 @@ static struct talitos_alg_template driver_algs[] = {
2336 }, 2403 },
2337 .ivsize = DES3_EDE_BLOCK_SIZE, 2404 .ivsize = DES3_EDE_BLOCK_SIZE,
2338 .maxauthsize = SHA1_DIGEST_SIZE, 2405 .maxauthsize = SHA1_DIGEST_SIZE,
2406 .setkey = aead_des3_setkey,
2339 }, 2407 },
2340 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU | 2408 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2341 DESC_HDR_SEL0_DEU | 2409 DESC_HDR_SEL0_DEU |
@@ -2399,6 +2467,7 @@ static struct talitos_alg_template driver_algs[] = {
2399 }, 2467 },
2400 .ivsize = DES3_EDE_BLOCK_SIZE, 2468 .ivsize = DES3_EDE_BLOCK_SIZE,
2401 .maxauthsize = SHA224_DIGEST_SIZE, 2469 .maxauthsize = SHA224_DIGEST_SIZE,
2470 .setkey = aead_des3_setkey,
2402 }, 2471 },
2403 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 2472 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2404 DESC_HDR_SEL0_DEU | 2473 DESC_HDR_SEL0_DEU |
@@ -2422,6 +2491,7 @@ static struct talitos_alg_template driver_algs[] = {
2422 }, 2491 },
2423 .ivsize = DES3_EDE_BLOCK_SIZE, 2492 .ivsize = DES3_EDE_BLOCK_SIZE,
2424 .maxauthsize = SHA224_DIGEST_SIZE, 2493 .maxauthsize = SHA224_DIGEST_SIZE,
2494 .setkey = aead_des3_setkey,
2425 }, 2495 },
2426 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU | 2496 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2427 DESC_HDR_SEL0_DEU | 2497 DESC_HDR_SEL0_DEU |
@@ -2485,6 +2555,7 @@ static struct talitos_alg_template driver_algs[] = {
2485 }, 2555 },
2486 .ivsize = DES3_EDE_BLOCK_SIZE, 2556 .ivsize = DES3_EDE_BLOCK_SIZE,
2487 .maxauthsize = SHA256_DIGEST_SIZE, 2557 .maxauthsize = SHA256_DIGEST_SIZE,
2558 .setkey = aead_des3_setkey,
2488 }, 2559 },
2489 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 2560 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2490 DESC_HDR_SEL0_DEU | 2561 DESC_HDR_SEL0_DEU |
@@ -2508,6 +2579,7 @@ static struct talitos_alg_template driver_algs[] = {
2508 }, 2579 },
2509 .ivsize = DES3_EDE_BLOCK_SIZE, 2580 .ivsize = DES3_EDE_BLOCK_SIZE,
2510 .maxauthsize = SHA256_DIGEST_SIZE, 2581 .maxauthsize = SHA256_DIGEST_SIZE,
2582 .setkey = aead_des3_setkey,
2511 }, 2583 },
2512 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU | 2584 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2513 DESC_HDR_SEL0_DEU | 2585 DESC_HDR_SEL0_DEU |
@@ -2550,6 +2622,7 @@ static struct talitos_alg_template driver_algs[] = {
2550 }, 2622 },
2551 .ivsize = DES3_EDE_BLOCK_SIZE, 2623 .ivsize = DES3_EDE_BLOCK_SIZE,
2552 .maxauthsize = SHA384_DIGEST_SIZE, 2624 .maxauthsize = SHA384_DIGEST_SIZE,
2625 .setkey = aead_des3_setkey,
2553 }, 2626 },
2554 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 2627 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2555 DESC_HDR_SEL0_DEU | 2628 DESC_HDR_SEL0_DEU |
@@ -2592,6 +2665,7 @@ static struct talitos_alg_template driver_algs[] = {
2592 }, 2665 },
2593 .ivsize = DES3_EDE_BLOCK_SIZE, 2666 .ivsize = DES3_EDE_BLOCK_SIZE,
2594 .maxauthsize = SHA512_DIGEST_SIZE, 2667 .maxauthsize = SHA512_DIGEST_SIZE,
2668 .setkey = aead_des3_setkey,
2595 }, 2669 },
2596 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 2670 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2597 DESC_HDR_SEL0_DEU | 2671 DESC_HDR_SEL0_DEU |
@@ -2654,6 +2728,7 @@ static struct talitos_alg_template driver_algs[] = {
2654 }, 2728 },
2655 .ivsize = DES3_EDE_BLOCK_SIZE, 2729 .ivsize = DES3_EDE_BLOCK_SIZE,
2656 .maxauthsize = MD5_DIGEST_SIZE, 2730 .maxauthsize = MD5_DIGEST_SIZE,
2731 .setkey = aead_des3_setkey,
2657 }, 2732 },
2658 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 2733 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2659 DESC_HDR_SEL0_DEU | 2734 DESC_HDR_SEL0_DEU |
@@ -2676,6 +2751,7 @@ static struct talitos_alg_template driver_algs[] = {
2676 }, 2751 },
2677 .ivsize = DES3_EDE_BLOCK_SIZE, 2752 .ivsize = DES3_EDE_BLOCK_SIZE,
2678 .maxauthsize = MD5_DIGEST_SIZE, 2753 .maxauthsize = MD5_DIGEST_SIZE,
2754 .setkey = aead_des3_setkey,
2679 }, 2755 },
2680 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU | 2756 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2681 DESC_HDR_SEL0_DEU | 2757 DESC_HDR_SEL0_DEU |
@@ -2748,6 +2824,7 @@ static struct talitos_alg_template driver_algs[] = {
2748 .min_keysize = DES_KEY_SIZE, 2824 .min_keysize = DES_KEY_SIZE,
2749 .max_keysize = DES_KEY_SIZE, 2825 .max_keysize = DES_KEY_SIZE,
2750 .ivsize = DES_BLOCK_SIZE, 2826 .ivsize = DES_BLOCK_SIZE,
2827 .setkey = ablkcipher_des_setkey,
2751 } 2828 }
2752 }, 2829 },
2753 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2830 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2764,6 +2841,7 @@ static struct talitos_alg_template driver_algs[] = {
2764 .min_keysize = DES_KEY_SIZE, 2841 .min_keysize = DES_KEY_SIZE,
2765 .max_keysize = DES_KEY_SIZE, 2842 .max_keysize = DES_KEY_SIZE,
2766 .ivsize = DES_BLOCK_SIZE, 2843 .ivsize = DES_BLOCK_SIZE,
2844 .setkey = ablkcipher_des_setkey,
2767 } 2845 }
2768 }, 2846 },
2769 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2847 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2781,6 +2859,7 @@ static struct talitos_alg_template driver_algs[] = {
2781 .min_keysize = DES3_EDE_KEY_SIZE, 2859 .min_keysize = DES3_EDE_KEY_SIZE,
2782 .max_keysize = DES3_EDE_KEY_SIZE, 2860 .max_keysize = DES3_EDE_KEY_SIZE,
2783 .ivsize = DES3_EDE_BLOCK_SIZE, 2861 .ivsize = DES3_EDE_BLOCK_SIZE,
2862 .setkey = ablkcipher_des3_setkey,
2784 } 2863 }
2785 }, 2864 },
2786 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2865 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2798,6 +2877,7 @@ static struct talitos_alg_template driver_algs[] = {
2798 .min_keysize = DES3_EDE_KEY_SIZE, 2877 .min_keysize = DES3_EDE_KEY_SIZE,
2799 .max_keysize = DES3_EDE_KEY_SIZE, 2878 .max_keysize = DES3_EDE_KEY_SIZE,
2800 .ivsize = DES3_EDE_BLOCK_SIZE, 2879 .ivsize = DES3_EDE_BLOCK_SIZE,
2880 .setkey = ablkcipher_des3_setkey,
2801 } 2881 }
2802 }, 2882 },
2803 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2883 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -3144,7 +3224,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3144 alg->cra_init = talitos_cra_init; 3224 alg->cra_init = talitos_cra_init;
3145 alg->cra_exit = talitos_cra_exit; 3225 alg->cra_exit = talitos_cra_exit;
3146 alg->cra_type = &crypto_ablkcipher_type; 3226 alg->cra_type = &crypto_ablkcipher_type;
3147 alg->cra_ablkcipher.setkey = ablkcipher_setkey; 3227 alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
3228 ablkcipher_setkey;
3148 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt; 3229 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3149 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt; 3230 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3150 break; 3231 break;
@@ -3152,7 +3233,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3152 alg = &t_alg->algt.alg.aead.base; 3233 alg = &t_alg->algt.alg.aead.base;
3153 alg->cra_exit = talitos_cra_exit; 3234 alg->cra_exit = talitos_cra_exit;
3154 t_alg->algt.alg.aead.init = talitos_cra_init_aead; 3235 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3155 t_alg->algt.alg.aead.setkey = aead_setkey; 3236 t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
3237 aead_setkey;
3156 t_alg->algt.alg.aead.encrypt = aead_encrypt; 3238 t_alg->algt.alg.aead.encrypt = aead_encrypt;
3157 t_alg->algt.alg.aead.decrypt = aead_decrypt; 3239 t_alg->algt.alg.aead.decrypt = aead_decrypt;
3158 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) && 3240 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
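 
The two registration hunks above lean on gcc's binary "?:" extension: "x ?: y" yields x when x is non-NULL, so the DES/3DES templates that now declare their own .setkey keep it, and every other entry falls back to the generic handler. A minimal sketch of the idiom outside the driver (struct and helper names are hypothetical):

#include <linux/types.h>

/* Hypothetical ops table; only the conditional-default line matters. */
struct example_ops {
        int (*setkey)(const u8 *key, unsigned int keylen);
};

static int generic_setkey(const u8 *key, unsigned int keylen)
{
        return 0;       /* stand-in for the driver's generic setkey */
}

static void fill_default_ops(struct example_ops *ops)
{
        /*
         * gcc extension: "a ?: b" is "a ? a : b" with "a" evaluated
         * once. A template-supplied setkey survives; a NULL one is
         * replaced by the generic handler.
         */
        ops->setkey = ops->setkey ?: generic_setkey;
}
 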
diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile
index b497ae3dde07..dc8fff29c4b3 100644
--- a/drivers/crypto/ux500/cryp/Makefile
+++ b/drivers/crypto/ux500/cryp/Makefile
@@ -3,11 +3,7 @@
3# * Author: shujuan.chen@stericsson.com for ST-Ericsson. 3# * Author: shujuan.chen@stericsson.com for ST-Ericsson.
4# * License terms: GNU General Public License (GPL) version 2 */ 4# * License terms: GNU General Public License (GPL) version 2 */
5 5
6ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG 6ccflags-$(CONFIG_CRYPTO_DEV_UX500_DEBUG) += -DDEBUG
7CFLAGS_cryp_core.o := -DDEBUG
8CFLAGS_cryp.o := -DDEBUG
9CFLAGS_cryp_irq.o := -DDEBUG
10endif
11 7
12obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o 8obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o
13ux500_cryp-objs := cryp.o cryp_irq.o cryp_core.o 9ux500_cryp-objs := cryp.o cryp_irq.o cryp_core.o
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 3235611928f2..7a93cba0877f 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1019,37 +1019,16 @@ static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1019 const u8 *key, unsigned int keylen) 1019 const u8 *key, unsigned int keylen)
1020{ 1020{
1021 struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); 1021 struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1022 u32 *flags = &cipher->base.crt_flags; 1022 u32 flags;
1023 const u32 *K = (const u32 *)key; 1023 int err;
1024 u32 tmp[DES3_EDE_EXPKEY_WORDS];
1025 int i, ret;
1026 1024
1027 pr_debug(DEV_DBG_NAME " [%s]", __func__); 1025 pr_debug(DEV_DBG_NAME " [%s]", __func__);
1028 if (keylen != DES3_EDE_KEY_SIZE) {
1029 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
1030 pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
1031 __func__);
1032 return -EINVAL;
1033 }
1034 1026
1035 /* Checking key interdependency for weak key detection. */ 1027 flags = crypto_ablkcipher_get_flags(cipher);
1036 if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || 1028 err = __des3_verify_key(&flags, key);
1037 !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && 1029 if (unlikely(err)) {
1038 (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { 1030 crypto_ablkcipher_set_flags(cipher, flags);
1039 *flags |= CRYPTO_TFM_RES_WEAK_KEY; 1031 return err;
1040 pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
1041 __func__);
1042 return -EINVAL;
1043 }
1044 for (i = 0; i < 3; i++) {
1045 ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
1046 if (unlikely(ret == 0) &&
1047 (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
1048 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
1049 pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
1050 __func__);
1051 return -EINVAL;
1052 }
1053 } 1032 }
1054 1033
1055 memcpy(ctx->key, key, keylen); 1034 memcpy(ctx->key, key, keylen);
@@ -1219,57 +1198,6 @@ static struct cryp_algo_template cryp_algs[] = {
1219 { 1198 {
1220 .algomode = CRYP_ALGO_DES_ECB, 1199 .algomode = CRYP_ALGO_DES_ECB,
1221 .crypto = { 1200 .crypto = {
1222 .cra_name = "des",
1223 .cra_driver_name = "des-ux500",
1224 .cra_priority = 300,
1225 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1226 CRYPTO_ALG_ASYNC,
1227 .cra_blocksize = DES_BLOCK_SIZE,
1228 .cra_ctxsize = sizeof(struct cryp_ctx),
1229 .cra_alignmask = 3,
1230 .cra_type = &crypto_ablkcipher_type,
1231 .cra_init = cryp_cra_init,
1232 .cra_module = THIS_MODULE,
1233 .cra_u = {
1234 .ablkcipher = {
1235 .min_keysize = DES_KEY_SIZE,
1236 .max_keysize = DES_KEY_SIZE,
1237 .setkey = des_ablkcipher_setkey,
1238 .encrypt = cryp_blk_encrypt,
1239 .decrypt = cryp_blk_decrypt
1240 }
1241 }
1242 }
1243
1244 },
1245 {
1246 .algomode = CRYP_ALGO_TDES_ECB,
1247 .crypto = {
1248 .cra_name = "des3_ede",
1249 .cra_driver_name = "des3_ede-ux500",
1250 .cra_priority = 300,
1251 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1252 CRYPTO_ALG_ASYNC,
1253 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1254 .cra_ctxsize = sizeof(struct cryp_ctx),
1255 .cra_alignmask = 3,
1256 .cra_type = &crypto_ablkcipher_type,
1257 .cra_init = cryp_cra_init,
1258 .cra_module = THIS_MODULE,
1259 .cra_u = {
1260 .ablkcipher = {
1261 .min_keysize = DES3_EDE_KEY_SIZE,
1262 .max_keysize = DES3_EDE_KEY_SIZE,
1263 .setkey = des_ablkcipher_setkey,
1264 .encrypt = cryp_blk_encrypt,
1265 .decrypt = cryp_blk_decrypt
1266 }
1267 }
1268 }
1269 },
1270 {
1271 .algomode = CRYP_ALGO_DES_ECB,
1272 .crypto = {
1273 .cra_name = "ecb(des)", 1201 .cra_name = "ecb(des)",
1274 .cra_driver_name = "ecb-des-ux500", 1202 .cra_driver_name = "ecb-des-ux500",
1275 .cra_priority = 300, 1203 .cra_priority = 300,
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index d7316f7a3a69..603a62081994 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -23,9 +23,10 @@
23#include <linux/err.h> 23#include <linux/err.h>
24#include <linux/crypto.h> 24#include <linux/crypto.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/hardirq.h> 26#include <asm/simd.h>
27#include <asm/switch_to.h> 27#include <asm/switch_to.h>
28#include <crypto/aes.h> 28#include <crypto/aes.h>
29#include <crypto/internal/simd.h>
29 30
30#include "aesp8-ppc.h" 31#include "aesp8-ppc.h"
31 32
@@ -78,20 +79,21 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
78 pagefault_disable(); 79 pagefault_disable();
79 enable_kernel_vsx(); 80 enable_kernel_vsx();
80 ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); 81 ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
81 ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); 82 ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
82 disable_kernel_vsx(); 83 disable_kernel_vsx();
83 pagefault_enable(); 84 pagefault_enable();
84 preempt_enable(); 85 preempt_enable();
85 86
86 ret += crypto_cipher_setkey(ctx->fallback, key, keylen); 87 ret |= crypto_cipher_setkey(ctx->fallback, key, keylen);
87 return ret; 88
89 return ret ? -EINVAL : 0;
88} 90}
89 91
90static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) 92static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
91{ 93{
92 struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); 94 struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
93 95
94 if (in_interrupt()) { 96 if (!crypto_simd_usable()) {
95 crypto_cipher_encrypt_one(ctx->fallback, dst, src); 97 crypto_cipher_encrypt_one(ctx->fallback, dst, src);
96 } else { 98 } else {
97 preempt_disable(); 99 preempt_disable();
@@ -108,7 +110,7 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
108{ 110{
109 struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); 111 struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
110 112
111 if (in_interrupt()) { 113 if (!crypto_simd_usable()) {
112 crypto_cipher_decrypt_one(ctx->fallback, dst, src); 114 crypto_cipher_decrypt_one(ctx->fallback, dst, src);
113 } else { 115 } else {
114 preempt_disable(); 116 preempt_disable();
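 
The setkey changes in these vmx files swap "+=" for "|=" when accumulating statuses: summed return codes can cancel each other or add up to a positive value that is not a valid errno, while OR-ing preserves "any failure" and the final ternary maps it to a real error code. A condensed sketch, with hypothetical expand helpers standing in for the aes_p8_set_*_key() routines (0 on success, nonzero on failure):

#include <linux/errno.h>
#include <linux/types.h>

static int expand_enc_key(const u8 *key, int bits) { return 0; }
static int expand_dec_key(const u8 *key, int bits) { return 0; }

static int example_setkey(const u8 *key, unsigned int keylen)
{
        int ret;

        /*
         * OR the statuses instead of adding them: two failures can
         * no longer cancel out, and the result can't become a
         * meaningless positive "error" value.
         */
        ret  = expand_enc_key(key, keylen * 8);
        ret |= expand_dec_key(key, keylen * 8);

        return ret ? -EINVAL : 0;
}
 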
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index c5c5ff82b52e..a1a9a6f0d42c 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -23,9 +23,10 @@
23#include <linux/err.h> 23#include <linux/err.h>
24#include <linux/crypto.h> 24#include <linux/crypto.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/hardirq.h> 26#include <asm/simd.h>
27#include <asm/switch_to.h> 27#include <asm/switch_to.h>
28#include <crypto/aes.h> 28#include <crypto/aes.h>
29#include <crypto/internal/simd.h>
29#include <crypto/scatterwalk.h> 30#include <crypto/scatterwalk.h>
30#include <crypto/skcipher.h> 31#include <crypto/skcipher.h>
31 32
@@ -81,13 +82,14 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
81 pagefault_disable(); 82 pagefault_disable();
82 enable_kernel_vsx(); 83 enable_kernel_vsx();
83 ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); 84 ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
84 ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); 85 ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
85 disable_kernel_vsx(); 86 disable_kernel_vsx();
86 pagefault_enable(); 87 pagefault_enable();
87 preempt_enable(); 88 preempt_enable();
88 89
89 ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); 90 ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
90 return ret; 91
92 return ret ? -EINVAL : 0;
91} 93}
92 94
93static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, 95static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
@@ -99,7 +101,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
99 struct p8_aes_cbc_ctx *ctx = 101 struct p8_aes_cbc_ctx *ctx =
100 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); 102 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
101 103
102 if (in_interrupt()) { 104 if (!crypto_simd_usable()) {
103 SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); 105 SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
104 skcipher_request_set_sync_tfm(req, ctx->fallback); 106 skcipher_request_set_sync_tfm(req, ctx->fallback);
105 skcipher_request_set_callback(req, desc->flags, NULL, NULL); 107 skcipher_request_set_callback(req, desc->flags, NULL, NULL);
@@ -138,7 +140,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
138 struct p8_aes_cbc_ctx *ctx = 140 struct p8_aes_cbc_ctx *ctx =
139 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); 141 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
140 142
141 if (in_interrupt()) { 143 if (!crypto_simd_usable()) {
142 SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); 144 SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
143 skcipher_request_set_sync_tfm(req, ctx->fallback); 145 skcipher_request_set_sync_tfm(req, ctx->fallback);
144 skcipher_request_set_callback(req, desc->flags, NULL, NULL); 146 skcipher_request_set_callback(req, desc->flags, NULL, NULL);
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 8a2fe092cb8e..192a53512f5e 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -23,9 +23,10 @@
23#include <linux/err.h> 23#include <linux/err.h>
24#include <linux/crypto.h> 24#include <linux/crypto.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/hardirq.h> 26#include <asm/simd.h>
27#include <asm/switch_to.h> 27#include <asm/switch_to.h>
28#include <crypto/aes.h> 28#include <crypto/aes.h>
29#include <crypto/internal/simd.h>
29#include <crypto/scatterwalk.h> 30#include <crypto/scatterwalk.h>
30#include <crypto/skcipher.h> 31#include <crypto/skcipher.h>
31 32
@@ -83,8 +84,9 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
83 pagefault_enable(); 84 pagefault_enable();
84 preempt_enable(); 85 preempt_enable();
85 86
86 ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); 87 ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
87 return ret; 88
89 return ret ? -EINVAL : 0;
88} 90}
89 91
90static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx, 92static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
@@ -118,7 +120,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
118 struct p8_aes_ctr_ctx *ctx = 120 struct p8_aes_ctr_ctx *ctx =
119 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); 121 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
120 122
121 if (in_interrupt()) { 123 if (!crypto_simd_usable()) {
122 SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); 124 SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
123 skcipher_request_set_sync_tfm(req, ctx->fallback); 125 skcipher_request_set_sync_tfm(req, ctx->fallback);
124 skcipher_request_set_callback(req, desc->flags, NULL, NULL); 126 skcipher_request_set_callback(req, desc->flags, NULL, NULL);
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index ecd64e5cc5bb..00d412d811ae 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -23,9 +23,10 @@
23#include <linux/err.h> 23#include <linux/err.h>
24#include <linux/crypto.h> 24#include <linux/crypto.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/hardirq.h> 26#include <asm/simd.h>
27#include <asm/switch_to.h> 27#include <asm/switch_to.h>
28#include <crypto/aes.h> 28#include <crypto/aes.h>
29#include <crypto/internal/simd.h>
29#include <crypto/scatterwalk.h> 30#include <crypto/scatterwalk.h>
30#include <crypto/xts.h> 31#include <crypto/xts.h>
31#include <crypto/skcipher.h> 32#include <crypto/skcipher.h>
@@ -86,14 +87,15 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
86 pagefault_disable(); 87 pagefault_disable();
87 enable_kernel_vsx(); 88 enable_kernel_vsx();
88 ret = aes_p8_set_encrypt_key(key + keylen/2, (keylen/2) * 8, &ctx->tweak_key); 89 ret = aes_p8_set_encrypt_key(key + keylen/2, (keylen/2) * 8, &ctx->tweak_key);
89 ret += aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key); 90 ret |= aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
90 ret += aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key); 91 ret |= aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
91 disable_kernel_vsx(); 92 disable_kernel_vsx();
92 pagefault_enable(); 93 pagefault_enable();
93 preempt_enable(); 94 preempt_enable();
94 95
95 ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); 96 ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
96 return ret; 97
98 return ret ? -EINVAL : 0;
97} 99}
98 100
99static int p8_aes_xts_crypt(struct blkcipher_desc *desc, 101static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
@@ -108,7 +110,7 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
108 struct p8_aes_xts_ctx *ctx = 110 struct p8_aes_xts_ctx *ctx =
109 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); 111 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
110 112
111 if (in_interrupt()) { 113 if (!crypto_simd_usable()) {
112 SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); 114 SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
113 skcipher_request_set_sync_tfm(req, ctx->fallback); 115 skcipher_request_set_sync_tfm(req, ctx->fallback);
114 skcipher_request_set_callback(req, desc->flags, NULL, NULL); 116 skcipher_request_set_callback(req, desc->flags, NULL, NULL);
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
index d6a9f63d65ba..de78282b8f44 100644
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -1854,7 +1854,7 @@ Lctr32_enc8x_three:
1854 stvx_u $out1,$x10,$out 1854 stvx_u $out1,$x10,$out
1855 stvx_u $out2,$x20,$out 1855 stvx_u $out2,$x20,$out
1856 addi $out,$out,0x30 1856 addi $out,$out,0x30
1857 b Lcbc_dec8x_done 1857 b Lctr32_enc8x_done
1858 1858
1859.align 5 1859.align 5
1860Lctr32_enc8x_two: 1860Lctr32_enc8x_two:
@@ -1866,7 +1866,7 @@ Lctr32_enc8x_two:
1866 stvx_u $out0,$x00,$out 1866 stvx_u $out0,$x00,$out
1867 stvx_u $out1,$x10,$out 1867 stvx_u $out1,$x10,$out
1868 addi $out,$out,0x20 1868 addi $out,$out,0x20
1869 b Lcbc_dec8x_done 1869 b Lctr32_enc8x_done
1870 1870
1871.align 5 1871.align 5
1872Lctr32_enc8x_one: 1872Lctr32_enc8x_one:
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index dd8b8716467a..b5a6883bb09e 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -23,16 +23,15 @@
23#include <linux/err.h> 23#include <linux/err.h>
24#include <linux/crypto.h> 24#include <linux/crypto.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/hardirq.h> 26#include <asm/simd.h>
27#include <asm/switch_to.h> 27#include <asm/switch_to.h>
28#include <crypto/aes.h> 28#include <crypto/aes.h>
29#include <crypto/ghash.h> 29#include <crypto/ghash.h>
30#include <crypto/scatterwalk.h> 30#include <crypto/scatterwalk.h>
31#include <crypto/internal/hash.h> 31#include <crypto/internal/hash.h>
32#include <crypto/internal/simd.h>
32#include <crypto/b128ops.h> 33#include <crypto/b128ops.h>
33 34
34#define IN_INTERRUPT in_interrupt()
35
36void gcm_init_p8(u128 htable[16], const u64 Xi[2]); 35void gcm_init_p8(u128 htable[16], const u64 Xi[2]);
37void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]); 36void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]);
38void gcm_ghash_p8(u64 Xi[2], const u128 htable[16], 37void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
@@ -102,7 +101,6 @@ static int p8_ghash_init(struct shash_desc *desc)
102 dctx->bytes = 0; 101 dctx->bytes = 0;
103 memset(dctx->shash, 0, GHASH_DIGEST_SIZE); 102 memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
104 dctx->fallback_desc.tfm = ctx->fallback; 103 dctx->fallback_desc.tfm = ctx->fallback;
105 dctx->fallback_desc.flags = desc->flags;
106 return crypto_shash_init(&dctx->fallback_desc); 104 return crypto_shash_init(&dctx->fallback_desc);
107} 105}
108 106
@@ -131,7 +129,7 @@ static int p8_ghash_update(struct shash_desc *desc,
131 struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); 129 struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
132 struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); 130 struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
133 131
134 if (IN_INTERRUPT) { 132 if (!crypto_simd_usable()) {
135 return crypto_shash_update(&dctx->fallback_desc, src, 133 return crypto_shash_update(&dctx->fallback_desc, src,
136 srclen); 134 srclen);
137 } else { 135 } else {
@@ -182,7 +180,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
182 struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); 180 struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
183 struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); 181 struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
184 182
185 if (IN_INTERRUPT) { 183 if (!crypto_simd_usable()) {
186 return crypto_shash_final(&dctx->fallback_desc, out); 184 return crypto_shash_final(&dctx->fallback_desc, out);
187 } else { 185 } else {
188 if (dctx->bytes) { 186 if (dctx->bytes) {
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
index 31a98dc6f849..a9f519830615 100644
--- a/drivers/crypto/vmx/vmx.c
+++ b/drivers/crypto/vmx/vmx.c
@@ -41,7 +41,7 @@ static struct crypto_alg *algs[] = {
41 NULL, 41 NULL,
42}; 42};
43 43
44int __init p8_init(void) 44static int __init p8_init(void)
45{ 45{
46 int ret = 0; 46 int ret = 0;
47 struct crypto_alg **alg_it; 47 struct crypto_alg **alg_it;
@@ -67,7 +67,7 @@ int __init p8_init(void)
67 return ret; 67 return ret;
68} 68}
69 69
70void __exit p8_exit(void) 70static void __exit p8_exit(void)
71{ 71{
72 struct crypto_alg **alg_it; 72 struct crypto_alg **alg_it;
73 73
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 2e2dff478833..ecf6e659c0da 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -80,7 +80,6 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe,
80 SHASH_DESC_ON_STACK(shash, rxe->tfm); 80 SHASH_DESC_ON_STACK(shash, rxe->tfm);
81 81
82 shash->tfm = rxe->tfm; 82 shash->tfm = rxe->tfm;
83 shash->flags = 0;
84 *(u32 *)shash_desc_ctx(shash) = crc; 83 *(u32 *)shash_desc_ctx(shash) = crc;
85 err = crypto_shash_update(shash, next, len); 84 err = crypto_shash_update(shash, next, len);
86 if (unlikely(err)) { 85 if (unlikely(err)) {
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index dd6565798778..9faed1c92b52 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -332,7 +332,6 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
332 int err; 332 int err;
333 333
334 desc->tfm = essiv->hash_tfm; 334 desc->tfm = essiv->hash_tfm;
335 desc->flags = 0;
336 335
337 err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt); 336 err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt);
338 shash_desc_zero(desc); 337 shash_desc_zero(desc);
@@ -606,7 +605,6 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
606 int i, r; 605 int i, r;
607 606
608 desc->tfm = lmk->hash_tfm; 607 desc->tfm = lmk->hash_tfm;
609 desc->flags = 0;
610 608
611 r = crypto_shash_init(desc); 609 r = crypto_shash_init(desc);
612 if (r) 610 if (r)
@@ -768,7 +766,6 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
768 766
769 /* calculate crc32 for every 32bit part and xor it */ 767 /* calculate crc32 for every 32bit part and xor it */
770 desc->tfm = tcw->crc32_tfm; 768 desc->tfm = tcw->crc32_tfm;
771 desc->flags = 0;
772 for (i = 0; i < 4; i++) { 769 for (i = 0; i < 4; i++) {
773 r = crypto_shash_init(desc); 770 r = crypto_shash_init(desc);
774 if (r) 771 if (r)
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 7c678f50aaa3..95ae4bf34203 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -532,7 +532,6 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
532 unsigned j, size; 532 unsigned j, size;
533 533
534 desc->tfm = ic->journal_mac; 534 desc->tfm = ic->journal_mac;
535 desc->flags = 0;
536 535
537 r = crypto_shash_init(desc); 536 r = crypto_shash_init(desc);
538 if (unlikely(r)) { 537 if (unlikely(r)) {
@@ -1276,7 +1275,6 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
1276 unsigned digest_size; 1275 unsigned digest_size;
1277 1276
1278 req->tfm = ic->internal_hash; 1277 req->tfm = ic->internal_hash;
1279 req->flags = 0;
1280 1278
1281 r = crypto_shash_init(req); 1279 r = crypto_shash_init(req);
1282 if (unlikely(r < 0)) { 1280 if (unlikely(r < 0)) {
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index 7ccdc62c6052..ff61dd8748de 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -222,7 +222,6 @@ static void *mppe_alloc(unsigned char *options, int optlen)
222 goto out_free; 222 goto out_free;
223 } 223 }
224 state->sha1->tfm = shash; 224 state->sha1->tfm = shash;
225 state->sha1->flags = 0;
226 225
227 digestsize = crypto_shash_digestsize(shash); 226 digestsize = crypto_shash_digestsize(shash);
228 if (digestsize < MPPE_MAX_KEY_LEN) 227 if (digestsize < MPPE_MAX_KEY_LEN)
diff --git a/drivers/net/wireless/intersil/orinoco/mic.c b/drivers/net/wireless/intersil/orinoco/mic.c
index 67b0c05afbdb..a324bc4b7938 100644
--- a/drivers/net/wireless/intersil/orinoco/mic.c
+++ b/drivers/net/wireless/intersil/orinoco/mic.c
@@ -65,7 +65,6 @@ int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
65 hdr[ETH_ALEN * 2 + 3] = 0; 65 hdr[ETH_ALEN * 2 + 3] = 0;
66 66
67 desc->tfm = tfm_michael; 67 desc->tfm = tfm_michael;
68 desc->flags = 0;
69 68
70 err = crypto_shash_setkey(tfm_michael, key, MIC_KEYLEN); 69 err = crypto_shash_setkey(tfm_michael, key, MIC_KEYLEN);
71 if (err) 70 if (err)
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index b7828fb252f2..b681073ae8ba 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -449,7 +449,6 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
449 SHASH_DESC_ON_STACK(desc, tfm); 449 SHASH_DESC_ON_STACK(desc, tfm);
450 450
451 desc->tfm = tfm; 451 desc->tfm = tfm;
452 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
453 452
454 ret = crypto_shash_digest(desc, fw->image, image_size, 453 ret = crypto_shash_digest(desc, fw->image, image_size,
455 hash_data); 454 hash_data);
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index 06ebea0be118..122d4c0af363 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -219,7 +219,6 @@ michael_mic(u8 *key, u8 *data, unsigned int len, u8 priority, u8 *result)
219 } 219 }
220 220
221 desc->tfm = tfm; 221 desc->tfm = tfm;
222 desc->flags = 0;
223 222
224 ret = crypto_shash_init(desc); 223 ret = crypto_shash_init(desc);
225 if (ret < 0) 224 if (ret < 0)
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 55da8c9dfe50..a084e1501f9d 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -507,7 +507,6 @@ static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr,
507 int err; 507 int err;
508 508
509 desc->tfm = tfm_michael; 509 desc->tfm = tfm_michael;
510 desc->flags = 0;
511 510
512 if (crypto_shash_setkey(tfm_michael, key, 8)) 511 if (crypto_shash_setkey(tfm_michael, key, 8))
513 return -1; 512 return -1;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 829fa4bd253c..d67bb57994c4 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -503,7 +503,6 @@ static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr,
503 int err; 503 int err;
504 504
505 desc->tfm = tfm_michael; 505 desc->tfm = tfm_michael;
506 desc->flags = 0;
507 506
508 if (crypto_shash_setkey(tfm_michael, key, 8)) 507 if (crypto_shash_setkey(tfm_michael, key, 8))
509 return -1; 508 return -1;
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 4e680d753941..ca7d7e8aecc0 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -252,7 +252,6 @@ static int chap_server_compute_md5(
252 } 252 }
253 253
254 desc->tfm = tfm; 254 desc->tfm = tfm;
255 desc->flags = 0;
256 255
257 ret = crypto_shash_init(desc); 256 ret = crypto_shash_init(desc);
258 if (ret < 0) { 257 if (ret < 0) {
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 7416bdbd8576..b7980c856898 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -678,7 +678,6 @@ int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
678 } 678 }
679 679
680 shash->tfm = tfm; 680 shash->tfm = tfm;
681 shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
682 681
683 memset(hmac, 0, sizeof(hmac)); 682 memset(hmac, 0, sizeof(hmac));
684 ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac); 683 ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 0dc6f08020ac..b1a696a73f7c 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -959,7 +959,6 @@ cifs_alloc_hash(const char *name,
959 } 959 }
960 960
961 (*sdesc)->shash.tfm = *shash; 961 (*sdesc)->shash.tfm = *shash;
962 (*sdesc)->shash.flags = 0x0;
963 return 0; 962 return 0;
964} 963}
965 964
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 322ce9686bdb..2cb4956f8511 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -402,7 +402,6 @@ static int derive_essiv_salt(const u8 *key, int keysize, u8 *salt)
402 { 402 {
403 SHASH_DESC_ON_STACK(desc, tfm); 403 SHASH_DESC_ON_STACK(desc, tfm);
404 desc->tfm = tfm; 404 desc->tfm = tfm;
405 desc->flags = 0;
406 405
407 return crypto_shash_digest(desc, key, keysize, salt); 406 return crypto_shash_digest(desc, key, keysize, salt);
408 } 407 }
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index f664da55234e..491cf5baa8c2 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -68,7 +68,6 @@ static int ecryptfs_hash_digest(struct crypto_shash *tfm,
68 int err; 68 int err;
69 69
70 desc->tfm = tfm; 70 desc->tfm = tfm;
71 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
72 err = crypto_shash_digest(desc, src, len, dst); 71 err = crypto_shash_digest(desc, src, len, dst);
73 shash_desc_zero(desc); 72 shash_desc_zero(desc);
74 return err; 73 return err;
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index e74fe84d0886..90fbac5d485b 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -769,7 +769,6 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
769 } 769 }
770 770
771 s->hash_desc->tfm = s->hash_tfm; 771 s->hash_desc->tfm = s->hash_tfm;
772 s->hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
773 772
774 rc = crypto_shash_digest(s->hash_desc, 773 rc = crypto_shash_digest(s->hash_desc,
775 (u8 *)s->auth_tok->token.password.session_key_encryption_key, 774 (u8 *)s->auth_tok->token.password.session_key_encryption_key,
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 82ffdacdc7fa..0833b5fc0668 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2024,7 +2024,6 @@ static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc,
2024 BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver)!=sizeof(desc.ctx)); 2024 BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver)!=sizeof(desc.ctx));
2025 2025
2026 desc.shash.tfm = sbi->s_chksum_driver; 2026 desc.shash.tfm = sbi->s_chksum_driver;
2027 desc.shash.flags = 0;
2028 *(u32 *)desc.ctx = crc; 2027 *(u32 *)desc.ctx = crc;
2029 2028
2030 BUG_ON(crypto_shash_update(&desc.shash, address, length)); 2029 BUG_ON(crypto_shash_update(&desc.shash, address, length));
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 87f75ebd2fd6..21b0ab6bd15a 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1422,7 +1422,6 @@ static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
1422 BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx)); 1422 BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
1423 1423
1424 desc.shash.tfm = sbi->s_chksum_driver; 1424 desc.shash.tfm = sbi->s_chksum_driver;
1425 desc.shash.flags = 0;
1426 *(u32 *)desc.ctx = crc; 1425 *(u32 *)desc.ctx = crc;
1427 1426
1428 err = crypto_shash_update(&desc.shash, address, length); 1427 err = crypto_shash_update(&desc.shash, address, length);
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 5188f9f70c78..8c8563441208 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -126,7 +126,6 @@ nfs4_make_rec_clidname(char *dname, const struct xdr_netobj *clname)
126 SHASH_DESC_ON_STACK(desc, tfm); 126 SHASH_DESC_ON_STACK(desc, tfm);
127 127
128 desc->tfm = tfm; 128 desc->tfm = tfm;
129 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
130 129
131 status = crypto_shash_digest(desc, clname->data, clname->len, 130 status = crypto_shash_digest(desc, clname->data, clname->len,
132 cksum.data); 131 cksum.data);
diff --git a/fs/ubifs/auth.c b/fs/ubifs/auth.c
index 5bf5fd08879e..b758004085c4 100644
--- a/fs/ubifs/auth.c
+++ b/fs/ubifs/auth.c
@@ -33,7 +33,6 @@ int __ubifs_node_calc_hash(const struct ubifs_info *c, const void *node,
33 int err; 33 int err;
34 34
35 shash->tfm = c->hash_tfm; 35 shash->tfm = c->hash_tfm;
36 shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
37 36
38 err = crypto_shash_digest(shash, node, le32_to_cpu(ch->len), hash); 37 err = crypto_shash_digest(shash, node, le32_to_cpu(ch->len), hash);
39 if (err < 0) 38 if (err < 0)
@@ -56,7 +55,6 @@ static int ubifs_hash_calc_hmac(const struct ubifs_info *c, const u8 *hash,
56 int err; 55 int err;
57 56
58 shash->tfm = c->hmac_tfm; 57 shash->tfm = c->hmac_tfm;
59 shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
60 58
61 err = crypto_shash_digest(shash, hash, c->hash_len, hmac); 59 err = crypto_shash_digest(shash, hash, c->hash_len, hmac);
62 if (err < 0) 60 if (err < 0)
@@ -88,7 +86,6 @@ int ubifs_prepare_auth_node(struct ubifs_info *c, void *node,
88 return -ENOMEM; 86 return -ENOMEM;
89 87
90 hash_desc->tfm = c->hash_tfm; 88 hash_desc->tfm = c->hash_tfm;
91 hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
92 ubifs_shash_copy_state(c, inhash, hash_desc); 89 ubifs_shash_copy_state(c, inhash, hash_desc);
93 90
94 err = crypto_shash_final(hash_desc, hash); 91 err = crypto_shash_final(hash_desc, hash);
@@ -123,7 +120,6 @@ static struct shash_desc *ubifs_get_desc(const struct ubifs_info *c,
123 return ERR_PTR(-ENOMEM); 120 return ERR_PTR(-ENOMEM);
124 121
125 desc->tfm = tfm; 122 desc->tfm = tfm;
126 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
127 123
128 err = crypto_shash_init(desc); 124 err = crypto_shash_init(desc);
129 if (err) { 125 if (err) {
@@ -364,7 +360,6 @@ static int ubifs_node_calc_hmac(const struct ubifs_info *c, const void *node,
364 ubifs_assert(c, ofs_hmac + hmac_len < len); 360 ubifs_assert(c, ofs_hmac + hmac_len < len);
365 361
366 shash->tfm = c->hmac_tfm; 362 shash->tfm = c->hmac_tfm;
367 shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
368 363
369 err = crypto_shash_init(shash); 364 err = crypto_shash_init(shash);
370 if (err) 365 if (err)
@@ -483,7 +478,6 @@ int ubifs_hmac_wkm(struct ubifs_info *c, u8 *hmac)
483 return 0; 478 return 0;
484 479
485 shash->tfm = c->hmac_tfm; 480 shash->tfm = c->hmac_tfm;
486 shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
487 481
488 err = crypto_shash_init(shash); 482 err = crypto_shash_init(shash);
489 if (err) 483 if (err)
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 0a0e65c07c6d..5c8a81a019a4 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -576,7 +576,6 @@ static int authenticate_sleb_hash(struct ubifs_info *c, struct shash_desc *log_h
576 SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm); 576 SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm);
577 577
578 hash_desc->tfm = c->hash_tfm; 578 hash_desc->tfm = c->hash_tfm;
579 hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
580 579
581 ubifs_shash_copy_state(c, log_hash, hash_desc); 580 ubifs_shash_copy_state(c, log_hash, hash_desc);
582 return crypto_shash_final(hash_desc, hash); 581 return crypto_shash_final(hash_desc, hash);
@@ -587,7 +586,6 @@ static int authenticate_sleb_hmac(struct ubifs_info *c, u8 *hash, u8 *hmac)
587 SHASH_DESC_ON_STACK(hmac_desc, c->hmac_tfm); 586 SHASH_DESC_ON_STACK(hmac_desc, c->hmac_tfm);
588 587
589 hmac_desc->tfm = c->hmac_tfm; 588 hmac_desc->tfm = c->hmac_tfm;
590 hmac_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
591 589
592 return crypto_shash_digest(hmac_desc, hash, c->hash_len, hmac); 590 return crypto_shash_digest(hmac_desc, hash, c->hash_len, hmac);
593} 591}
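 
All of the hunks from rxe.h down to here are the same mechanical cleanup: struct shash_desc loses its flags field (see the include/crypto/hash.h hunk further down), so callers simply stop assigning desc->flags. The post-patch calling convention reduces to a sketch like this, assuming tfm is an already-allocated crypto_shash:

#include <crypto/hash.h>

static int digest_example(struct crypto_shash *tfm, const u8 *data,
                          unsigned int len, u8 *out)
{
        SHASH_DESC_ON_STACK(desc, tfm);
        int err;

        desc->tfm = tfm;        /* only ->tfm; no ->flags left to set */
        err = crypto_shash_digest(desc, data, len, out);
        shash_desc_zero(desc);  /* wipe any key-derived state */
        return err;
}
 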
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index 852eaa9cd4db..0fdb542c70cd 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -28,10 +28,10 @@ struct crypto_aes_ctx {
28 u32 key_length; 28 u32 key_length;
29}; 29};
30 30
31extern const u32 crypto_ft_tab[4][256]; 31extern const u32 crypto_ft_tab[4][256] ____cacheline_aligned;
32extern const u32 crypto_fl_tab[4][256]; 32extern const u32 crypto_fl_tab[4][256] ____cacheline_aligned;
33extern const u32 crypto_it_tab[4][256]; 33extern const u32 crypto_it_tab[4][256] ____cacheline_aligned;
34extern const u32 crypto_il_tab[4][256]; 34extern const u32 crypto_il_tab[4][256] ____cacheline_aligned;
35 35
36int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, 36int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
37 unsigned int key_len); 37 unsigned int key_len);
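 
Tagging the four AES lookup tables ____cacheline_aligned guarantees each table starts on an L1 cache-line boundary instead of sharing its first line with whatever the linker placed before it. A sketch of the attribute on a hypothetical table:

#include <linux/cache.h>
#include <linux/types.h>

/* Aligned to L1_CACHE_BYTES; the table begins on its own cache line. */
static const u32 example_tab[256] ____cacheline_aligned = { 1, 2, 3 };
 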
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index 2d690494568c..8884046659a0 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -19,14 +19,20 @@
19 * 19 *
20 * @base: Common attributes for async crypto requests 20 * @base: Common attributes for async crypto requests
21 * @src: Source data 21 * @src: Source data
22 * @dst: Destination data 22 * For verify op this is signature + digest, in that case
 23 * total size of @src is @src_len + @dst_len.
24 * @dst: Destination data (Should be NULL for verify op)
23 * @src_len: Size of the input buffer 25 * @src_len: Size of the input buffer
24 * @dst_len: Size of the output buffer. It needs to be at least 26 * For verify op it's size of signature part of @src, this part
25 * as big as the expected result depending on the operation 27 * is supposed to be operated by cipher.
28 * @dst_len: Size of @dst buffer (for all ops except verify).
29 * It needs to be at least as big as the expected result
30 * depending on the operation.
26 * After operation it will be updated with the actual size of the 31 * After operation it will be updated with the actual size of the
27 * result. 32 * result.
28 * In case of error where the dst sgl size was insufficient, 33 * In case of error where the dst sgl size was insufficient,
29 * it will be updated to the size required for the operation. 34 * it will be updated to the size required for the operation.
35 * For verify op this is size of digest part in @src.
30 * @__ctx: Start of private context data 36 * @__ctx: Start of private context data
31 */ 37 */
32struct akcipher_request { 38struct akcipher_request {
@@ -55,10 +61,9 @@ struct crypto_akcipher {
55 * algorithm. In case of error, where the dst_len was insufficient, 61 * algorithm. In case of error, where the dst_len was insufficient,
56 * the req->dst_len will be updated to the size required for the 62 * the req->dst_len will be updated to the size required for the
57 * operation 63 * operation
58 * @verify: Function performs a sign operation as defined by public key 64 * @verify: Function performs a complete verify operation as defined by
59 * algorithm. In case of error, where the dst_len was insufficient, 65 * public key algorithm, returning verification status. Requires
60 * the req->dst_len will be updated to the size required for the 66 * digest value as input parameter.
61 * operation
62 * @encrypt: Function performs an encrypt operation as defined by public key 67 * @encrypt: Function performs an encrypt operation as defined by public key
63 * algorithm. In case of error, where the dst_len was insufficient, 68 * algorithm. In case of error, where the dst_len was insufficient,
64 * the req->dst_len will be updated to the size required for the 69 * the req->dst_len will be updated to the size required for the
@@ -69,10 +74,10 @@ struct crypto_akcipher {
69 * operation 74 * operation
70 * @set_pub_key: Function invokes the algorithm specific set public key 75 * @set_pub_key: Function invokes the algorithm specific set public key
71 * function, which knows how to decode and interpret 76 * function, which knows how to decode and interpret
72 * the BER encoded public key 77 * the BER encoded public key and parameters
73 * @set_priv_key: Function invokes the algorithm specific set private key 78 * @set_priv_key: Function invokes the algorithm specific set private key
74 * function, which knows how to decode and interpret 79 * function, which knows how to decode and interpret
75 * the BER encoded private key 80 * the BER encoded private key and parameters
76 * @max_size: Function returns dest buffer size required for a given key. 81 * @max_size: Function returns dest buffer size required for a given key.
77 * @init: Initialize the cryptographic transformation object. 82 * @init: Initialize the cryptographic transformation object.
78 * This function is used to initialize the cryptographic 83 * This function is used to initialize the cryptographic
@@ -238,9 +243,10 @@ static inline void akcipher_request_set_callback(struct akcipher_request *req,
238 * 243 *
239 * @req: public key request 244 * @req: public key request
240 * @src: ptr to input scatter list 245 * @src: ptr to input scatter list
241 * @dst: ptr to output scatter list 246 * @dst: ptr to output scatter list or NULL for verify op
242 * @src_len: size of the src input scatter list to be processed 247 * @src_len: size of the src input scatter list to be processed
243 * @dst_len: size of the dst output scatter list 248 * @dst_len: size of the dst output scatter list or size of signature
249 * portion in @src for verify op
244 */ 250 */
245static inline void akcipher_request_set_crypt(struct akcipher_request *req, 251static inline void akcipher_request_set_crypt(struct akcipher_request *req,
246 struct scatterlist *src, 252 struct scatterlist *src,
@@ -343,14 +349,18 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req)
343} 349}
344 350
345/** 351/**
346 * crypto_akcipher_verify() - Invoke public key verify operation 352 * crypto_akcipher_verify() - Invoke public key signature verification
347 * 353 *
348 * Function invokes the specific public key verify operation for a given 354 * Function invokes the specific public key signature verification operation
349 * public key algorithm 355 * for a given public key algorithm.
350 * 356 *
351 * @req: asymmetric key request 357 * @req: asymmetric key request
352 * 358 *
353 * Return: zero on success; error code in case of error 359 * Note: req->dst should be NULL, req->src should point to SG of size
360 * (req->src_size + req->dst_size), containing signature (of req->src_size
361 * length) with appended digest (of req->dst_size length).
362 *
363 * Return: zero on verification success; error code in case of error.
354 */ 364 */
355static inline int crypto_akcipher_verify(struct akcipher_request *req) 365static inline int crypto_akcipher_verify(struct akcipher_request *req)
356{ 366{
@@ -369,11 +379,12 @@ static inline int crypto_akcipher_verify(struct akcipher_request *req)
369 * crypto_akcipher_set_pub_key() - Invoke set public key operation 379 * crypto_akcipher_set_pub_key() - Invoke set public key operation
370 * 380 *
371 * Function invokes the algorithm specific set key function, which knows 381 * Function invokes the algorithm specific set key function, which knows
372 * how to decode and interpret the encoded key 382 * how to decode and interpret the encoded key and parameters
373 * 383 *
374 * @tfm: tfm handle 384 * @tfm: tfm handle
375 * @key: BER encoded public key 385 * @key: BER encoded public key, algo OID, paramlen, BER encoded
376 * @keylen: length of the key 386 * parameters
387 * @keylen: length of the key (not including other data)
377 * 388 *
378 * Return: zero on success; error code in case of error 389 * Return: zero on success; error code in case of error
379 */ 390 */
@@ -390,11 +401,12 @@ static inline int crypto_akcipher_set_pub_key(struct crypto_akcipher *tfm,
390 * crypto_akcipher_set_priv_key() - Invoke set private key operation 401 * crypto_akcipher_set_priv_key() - Invoke set private key operation
391 * 402 *
392 * Function invokes the algorithm specific set key function, which knows 403 * Function invokes the algorithm specific set key function, which knows
393 * how to decode and interpret the encoded key 404 * how to decode and interpret the encoded key and parameters
394 * 405 *
395 * @tfm: tfm handle 406 * @tfm: tfm handle
396 * @key: BER encoded private key 407 * @key: BER encoded private key, algo OID, paramlen, BER encoded
397 * @keylen: length of the key 408 * parameters
409 * @keylen: length of the key (not including other data)
398 * 410 *
399 * Return: zero on success; error code in case of error 411 * Return: zero on success; error code in case of error
400 */ 412 */
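 
Putting the new verify pieces together: the caller concatenates signature and digest into a single source scatterlist, passes a NULL destination, and uses src_len/dst_len for the signature and digest sizes. A sketch of a caller under those conventions (all names hypothetical; assumes @req was allocated against a verify-capable tfm and @sg has room for two entries):

#include <crypto/akcipher.h>
#include <linux/scatterlist.h>

static int verify_example(struct akcipher_request *req,
                          struct scatterlist sg[2],
                          void *sig, unsigned int sig_len,
                          void *digest, unsigned int dig_len)
{
        sg_init_table(sg, 2);
        sg_set_buf(&sg[0], sig, sig_len);       /* signature first ... */
        sg_set_buf(&sg[1], digest, dig_len);    /* ... digest appended */

        /* dst is NULL; src_len = signature size, dst_len = digest size */
        akcipher_request_set_crypt(req, sg, NULL, sig_len, dig_len);

        return crypto_akcipher_verify(req);     /* 0 == signature valid */
}
 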
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index 1e64f354c2b8..23169f4d87e6 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -18,27 +18,11 @@
18#include <crypto/hash.h> 18#include <crypto/hash.h>
19#include <crypto/skcipher.h> 19#include <crypto/skcipher.h>
20 20
21struct cryptd_ablkcipher {
22 struct crypto_ablkcipher base;
23};
24
25static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast(
26 struct crypto_ablkcipher *tfm)
27{
28 return (struct cryptd_ablkcipher *)tfm;
29}
30
31/* alg_name should be algorithm to be cryptd-ed */
32struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
33 u32 type, u32 mask);
34struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
35bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm);
36void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);
37
38struct cryptd_skcipher { 21struct cryptd_skcipher {
39 struct crypto_skcipher base; 22 struct crypto_skcipher base;
40}; 23};
41 24
25/* alg_name should be algorithm to be cryptd-ed */
42struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name, 26struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
43 u32 type, u32 mask); 27 u32 type, u32 mask);
44struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm); 28struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm);
diff --git a/include/crypto/des.h b/include/crypto/des.h
index d4094d58ac54..72c7c8e5a5a7 100644
--- a/include/crypto/des.h
+++ b/include/crypto/des.h
@@ -6,6 +6,11 @@
6#ifndef __CRYPTO_DES_H 6#ifndef __CRYPTO_DES_H
7#define __CRYPTO_DES_H 7#define __CRYPTO_DES_H
8 8
9#include <crypto/skcipher.h>
10#include <linux/compiler.h>
11#include <linux/fips.h>
12#include <linux/string.h>
13
9#define DES_KEY_SIZE 8 14#define DES_KEY_SIZE 8
10#define DES_EXPKEY_WORDS 32 15#define DES_EXPKEY_WORDS 32
11#define DES_BLOCK_SIZE 8 16#define DES_BLOCK_SIZE 8
@@ -14,6 +19,44 @@
14#define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS) 19#define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS)
15#define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE 20#define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE
16 21
22static inline int __des3_verify_key(u32 *flags, const u8 *key)
23{
24 int err = -EINVAL;
25 u32 K[6];
26
27 memcpy(K, key, DES3_EDE_KEY_SIZE);
28
29 if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
30 !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
31 (fips_enabled ||
32 (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)))
33 goto bad;
34
35 if (unlikely(!((K[0] ^ K[4]) | (K[1] ^ K[5]))) && fips_enabled)
36 goto bad;
37
38 err = 0;
39
40out:
41 memzero_explicit(K, DES3_EDE_KEY_SIZE);
42
43 return err;
44
45bad:
46 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
47 goto out;
48}
49
50static inline int des3_verify_key(struct crypto_skcipher *tfm, const u8 *key)
51{
52 u32 flags;
53 int err;
54
55 flags = crypto_skcipher_get_flags(tfm);
56 err = __des3_verify_key(&flags, key);
57 crypto_skcipher_set_flags(tfm, flags);
58 return err;
59}
17 60
18extern unsigned long des_ekey(u32 *pe, const u8 *k); 61extern unsigned long des_ekey(u32 *pe, const u8 *k);
19 62
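 
The talitos and ux500 hunks earlier open-code the get-flags/verify/set-flags dance for the legacy ablkcipher interface; skcipher users get the des3_verify_key() wrapper added here. A hypothetical driver setkey built on it might look like this (the ctx layout is invented for the sketch; keylen is already bounded by min_keysize/max_keysize at registration time):

#include <crypto/des.h>
#include <crypto/internal/skcipher.h>
#include <linux/string.h>

struct example_ctx {
        u8 key[DES3_EDE_KEY_SIZE];
};

static int example_des3_setkey(struct crypto_skcipher *tfm,
                               const u8 *key, unsigned int keylen)
{
        struct example_ctx *ctx = crypto_skcipher_ctx(tfm);
        int err;

        /*
         * Rejects weak keys in FIPS mode, or whenever the user set
         * CRYPTO_TFM_REQ_FORBID_WEAK_KEYS; the helper flags
         * CRYPTO_TFM_RES_WEAK_KEY on the tfm before returning.
         */
        err = des3_verify_key(tfm, key);
        if (unlikely(err))
                return err;

        memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);
        return 0;
}
 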
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 3b31c1b349ae..d21bea2c4382 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -146,8 +146,6 @@ struct ahash_alg {
146 146
147struct shash_desc { 147struct shash_desc {
148 struct crypto_shash *tfm; 148 struct crypto_shash *tfm;
149 u32 flags;
150
151 void *__ctx[] CRYPTO_MINALIGN_ATTR; 149 void *__ctx[] CRYPTO_MINALIGN_ATTR;
152}; 150};
153 151
@@ -819,6 +817,7 @@ static inline void *shash_desc_ctx(struct shash_desc *desc)
819 * cipher handle must point to a keyed message digest cipher in order for this 817 * cipher handle must point to a keyed message digest cipher in order for this
820 * function to succeed. 818 * function to succeed.
821 * 819 *
820 * Context: Any context.
822 * Return: 0 if the setting of the key was successful; < 0 if an error occurred 821 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
823 */ 822 */
824int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, 823int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
@@ -835,6 +834,7 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
835 * crypto_shash_update and crypto_shash_final. The parameters have the same 834 * crypto_shash_update and crypto_shash_final. The parameters have the same
836 * meaning as discussed for those separate three functions. 835 * meaning as discussed for those separate three functions.
837 * 836 *
837 * Context: Any context.
838 * Return: 0 if the message digest creation was successful; < 0 if an error 838 * Return: 0 if the message digest creation was successful; < 0 if an error
839 * occurred 839 * occurred
840 */ 840 */
@@ -850,6 +850,7 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
850 * caller-allocated output buffer out which must have sufficient size (e.g. by 850 * caller-allocated output buffer out which must have sufficient size (e.g. by
851 * calling crypto_shash_descsize). 851 * calling crypto_shash_descsize).
852 * 852 *
853 * Context: Any context.
853 * Return: 0 if the export creation was successful; < 0 if an error occurred 854 * Return: 0 if the export creation was successful; < 0 if an error occurred
854 */ 855 */
855static inline int crypto_shash_export(struct shash_desc *desc, void *out) 856static inline int crypto_shash_export(struct shash_desc *desc, void *out)
@@ -866,6 +867,7 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out)
866 * the input buffer. That buffer should have been generated with the 867 * the input buffer. That buffer should have been generated with the
867 * crypto_ahash_export function. 868 * crypto_ahash_export function.
868 * 869 *
870 * Context: Any context.
869 * Return: 0 if the import was successful; < 0 if an error occurred 871 * Return: 0 if the import was successful; < 0 if an error occurred
870 */ 872 */
871static inline int crypto_shash_import(struct shash_desc *desc, const void *in) 873static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
@@ -886,6 +888,7 @@ static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
886 * operational state handle. Any potentially existing state created by 888 * operational state handle. Any potentially existing state created by
887 * previous operations is discarded. 889 * previous operations is discarded.
888 * 890 *
891 * Context: Any context.
889 * Return: 0 if the message digest initialization was successful; < 0 if an 892 * Return: 0 if the message digest initialization was successful; < 0 if an
890 * error occurred 893 * error occurred
891 */ 894 */
@@ -907,6 +910,7 @@ static inline int crypto_shash_init(struct shash_desc *desc)
907 * 910 *
908 * Updates the message digest state of the operational state handle. 911 * Updates the message digest state of the operational state handle.
909 * 912 *
913 * Context: Any context.
910 * Return: 0 if the message digest update was successful; < 0 if an error 914 * Return: 0 if the message digest update was successful; < 0 if an error
911 * occurred 915 * occurred
912 */ 916 */
@@ -923,6 +927,7 @@ int crypto_shash_update(struct shash_desc *desc, const u8 *data,
923 * into the output buffer. The caller must ensure that the output buffer is 927 * into the output buffer. The caller must ensure that the output buffer is
924 * large enough by using crypto_shash_digestsize. 928 * large enough by using crypto_shash_digestsize.
925 * 929 *
930 * Context: Any context.
926 * Return: 0 if the message digest creation was successful; < 0 if an error 931 * Return: 0 if the message digest creation was successful; < 0 if an error
927 * occurred 932 * occurred
928 */ 933 */
@@ -939,6 +944,7 @@ int crypto_shash_final(struct shash_desc *desc, u8 *out);
939 * crypto_shash_update and crypto_shash_final. The parameters have the same 944 * crypto_shash_update and crypto_shash_final. The parameters have the same
940 * meaning as discussed for those separate functions. 945 * meaning as discussed for those separate functions.
941 * 946 *
947 * Context: Any context.
942 * Return: 0 if the message digest creation was successful; < 0 if an error 948 * Return: 0 if the message digest creation was successful; < 0 if an error
943 * occurred 949 * occurred
944 */ 950 */
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
index f18344518e32..d2316242a988 100644
--- a/include/crypto/internal/simd.h
+++ b/include/crypto/internal/simd.h
@@ -6,6 +6,11 @@
 #ifndef _CRYPTO_INTERNAL_SIMD_H
 #define _CRYPTO_INTERNAL_SIMD_H
 
+#include <linux/percpu.h>
+#include <linux/types.h>
+
+/* skcipher support */
+
 struct simd_skcipher_alg;
 struct skcipher_alg;
 
@@ -22,4 +27,43 @@ int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
 void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
 			       struct simd_skcipher_alg **simd_algs);
 
+/* AEAD support */
+
+struct simd_aead_alg;
+struct aead_alg;
+
+struct simd_aead_alg *simd_aead_create_compat(const char *algname,
+					      const char *drvname,
+					      const char *basename);
+struct simd_aead_alg *simd_aead_create(const char *algname,
+				       const char *basename);
+void simd_aead_free(struct simd_aead_alg *alg);
+
+int simd_register_aeads_compat(struct aead_alg *algs, int count,
+			       struct simd_aead_alg **simd_algs);
+
+void simd_unregister_aeads(struct aead_alg *algs, int count,
+			   struct simd_aead_alg **simd_algs);
+
+/*
+ * crypto_simd_usable() - is it allowed at this time to use SIMD instructions or
+ *			  access the SIMD register file?
+ *
+ * This delegates to may_use_simd(), except that this also returns false if SIMD
+ * in crypto code has been temporarily disabled on this CPU by the crypto
+ * self-tests, in order to test the no-SIMD fallback code.  This override is
+ * currently limited to configurations where the extra self-tests are enabled,
+ * because it might be a bit too invasive to be part of the regular self-tests.
+ *
+ * This is a macro so that <asm/simd.h>, which some architectures don't have,
+ * doesn't have to be included directly here.
+ */
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test);
+#define crypto_simd_usable() \
+	(may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test))
+#else
+#define crypto_simd_usable() may_use_simd()
+#endif
+
 #endif /* _CRYPTO_INTERNAL_SIMD_H */
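[The comment above describes the check all the converted glue code performs before entering a SIMD path. A rough sketch of the intended pattern on x86 follows; the two worker functions are hypothetical, and kernel_fpu_begin/end are the x86 spellings of the FPU guard:]

/* Sketch only: do_simd_loop() and do_generic_loop() are hypothetical. */
#include <crypto/internal/simd.h>
#include <asm/simd.h>
#include <asm/fpu/api.h>

void do_simd_loop(u8 *buf, unsigned int len);    /* hypothetical SIMD path */
void do_generic_loop(u8 *buf, unsigned int len); /* hypothetical C fallback */

static void my_transform(u8 *buf, unsigned int len)
{
	if (crypto_simd_usable()) {
		kernel_fpu_begin();	/* claim the SIMD register file */
		do_simd_loop(buf, len);
		kernel_fpu_end();
	} else {
		/* no-SIMD fallback; this is also what the extra self-tests
		 * exercise when they set crypto_simd_disabled_for_test */
		do_generic_loop(buf, len);
	}
}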
diff --git a/include/crypto/morus1280_glue.h b/include/crypto/morus1280_glue.h
index ad2aa743dd99..5cefddb1991f 100644
--- a/include/crypto/morus1280_glue.h
+++ b/include/crypto/morus1280_glue.h
@@ -47,16 +47,7 @@ int crypto_morus1280_glue_setauthsize(struct crypto_aead *tfm,
 int crypto_morus1280_glue_encrypt(struct aead_request *req);
 int crypto_morus1280_glue_decrypt(struct aead_request *req);
 
-int cryptd_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
-				 unsigned int keylen);
-int cryptd_morus1280_glue_setauthsize(struct crypto_aead *aead,
-				      unsigned int authsize);
-int cryptd_morus1280_glue_encrypt(struct aead_request *req);
-int cryptd_morus1280_glue_decrypt(struct aead_request *req);
-int cryptd_morus1280_glue_init_tfm(struct crypto_aead *aead);
-void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead);
-
-#define MORUS1280_DECLARE_ALGS(id, driver_name, priority) \
+#define MORUS1280_DECLARE_ALG(id, driver_name, priority) \
 	static const struct morus1280_glue_ops crypto_morus1280_##id##_ops = {\
 		.init = crypto_morus1280_##id##_init, \
 		.ad = crypto_morus1280_##id##_ad, \
@@ -77,55 +68,29 @@ void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead);
 	{ \
 	} \
 	\
-	static struct aead_alg crypto_morus1280_##id##_algs[] = {\
-		{ \
-			.setkey = crypto_morus1280_glue_setkey, \
-			.setauthsize = crypto_morus1280_glue_setauthsize, \
-			.encrypt = crypto_morus1280_glue_encrypt, \
-			.decrypt = crypto_morus1280_glue_decrypt, \
-			.init = crypto_morus1280_##id##_init_tfm, \
-			.exit = crypto_morus1280_##id##_exit_tfm, \
-			\
-			.ivsize = MORUS_NONCE_SIZE, \
-			.maxauthsize = MORUS_MAX_AUTH_SIZE, \
-			.chunksize = MORUS1280_BLOCK_SIZE, \
-			\
-			.base = { \
-				.cra_flags = CRYPTO_ALG_INTERNAL, \
-				.cra_blocksize = 1, \
-				.cra_ctxsize = sizeof(struct morus1280_ctx), \
-				.cra_alignmask = 0, \
-				\
-				.cra_name = "__morus1280", \
-				.cra_driver_name = "__"driver_name, \
-				\
-				.cra_module = THIS_MODULE, \
-			} \
-		}, { \
-			.setkey = cryptd_morus1280_glue_setkey, \
-			.setauthsize = cryptd_morus1280_glue_setauthsize, \
-			.encrypt = cryptd_morus1280_glue_encrypt, \
-			.decrypt = cryptd_morus1280_glue_decrypt, \
-			.init = cryptd_morus1280_glue_init_tfm, \
-			.exit = cryptd_morus1280_glue_exit_tfm, \
-			\
-			.ivsize = MORUS_NONCE_SIZE, \
-			.maxauthsize = MORUS_MAX_AUTH_SIZE, \
-			.chunksize = MORUS1280_BLOCK_SIZE, \
-			\
-			.base = { \
-				.cra_flags = CRYPTO_ALG_ASYNC, \
-				.cra_blocksize = 1, \
-				.cra_ctxsize = sizeof(struct crypto_aead *), \
-				.cra_alignmask = 0, \
-				\
-				.cra_priority = priority, \
-				\
-				.cra_name = "morus1280", \
-				.cra_driver_name = driver_name, \
-				\
-				.cra_module = THIS_MODULE, \
-			} \
-		} \
+	static struct aead_alg crypto_morus1280_##id##_alg = { \
+		.setkey = crypto_morus1280_glue_setkey, \
+		.setauthsize = crypto_morus1280_glue_setauthsize, \
+		.encrypt = crypto_morus1280_glue_encrypt, \
+		.decrypt = crypto_morus1280_glue_decrypt, \
+		.init = crypto_morus1280_##id##_init_tfm, \
+		.exit = crypto_morus1280_##id##_exit_tfm, \
+		\
+		.ivsize = MORUS_NONCE_SIZE, \
+		.maxauthsize = MORUS_MAX_AUTH_SIZE, \
+		.chunksize = MORUS1280_BLOCK_SIZE, \
+		\
+		.base = { \
+			.cra_flags = CRYPTO_ALG_INTERNAL, \
+			.cra_blocksize = 1, \
+			.cra_ctxsize = sizeof(struct morus1280_ctx), \
+			.cra_alignmask = 0, \
+			.cra_priority = priority, \
+			\
+			.cra_name = "__morus1280", \
+			.cra_driver_name = "__"driver_name, \
+			\
+			.cra_module = THIS_MODULE, \
+		} \
 	}
 
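[With the cryptd wrappers gone, the single CRYPTO_ALG_INTERNAL algorithm this macro now declares is meant to be wrapped by the new AEAD SIMD helpers from <crypto/internal/simd.h>. A sketch of how a glue module might register it — the `sse2` id, driver name, and priority are illustrative:]

/* Sketch: wrap the internal alg with the SIMD helper at module init.
 * The MORUS1280_DECLARE_ALG arguments here are illustrative. */
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>

MORUS1280_DECLARE_ALG(sse2, "morus1280-sse2", 350);

static struct simd_aead_alg *simd_alg;

static int __init morus1280_sse2_init(void)
{
	return simd_register_aeads_compat(&crypto_morus1280_sse2_alg, 1,
					  &simd_alg);
}

static void __exit morus1280_sse2_exit(void)
{
	simd_unregister_aeads(&crypto_morus1280_sse2_alg, 1, &simd_alg);
}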
diff --git a/include/crypto/morus640_glue.h b/include/crypto/morus640_glue.h
index df8e1103ff94..0ee6266cb26c 100644
--- a/include/crypto/morus640_glue.h
+++ b/include/crypto/morus640_glue.h
@@ -47,16 +47,7 @@ int crypto_morus640_glue_setauthsize(struct crypto_aead *tfm,
 int crypto_morus640_glue_encrypt(struct aead_request *req);
 int crypto_morus640_glue_decrypt(struct aead_request *req);
 
-int cryptd_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key,
-				unsigned int keylen);
-int cryptd_morus640_glue_setauthsize(struct crypto_aead *aead,
-				     unsigned int authsize);
-int cryptd_morus640_glue_encrypt(struct aead_request *req);
-int cryptd_morus640_glue_decrypt(struct aead_request *req);
-int cryptd_morus640_glue_init_tfm(struct crypto_aead *aead);
-void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead);
-
-#define MORUS640_DECLARE_ALGS(id, driver_name, priority) \
+#define MORUS640_DECLARE_ALG(id, driver_name, priority) \
 	static const struct morus640_glue_ops crypto_morus640_##id##_ops = {\
 		.init = crypto_morus640_##id##_init, \
 		.ad = crypto_morus640_##id##_ad, \
@@ -77,55 +68,29 @@ void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead);
 	{ \
 	} \
 	\
-	static struct aead_alg crypto_morus640_##id##_algs[] = {\
-		{ \
-			.setkey = crypto_morus640_glue_setkey, \
-			.setauthsize = crypto_morus640_glue_setauthsize, \
-			.encrypt = crypto_morus640_glue_encrypt, \
-			.decrypt = crypto_morus640_glue_decrypt, \
-			.init = crypto_morus640_##id##_init_tfm, \
-			.exit = crypto_morus640_##id##_exit_tfm, \
-			\
-			.ivsize = MORUS_NONCE_SIZE, \
-			.maxauthsize = MORUS_MAX_AUTH_SIZE, \
-			.chunksize = MORUS640_BLOCK_SIZE, \
-			\
-			.base = { \
-				.cra_flags = CRYPTO_ALG_INTERNAL, \
-				.cra_blocksize = 1, \
-				.cra_ctxsize = sizeof(struct morus640_ctx), \
-				.cra_alignmask = 0, \
-				\
-				.cra_name = "__morus640", \
-				.cra_driver_name = "__"driver_name, \
-				\
-				.cra_module = THIS_MODULE, \
-			} \
-		}, { \
-			.setkey = cryptd_morus640_glue_setkey, \
-			.setauthsize = cryptd_morus640_glue_setauthsize, \
-			.encrypt = cryptd_morus640_glue_encrypt, \
-			.decrypt = cryptd_morus640_glue_decrypt, \
-			.init = cryptd_morus640_glue_init_tfm, \
-			.exit = cryptd_morus640_glue_exit_tfm, \
-			\
-			.ivsize = MORUS_NONCE_SIZE, \
-			.maxauthsize = MORUS_MAX_AUTH_SIZE, \
-			.chunksize = MORUS640_BLOCK_SIZE, \
-			\
-			.base = { \
-				.cra_flags = CRYPTO_ALG_ASYNC, \
-				.cra_blocksize = 1, \
-				.cra_ctxsize = sizeof(struct crypto_aead *), \
-				.cra_alignmask = 0, \
-				\
-				.cra_priority = priority, \
-				\
-				.cra_name = "morus640", \
-				.cra_driver_name = driver_name, \
-				\
-				.cra_module = THIS_MODULE, \
-			} \
-		} \
+	static struct aead_alg crypto_morus640_##id##_alg = {\
+		.setkey = crypto_morus640_glue_setkey, \
+		.setauthsize = crypto_morus640_glue_setauthsize, \
+		.encrypt = crypto_morus640_glue_encrypt, \
+		.decrypt = crypto_morus640_glue_decrypt, \
+		.init = crypto_morus640_##id##_init_tfm, \
+		.exit = crypto_morus640_##id##_exit_tfm, \
+		\
+		.ivsize = MORUS_NONCE_SIZE, \
+		.maxauthsize = MORUS_MAX_AUTH_SIZE, \
+		.chunksize = MORUS640_BLOCK_SIZE, \
+		\
+		.base = { \
+			.cra_flags = CRYPTO_ALG_INTERNAL, \
+			.cra_blocksize = 1, \
+			.cra_ctxsize = sizeof(struct morus640_ctx), \
+			.cra_alignmask = 0, \
+			.cra_priority = priority, \
+			\
+			.cra_name = "__morus640", \
+			.cra_driver_name = "__"driver_name, \
+			\
+			.cra_module = THIS_MODULE, \
+		} \
 	}
 
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index be626eac9113..712fe1214b5f 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -15,6 +15,7 @@
 #define _LINUX_PUBLIC_KEY_H
 
 #include <linux/keyctl.h>
+#include <linux/oid_registry.h>
 
 /*
  * Cryptographic data for the public-key subtype of the asymmetric key type.
@@ -25,6 +26,9 @@
 struct public_key {
 	void *key;
 	u32 keylen;
+	enum OID algo;
+	void *params;
+	u32 paramlen;
 	bool key_is_private;
 	const char *id_type;
 	const char *pkey_algo;
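[The three new fields let a certificate parser record which algorithm a key belongs to (as an OID) plus any algorithm parameters, which EC-RDSA needs to select its curve. A hedged sketch of a parser filling them in — the function name and the choice of OID are illustrative:]

/* Sketch: how a parser might populate the new fields for a GOST 2012
 * 256-bit key; names illustrative, error paths trimmed. */
#include <crypto/public_key.h>
#include <linux/slab.h>

static int set_ecrdsa_key_info(struct public_key *pub,
			       const void *params, u32 paramlen)
{
	pub->algo = OID_gost2012PKey256; /* from the cert's AlgorithmIdentifier */
	pub->params = kmemdup(params, paramlen, GFP_KERNEL);
	if (!pub->params)
		return -ENOMEM;
	pub->paramlen = paramlen;
	pub->pkey_algo = "ecrdsa";
	return 0;
}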
diff --git a/include/crypto/streebog.h b/include/crypto/streebog.h
index 856e32af8657..cae1b4a01971 100644
--- a/include/crypto/streebog.h
+++ b/include/crypto/streebog.h
@@ -23,7 +23,10 @@ struct streebog_uint512 {
 };
 
 struct streebog_state {
-	u8 buffer[STREEBOG_BLOCK_SIZE];
+	union {
+		u8 buffer[STREEBOG_BLOCK_SIZE];
+		struct streebog_uint512 m;
+	};
 	struct streebog_uint512 hash;
 	struct streebog_uint512 h;
 	struct streebog_uint512 N;
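[The anonymous union makes the partial-block byte buffer and the 512-bit message word overlap, so the final padded block can be handed to the compression step without a memcpy or an alignment-breaking cast. A sketch of the shape this enables; streebog_pad() and streebog_g() mirror the generic driver's internal helpers, but treat the whole thing as illustrative:]

/* Illustrative: the final block is padded in ctx->buffer and consumed
 * as ctx->m, which aliases the same bytes through the union. */
#include <crypto/streebog.h>

void streebog_pad(struct streebog_state *ctx);	/* as in the generic driver */
void streebog_g(struct streebog_uint512 *h, const struct streebog_uint512 *N,
		const struct streebog_uint512 *m);

static void streebog_finish_block(struct streebog_state *ctx)
{
	streebog_pad(ctx);			/* zero-fill + 0x01 marker */
	streebog_g(&ctx->h, &ctx->N, &ctx->m);	/* no cast, no memcpy */
}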
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 0f919d5fe84f..c2ffff5f9ae2 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1606,7 +1606,6 @@ static inline u32 jbd2_chksum(journal_t *journal, u32 crc,
 					    JBD_MAX_CHECKSUM_SIZE);
 
 	desc.shash.tfm = journal->j_chksum_driver;
-	desc.shash.flags = 0;
 	*(u32 *)desc.ctx = crc;
 
 	err = crypto_shash_update(&desc.shash, address, length);
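[This hunk and nearly every hunk below it are the same mechanical cleanup: struct shash_desc has lost its flags field (and with it the CRYPTO_TFM_REQ_MAY_SLEEP hint some callers set), so descriptor setup shrinks to a single tfm assignment. The pattern, condensed into one self-contained example with generic names:]

/* The before/after shape of every flags-removal hunk in this series. */
#include <crypto/hash.h>

static int digest_buf(struct crypto_shash *tfm, const void *data,
		      unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);

	desc->tfm = tfm;
	/* desc->flags = 0;  <-- the line each hunk below deletes */

	return crypto_shash_digest(desc, data, len, out);
}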
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index d2fa9ca42e9a..7f30446348c4 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -93,6 +93,24 @@ enum OID {
 	OID_authorityKeyIdentifier,	/* 2.5.29.35 */
 	OID_extKeyUsage,		/* 2.5.29.37 */
 
+	/* EC-RDSA */
+	OID_gostCPSignA,		/* 1.2.643.2.2.35.1 */
+	OID_gostCPSignB,		/* 1.2.643.2.2.35.2 */
+	OID_gostCPSignC,		/* 1.2.643.2.2.35.3 */
+	OID_gost2012PKey256,		/* 1.2.643.7.1.1.1.1 */
+	OID_gost2012PKey512,		/* 1.2.643.7.1.1.1.2 */
+	OID_gost2012Digest256,		/* 1.2.643.7.1.1.2.2 */
+	OID_gost2012Digest512,		/* 1.2.643.7.1.1.2.3 */
+	OID_gost2012Signature256,	/* 1.2.643.7.1.1.3.2 */
+	OID_gost2012Signature512,	/* 1.2.643.7.1.1.3.3 */
+	OID_gostTC26Sign256A,		/* 1.2.643.7.1.2.1.1.1 */
+	OID_gostTC26Sign256B,		/* 1.2.643.7.1.2.1.1.2 */
+	OID_gostTC26Sign256C,		/* 1.2.643.7.1.2.1.1.3 */
+	OID_gostTC26Sign256D,		/* 1.2.643.7.1.2.1.1.4 */
+	OID_gostTC26Sign512A,		/* 1.2.643.7.1.2.1.2.1 */
+	OID_gostTC26Sign512B,		/* 1.2.643.7.1.2.1.2.2 */
+	OID_gostTC26Sign512C,		/* 1.2.643.7.1.2.1.2.3 */
+
 	OID__NR
 };
 
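[These enum values are what the ASN.1 decoders compare against after running a DER-encoded OID through the registry. A small sketch — look_up_OID() is the existing helper in lib/oid_registry.c, while the dispatch itself is illustrative:]

#include <linux/oid_registry.h>

/* Sketch: map a raw DER OID to a pkey algorithm name; illustrative. */
static const char *pkey_algo_from_oid(const void *der, size_t len)
{
	switch (look_up_OID(der, len)) {
	case OID_gost2012Signature256:
	case OID_gost2012Signature512:
		return "ecrdsa";
	default:
		return NULL;	/* not an EC-RDSA signature OID */
	}
}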
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 827c601841c4..6f89fc8d4b8e 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -5,8 +5,7 @@
  *
  * Author: Brijesh Singh <brijesh.singh@amd.com>
  *
- * SEV spec 0.14 is available at:
- * http://support.amd.com/TechDocs/55766_SEV-KM API_Specification.pdf
+ * SEV API spec is available at https://developer.amd.com/sev
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index ac8c60bcc83b..43521d500c2b 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -6,8 +6,7 @@
  *
  * Author: Brijesh Singh <brijesh.singh@amd.com>
  *
- * SEV spec 0.14 is available at:
- * http://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf
+ * SEV API specification is available at: https://developer.amd.com/sev/
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -30,7 +29,8 @@ enum {
 	SEV_PDH_GEN,
 	SEV_PDH_CERT_EXPORT,
 	SEV_PEK_CERT_IMPORT,
-	SEV_GET_ID,
+	SEV_GET_ID,	/* This command is deprecated, use SEV_GET_ID2 */
+	SEV_GET_ID2,
 
 	SEV_MAX,
 };
@@ -125,7 +125,7 @@ struct sev_user_data_pdh_cert_export {
 } __packed;
 
 /**
- * struct sev_user_data_get_id - GET_ID command parameters
+ * struct sev_user_data_get_id - GET_ID command parameters (deprecated)
  *
  * @socket1: Buffer to pass unique ID of first socket
  * @socket2: Buffer to pass unique ID of second socket
@@ -136,6 +136,16 @@ struct sev_user_data_get_id {
 } __packed;
 
 /**
+ * struct sev_user_data_get_id2 - GET_ID command parameters
+ * @address: Buffer to store unique ID
+ * @length: length of the unique ID
+ */
+struct sev_user_data_get_id2 {
+	__u64 address;				/* In */
+	__u32 length;				/* In/Out */
+} __packed;
+
+/**
  * struct sev_issue_cmd - SEV ioctl parameters
  *
  * @cmd: SEV commands to execute
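[For completeness, a user-space sketch of driving the new command through the existing /dev/sev ioctl; the two-call pattern relies on the In/Out length field and is an assumption about typical usage, not text from this patch:]

/* Sketch: fetch the platform's unique ID with SEV_GET_ID2.
 * Error/cleanup paths trimmed for brevity. */
#include <fcntl.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/psp-sev.h>

static void *read_sev_id(unsigned int *len_out)
{
	struct sev_user_data_get_id2 id2 = {};
	struct sev_issue_cmd arg = { .cmd = SEV_GET_ID2 };
	int fd = open("/dev/sev", O_RDWR);

	if (fd < 0)
		return NULL;

	arg.data = (__u64)(unsigned long)&id2;
	ioctl(fd, SEV_ISSUE_CMD, &arg);	/* probe: firmware reports the length */
	id2.address = (__u64)(unsigned long)malloc(id2.length);
	if (!id2.address || ioctl(fd, SEV_ISSUE_CMD, &arg))
		return NULL;	/* second call copies the ID into the buffer */

	*len_out = id2.length;
	return (void *)(unsigned long)id2.address;
}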
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index f1d0e00a3971..f7fb8f6a688f 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -688,7 +688,6 @@ static int kexec_calculate_store_digests(struct kimage *image)
 		goto out_free_desc;
 
 	desc->tfm   = tfm;
-	desc->flags = 0;
 
 	ret = crypto_shash_init(desc);
 	if (ret < 0)
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index 4d0d47c1ffbd..e89ebfdbb0fc 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -69,7 +69,6 @@ __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
 
 	rcu_read_lock();
 	desc.shash.tfm = rcu_dereference(crct10dif_tfm);
-	desc.shash.flags = 0;
 	*(__u16 *)desc.ctx = crc;
 
 	err = crypto_shash_update(&desc.shash, buffer, len);
diff --git a/lib/digsig.c b/lib/digsig.c
index 6ba6fcd92dd1..3b0a579bdcdf 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -240,7 +240,6 @@ int digsig_verify(struct key *keyring, const char *sig, int siglen,
 		goto err;
 
 	desc->tfm = shash;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	crypto_shash_init(desc);
 	crypto_shash_update(desc, data, datalen);
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index f0a2934605bf..4e9829c4d64c 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -47,7 +47,6 @@ u32 crc32c(u32 crc, const void *address, unsigned int length)
 	int err;
 
 	shash->tfm = tfm;
-	shash->flags = 0;
 	*ctx = crc;
 
 	err = crypto_shash_update(shash, address, length);
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
index 78bec8df8525..aaa39409eeb7 100644
--- a/net/bluetooth/amp.c
+++ b/net/bluetooth/amp.c
@@ -161,7 +161,6 @@ static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output)
 	}
 
 	shash->tfm = tfm;
-	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	ret = crypto_shash_digest(shash, plaintext, psize, output);
 
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 621146d04c03..e68c715f8d37 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -183,7 +183,6 @@ static int aes_cmac(struct crypto_shash *tfm, const u8 k[16], const u8 *m,
 	}
 
 	desc->tfm = tfm;
-	desc->flags = 0;
 
 	/* Swap key and message from LSB to MSB */
 	swap_buf(k, tmp, 16);
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 39d72e58b8e5..31569f4809f6 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -760,7 +760,6 @@ void sctp_auth_calculate_hmac(const struct sctp_association *asoc,
 		SHASH_DESC_ON_STACK(desc, tfm);
 
 		desc->tfm = tfm;
-		desc->flags = 0;
 		crypto_shash_digest(desc, (u8 *)auth,
 				    end - (unsigned char *)auth, digest);
 		shash_desc_zero(desc);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index d05c57664e36..72e74503f9fc 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1684,7 +1684,6 @@ static struct sctp_cookie_param *sctp_pack_cookie(
 
 	/* Sign the message. */
 	desc->tfm = sctp_sk(ep->base.sk)->hmac;
-	desc->flags = 0;
 
 	err = crypto_shash_setkey(desc->tfm, ep->secret_key,
 				  sizeof(ep->secret_key)) ?:
@@ -1755,7 +1754,6 @@ struct sctp_association *sctp_unpack_cookie(
 	int err;
 
 	desc->tfm = sctp_sk(ep->base.sk)->hmac;
-	desc->flags = 0;
 
 	err = crypto_shash_setkey(desc->tfm, ep->secret_key,
 				  sizeof(ep->secret_key)) ?:
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 4f43383971ba..6f2d30d7b766 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -977,7 +977,6 @@ krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
 	}
 
 	desc->tfm = hmac;
-	desc->flags = 0;
 
 	/* Compute intermediate Kseq from session key */
 	err = crypto_shash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
@@ -1045,7 +1044,6 @@ krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
 	}
 
 	desc->tfm = hmac;
-	desc->flags = 0;
 
 	/* Compute intermediate Kcrypt from session key */
 	for (i = 0; i < kctx->gk5e->keylength; i++)
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 56cc85c5bc06..6e5d6d240215 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -438,7 +438,6 @@ context_derive_keys_rc4(struct krb5_ctx *ctx)
 	}
 
 	desc->tfm = hmac;
-	desc->flags = 0;
 
 	err = crypto_shash_digest(desc, sigkeyconstant, slen, ctx->cksum);
 	kzfree(desc);
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index 35f06563207d..11eaa5956f00 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -501,7 +501,6 @@ static int michael_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *hdr,
 	}
 
 	desc->tfm = tfm_michael;
-	desc->flags = 0;
 
 	if (crypto_shash_setkey(tfm_michael, key, 8))
 		return -1;
diff --git a/security/apparmor/crypto.c b/security/apparmor/crypto.c
index af03d98c7552..baba63bc66b1 100644
--- a/security/apparmor/crypto.c
+++ b/security/apparmor/crypto.c
@@ -43,7 +43,6 @@ char *aa_calc_hash(void *data, size_t len)
 		goto fail;
 
 	desc->tfm = apparmor_tfm;
-	desc->flags = 0;
 
 	error = crypto_shash_init(desc);
 	if (error)
@@ -81,7 +80,6 @@ int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
 		goto fail;
 
 	desc->tfm = apparmor_tfm;
-	desc->flags = 0;
 
 	error = crypto_shash_init(desc);
 	if (error)
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
index d775e03fbbcc..99080871eb9f 100644
--- a/security/integrity/digsig_asymmetric.c
+++ b/security/integrity/digsig_asymmetric.c
@@ -104,9 +104,16 @@ int asymmetric_verify(struct key *keyring, const char *sig,
 
 	memset(&pks, 0, sizeof(pks));
 
-	pks.pkey_algo = "rsa";
 	pks.hash_algo = hash_algo_name[hdr->hash_algo];
-	pks.encoding = "pkcs1";
+	if (hdr->hash_algo == HASH_ALGO_STREEBOG_256 ||
+	    hdr->hash_algo == HASH_ALGO_STREEBOG_512) {
+		/* EC-RDSA and Streebog should go together. */
+		pks.pkey_algo = "ecrdsa";
+		pks.encoding = "raw";
+	} else {
+		pks.pkey_algo = "rsa";
+		pks.encoding = "pkcs1";
+	}
 	pks.digest = (u8 *)data;
 	pks.digest_size = datalen;
 	pks.s = hdr->sig;
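[So a Streebog digest is now routed to the EC-RDSA verifier with raw signature encoding, while everything else keeps the RSA/PKCS#1 path; the populated descriptor is then handed to verify_signature() as before. An illustrative condensation of the dispatch — the keyring lookup is elided, and the function name is invented:]

/* Sketch: the shape of the new dispatch in asymmetric_verify();
 * "key" comes from the earlier integrity keyring lookup. */
#include <crypto/public_key.h>
#include <linux/hash_info.h>

static int verify_digest(struct key *key, struct public_key_signature *pks,
			 u8 hash_algo)
{
	if (hash_algo == HASH_ALGO_STREEBOG_256 ||
	    hash_algo == HASH_ALGO_STREEBOG_512) {
		pks->pkey_algo = "ecrdsa";	/* GOST signature, raw encoding */
		pks->encoding = "raw";
	} else {
		pks->pkey_algo = "rsa";
		pks->encoding = "pkcs1";
	}
	return verify_signature(key, pks);
}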
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index c37d08118af5..e11564eb645b 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -124,7 +124,6 @@ out:
 		return ERR_PTR(-ENOMEM);
 
 	desc->tfm = *tfm;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	rc = crypto_shash_init(desc);
 	if (rc) {
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 16a4f45863b1..a32878e10ebc 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -333,7 +333,6 @@ static int ima_calc_file_hash_tfm(struct file *file,
 	SHASH_DESC_ON_STACK(shash, tfm);
 
 	shash->tfm = tfm;
-	shash->flags = 0;
 
 	hash->length = crypto_shash_digestsize(tfm);
 
@@ -469,7 +468,6 @@ static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
 	int rc, i;
 
 	shash->tfm = tfm;
-	shash->flags = 0;
 
 	hash->length = crypto_shash_digestsize(tfm);
 
@@ -591,7 +589,6 @@ static int calc_buffer_shash_tfm(const void *buf, loff_t size,
 	int rc;
 
 	shash->tfm = tfm;
-	shash->flags = 0;
 
 	hash->length = crypto_shash_digestsize(tfm);
 
@@ -664,7 +661,6 @@ static int __init ima_calc_boot_aggregate_tfm(char *digest,
 	SHASH_DESC_ON_STACK(shash, tfm);
 
 	shash->tfm = tfm;
-	shash->flags = 0;
 
 	rc = crypto_shash_init(shash);
 	if (rc != 0)
diff --git a/security/keys/dh.c b/security/keys/dh.c
index 711e89d8c415..23f95dec771b 100644
--- a/security/keys/dh.c
+++ b/security/keys/dh.c
@@ -112,7 +112,6 @@ static int kdf_alloc(struct kdf_sdesc **sdesc_ret, char *hashname)
 	if (!sdesc)
 		goto out_free_tfm;
 	sdesc->shash.tfm = tfm;
-	sdesc->shash.flags = 0x0;
 
 	*sdesc_ret = sdesc;
 
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 347108f660a1..1b1456b21a93 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -333,7 +333,6 @@ static int calc_hash(struct crypto_shash *tfm, u8 *digest,
 	int err;
 
 	desc->tfm = tfm;
-	desc->flags = 0;
 
 	err = crypto_shash_digest(desc, buf, buflen, digest);
 	shash_desc_zero(desc);
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index efdbf17f3915..a75b2f0f1230 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -55,7 +55,6 @@ static struct sdesc *init_sdesc(struct crypto_shash *alg)
 	if (!sdesc)
 		return ERR_PTR(-ENOMEM);
 	sdesc->shash.tfm = alg;
-	sdesc->shash.flags = 0x0;
 	return sdesc;
 }
 