author	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-26 16:40:17 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-26 16:40:17 -0400
commit	bbce2ad2d711c12d93145a7bbdf086e73f414bcd (patch)
tree	35432a39f68f4c5df44ed38037cbf05adadb923e
parent	0f776dc377f6c87f4e4d4a5f63602f33fb93b31e (diff)
parent	0f95e2ffc58f5d32a90eb1051d17aeebc21cf91d (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.8:

  API:
   - first part of skcipher low-level conversions
   - add KPP (Key-agreement Protocol Primitives) interface.

  Algorithms:
   - fix IPsec/cryptd reordering issues that affects aesni
   - RSA no longer does explicit leading zero removal
   - add SHA3
   - add DH
   - add ECDH
   - improve DRBG performance by not doing CTR by hand

  Drivers:
   - add x86 AVX2 multibuffer SHA256/512
   - add POWER8 optimised crc32c
   - add xts support to vmx
   - add DH support to qat
   - add RSA support to caam
   - add Layerscape support to caam
   - add SEC1 AEAD support to talitos
   - improve performance by chaining requests in marvell/cesa
   - add support for Araneus Alea I USB RNG
   - add support for Broadcom BCM5301 RNG
   - add support for Amlogic Meson RNG
   - add support Broadcom NSP SoC RNG"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (180 commits)
  crypto: vmx - Fix aes_p8_xts_decrypt build failure
  crypto: vmx - Ignore generated files
  crypto: vmx - Adding support for XTS
  crypto: vmx - Adding asm subroutines for XTS
  crypto: skcipher - add comment for skcipher_alg->base
  crypto: testmgr - Print akcipher algorithm name
  crypto: marvell - Fix wrong flag used for GFP in mv_cesa_dma_add_iv_op
  crypto: nx - off by one bug in nx_of_update_msc()
  crypto: rsa-pkcs1pad - fix rsa-pkcs1pad request struct
  crypto: scatterwalk - Inline start/map/done
  crypto: scatterwalk - Remove unnecessary BUG in scatterwalk_start
  crypto: scatterwalk - Remove unnecessary advance in scatterwalk_pagedone
  crypto: scatterwalk - Fix test in scatterwalk_done
  crypto: api - Optimise away crypto_yield when hard preemption is on
  crypto: scatterwalk - add no-copy support to copychunks
  crypto: scatterwalk - Remove scatterwalk_bytes_sglen
  crypto: omap - Stop using crypto scatterwalk_bytes_sglen
  crypto: skcipher - Remove top-level givcipher interface
  crypto: user - Remove crypto_lookup_skcipher call
  crypto: cts - Convert to skcipher
  ...
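The headline API change above is the new KPP (Key-agreement Protocol Primitives) interface that DH and ECDH register under. As a rough orientation, the sketch below shows how a kernel-side caller might drive the "dh" algorithm through that interface. It is an illustrative reconstruction based on the headers added in this pull (include/crypto/kpp.h, include/crypto/dh.h, crypto/dh_helper.c), not code from the merge itself; exact signatures, error handling and the packing format should be checked against the tree.

/*
 * Illustrative sketch only -- not part of this merge.  Function names come
 * from the new KPP/DH headers; buffer sizes, GFP flags and the synchronous
 * completion assumption are placeholder assumptions.
 */
#include <crypto/dh.h>
#include <crypto/kpp.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int dh_shared_secret_example(const struct dh *params,
				    void *peer_pub, unsigned int peer_len,
				    void *secret, unsigned int secret_len)
{
	struct crypto_kpp *tfm;
	struct kpp_request *req = NULL;
	struct scatterlist src, dst;
	char *packed = NULL;
	unsigned int packed_len;
	int err;

	/* "dh" is the generic software implementation added by crypto/dh.c. */
	tfm = crypto_alloc_kpp("dh", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* DH parameters (p, g, private key) travel as one packed buffer. */
	packed_len = crypto_dh_key_len(params);
	packed = kmalloc(packed_len, GFP_KERNEL);
	if (!packed) {
		err = -ENOMEM;
		goto out;
	}
	err = crypto_dh_encode_key(packed, packed_len, params);
	if (err)
		goto out;
	err = crypto_kpp_set_secret(tfm, packed, packed_len);
	if (err)
		goto out;

	req = kpp_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	/* Input: the peer's public value.  Output: the shared secret. */
	sg_init_one(&src, peer_pub, peer_len);
	sg_init_one(&dst, secret, secret_len);
	kpp_request_set_input(req, &src, peer_len);
	kpp_request_set_output(req, &dst, secret_len);
	/* NULL callback: assumes the synchronous software implementation. */
	kpp_request_set_callback(req, 0, NULL, NULL);

	err = crypto_kpp_compute_shared_secret(req);

out:
	kpp_request_free(req);
	kfree(packed);
	crypto_free_kpp(tfm);
	return err;
}

The request-based flow deliberately mirrors the existing ahash/skcipher model: allocate a transform, set the secret once, then submit scatterlist-backed requests for public-key generation or shared-secret computation.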
-rw-r--r--Documentation/DocBook/crypto-API.tmpl4
-rw-r--r--Documentation/crypto/asymmetric-keys.txt2
-rw-r--r--Documentation/devicetree/bindings/rng/brcm,bcm2835.txt8
-rw-r--r--MAINTAINERS5
-rw-r--r--arch/arm/boot/dts/bcm-nsp.dtsi5
-rw-r--r--arch/arm/crypto/ghash-ce-glue.c40
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi43
-rw-r--r--arch/arm64/include/asm/io.h4
-rw-r--r--arch/powerpc/crypto/Makefile2
-rw-r--r--arch/powerpc/crypto/aes-spe-regs.h2
-rw-r--r--arch/powerpc/crypto/crc32c-vpmsum_asm.S1553
-rw-r--r--arch/powerpc/crypto/crc32c-vpmsum_glue.c167
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h12
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h3
-rw-r--r--arch/powerpc/kernel/iomap.c24
-rw-r--r--arch/s390/crypto/aes_s390.c113
-rw-r--r--arch/x86/crypto/Makefile4
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c94
-rw-r--r--arch/x86/crypto/chacha20_glue.c2
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_glue.c40
-rw-r--r--arch/x86/crypto/sha1-mb/Makefile (renamed from arch/x86/crypto/sha-mb/Makefile)0
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb.c (renamed from arch/x86/crypto/sha-mb/sha1_mb.c)288
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_ctx.h (renamed from arch/x86/crypto/sha-mb/sha_mb_ctx.h)2
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_mgr.h (renamed from arch/x86/crypto/sha-mb/sha_mb_mgr.h)0
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S (renamed from arch/x86/crypto/sha-mb/sha1_mb_mgr_datastruct.S)0
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S (renamed from arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S)0
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c (renamed from arch/x86/crypto/sha-mb/sha1_mb_mgr_init_avx2.c)2
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S (renamed from arch/x86/crypto/sha-mb/sha1_mb_mgr_submit_avx2.S)0
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_x8_avx2.S (renamed from arch/x86/crypto/sha-mb/sha1_x8_avx2.S)0
-rw-r--r--arch/x86/crypto/sha1_ssse3_glue.c6
-rw-r--r--arch/x86/crypto/sha256-mb/Makefile11
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb.c1030
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_ctx.h136
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr.h108
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S304
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S304
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c65
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S215
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_x8_avx2.S593
-rw-r--r--arch/x86/crypto/sha256_ssse3_glue.c10
-rw-r--r--arch/x86/crypto/sha512-mb/Makefile11
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb.c1046
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_ctx.h130
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_mgr.h104
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S281
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S291
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c67
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S222
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_x4_avx2.S529
-rw-r--r--arch/x86/crypto/sha512_ssse3_glue.c6
-rw-r--r--crypto/Kconfig77
-rw-r--r--crypto/Makefile12
-rw-r--r--crypto/ablk_helper.c6
-rw-r--r--crypto/ablkcipher.c223
-rw-r--r--crypto/aead.c16
-rw-r--r--crypto/ahash.c6
-rw-r--r--crypto/algapi.c24
-rw-r--r--crypto/authenc.c116
-rw-r--r--crypto/authencesn.c106
-rw-r--r--crypto/blkcipher.c185
-rw-r--r--crypto/ccm.c72
-rw-r--r--crypto/chacha20poly1305.c89
-rw-r--r--crypto/chainiv.c317
-rw-r--r--crypto/cryptd.c132
-rw-r--r--crypto/crypto_null.c11
-rw-r--r--crypto/crypto_user.c57
-rw-r--r--crypto/ctr.c183
-rw-r--r--crypto/cts.c495
-rw-r--r--crypto/dh.c189
-rw-r--r--crypto/dh_helper.c95
-rw-r--r--crypto/drbg.c269
-rw-r--r--crypto/ecc.c1018
-rw-r--r--crypto/ecc.h83
-rw-r--r--crypto/ecc_curve_defs.h57
-rw-r--r--crypto/ecdh.c151
-rw-r--r--crypto/ecdh_helper.c86
-rw-r--r--crypto/echainiv.c16
-rw-r--r--crypto/eseqiv.c242
-rw-r--r--crypto/gcm.c115
-rw-r--r--crypto/jitterentropy-kcapi.c22
-rw-r--r--crypto/kpp.c123
-rw-r--r--crypto/mcryptd.c132
-rw-r--r--crypto/rsa-pkcs1pad.c325
-rw-r--r--crypto/rsa.c113
-rw-r--r--crypto/rsa_helper.c172
-rw-r--r--crypto/rsaprivkey.asn110
-rw-r--r--crypto/scatterwalk.c81
-rw-r--r--crypto/seqiv.c176
-rw-r--r--crypto/sha3_generic.c300
-rw-r--r--crypto/skcipher.c196
-rw-r--r--crypto/tcrypt.c442
-rw-r--r--crypto/testmgr.c288
-rw-r--r--crypto/testmgr.h1036
-rw-r--r--drivers/char/hw_random/Kconfig16
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/bcm2835-rng.c47
-rw-r--r--drivers/char/hw_random/exynos-rng.c4
-rw-r--r--drivers/char/hw_random/meson-rng.c131
-rw-r--r--drivers/char/hw_random/omap-rng.c16
-rw-r--r--drivers/char/hw_random/stm32-rng.c10
-rw-r--r--drivers/crypto/bfin_crc.c5
-rw-r--r--drivers/crypto/caam/Kconfig18
-rw-r--r--drivers/crypto/caam/Makefile4
-rw-r--r--drivers/crypto/caam/caamhash.c5
-rw-r--r--drivers/crypto/caam/caampkc.c607
-rw-r--r--drivers/crypto/caam/caampkc.h70
-rw-r--r--drivers/crypto/caam/compat.h3
-rw-r--r--drivers/crypto/caam/ctrl.c125
-rw-r--r--drivers/crypto/caam/desc.h11
-rw-r--r--drivers/crypto/caam/desc_constr.h51
-rw-r--r--drivers/crypto/caam/jr.c22
-rw-r--r--drivers/crypto/caam/pdb.h188
-rw-r--r--drivers/crypto/caam/pkc_desc.c36
-rw-r--r--drivers/crypto/caam/regs.h151
-rw-r--r--drivers/crypto/caam/sg_sw_sec4.h17
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-xts.c43
-rw-r--r--drivers/crypto/ccp/ccp-crypto.h3
-rw-r--r--drivers/crypto/marvell/cesa.c142
-rw-r--r--drivers/crypto/marvell/cesa.h120
-rw-r--r--drivers/crypto/marvell/cipher.c157
-rw-r--r--drivers/crypto/marvell/hash.c150
-rw-r--r--drivers/crypto/marvell/tdma.c130
-rw-r--r--drivers/crypto/mxs-dcp.c47
-rw-r--r--drivers/crypto/nx/nx.c2
-rw-r--r--drivers/crypto/omap-aes.c36
-rw-r--r--drivers/crypto/omap-des.c14
-rw-r--r--drivers/crypto/omap-sham.c47
-rw-r--r--drivers/crypto/picoxcell_crypto.c60
-rw-r--r--drivers/crypto/qat/Kconfig3
-rw-r--r--drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c1
-rw-r--r--drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c1
-rw-r--r--drivers/crypto/qat/qat_common/Makefile11
-rw-r--r--drivers/crypto/qat/qat_common/adf_accel_devices.h1
-rw-r--r--drivers/crypto/qat/qat_common/adf_aer.c49
-rw-r--r--drivers/crypto/qat/qat_common/adf_common_drv.h2
-rw-r--r--drivers/crypto/qat/qat_common/adf_sriov.c2
-rw-r--r--drivers/crypto/qat/qat_common/adf_vf_isr.c2
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c8
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c872
-rw-r--r--drivers/crypto/qat/qat_common/qat_rsaprivkey.asn111
-rw-r--r--drivers/crypto/qat/qat_common/qat_rsapubkey.asn14
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c1
-rw-r--r--drivers/crypto/qce/ablkcipher.c27
-rw-r--r--drivers/crypto/qce/cipher.h2
-rw-r--r--drivers/crypto/s5p-sss.c80
-rw-r--r--drivers/crypto/sahara.c112
-rw-r--r--drivers/crypto/talitos.c672
-rw-r--r--drivers/crypto/ux500/cryp/Makefile6
-rw-r--r--drivers/crypto/ux500/hash/Makefile2
-rw-r--r--drivers/crypto/vmx/.gitignore2
-rw-r--r--drivers/crypto/vmx/Makefile2
-rw-r--r--drivers/crypto/vmx/aes_xts.c190
-rw-r--r--drivers/crypto/vmx/aesp8-ppc.h4
-rw-r--r--drivers/crypto/vmx/aesp8-ppc.pl1863
-rw-r--r--drivers/crypto/vmx/vmx.c2
-rw-r--r--drivers/usb/misc/Kconfig11
-rw-r--r--drivers/usb/misc/chaoskey.c21
-rw-r--r--include/asm-generic/io.h71
-rw-r--r--include/asm-generic/iomap.h8
-rw-r--r--include/crypto/aead.h12
-rw-r--r--include/crypto/algapi.h4
-rw-r--r--include/crypto/cryptd.h5
-rw-r--r--include/crypto/dh.h29
-rw-r--r--include/crypto/drbg.h12
-rw-r--r--include/crypto/ecdh.h30
-rw-r--r--include/crypto/internal/aead.h21
-rw-r--r--include/crypto/internal/geniv.h2
-rw-r--r--include/crypto/internal/hash.h12
-rw-r--r--include/crypto/internal/kpp.h64
-rw-r--r--include/crypto/internal/rsa.h42
-rw-r--r--include/crypto/internal/skcipher.h122
-rw-r--r--include/crypto/kpp.h330
-rw-r--r--include/crypto/mcryptd.h8
-rw-r--r--include/crypto/null.h12
-rw-r--r--include/crypto/scatterwalk.h48
-rw-r--r--include/crypto/sha3.h29
-rw-r--r--include/crypto/skcipher.h207
-rw-r--r--include/linux/crypto.h31
-rw-r--r--include/linux/mpi.h3
-rw-r--r--include/uapi/linux/cryptouser.h5
-rw-r--r--lib/digsig.c16
-rw-r--r--lib/mpi/mpicoder.c249
-rw-r--r--security/keys/big_key.c30
184 files changed, 19350 insertions, 4261 deletions
diff --git a/Documentation/DocBook/crypto-API.tmpl b/Documentation/DocBook/crypto-API.tmpl
index d55dc5a39bad..fb2a1526f6ec 100644
--- a/Documentation/DocBook/crypto-API.tmpl
+++ b/Documentation/DocBook/crypto-API.tmpl
@@ -440,8 +440,8 @@
      The type flag specifies the type of the cipher algorithm.
      The caller usually provides a 0 when the caller wants the
      default handling. Otherwise, the caller may provide the
-     following selections which match the the aforementioned
-     cipher types:
+     following selections which match the aforementioned cipher
+     types:
     </para>
 
    <itemizedlist>
diff --git a/Documentation/crypto/asymmetric-keys.txt b/Documentation/crypto/asymmetric-keys.txt
index 8c07e0ea6bc0..2b7816dea370 100644
--- a/Documentation/crypto/asymmetric-keys.txt
+++ b/Documentation/crypto/asymmetric-keys.txt
@@ -76,7 +76,7 @@ the criterion string:
 Looking in /proc/keys, the last 8 hex digits of the key fingerprint are
 displayed, along with the subtype:
 
-	1a39e171 I----- 1 perm 3f010000 0 0 asymmetri modsign.0: DSA 5acc2142 []
+	1a39e171 I----- 1 perm 3f010000 0 0 asymmetric modsign.0: DSA 5acc2142 []
 
 
 =========================
diff --git a/Documentation/devicetree/bindings/rng/brcm,bcm2835.txt b/Documentation/devicetree/bindings/rng/brcm,bcm2835.txt
index 07ccdaa68324..26542690b578 100644
--- a/Documentation/devicetree/bindings/rng/brcm,bcm2835.txt
+++ b/Documentation/devicetree/bindings/rng/brcm,bcm2835.txt
@@ -2,7 +2,8 @@ BCM2835 Random number generator
 
 Required properties:
 
-- compatible : should be "brcm,bcm2835-rng"
+- compatible : should be "brcm,bcm2835-rng" or "brcm,bcm-nsp-rng" or
+  "brcm,bcm5301x-rng"
 - reg : Specifies base physical address and size of the registers.
 
 Example:
@@ -11,3 +12,8 @@ rng {
 	compatible = "brcm,bcm2835-rng";
 	reg = <0x7e104000 0x10>;
 };
+
+rng@18033000 {
+	compatible = "brcm,bcm-nsp-rng";
+	reg = <0x18033000 0x14>;
+};
diff --git a/MAINTAINERS b/MAINTAINERS
index 92a3f42449df..531c600b8b93 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3286,6 +3286,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6.git
 S:	Maintained
 F:	Documentation/crypto/
+F:	Documentation/devicetree/bindings/crypto/
 F:	Documentation/DocBook/crypto-API.tmpl
 F:	arch/*/crypto/
 F:	crypto/
@@ -5273,6 +5274,7 @@ M: Matt Mackall <mpm@selenic.com>
 M:	Herbert Xu <herbert@gondor.apana.org.au>
 L:	linux-crypto@vger.kernel.org
 S:	Odd fixes
+F:	Documentation/devicetree/bindings/rng/
 F:	Documentation/hw_random.txt
 F:	drivers/char/hw_random/
 F:	include/linux/hw_random.h
@@ -9318,7 +9320,8 @@ L: rtc-linux@googlegroups.com
 S:	Maintained
 
 QAT DRIVER
-M:	Tadeusz Struk <tadeusz.struk@intel.com>
+M:	Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+M:	Salvatore Benedetto <salvatore.benedetto@intel.com>
 L:	qat-linux@intel.com
 S:	Supported
 F:	drivers/crypto/qat/
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index def9e783b5c6..1ed829e699d4 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -206,6 +206,11 @@
 			brcm,nand-has-wp;
 		};
 
+		rng: rng@33000 {
+			compatible = "brcm,bcm-nsp-rng";
+			reg = <0x33000 0x14>;
+		};
+
 		ccbtimer0: timer@34000 {
 			compatible = "arm,sp804";
 			reg = <0x34000 0x1000>;
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index 03a39fe29246..1568cb5cd870 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -154,30 +154,23 @@ static int ghash_async_init(struct ahash_request *req)
 	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
 	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
+	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+	struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
 
-	if (!may_use_simd()) {
-		memcpy(cryptd_req, req, sizeof(*req));
-		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
-		return crypto_ahash_init(cryptd_req);
-	} else {
-		struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
-		struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
-
-		desc->tfm = child;
-		desc->flags = req->base.flags;
-		return crypto_shash_init(desc);
-	}
+	desc->tfm = child;
+	desc->flags = req->base.flags;
+	return crypto_shash_init(desc);
 }
 
 static int ghash_async_update(struct ahash_request *req)
 {
 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!may_use_simd()) {
-		struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-		struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
-		struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
-
+	if (!may_use_simd() ||
+	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
 		return crypto_ahash_update(cryptd_req);
@@ -190,12 +183,12 @@ static int ghash_async_update(struct ahash_request *req)
 static int ghash_async_final(struct ahash_request *req)
 {
 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!may_use_simd()) {
-		struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-		struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
-		struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
-
+	if (!may_use_simd() ||
+	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
 		return crypto_ahash_final(cryptd_req);
@@ -212,7 +205,8 @@ static int ghash_async_digest(struct ahash_request *req)
 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
 	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!may_use_simd()) {
+	if (!may_use_simd() ||
+	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
 		return crypto_ahash_digest(cryptd_req);
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
index f895fc02ab06..40846319be69 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
@@ -49,6 +49,10 @@
 
 / {
 	model = "LS1043A RDB Board";
+
+	aliases {
+		crypto = &crypto;
+	};
 };
 
 &i2c0 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index de0323b48b1e..6bd46c133010 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -159,6 +159,49 @@
 			big-endian;
 		};
 
+		crypto: crypto@1700000 {
+			compatible = "fsl,sec-v5.4", "fsl,sec-v5.0",
+				     "fsl,sec-v4.0";
+			fsl,sec-era = <3>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x0 0x00 0x1700000 0x100000>;
+			reg = <0x00 0x1700000 0x0 0x100000>;
+			interrupts = <0 75 0x4>;
+
+			sec_jr0: jr@10000 {
+				compatible = "fsl,sec-v5.4-job-ring",
+					     "fsl,sec-v5.0-job-ring",
+					     "fsl,sec-v4.0-job-ring";
+				reg = <0x10000 0x10000>;
+				interrupts = <0 71 0x4>;
+			};
+
+			sec_jr1: jr@20000 {
+				compatible = "fsl,sec-v5.4-job-ring",
+					     "fsl,sec-v5.0-job-ring",
+					     "fsl,sec-v4.0-job-ring";
+				reg = <0x20000 0x10000>;
+				interrupts = <0 72 0x4>;
+			};
+
+			sec_jr2: jr@30000 {
+				compatible = "fsl,sec-v5.4-job-ring",
+					     "fsl,sec-v5.0-job-ring",
+					     "fsl,sec-v4.0-job-ring";
+				reg = <0x30000 0x10000>;
+				interrupts = <0 73 0x4>;
+			};
+
+			sec_jr3: jr@40000 {
+				compatible = "fsl,sec-v5.4-job-ring",
+					     "fsl,sec-v5.0-job-ring",
+					     "fsl,sec-v4.0-job-ring";
+				reg = <0x40000 0x10000>;
+				interrupts = <0 74 0x4>;
+			};
+		};
+
 		dcfg: dcfg@1ee0000 {
 			compatible = "fsl,ls1043a-dcfg", "syscon";
 			reg = <0x0 0x1ee0000 0x0 0x10000>;
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 44be1e03ed65..9b6e408cfa51 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -174,13 +174,15 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 #define iounmap			__iounmap
 
 /*
- * io{read,write}{16,32}be() macros
+ * io{read,write}{16,32,64}be() macros
  */
 #define ioread16be(p)		({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
 #define ioread32be(p)		({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
+#define ioread64be(p)		({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(); __v; })
 
 #define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
 #define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
+#define iowrite64be(v,p)	({ __iowmb(); __raw_writeq((__force __u64)cpu_to_be64(v), p); })
 
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile
index 9c221b69c181..7998c177f0a2 100644
--- a/arch/powerpc/crypto/Makefile
+++ b/arch/powerpc/crypto/Makefile
@@ -9,9 +9,11 @@ obj-$(CONFIG_CRYPTO_MD5_PPC) += md5-ppc.o
 obj-$(CONFIG_CRYPTO_SHA1_PPC) += sha1-powerpc.o
 obj-$(CONFIG_CRYPTO_SHA1_PPC_SPE) += sha1-ppc-spe.o
 obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o
+obj-$(CONFIG_CRYPT_CRC32C_VPMSUM) += crc32c-vpmsum.o
 
 aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o
 md5-ppc-y := md5-asm.o md5-glue.o
 sha1-powerpc-y := sha1-powerpc-asm.o sha1.o
 sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o
 sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o
+crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o
diff --git a/arch/powerpc/crypto/aes-spe-regs.h b/arch/powerpc/crypto/aes-spe-regs.h
index 30d217b399c3..2cc3a2caadae 100644
--- a/arch/powerpc/crypto/aes-spe-regs.h
+++ b/arch/powerpc/crypto/aes-spe-regs.h
@@ -18,7 +18,7 @@
 #define rLN r7	/* length of data to be processed */
 #define rIP r8	/* potiner to IV (CBC/CTR/XTS modes) */
 #define rKT r9	/* pointer to tweak key (XTS mode) */
-#define rT0 r11	/* pointers to en-/decrpytion tables */
+#define rT0 r11	/* pointers to en-/decryption tables */
 #define rT1 r10
 #define rD0 r9	/* data */
 #define rD1 r14
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_asm.S b/arch/powerpc/crypto/crc32c-vpmsum_asm.S
new file mode 100644
index 000000000000..dc640b212299
--- /dev/null
+++ b/arch/powerpc/crypto/crc32c-vpmsum_asm.S
@@ -0,0 +1,1553 @@
1/*
2 * Calculate the checksum of data that is 16 byte aligned and a multiple of
3 * 16 bytes.
4 *
5 * The first step is to reduce it to 1024 bits. We do this in 8 parallel
6 * chunks in order to mask the latency of the vpmsum instructions. If we
7 * have more than 32 kB of data to checksum we repeat this step multiple
8 * times, passing in the previous 1024 bits.
9 *
10 * The next step is to reduce the 1024 bits to 64 bits. This step adds
11 * 32 bits of 0s to the end - this matches what a CRC does. We just
12 * calculate constants that land the data in this 32 bits.
13 *
14 * We then use fixed point Barrett reduction to compute a mod n over GF(2)
15 * for n = CRC using POWER8 instructions. We use x = 32.
16 *
17 * http://en.wikipedia.org/wiki/Barrett_reduction
18 *
19 * Copyright (C) 2015 Anton Blanchard <anton@au.ibm.com>, IBM
20 *
21 * This program is free software; you can redistribute it and/or
22 * modify it under the terms of the GNU General Public License
23 * as published by the Free Software Foundation; either version
24 * 2 of the License, or (at your option) any later version.
25 */
26#include <asm/ppc_asm.h>
27#include <asm/ppc-opcode.h>
28
29 .section .rodata
30.balign 16
31
32.byteswap_constant:
33 /* byte reverse permute constant */
34 .octa 0x0F0E0D0C0B0A09080706050403020100
35
36#define MAX_SIZE 32768
37.constants:
38
39 /* Reduce 262144 kbits to 1024 bits */
40 /* x^261120 mod p(x)` << 1, x^261184 mod p(x)` << 1 */
41 .octa 0x00000000b6ca9e20000000009c37c408
42
43 /* x^260096 mod p(x)` << 1, x^260160 mod p(x)` << 1 */
44 .octa 0x00000000350249a800000001b51df26c
45
46 /* x^259072 mod p(x)` << 1, x^259136 mod p(x)` << 1 */
47 .octa 0x00000001862dac54000000000724b9d0
48
49 /* x^258048 mod p(x)` << 1, x^258112 mod p(x)` << 1 */
50 .octa 0x00000001d87fb48c00000001c00532fe
51
52 /* x^257024 mod p(x)` << 1, x^257088 mod p(x)` << 1 */
53 .octa 0x00000001f39b699e00000000f05a9362
54
55 /* x^256000 mod p(x)` << 1, x^256064 mod p(x)` << 1 */
56 .octa 0x0000000101da11b400000001e1007970
57
58 /* x^254976 mod p(x)` << 1, x^255040 mod p(x)` << 1 */
59 .octa 0x00000001cab571e000000000a57366ee
60
61 /* x^253952 mod p(x)` << 1, x^254016 mod p(x)` << 1 */
62 .octa 0x00000000c7020cfe0000000192011284
63
64 /* x^252928 mod p(x)` << 1, x^252992 mod p(x)` << 1 */
65 .octa 0x00000000cdaed1ae0000000162716d9a
66
67 /* x^251904 mod p(x)` << 1, x^251968 mod p(x)` << 1 */
68 .octa 0x00000001e804effc00000000cd97ecde
69
70 /* x^250880 mod p(x)` << 1, x^250944 mod p(x)` << 1 */
71 .octa 0x0000000077c3ea3a0000000058812bc0
72
73 /* x^249856 mod p(x)` << 1, x^249920 mod p(x)` << 1 */
74 .octa 0x0000000068df31b40000000088b8c12e
75
76 /* x^248832 mod p(x)` << 1, x^248896 mod p(x)` << 1 */
77 .octa 0x00000000b059b6c200000001230b234c
78
79 /* x^247808 mod p(x)` << 1, x^247872 mod p(x)` << 1 */
80 .octa 0x0000000145fb8ed800000001120b416e
81
82 /* x^246784 mod p(x)` << 1, x^246848 mod p(x)` << 1 */
83 .octa 0x00000000cbc0916800000001974aecb0
84
85 /* x^245760 mod p(x)` << 1, x^245824 mod p(x)` << 1 */
86 .octa 0x000000005ceeedc2000000008ee3f226
87
88 /* x^244736 mod p(x)` << 1, x^244800 mod p(x)` << 1 */
89 .octa 0x0000000047d74e8600000001089aba9a
90
91 /* x^243712 mod p(x)` << 1, x^243776 mod p(x)` << 1 */
92 .octa 0x00000001407e9e220000000065113872
93
94 /* x^242688 mod p(x)` << 1, x^242752 mod p(x)` << 1 */
95 .octa 0x00000001da967bda000000005c07ec10
96
97 /* x^241664 mod p(x)` << 1, x^241728 mod p(x)` << 1 */
98 .octa 0x000000006c8983680000000187590924
99
100 /* x^240640 mod p(x)` << 1, x^240704 mod p(x)` << 1 */
101 .octa 0x00000000f2d14c9800000000e35da7c6
102
103 /* x^239616 mod p(x)` << 1, x^239680 mod p(x)` << 1 */
104 .octa 0x00000001993c6ad4000000000415855a
105
106 /* x^238592 mod p(x)` << 1, x^238656 mod p(x)` << 1 */
107 .octa 0x000000014683d1ac0000000073617758
108
109 /* x^237568 mod p(x)` << 1, x^237632 mod p(x)` << 1 */
110 .octa 0x00000001a7c93e6c0000000176021d28
111
112 /* x^236544 mod p(x)` << 1, x^236608 mod p(x)` << 1 */
113 .octa 0x000000010211e90a00000001c358fd0a
114
115 /* x^235520 mod p(x)` << 1, x^235584 mod p(x)` << 1 */
116 .octa 0x000000001119403e00000001ff7a2c18
117
118 /* x^234496 mod p(x)` << 1, x^234560 mod p(x)` << 1 */
119 .octa 0x000000001c3261aa00000000f2d9f7e4
120
121 /* x^233472 mod p(x)` << 1, x^233536 mod p(x)` << 1 */
122 .octa 0x000000014e37a634000000016cf1f9c8
123
124 /* x^232448 mod p(x)` << 1, x^232512 mod p(x)` << 1 */
125 .octa 0x0000000073786c0c000000010af9279a
126
127 /* x^231424 mod p(x)` << 1, x^231488 mod p(x)` << 1 */
128 .octa 0x000000011dc037f80000000004f101e8
129
130 /* x^230400 mod p(x)` << 1, x^230464 mod p(x)` << 1 */
131 .octa 0x0000000031433dfc0000000070bcf184
132
133 /* x^229376 mod p(x)` << 1, x^229440 mod p(x)` << 1 */
134 .octa 0x000000009cde8348000000000a8de642
135
136 /* x^228352 mod p(x)` << 1, x^228416 mod p(x)` << 1 */
137 .octa 0x0000000038d3c2a60000000062ea130c
138
139 /* x^227328 mod p(x)` << 1, x^227392 mod p(x)` << 1 */
140 .octa 0x000000011b25f26000000001eb31cbb2
141
142 /* x^226304 mod p(x)` << 1, x^226368 mod p(x)` << 1 */
143 .octa 0x000000001629e6f00000000170783448
144
145 /* x^225280 mod p(x)` << 1, x^225344 mod p(x)` << 1 */
146 .octa 0x0000000160838b4c00000001a684b4c6
147
148 /* x^224256 mod p(x)` << 1, x^224320 mod p(x)` << 1 */
149 .octa 0x000000007a44011c00000000253ca5b4
150
151 /* x^223232 mod p(x)` << 1, x^223296 mod p(x)` << 1 */
152 .octa 0x00000000226f417a0000000057b4b1e2
153
154 /* x^222208 mod p(x)` << 1, x^222272 mod p(x)` << 1 */
155 .octa 0x0000000045eb2eb400000000b6bd084c
156
157 /* x^221184 mod p(x)` << 1, x^221248 mod p(x)` << 1 */
158 .octa 0x000000014459d70c0000000123c2d592
159
160 /* x^220160 mod p(x)` << 1, x^220224 mod p(x)` << 1 */
161 .octa 0x00000001d406ed8200000000159dafce
162
163 /* x^219136 mod p(x)` << 1, x^219200 mod p(x)` << 1 */
164 .octa 0x0000000160c8e1a80000000127e1a64e
165
166 /* x^218112 mod p(x)` << 1, x^218176 mod p(x)` << 1 */
167 .octa 0x0000000027ba80980000000056860754
168
169 /* x^217088 mod p(x)` << 1, x^217152 mod p(x)` << 1 */
170 .octa 0x000000006d92d01800000001e661aae8
171
172 /* x^216064 mod p(x)` << 1, x^216128 mod p(x)` << 1 */
173 .octa 0x000000012ed7e3f200000000f82c6166
174
175 /* x^215040 mod p(x)` << 1, x^215104 mod p(x)` << 1 */
176 .octa 0x000000002dc8778800000000c4f9c7ae
177
178 /* x^214016 mod p(x)` << 1, x^214080 mod p(x)` << 1 */
179 .octa 0x0000000018240bb80000000074203d20
180
181 /* x^212992 mod p(x)` << 1, x^213056 mod p(x)` << 1 */
182 .octa 0x000000001ad381580000000198173052
183
184 /* x^211968 mod p(x)` << 1, x^212032 mod p(x)` << 1 */
185 .octa 0x00000001396b78f200000001ce8aba54
186
187 /* x^210944 mod p(x)` << 1, x^211008 mod p(x)` << 1 */
188 .octa 0x000000011a68133400000001850d5d94
189
190 /* x^209920 mod p(x)` << 1, x^209984 mod p(x)` << 1 */
191 .octa 0x000000012104732e00000001d609239c
192
193 /* x^208896 mod p(x)` << 1, x^208960 mod p(x)` << 1 */
194 .octa 0x00000000a140d90c000000001595f048
195
196 /* x^207872 mod p(x)` << 1, x^207936 mod p(x)` << 1 */
197 .octa 0x00000001b7215eda0000000042ccee08
198
199 /* x^206848 mod p(x)` << 1, x^206912 mod p(x)` << 1 */
200 .octa 0x00000001aaf1df3c000000010a389d74
201
202 /* x^205824 mod p(x)` << 1, x^205888 mod p(x)` << 1 */
203 .octa 0x0000000029d15b8a000000012a840da6
204
205 /* x^204800 mod p(x)` << 1, x^204864 mod p(x)` << 1 */
206 .octa 0x00000000f1a96922000000001d181c0c
207
208 /* x^203776 mod p(x)` << 1, x^203840 mod p(x)` << 1 */
209 .octa 0x00000001ac80d03c0000000068b7d1f6
210
211 /* x^202752 mod p(x)` << 1, x^202816 mod p(x)` << 1 */
212 .octa 0x000000000f11d56a000000005b0f14fc
213
214 /* x^201728 mod p(x)` << 1, x^201792 mod p(x)` << 1 */
215 .octa 0x00000001f1c022a20000000179e9e730
216
217 /* x^200704 mod p(x)` << 1, x^200768 mod p(x)` << 1 */
218 .octa 0x0000000173d00ae200000001ce1368d6
219
220 /* x^199680 mod p(x)` << 1, x^199744 mod p(x)` << 1 */
221 .octa 0x00000001d4ffe4ac0000000112c3a84c
222
223 /* x^198656 mod p(x)` << 1, x^198720 mod p(x)` << 1 */
224 .octa 0x000000016edc5ae400000000de940fee
225
226 /* x^197632 mod p(x)` << 1, x^197696 mod p(x)` << 1 */
227 .octa 0x00000001f1a0214000000000fe896b7e
228
229 /* x^196608 mod p(x)` << 1, x^196672 mod p(x)` << 1 */
230 .octa 0x00000000ca0b28a000000001f797431c
231
232 /* x^195584 mod p(x)` << 1, x^195648 mod p(x)` << 1 */
233 .octa 0x00000001928e30a20000000053e989ba
234
235 /* x^194560 mod p(x)` << 1, x^194624 mod p(x)` << 1 */
236 .octa 0x0000000097b1b002000000003920cd16
237
238 /* x^193536 mod p(x)` << 1, x^193600 mod p(x)` << 1 */
239 .octa 0x00000000b15bf90600000001e6f579b8
240
241 /* x^192512 mod p(x)` << 1, x^192576 mod p(x)` << 1 */
242 .octa 0x00000000411c5d52000000007493cb0a
243
244 /* x^191488 mod p(x)` << 1, x^191552 mod p(x)` << 1 */
245 .octa 0x00000001c36f330000000001bdd376d8
246
247 /* x^190464 mod p(x)` << 1, x^190528 mod p(x)` << 1 */
248 .octa 0x00000001119227e0000000016badfee6
249
250 /* x^189440 mod p(x)` << 1, x^189504 mod p(x)` << 1 */
251 .octa 0x00000000114d47020000000071de5c58
252
253 /* x^188416 mod p(x)` << 1, x^188480 mod p(x)` << 1 */
254 .octa 0x00000000458b5b9800000000453f317c
255
256 /* x^187392 mod p(x)` << 1, x^187456 mod p(x)` << 1 */
257 .octa 0x000000012e31fb8e0000000121675cce
258
259 /* x^186368 mod p(x)` << 1, x^186432 mod p(x)` << 1 */
260 .octa 0x000000005cf619d800000001f409ee92
261
262 /* x^185344 mod p(x)` << 1, x^185408 mod p(x)` << 1 */
263 .octa 0x0000000063f4d8b200000000f36b9c88
264
265 /* x^184320 mod p(x)` << 1, x^184384 mod p(x)` << 1 */
266 .octa 0x000000004138dc8a0000000036b398f4
267
268 /* x^183296 mod p(x)` << 1, x^183360 mod p(x)` << 1 */
269 .octa 0x00000001d29ee8e000000001748f9adc
270
271 /* x^182272 mod p(x)` << 1, x^182336 mod p(x)` << 1 */
272 .octa 0x000000006a08ace800000001be94ec00
273
274 /* x^181248 mod p(x)` << 1, x^181312 mod p(x)` << 1 */
275 .octa 0x0000000127d4201000000000b74370d6
276
277 /* x^180224 mod p(x)` << 1, x^180288 mod p(x)` << 1 */
278 .octa 0x0000000019d76b6200000001174d0b98
279
280 /* x^179200 mod p(x)` << 1, x^179264 mod p(x)` << 1 */
281 .octa 0x00000001b1471f6e00000000befc06a4
282
283 /* x^178176 mod p(x)` << 1, x^178240 mod p(x)` << 1 */
284 .octa 0x00000001f64c19cc00000001ae125288
285
286 /* x^177152 mod p(x)` << 1, x^177216 mod p(x)` << 1 */
287 .octa 0x00000000003c0ea00000000095c19b34
288
289 /* x^176128 mod p(x)` << 1, x^176192 mod p(x)` << 1 */
290 .octa 0x000000014d73abf600000001a78496f2
291
292 /* x^175104 mod p(x)` << 1, x^175168 mod p(x)` << 1 */
293 .octa 0x00000001620eb84400000001ac5390a0
294
295 /* x^174080 mod p(x)` << 1, x^174144 mod p(x)` << 1 */
296 .octa 0x0000000147655048000000002a80ed6e
297
298 /* x^173056 mod p(x)` << 1, x^173120 mod p(x)` << 1 */
299 .octa 0x0000000067b5077e00000001fa9b0128
300
301 /* x^172032 mod p(x)` << 1, x^172096 mod p(x)` << 1 */
302 .octa 0x0000000010ffe20600000001ea94929e
303
304 /* x^171008 mod p(x)` << 1, x^171072 mod p(x)` << 1 */
305 .octa 0x000000000fee8f1e0000000125f4305c
306
307 /* x^169984 mod p(x)` << 1, x^170048 mod p(x)` << 1 */
308 .octa 0x00000001da26fbae00000001471e2002
309
310 /* x^168960 mod p(x)` << 1, x^169024 mod p(x)` << 1 */
311 .octa 0x00000001b3a8bd880000000132d2253a
312
313 /* x^167936 mod p(x)` << 1, x^168000 mod p(x)` << 1 */
314 .octa 0x00000000e8f3898e00000000f26b3592
315
316 /* x^166912 mod p(x)` << 1, x^166976 mod p(x)` << 1 */
317 .octa 0x00000000b0d0d28c00000000bc8b67b0
318
319 /* x^165888 mod p(x)` << 1, x^165952 mod p(x)` << 1 */
320 .octa 0x0000000030f2a798000000013a826ef2
321
322 /* x^164864 mod p(x)` << 1, x^164928 mod p(x)` << 1 */
323 .octa 0x000000000fba10020000000081482c84
324
325 /* x^163840 mod p(x)` << 1, x^163904 mod p(x)` << 1 */
326 .octa 0x00000000bdb9bd7200000000e77307c2
327
328 /* x^162816 mod p(x)` << 1, x^162880 mod p(x)` << 1 */
329 .octa 0x0000000075d3bf5a00000000d4a07ec8
330
331 /* x^161792 mod p(x)` << 1, x^161856 mod p(x)` << 1 */
332 .octa 0x00000000ef1f98a00000000017102100
333
334 /* x^160768 mod p(x)` << 1, x^160832 mod p(x)` << 1 */
335 .octa 0x00000000689c760200000000db406486
336
337 /* x^159744 mod p(x)` << 1, x^159808 mod p(x)` << 1 */
338 .octa 0x000000016d5fa5fe0000000192db7f88
339
340 /* x^158720 mod p(x)` << 1, x^158784 mod p(x)` << 1 */
341 .octa 0x00000001d0d2b9ca000000018bf67b1e
342
343 /* x^157696 mod p(x)` << 1, x^157760 mod p(x)` << 1 */
344 .octa 0x0000000041e7b470000000007c09163e
345
346 /* x^156672 mod p(x)` << 1, x^156736 mod p(x)` << 1 */
347 .octa 0x00000001cbb6495e000000000adac060
348
349 /* x^155648 mod p(x)` << 1, x^155712 mod p(x)` << 1 */
350 .octa 0x000000010052a0b000000000bd8316ae
351
352 /* x^154624 mod p(x)` << 1, x^154688 mod p(x)` << 1 */
353 .octa 0x00000001d8effb5c000000019f09ab54
354
355 /* x^153600 mod p(x)` << 1, x^153664 mod p(x)` << 1 */
356 .octa 0x00000001d969853c0000000125155542
357
358 /* x^152576 mod p(x)` << 1, x^152640 mod p(x)` << 1 */
359 .octa 0x00000000523ccce2000000018fdb5882
360
361 /* x^151552 mod p(x)` << 1, x^151616 mod p(x)` << 1 */
362 .octa 0x000000001e2436bc00000000e794b3f4
363
364 /* x^150528 mod p(x)` << 1, x^150592 mod p(x)` << 1 */
365 .octa 0x00000000ddd1c3a2000000016f9bb022
366
367 /* x^149504 mod p(x)` << 1, x^149568 mod p(x)` << 1 */
368 .octa 0x0000000019fcfe3800000000290c9978
369
370 /* x^148480 mod p(x)` << 1, x^148544 mod p(x)` << 1 */
371 .octa 0x00000001ce95db640000000083c0f350
372
373 /* x^147456 mod p(x)` << 1, x^147520 mod p(x)` << 1 */
374 .octa 0x00000000af5828060000000173ea6628
375
376 /* x^146432 mod p(x)` << 1, x^146496 mod p(x)` << 1 */
377 .octa 0x00000001006388f600000001c8b4e00a
378
379 /* x^145408 mod p(x)` << 1, x^145472 mod p(x)` << 1 */
380 .octa 0x0000000179eca00a00000000de95d6aa
381
382 /* x^144384 mod p(x)` << 1, x^144448 mod p(x)` << 1 */
383 .octa 0x0000000122410a6a000000010b7f7248
384
385 /* x^143360 mod p(x)` << 1, x^143424 mod p(x)` << 1 */
386 .octa 0x000000004288e87c00000001326e3a06
387
388 /* x^142336 mod p(x)` << 1, x^142400 mod p(x)` << 1 */
389 .octa 0x000000016c5490da00000000bb62c2e6
390
391 /* x^141312 mod p(x)` << 1, x^141376 mod p(x)` << 1 */
392 .octa 0x00000000d1c71f6e0000000156a4b2c2
393
394 /* x^140288 mod p(x)` << 1, x^140352 mod p(x)` << 1 */
395 .octa 0x00000001b4ce08a6000000011dfe763a
396
397 /* x^139264 mod p(x)` << 1, x^139328 mod p(x)` << 1 */
398 .octa 0x00000001466ba60c000000007bcca8e2
399
400 /* x^138240 mod p(x)` << 1, x^138304 mod p(x)` << 1 */
401 .octa 0x00000001f6c488a40000000186118faa
402
403 /* x^137216 mod p(x)` << 1, x^137280 mod p(x)` << 1 */
404 .octa 0x000000013bfb06820000000111a65a88
405
406 /* x^136192 mod p(x)` << 1, x^136256 mod p(x)` << 1 */
407 .octa 0x00000000690e9e54000000003565e1c4
408
409 /* x^135168 mod p(x)` << 1, x^135232 mod p(x)` << 1 */
410 .octa 0x00000000281346b6000000012ed02a82
411
412 /* x^134144 mod p(x)` << 1, x^134208 mod p(x)` << 1 */
413 .octa 0x000000015646402400000000c486ecfc
414
415 /* x^133120 mod p(x)` << 1, x^133184 mod p(x)` << 1 */
416 .octa 0x000000016063a8dc0000000001b951b2
417
418 /* x^132096 mod p(x)` << 1, x^132160 mod p(x)` << 1 */
419 .octa 0x0000000116a663620000000048143916
420
421 /* x^131072 mod p(x)` << 1, x^131136 mod p(x)` << 1 */
422 .octa 0x000000017e8aa4d200000001dc2ae124
423
424 /* x^130048 mod p(x)` << 1, x^130112 mod p(x)` << 1 */
425 .octa 0x00000001728eb10c00000001416c58d6
426
427 /* x^129024 mod p(x)` << 1, x^129088 mod p(x)` << 1 */
428 .octa 0x00000001b08fd7fa00000000a479744a
429
430 /* x^128000 mod p(x)` << 1, x^128064 mod p(x)` << 1 */
431 .octa 0x00000001092a16e80000000096ca3a26
432
433 /* x^126976 mod p(x)` << 1, x^127040 mod p(x)` << 1 */
434 .octa 0x00000000a505637c00000000ff223d4e
435
436 /* x^125952 mod p(x)` << 1, x^126016 mod p(x)` << 1 */
437 .octa 0x00000000d94869b2000000010e84da42
438
439 /* x^124928 mod p(x)` << 1, x^124992 mod p(x)` << 1 */
440 .octa 0x00000001c8b203ae00000001b61ba3d0
441
442 /* x^123904 mod p(x)` << 1, x^123968 mod p(x)` << 1 */
443 .octa 0x000000005704aea000000000680f2de8
444
445 /* x^122880 mod p(x)` << 1, x^122944 mod p(x)` << 1 */
446 .octa 0x000000012e295fa2000000008772a9a8
447
448 /* x^121856 mod p(x)` << 1, x^121920 mod p(x)` << 1 */
449 .octa 0x000000011d0908bc0000000155f295bc
450
451 /* x^120832 mod p(x)` << 1, x^120896 mod p(x)` << 1 */
452 .octa 0x0000000193ed97ea00000000595f9282
453
454 /* x^119808 mod p(x)` << 1, x^119872 mod p(x)` << 1 */
455 .octa 0x000000013a0f1c520000000164b1c25a
456
457 /* x^118784 mod p(x)` << 1, x^118848 mod p(x)` << 1 */
458 .octa 0x000000010c2c40c000000000fbd67c50
459
460 /* x^117760 mod p(x)` << 1, x^117824 mod p(x)` << 1 */
461 .octa 0x00000000ff6fac3e0000000096076268
462
463 /* x^116736 mod p(x)` << 1, x^116800 mod p(x)` << 1 */
464 .octa 0x000000017b3609c000000001d288e4cc
465
466 /* x^115712 mod p(x)` << 1, x^115776 mod p(x)` << 1 */
467 .octa 0x0000000088c8c92200000001eaac1bdc
468
469 /* x^114688 mod p(x)` << 1, x^114752 mod p(x)` << 1 */
470 .octa 0x00000001751baae600000001f1ea39e2
471
472 /* x^113664 mod p(x)` << 1, x^113728 mod p(x)` << 1 */
473 .octa 0x000000010795297200000001eb6506fc
474
475 /* x^112640 mod p(x)` << 1, x^112704 mod p(x)` << 1 */
476 .octa 0x0000000162b00abe000000010f806ffe
477
478 /* x^111616 mod p(x)` << 1, x^111680 mod p(x)` << 1 */
479 .octa 0x000000000d7b404c000000010408481e
480
481 /* x^110592 mod p(x)` << 1, x^110656 mod p(x)` << 1 */
482 .octa 0x00000000763b13d40000000188260534
483
484 /* x^109568 mod p(x)` << 1, x^109632 mod p(x)` << 1 */
485 .octa 0x00000000f6dc22d80000000058fc73e0
486
487 /* x^108544 mod p(x)` << 1, x^108608 mod p(x)` << 1 */
488 .octa 0x000000007daae06000000000391c59b8
489
490 /* x^107520 mod p(x)` << 1, x^107584 mod p(x)` << 1 */
491 .octa 0x000000013359ab7c000000018b638400
492
493 /* x^106496 mod p(x)` << 1, x^106560 mod p(x)` << 1 */
494 .octa 0x000000008add438a000000011738f5c4
495
496 /* x^105472 mod p(x)` << 1, x^105536 mod p(x)` << 1 */
497 .octa 0x00000001edbefdea000000008cf7c6da
498
499 /* x^104448 mod p(x)` << 1, x^104512 mod p(x)` << 1 */
500 .octa 0x000000004104e0f800000001ef97fb16
501
502 /* x^103424 mod p(x)` << 1, x^103488 mod p(x)` << 1 */
503 .octa 0x00000000b48a82220000000102130e20
504
505 /* x^102400 mod p(x)` << 1, x^102464 mod p(x)` << 1 */
506 .octa 0x00000001bcb4684400000000db968898
507
508 /* x^101376 mod p(x)` << 1, x^101440 mod p(x)` << 1 */
509 .octa 0x000000013293ce0a00000000b5047b5e
510
511 /* x^100352 mod p(x)` << 1, x^100416 mod p(x)` << 1 */
512 .octa 0x00000001710d0844000000010b90fdb2
513
514 /* x^99328 mod p(x)` << 1, x^99392 mod p(x)` << 1 */
515 .octa 0x0000000117907f6e000000004834a32e
516
517 /* x^98304 mod p(x)` << 1, x^98368 mod p(x)` << 1 */
518 .octa 0x0000000087ddf93e0000000059c8f2b0
519
520 /* x^97280 mod p(x)` << 1, x^97344 mod p(x)` << 1 */
521 .octa 0x000000005970e9b00000000122cec508
522
523 /* x^96256 mod p(x)` << 1, x^96320 mod p(x)` << 1 */
524 .octa 0x0000000185b2b7d0000000000a330cda
525
526 /* x^95232 mod p(x)` << 1, x^95296 mod p(x)` << 1 */
527 .octa 0x00000001dcee0efc000000014a47148c
528
529 /* x^94208 mod p(x)` << 1, x^94272 mod p(x)` << 1 */
530 .octa 0x0000000030da27220000000042c61cb8
531
532 /* x^93184 mod p(x)` << 1, x^93248 mod p(x)` << 1 */
533 .octa 0x000000012f925a180000000012fe6960
534
535 /* x^92160 mod p(x)` << 1, x^92224 mod p(x)` << 1 */
536 .octa 0x00000000dd2e357c00000000dbda2c20
537
538 /* x^91136 mod p(x)` << 1, x^91200 mod p(x)` << 1 */
539 .octa 0x00000000071c80de000000011122410c
540
541 /* x^90112 mod p(x)` << 1, x^90176 mod p(x)` << 1 */
542 .octa 0x000000011513140a00000000977b2070
543
544 /* x^89088 mod p(x)` << 1, x^89152 mod p(x)` << 1 */
545 .octa 0x00000001df876e8e000000014050438e
546
547 /* x^88064 mod p(x)` << 1, x^88128 mod p(x)` << 1 */
548 .octa 0x000000015f81d6ce0000000147c840e8
549
550 /* x^87040 mod p(x)` << 1, x^87104 mod p(x)` << 1 */
551 .octa 0x000000019dd94dbe00000001cc7c88ce
552
553 /* x^86016 mod p(x)` << 1, x^86080 mod p(x)` << 1 */
554 .octa 0x00000001373d206e00000001476b35a4
555
556 /* x^84992 mod p(x)` << 1, x^85056 mod p(x)` << 1 */
557 .octa 0x00000000668ccade000000013d52d508
558
559 /* x^83968 mod p(x)` << 1, x^84032 mod p(x)` << 1 */
560 .octa 0x00000001b192d268000000008e4be32e
561
562 /* x^82944 mod p(x)` << 1, x^83008 mod p(x)` << 1 */
563 .octa 0x00000000e30f3a7800000000024120fe
564
565 /* x^81920 mod p(x)` << 1, x^81984 mod p(x)` << 1 */
566 .octa 0x000000010ef1f7bc00000000ddecddb4
567
568 /* x^80896 mod p(x)` << 1, x^80960 mod p(x)` << 1 */
569 .octa 0x00000001f5ac738000000000d4d403bc
570
571 /* x^79872 mod p(x)` << 1, x^79936 mod p(x)` << 1 */
572 .octa 0x000000011822ea7000000001734b89aa
573
574 /* x^78848 mod p(x)` << 1, x^78912 mod p(x)` << 1 */
575 .octa 0x00000000c3a33848000000010e7a58d6
576
577 /* x^77824 mod p(x)` << 1, x^77888 mod p(x)` << 1 */
578 .octa 0x00000001bd151c2400000001f9f04e9c
579
580 /* x^76800 mod p(x)` << 1, x^76864 mod p(x)` << 1 */
581 .octa 0x0000000056002d7600000000b692225e
582
583 /* x^75776 mod p(x)` << 1, x^75840 mod p(x)` << 1 */
584 .octa 0x000000014657c4f4000000019b8d3f3e
585
586 /* x^74752 mod p(x)` << 1, x^74816 mod p(x)` << 1 */
587 .octa 0x0000000113742d7c00000001a874f11e
588
589 /* x^73728 mod p(x)` << 1, x^73792 mod p(x)` << 1 */
590 .octa 0x000000019c5920ba000000010d5a4254
591
592 /* x^72704 mod p(x)` << 1, x^72768 mod p(x)` << 1 */
593 .octa 0x000000005216d2d600000000bbb2f5d6
594
595 /* x^71680 mod p(x)` << 1, x^71744 mod p(x)` << 1 */
596 .octa 0x0000000136f5ad8a0000000179cc0e36
597
598 /* x^70656 mod p(x)` << 1, x^70720 mod p(x)` << 1 */
599 .octa 0x000000018b07beb600000001dca1da4a
600
601 /* x^69632 mod p(x)` << 1, x^69696 mod p(x)` << 1 */
602 .octa 0x00000000db1e93b000000000feb1a192
603
604 /* x^68608 mod p(x)` << 1, x^68672 mod p(x)` << 1 */
605 .octa 0x000000000b96fa3a00000000d1eeedd6
606
607 /* x^67584 mod p(x)` << 1, x^67648 mod p(x)` << 1 */
608 .octa 0x00000001d9968af0000000008fad9bb4
609
610 /* x^66560 mod p(x)` << 1, x^66624 mod p(x)` << 1 */
611 .octa 0x000000000e4a77a200000001884938e4
612
613 /* x^65536 mod p(x)` << 1, x^65600 mod p(x)` << 1 */
614 .octa 0x00000000508c2ac800000001bc2e9bc0
615
616 /* x^64512 mod p(x)` << 1, x^64576 mod p(x)` << 1 */
617 .octa 0x0000000021572a8000000001f9658a68
618
619 /* x^63488 mod p(x)` << 1, x^63552 mod p(x)` << 1 */
620 .octa 0x00000001b859daf2000000001b9224fc
621
622 /* x^62464 mod p(x)` << 1, x^62528 mod p(x)` << 1 */
623 .octa 0x000000016f7884740000000055b2fb84
624
625 /* x^61440 mod p(x)` << 1, x^61504 mod p(x)` << 1 */
626 .octa 0x00000001b438810e000000018b090348
627
628 /* x^60416 mod p(x)` << 1, x^60480 mod p(x)` << 1 */
629 .octa 0x0000000095ddc6f2000000011ccbd5ea
630
631 /* x^59392 mod p(x)` << 1, x^59456 mod p(x)` << 1 */
632 .octa 0x00000001d977c20c0000000007ae47f8
633
634 /* x^58368 mod p(x)` << 1, x^58432 mod p(x)` << 1 */
635 .octa 0x00000000ebedb99a0000000172acbec0
636
637 /* x^57344 mod p(x)` << 1, x^57408 mod p(x)` << 1 */
638 .octa 0x00000001df9e9e9200000001c6e3ff20
639
640 /* x^56320 mod p(x)` << 1, x^56384 mod p(x)` << 1 */
641 .octa 0x00000001a4a3f95200000000e1b38744
642
643 /* x^55296 mod p(x)` << 1, x^55360 mod p(x)` << 1 */
644 .octa 0x00000000e2f5122000000000791585b2
645
646 /* x^54272 mod p(x)` << 1, x^54336 mod p(x)` << 1 */
647 .octa 0x000000004aa01f3e00000000ac53b894
648
649 /* x^53248 mod p(x)` << 1, x^53312 mod p(x)` << 1 */
650 .octa 0x00000000b3e90a5800000001ed5f2cf4
651
652 /* x^52224 mod p(x)` << 1, x^52288 mod p(x)` << 1 */
653 .octa 0x000000000c9ca2aa00000001df48b2e0
654
655 /* x^51200 mod p(x)` << 1, x^51264 mod p(x)` << 1 */
656 .octa 0x000000015168231600000000049c1c62
657
658 /* x^50176 mod p(x)` << 1, x^50240 mod p(x)` << 1 */
659 .octa 0x0000000036fce78c000000017c460c12
660
661 /* x^49152 mod p(x)` << 1, x^49216 mod p(x)` << 1 */
662 .octa 0x000000009037dc10000000015be4da7e
663
664 /* x^48128 mod p(x)` << 1, x^48192 mod p(x)` << 1 */
665 .octa 0x00000000d3298582000000010f38f668
666
667 /* x^47104 mod p(x)` << 1, x^47168 mod p(x)` << 1 */
668 .octa 0x00000001b42e8ad60000000039f40a00
669
670 /* x^46080 mod p(x)` << 1, x^46144 mod p(x)` << 1 */
671 .octa 0x00000000142a983800000000bd4c10c4
672
673 /* x^45056 mod p(x)` << 1, x^45120 mod p(x)` << 1 */
674 .octa 0x0000000109c7f1900000000042db1d98
675
676 /* x^44032 mod p(x)` << 1, x^44096 mod p(x)` << 1 */
677 .octa 0x0000000056ff931000000001c905bae6
678
679 /* x^43008 mod p(x)` << 1, x^43072 mod p(x)` << 1 */
680 .octa 0x00000001594513aa00000000069d40ea
681
682 /* x^41984 mod p(x)` << 1, x^42048 mod p(x)` << 1 */
683 .octa 0x00000001e3b5b1e8000000008e4fbad0
684
685 /* x^40960 mod p(x)` << 1, x^41024 mod p(x)` << 1 */
686 .octa 0x000000011dd5fc080000000047bedd46
687
688 /* x^39936 mod p(x)` << 1, x^40000 mod p(x)` << 1 */
689 .octa 0x00000001675f0cc20000000026396bf8
690
691 /* x^38912 mod p(x)` << 1, x^38976 mod p(x)` << 1 */
692 .octa 0x00000000d1c8dd4400000000379beb92
693
694 /* x^37888 mod p(x)` << 1, x^37952 mod p(x)` << 1 */
695 .octa 0x0000000115ebd3d8000000000abae54a
696
697 /* x^36864 mod p(x)` << 1, x^36928 mod p(x)` << 1 */
698 .octa 0x00000001ecbd0dac0000000007e6a128
699
700 /* x^35840 mod p(x)` << 1, x^35904 mod p(x)` << 1 */
701 .octa 0x00000000cdf67af2000000000ade29d2
702
703 /* x^34816 mod p(x)` << 1, x^34880 mod p(x)` << 1 */
704 .octa 0x000000004c01ff4c00000000f974c45c
705
706 /* x^33792 mod p(x)` << 1, x^33856 mod p(x)` << 1 */
707 .octa 0x00000000f2d8657e00000000e77ac60a
708
709 /* x^32768 mod p(x)` << 1, x^32832 mod p(x)` << 1 */
710 .octa 0x000000006bae74c40000000145895816
711
712 /* x^31744 mod p(x)` << 1, x^31808 mod p(x)` << 1 */
713 .octa 0x0000000152af8aa00000000038e362be
714
715 /* x^30720 mod p(x)` << 1, x^30784 mod p(x)` << 1 */
716 .octa 0x0000000004663802000000007f991a64
717
718 /* x^29696 mod p(x)` << 1, x^29760 mod p(x)` << 1 */
719 .octa 0x00000001ab2f5afc00000000fa366d3a
720
721 /* x^28672 mod p(x)` << 1, x^28736 mod p(x)` << 1 */
722 .octa 0x0000000074a4ebd400000001a2bb34f0
723
724 /* x^27648 mod p(x)` << 1, x^27712 mod p(x)` << 1 */
725 .octa 0x00000001d7ab3a4c0000000028a9981e
726
727 /* x^26624 mod p(x)` << 1, x^26688 mod p(x)` << 1 */
728 .octa 0x00000001a8da60c600000001dbc672be
729
730 /* x^25600 mod p(x)` << 1, x^25664 mod p(x)` << 1 */
731 .octa 0x000000013cf6382000000000b04d77f6
732
733 /* x^24576 mod p(x)` << 1, x^24640 mod p(x)` << 1 */
734 .octa 0x00000000bec12e1e0000000124400d96
735
736 /* x^23552 mod p(x)` << 1, x^23616 mod p(x)` << 1 */
737 .octa 0x00000001c6368010000000014ca4b414
738
739 /* x^22528 mod p(x)` << 1, x^22592 mod p(x)` << 1 */
740 .octa 0x00000001e6e78758000000012fe2c938
741
742 /* x^21504 mod p(x)` << 1, x^21568 mod p(x)` << 1 */
743 .octa 0x000000008d7f2b3c00000001faed01e6
744
745 /* x^20480 mod p(x)` << 1, x^20544 mod p(x)` << 1 */
746 .octa 0x000000016b4a156e000000007e80ecfe
747
748 /* x^19456 mod p(x)` << 1, x^19520 mod p(x)` << 1 */
749 .octa 0x00000001c63cfeb60000000098daee94
750
751 /* x^18432 mod p(x)` << 1, x^18496 mod p(x)` << 1 */
752 .octa 0x000000015f902670000000010a04edea
753
754 /* x^17408 mod p(x)` << 1, x^17472 mod p(x)` << 1 */
755 .octa 0x00000001cd5de11e00000001c00b4524
756
757 /* x^16384 mod p(x)` << 1, x^16448 mod p(x)` << 1 */
758 .octa 0x000000001acaec540000000170296550
759
760 /* x^15360 mod p(x)` << 1, x^15424 mod p(x)` << 1 */
761 .octa 0x000000002bd0ca780000000181afaa48
762
763 /* x^14336 mod p(x)` << 1, x^14400 mod p(x)` << 1 */
764 .octa 0x0000000032d63d5c0000000185a31ffa
765
766 /* x^13312 mod p(x)` << 1, x^13376 mod p(x)` << 1 */
767 .octa 0x000000001c6d4e4c000000002469f608
768
769 /* x^12288 mod p(x)` << 1, x^12352 mod p(x)` << 1 */
770 .octa 0x0000000106a60b92000000006980102a
771
772 /* x^11264 mod p(x)` << 1, x^11328 mod p(x)` << 1 */
773 .octa 0x00000000d3855e120000000111ea9ca8
774
775 /* x^10240 mod p(x)` << 1, x^10304 mod p(x)` << 1 */
776 .octa 0x00000000e312563600000001bd1d29ce
777
778 /* x^9216 mod p(x)` << 1, x^9280 mod p(x)` << 1 */
779 .octa 0x000000009e8f7ea400000001b34b9580
780
781 /* x^8192 mod p(x)` << 1, x^8256 mod p(x)` << 1 */
782 .octa 0x00000001c82e562c000000003076054e
783
784 /* x^7168 mod p(x)` << 1, x^7232 mod p(x)` << 1 */
785 .octa 0x00000000ca9f09ce000000012a608ea4
786
787 /* x^6144 mod p(x)` << 1, x^6208 mod p(x)` << 1 */
788 .octa 0x00000000c63764e600000000784d05fe
789
790 /* x^5120 mod p(x)` << 1, x^5184 mod p(x)` << 1 */
791 .octa 0x0000000168d2e49e000000016ef0d82a
792
793 /* x^4096 mod p(x)` << 1, x^4160 mod p(x)` << 1 */
794 .octa 0x00000000e986c1480000000075bda454
795
796 /* x^3072 mod p(x)` << 1, x^3136 mod p(x)` << 1 */
797 .octa 0x00000000cfb65894000000003dc0a1c4
798
799 /* x^2048 mod p(x)` << 1, x^2112 mod p(x)` << 1 */
800 .octa 0x0000000111cadee400000000e9a5d8be
801
802 /* x^1024 mod p(x)` << 1, x^1088 mod p(x)` << 1 */
803 .octa 0x0000000171fb63ce00000001609bc4b4
804
805.short_constants:
806
807 /* Reduce final 1024-2048 bits to 64 bits, shifting 32 bits to include the trailing 32 bits of zeros */
808 /* x^1952 mod p(x)`, x^1984 mod p(x)`, x^2016 mod p(x)`, x^2048 mod p(x)` */
809 .octa 0x7fec2963e5bf80485cf015c388e56f72
810
811 /* x^1824 mod p(x)`, x^1856 mod p(x)`, x^1888 mod p(x)`, x^1920 mod p(x)` */
812 .octa 0x38e888d4844752a9963a18920246e2e6
813
814 /* x^1696 mod p(x)`, x^1728 mod p(x)`, x^1760 mod p(x)`, x^1792 mod p(x)` */
815 .octa 0x42316c00730206ad419a441956993a31
816
817 /* x^1568 mod p(x)`, x^1600 mod p(x)`, x^1632 mod p(x)`, x^1664 mod p(x)` */
818 .octa 0x543d5c543e65ddf9924752ba2b830011
819
820 /* x^1440 mod p(x)`, x^1472 mod p(x)`, x^1504 mod p(x)`, x^1536 mod p(x)` */
821 .octa 0x78e87aaf56767c9255bd7f9518e4a304
822
823 /* x^1312 mod p(x)`, x^1344 mod p(x)`, x^1376 mod p(x)`, x^1408 mod p(x)` */
824 .octa 0x8f68fcec1903da7f6d76739fe0553f1e
825
826 /* x^1184 mod p(x)`, x^1216 mod p(x)`, x^1248 mod p(x)`, x^1280 mod p(x)` */
827 .octa 0x3f4840246791d588c133722b1fe0b5c3
828
829 /* x^1056 mod p(x)`, x^1088 mod p(x)`, x^1120 mod p(x)`, x^1152 mod p(x)` */
830 .octa 0x34c96751b04de25a64b67ee0e55ef1f3
831
832 /* x^928 mod p(x)`, x^960 mod p(x)`, x^992 mod p(x)`, x^1024 mod p(x)` */
833 .octa 0x156c8e180b4a395b069db049b8fdb1e7
834
835 /* x^800 mod p(x)`, x^832 mod p(x)`, x^864 mod p(x)`, x^896 mod p(x)` */
836 .octa 0xe0b99ccbe661f7bea11bfaf3c9e90b9e
837
838 /* x^672 mod p(x)`, x^704 mod p(x)`, x^736 mod p(x)`, x^768 mod p(x)` */
839 .octa 0x041d37768cd75659817cdc5119b29a35
840
841 /* x^544 mod p(x)`, x^576 mod p(x)`, x^608 mod p(x)`, x^640 mod p(x)` */
842 .octa 0x3a0777818cfaa9651ce9d94b36c41f1c
843
844 /* x^416 mod p(x)`, x^448 mod p(x)`, x^480 mod p(x)`, x^512 mod p(x)` */
845 .octa 0x0e148e8252377a554f256efcb82be955
846
847 /* x^288 mod p(x)`, x^320 mod p(x)`, x^352 mod p(x)`, x^384 mod p(x)` */
848 .octa 0x9c25531d19e65ddeec1631edb2dea967
849
850 /* x^160 mod p(x)`, x^192 mod p(x)`, x^224 mod p(x)`, x^256 mod p(x)` */
851 .octa 0x790606ff9957c0a65d27e147510ac59a
852
853 /* x^32 mod p(x)`, x^64 mod p(x)`, x^96 mod p(x)`, x^128 mod p(x)` */
854 .octa 0x82f63b786ea2d55ca66805eb18b8ea18
855
856
857.barrett_constants:
858 /* 33 bit reflected Barrett constant m - (4^32)/n */
859 .octa 0x000000000000000000000000dea713f1 /* x^64 div p(x)` */
860 /* 33 bit reflected Barrett constant n */
861 .octa 0x00000000000000000000000105ec76f1
862
863 .text
864
865#if defined(__BIG_ENDIAN__)
866#define BYTESWAP_DATA
867#else
868#undef BYTESWAP_DATA
869#endif
870
871#define off16 r25
872#define off32 r26
873#define off48 r27
874#define off64 r28
875#define off80 r29
876#define off96 r30
877#define off112 r31
878
879#define const1 v24
880#define const2 v25
881
882#define byteswap v26
883#define mask_32bit v27
884#define mask_64bit v28
885#define zeroes v29
886
887#ifdef BYTESWAP_DATA
888#define VPERM(A, B, C, D) vperm A, B, C, D
889#else
890#define VPERM(A, B, C, D)
891#endif
892
893/* unsigned int __crc32c_vpmsum(unsigned int crc, void *p, unsigned long len) */
894FUNC_START(__crc32c_vpmsum)
895 std r31,-8(r1)
896 std r30,-16(r1)
897 std r29,-24(r1)
898 std r28,-32(r1)
899 std r27,-40(r1)
900 std r26,-48(r1)
901 std r25,-56(r1)
902
903 li off16,16
904 li off32,32
905 li off48,48
906 li off64,64
907 li off80,80
908 li off96,96
909 li off112,112
910 li r0,0
911
 912 /* Enough room for saving 10 non-volatile VMX registers */
913 subi r6,r1,56+10*16
914 subi r7,r1,56+2*16
915
916 stvx v20,0,r6
917 stvx v21,off16,r6
918 stvx v22,off32,r6
919 stvx v23,off48,r6
920 stvx v24,off64,r6
921 stvx v25,off80,r6
922 stvx v26,off96,r6
923 stvx v27,off112,r6
924 stvx v28,0,r7
925 stvx v29,off16,r7
926
927 mr r10,r3
928
929 vxor zeroes,zeroes,zeroes
930 vspltisw v0,-1
931
932 vsldoi mask_32bit,zeroes,v0,4
933 vsldoi mask_64bit,zeroes,v0,8
934
935 /* Get the initial value into v8 */
936 vxor v8,v8,v8
937 MTVRD(v8, R3)
938 vsldoi v8,zeroes,v8,8 /* shift into bottom 32 bits */
939
940#ifdef BYTESWAP_DATA
941 addis r3,r2,.byteswap_constant@toc@ha
942 addi r3,r3,.byteswap_constant@toc@l
943
944 lvx byteswap,0,r3
945 addi r3,r3,16
946#endif
947
948 cmpdi r5,256
949 blt .Lshort
950
951 rldicr r6,r5,0,56
952
953 /* Checksum in blocks of MAX_SIZE */
9541: lis r7,MAX_SIZE@h
955 ori r7,r7,MAX_SIZE@l
956 mr r9,r7
957 cmpd r6,r7
958 bgt 2f
959 mr r7,r6
9602: subf r6,r7,r6
961
962 /* our main loop does 128 bytes at a time */
963 srdi r7,r7,7
964
 965 /*
 966 * Work out the offset into the constants table to start at. Each
 967 * 16-byte constant is used against 128 bytes of input data, so the
 968 * table is 1/8th the size of the data it covers (128 / 16 = 8).
 969 */
970 sldi r8,r7,4
971 srdi r9,r9,3
972 subf r8,r8,r9
973
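A small userspace model of that offset calculation may help when reading the register juggling above: one 16-byte constant per 128-byte block means the table for a full chunk is max_size/8 bytes long, and shorter chunks start partway into it. This is an illustrative sketch only; max_size stands in for the MAX_SIZE constant defined earlier in this file.

#include <stddef.h>

/* Model of the r7/r8/r9 arithmetic above (not kernel code). */
static size_t constants_offset(size_t chunk_bytes, size_t max_size)
{
	size_t blocks      = chunk_bytes / 128;	/* srdi r7,r7,7 */
	size_t table_bytes = max_size / 8;	/* srdi r9,r9,3 */

	return table_bytes - blocks * 16;	/* sldi + subf: skip the
						   leading constants we
						   will not use */
}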
974 /* We reduce our final 128 bytes in a separate step */
975 addi r7,r7,-1
976 mtctr r7
977
978 addis r3,r2,.constants@toc@ha
979 addi r3,r3,.constants@toc@l
980
981 /* Find the start of our constants */
982 add r3,r3,r8
983
984 /* zero v0-v7 which will contain our checksums */
985 vxor v0,v0,v0
986 vxor v1,v1,v1
987 vxor v2,v2,v2
988 vxor v3,v3,v3
989 vxor v4,v4,v4
990 vxor v5,v5,v5
991 vxor v6,v6,v6
992 vxor v7,v7,v7
993
994 lvx const1,0,r3
995
996 /*
997 * If we are looping back to consume more data we use the values
998 * already in v16-v23.
999 */
1000 cmpdi r0,1
1001 beq 2f
1002
1003 /* First warm up pass */
1004 lvx v16,0,r4
1005 lvx v17,off16,r4
1006 VPERM(v16,v16,v16,byteswap)
1007 VPERM(v17,v17,v17,byteswap)
1008 lvx v18,off32,r4
1009 lvx v19,off48,r4
1010 VPERM(v18,v18,v18,byteswap)
1011 VPERM(v19,v19,v19,byteswap)
1012 lvx v20,off64,r4
1013 lvx v21,off80,r4
1014 VPERM(v20,v20,v20,byteswap)
1015 VPERM(v21,v21,v21,byteswap)
1016 lvx v22,off96,r4
1017 lvx v23,off112,r4
1018 VPERM(v22,v22,v22,byteswap)
1019 VPERM(v23,v23,v23,byteswap)
1020 addi r4,r4,8*16
1021
1022 /* xor in initial value */
1023 vxor v16,v16,v8
1024
10252: bdz .Lfirst_warm_up_done
1026
1027 addi r3,r3,16
1028 lvx const2,0,r3
1029
1030 /* Second warm up pass */
1031 VPMSUMD(v8,v16,const1)
1032 lvx v16,0,r4
1033 VPERM(v16,v16,v16,byteswap)
1034 ori r2,r2,0
1035
1036 VPMSUMD(v9,v17,const1)
1037 lvx v17,off16,r4
1038 VPERM(v17,v17,v17,byteswap)
1039 ori r2,r2,0
1040
1041 VPMSUMD(v10,v18,const1)
1042 lvx v18,off32,r4
1043 VPERM(v18,v18,v18,byteswap)
1044 ori r2,r2,0
1045
1046 VPMSUMD(v11,v19,const1)
1047 lvx v19,off48,r4
1048 VPERM(v19,v19,v19,byteswap)
1049 ori r2,r2,0
1050
1051 VPMSUMD(v12,v20,const1)
1052 lvx v20,off64,r4
1053 VPERM(v20,v20,v20,byteswap)
1054 ori r2,r2,0
1055
1056 VPMSUMD(v13,v21,const1)
1057 lvx v21,off80,r4
1058 VPERM(v21,v21,v21,byteswap)
1059 ori r2,r2,0
1060
1061 VPMSUMD(v14,v22,const1)
1062 lvx v22,off96,r4
1063 VPERM(v22,v22,v22,byteswap)
1064 ori r2,r2,0
1065
1066 VPMSUMD(v15,v23,const1)
1067 lvx v23,off112,r4
1068 VPERM(v23,v23,v23,byteswap)
1069
1070 addi r4,r4,8*16
1071
1072 bdz .Lfirst_cool_down
1073
 1074 /*
 1075 * Main loop. We modulo schedule it so that each block takes three
 1076 * iterations to complete: load in the first iteration, vpmsum in the
 1077 * second, xor into the accumulators in the third.
 1078 */
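A minimal, compilable C sketch of that pipeline shape (illustrative only: XOR with a constant stands in for vpmsumd, and the warm-up and cool-down passes handled by the surrounding labels are omitted):

#include <stdint.h>

uint64_t pipeline(const uint64_t *in, uint64_t c, int n)
{
	uint64_t acc = 0, prod = 0, data = 0;
	int i;

	for (i = 0; i < n; i++) {
		acc ^= prod;		/* vxor: fold the product started two iterations ago */
		prod = data ^ c;	/* stands in for vpmsumd on the block loaded last time */
		data = in[i];		/* lvx: fetch this iteration's block */
	}
	return acc;			/* two stages remain in flight; the real code drains
					   them in the cool down passes */
}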
1079 .balign 16
10804: lvx const1,0,r3
1081 addi r3,r3,16
1082 ori r2,r2,0
1083
1084 vxor v0,v0,v8
1085 VPMSUMD(v8,v16,const2)
1086 lvx v16,0,r4
1087 VPERM(v16,v16,v16,byteswap)
1088 ori r2,r2,0
1089
1090 vxor v1,v1,v9
1091 VPMSUMD(v9,v17,const2)
1092 lvx v17,off16,r4
1093 VPERM(v17,v17,v17,byteswap)
1094 ori r2,r2,0
1095
1096 vxor v2,v2,v10
1097 VPMSUMD(v10,v18,const2)
1098 lvx v18,off32,r4
1099 VPERM(v18,v18,v18,byteswap)
1100 ori r2,r2,0
1101
1102 vxor v3,v3,v11
1103 VPMSUMD(v11,v19,const2)
1104 lvx v19,off48,r4
1105 VPERM(v19,v19,v19,byteswap)
1106 lvx const2,0,r3
1107 ori r2,r2,0
1108
1109 vxor v4,v4,v12
1110 VPMSUMD(v12,v20,const1)
1111 lvx v20,off64,r4
1112 VPERM(v20,v20,v20,byteswap)
1113 ori r2,r2,0
1114
1115 vxor v5,v5,v13
1116 VPMSUMD(v13,v21,const1)
1117 lvx v21,off80,r4
1118 VPERM(v21,v21,v21,byteswap)
1119 ori r2,r2,0
1120
1121 vxor v6,v6,v14
1122 VPMSUMD(v14,v22,const1)
1123 lvx v22,off96,r4
1124 VPERM(v22,v22,v22,byteswap)
1125 ori r2,r2,0
1126
1127 vxor v7,v7,v15
1128 VPMSUMD(v15,v23,const1)
1129 lvx v23,off112,r4
1130 VPERM(v23,v23,v23,byteswap)
1131
1132 addi r4,r4,8*16
1133
1134 bdnz 4b
1135
1136.Lfirst_cool_down:
1137 /* First cool down pass */
1138 lvx const1,0,r3
1139 addi r3,r3,16
1140
1141 vxor v0,v0,v8
1142 VPMSUMD(v8,v16,const1)
1143 ori r2,r2,0
1144
1145 vxor v1,v1,v9
1146 VPMSUMD(v9,v17,const1)
1147 ori r2,r2,0
1148
1149 vxor v2,v2,v10
1150 VPMSUMD(v10,v18,const1)
1151 ori r2,r2,0
1152
1153 vxor v3,v3,v11
1154 VPMSUMD(v11,v19,const1)
1155 ori r2,r2,0
1156
1157 vxor v4,v4,v12
1158 VPMSUMD(v12,v20,const1)
1159 ori r2,r2,0
1160
1161 vxor v5,v5,v13
1162 VPMSUMD(v13,v21,const1)
1163 ori r2,r2,0
1164
1165 vxor v6,v6,v14
1166 VPMSUMD(v14,v22,const1)
1167 ori r2,r2,0
1168
1169 vxor v7,v7,v15
1170 VPMSUMD(v15,v23,const1)
1171 ori r2,r2,0
1172
1173.Lsecond_cool_down:
1174 /* Second cool down pass */
1175 vxor v0,v0,v8
1176 vxor v1,v1,v9
1177 vxor v2,v2,v10
1178 vxor v3,v3,v11
1179 vxor v4,v4,v12
1180 vxor v5,v5,v13
1181 vxor v6,v6,v14
1182 vxor v7,v7,v15
1183
1184 /*
1185 * vpmsumd produces a 96 bit result in the least significant bits
1186 * of the register. Since we are bit reflected we have to shift it
1187 * left 32 bits so it occupies the least significant bits in the
1188 * bit reflected domain.
1189 */
1190 vsldoi v0,v0,zeroes,4
1191 vsldoi v1,v1,zeroes,4
1192 vsldoi v2,v2,zeroes,4
1193 vsldoi v3,v3,zeroes,4
1194 vsldoi v4,v4,zeroes,4
1195 vsldoi v5,v5,zeroes,4
1196 vsldoi v6,v6,zeroes,4
1197 vsldoi v7,v7,zeroes,4
1198
1199 /* xor with last 1024 bits */
1200 lvx v8,0,r4
1201 lvx v9,off16,r4
1202 VPERM(v8,v8,v8,byteswap)
1203 VPERM(v9,v9,v9,byteswap)
1204 lvx v10,off32,r4
1205 lvx v11,off48,r4
1206 VPERM(v10,v10,v10,byteswap)
1207 VPERM(v11,v11,v11,byteswap)
1208 lvx v12,off64,r4
1209 lvx v13,off80,r4
1210 VPERM(v12,v12,v12,byteswap)
1211 VPERM(v13,v13,v13,byteswap)
1212 lvx v14,off96,r4
1213 lvx v15,off112,r4
1214 VPERM(v14,v14,v14,byteswap)
1215 VPERM(v15,v15,v15,byteswap)
1216
1217 addi r4,r4,8*16
1218
1219 vxor v16,v0,v8
1220 vxor v17,v1,v9
1221 vxor v18,v2,v10
1222 vxor v19,v3,v11
1223 vxor v20,v4,v12
1224 vxor v21,v5,v13
1225 vxor v22,v6,v14
1226 vxor v23,v7,v15
1227
1228 li r0,1
1229 cmpdi r6,0
1230 addi r6,r6,128
1231 bne 1b
1232
1233 /* Work out how many bytes we have left */
1234 andi. r5,r5,127
1235
1236 /* Calculate where in the constant table we need to start */
1237 subfic r6,r5,128
1238 add r3,r3,r6
1239
1240 /* How many 16 byte chunks are in the tail */
1241 srdi r7,r5,4
1242 mtctr r7
1243
1244 /*
1245 * Reduce the previously calculated 1024 bits to 64 bits, shifting
1246 * 32 bits to include the trailing 32 bits of zeros
1247 */
1248 lvx v0,0,r3
1249 lvx v1,off16,r3
1250 lvx v2,off32,r3
1251 lvx v3,off48,r3
1252 lvx v4,off64,r3
1253 lvx v5,off80,r3
1254 lvx v6,off96,r3
1255 lvx v7,off112,r3
1256 addi r3,r3,8*16
1257
1258 VPMSUMW(v0,v16,v0)
1259 VPMSUMW(v1,v17,v1)
1260 VPMSUMW(v2,v18,v2)
1261 VPMSUMW(v3,v19,v3)
1262 VPMSUMW(v4,v20,v4)
1263 VPMSUMW(v5,v21,v5)
1264 VPMSUMW(v6,v22,v6)
1265 VPMSUMW(v7,v23,v7)
1266
1267 /* Now reduce the tail (0 - 112 bytes) */
1268 cmpdi r7,0
1269 beq 1f
1270
1271 lvx v16,0,r4
1272 lvx v17,0,r3
1273 VPERM(v16,v16,v16,byteswap)
1274 VPMSUMW(v16,v16,v17)
1275 vxor v0,v0,v16
1276 bdz 1f
1277
1278 lvx v16,off16,r4
1279 lvx v17,off16,r3
1280 VPERM(v16,v16,v16,byteswap)
1281 VPMSUMW(v16,v16,v17)
1282 vxor v0,v0,v16
1283 bdz 1f
1284
1285 lvx v16,off32,r4
1286 lvx v17,off32,r3
1287 VPERM(v16,v16,v16,byteswap)
1288 VPMSUMW(v16,v16,v17)
1289 vxor v0,v0,v16
1290 bdz 1f
1291
1292 lvx v16,off48,r4
1293 lvx v17,off48,r3
1294 VPERM(v16,v16,v16,byteswap)
1295 VPMSUMW(v16,v16,v17)
1296 vxor v0,v0,v16
1297 bdz 1f
1298
1299 lvx v16,off64,r4
1300 lvx v17,off64,r3
1301 VPERM(v16,v16,v16,byteswap)
1302 VPMSUMW(v16,v16,v17)
1303 vxor v0,v0,v16
1304 bdz 1f
1305
1306 lvx v16,off80,r4
1307 lvx v17,off80,r3
1308 VPERM(v16,v16,v16,byteswap)
1309 VPMSUMW(v16,v16,v17)
1310 vxor v0,v0,v16
1311 bdz 1f
1312
1313 lvx v16,off96,r4
1314 lvx v17,off96,r3
1315 VPERM(v16,v16,v16,byteswap)
1316 VPMSUMW(v16,v16,v17)
1317 vxor v0,v0,v16
1318
1319 /* Now xor all the parallel chunks together */
13201: vxor v0,v0,v1
1321 vxor v2,v2,v3
1322 vxor v4,v4,v5
1323 vxor v6,v6,v7
1324
1325 vxor v0,v0,v2
1326 vxor v4,v4,v6
1327
1328 vxor v0,v0,v4
1329
1330.Lbarrett_reduction:
1331 /* Barrett constants */
1332 addis r3,r2,.barrett_constants@toc@ha
1333 addi r3,r3,.barrett_constants@toc@l
1334
1335 lvx const1,0,r3
1336 lvx const2,off16,r3
1337
1338 vsldoi v1,v0,v0,8
1339 vxor v0,v0,v1 /* xor two 64 bit results together */
1340
1341 /* shift left one bit */
1342 vspltisb v1,1
1343 vsl v0,v0,v1
1344
1345 vand v0,v0,mask_64bit
1346
1347 /*
1348 * The reflected version of Barrett reduction. Instead of bit
1349 * reflecting our data (which is expensive to do), we bit reflect our
1350 * constants and our algorithm, which means the intermediate data in
1351 * our vector registers goes from 0-63 instead of 63-0. We can reflect
1352 * the algorithm because we don't carry in mod 2 arithmetic.
1353 */
1354 vand v1,v0,mask_32bit /* bottom 32 bits of a */
1355 VPMSUMD(v1,v1,const1) /* ma */
 1356 vand v1,v1,mask_32bit /* bottom 32 bits of ma */
1357 VPMSUMD(v1,v1,const2) /* qn */
1358 vxor v0,v0,v1 /* a - qn, subtraction is xor in GF(2) */
1359
 1360 /*
 1361 * Since we are bit reflected, the result (i.e. the low 32 bits) is in
 1362 * the high 32 bits. We just need to shift it left 4 bytes:
 1363 * V0 [ 0 1 X 3 ]
 1364 * V0 [ 0 X 2 3 ]
 1365 */
1366 vsldoi v0,v0,zeroes,4 /* shift result into top 64 bits of */
1367
1368 /* Get it into r3 */
1369 MFVRD(R3, v0)
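The following userspace model walks the same reflected Barrett steps for a single 64-bit input (the value left in v0 after the two halves are xor'ed together and shifted left one bit). It is a reading aid only; clmul() is a naive carryless multiply and the constants mirror .barrett_constants above.

#include <stdint.h>

static uint64_t clmul(uint64_t a, uint64_t b)	/* carryless multiply, low 64 bits */
{
	uint64_t r = 0;
	int i;

	for (i = 0; i < 64; i++)
		if (b & (1ULL << i))
			r ^= a << i;
	return r;
}

static uint32_t barrett_reflected(uint64_t a)
{
	const uint64_t m = 0xdea713f1ULL;	/* x^64 div p(x)`       */
	const uint64_t n = 0x105ec76f1ULL;	/* reflected polynomial */
	uint64_t q;

	q = clmul(a & 0xffffffff, m) & 0xffffffff;	/* ma, bottom 32 bits */
	q = clmul(q, n);				/* qn                 */

	return (uint32_t)((a ^ q) >> 32);	/* the reflected result sits in
						   the high 32 bits of a ^ qn */
}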
1370
1371.Lout:
1372 subi r6,r1,56+10*16
1373 subi r7,r1,56+2*16
1374
1375 lvx v20,0,r6
1376 lvx v21,off16,r6
1377 lvx v22,off32,r6
1378 lvx v23,off48,r6
1379 lvx v24,off64,r6
1380 lvx v25,off80,r6
1381 lvx v26,off96,r6
1382 lvx v27,off112,r6
1383 lvx v28,0,r7
1384 lvx v29,off16,r7
1385
1386 ld r31,-8(r1)
1387 ld r30,-16(r1)
1388 ld r29,-24(r1)
1389 ld r28,-32(r1)
1390 ld r27,-40(r1)
1391 ld r26,-48(r1)
1392 ld r25,-56(r1)
1393
1394 blr
1395
1396.Lfirst_warm_up_done:
1397 lvx const1,0,r3
1398 addi r3,r3,16
1399
1400 VPMSUMD(v8,v16,const1)
1401 VPMSUMD(v9,v17,const1)
1402 VPMSUMD(v10,v18,const1)
1403 VPMSUMD(v11,v19,const1)
1404 VPMSUMD(v12,v20,const1)
1405 VPMSUMD(v13,v21,const1)
1406 VPMSUMD(v14,v22,const1)
1407 VPMSUMD(v15,v23,const1)
1408
1409 b .Lsecond_cool_down
1410
1411.Lshort:
1412 cmpdi r5,0
1413 beq .Lzero
1414
1415 addis r3,r2,.short_constants@toc@ha
1416 addi r3,r3,.short_constants@toc@l
1417
1418 /* Calculate where in the constant table we need to start */
1419 subfic r6,r5,256
1420 add r3,r3,r6
1421
1422 /* How many 16 byte chunks? */
1423 srdi r7,r5,4
1424 mtctr r7
1425
1426 vxor v19,v19,v19
1427 vxor v20,v20,v20
1428
1429 lvx v0,0,r4
1430 lvx v16,0,r3
1431 VPERM(v0,v0,v16,byteswap)
1432 vxor v0,v0,v8 /* xor in initial value */
1433 VPMSUMW(v0,v0,v16)
1434 bdz .Lv0
1435
1436 lvx v1,off16,r4
1437 lvx v17,off16,r3
1438 VPERM(v1,v1,v17,byteswap)
1439 VPMSUMW(v1,v1,v17)
1440 bdz .Lv1
1441
1442 lvx v2,off32,r4
1443 lvx v16,off32,r3
1444 VPERM(v2,v2,v16,byteswap)
1445 VPMSUMW(v2,v2,v16)
1446 bdz .Lv2
1447
1448 lvx v3,off48,r4
1449 lvx v17,off48,r3
1450 VPERM(v3,v3,v17,byteswap)
1451 VPMSUMW(v3,v3,v17)
1452 bdz .Lv3
1453
1454 lvx v4,off64,r4
1455 lvx v16,off64,r3
1456 VPERM(v4,v4,v16,byteswap)
1457 VPMSUMW(v4,v4,v16)
1458 bdz .Lv4
1459
1460 lvx v5,off80,r4
1461 lvx v17,off80,r3
1462 VPERM(v5,v5,v17,byteswap)
1463 VPMSUMW(v5,v5,v17)
1464 bdz .Lv5
1465
1466 lvx v6,off96,r4
1467 lvx v16,off96,r3
1468 VPERM(v6,v6,v16,byteswap)
1469 VPMSUMW(v6,v6,v16)
1470 bdz .Lv6
1471
1472 lvx v7,off112,r4
1473 lvx v17,off112,r3
1474 VPERM(v7,v7,v17,byteswap)
1475 VPMSUMW(v7,v7,v17)
1476 bdz .Lv7
1477
1478 addi r3,r3,128
1479 addi r4,r4,128
1480
1481 lvx v8,0,r4
1482 lvx v16,0,r3
1483 VPERM(v8,v8,v16,byteswap)
1484 VPMSUMW(v8,v8,v16)
1485 bdz .Lv8
1486
1487 lvx v9,off16,r4
1488 lvx v17,off16,r3
1489 VPERM(v9,v9,v17,byteswap)
1490 VPMSUMW(v9,v9,v17)
1491 bdz .Lv9
1492
1493 lvx v10,off32,r4
1494 lvx v16,off32,r3
1495 VPERM(v10,v10,v16,byteswap)
1496 VPMSUMW(v10,v10,v16)
1497 bdz .Lv10
1498
1499 lvx v11,off48,r4
1500 lvx v17,off48,r3
1501 VPERM(v11,v11,v17,byteswap)
1502 VPMSUMW(v11,v11,v17)
1503 bdz .Lv11
1504
1505 lvx v12,off64,r4
1506 lvx v16,off64,r3
1507 VPERM(v12,v12,v16,byteswap)
1508 VPMSUMW(v12,v12,v16)
1509 bdz .Lv12
1510
1511 lvx v13,off80,r4
1512 lvx v17,off80,r3
1513 VPERM(v13,v13,v17,byteswap)
1514 VPMSUMW(v13,v13,v17)
1515 bdz .Lv13
1516
1517 lvx v14,off96,r4
1518 lvx v16,off96,r3
1519 VPERM(v14,v14,v16,byteswap)
1520 VPMSUMW(v14,v14,v16)
1521 bdz .Lv14
1522
1523 lvx v15,off112,r4
1524 lvx v17,off112,r3
1525 VPERM(v15,v15,v17,byteswap)
1526 VPMSUMW(v15,v15,v17)
1527
1528.Lv15: vxor v19,v19,v15
1529.Lv14: vxor v20,v20,v14
1530.Lv13: vxor v19,v19,v13
1531.Lv12: vxor v20,v20,v12
1532.Lv11: vxor v19,v19,v11
1533.Lv10: vxor v20,v20,v10
1534.Lv9: vxor v19,v19,v9
1535.Lv8: vxor v20,v20,v8
1536.Lv7: vxor v19,v19,v7
1537.Lv6: vxor v20,v20,v6
1538.Lv5: vxor v19,v19,v5
1539.Lv4: vxor v20,v20,v4
1540.Lv3: vxor v19,v19,v3
1541.Lv2: vxor v20,v20,v2
1542.Lv1: vxor v19,v19,v1
1543.Lv0: vxor v20,v20,v0
1544
1545 vxor v0,v19,v20
1546
1547 b .Lbarrett_reduction
1548
1549.Lzero:
1550 mr r3,r10
1551 b .Lout
1552
1553FUNC_END(__crc32c_vpmsum)
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
new file mode 100644
index 000000000000..bfe3d37a24ef
--- /dev/null
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -0,0 +1,167 @@
1#include <linux/crc32.h>
2#include <crypto/internal/hash.h>
3#include <linux/init.h>
4#include <linux/module.h>
5#include <linux/string.h>
6#include <linux/kernel.h>
7#include <asm/switch_to.h>
8
9#define CHKSUM_BLOCK_SIZE 1
10#define CHKSUM_DIGEST_SIZE 4
11
12#define VMX_ALIGN 16
13#define VMX_ALIGN_MASK (VMX_ALIGN-1)
14
15#define VECTOR_BREAKPOINT 512
16
17u32 __crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len);
18
19static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
20{
21 unsigned int prealign;
22 unsigned int tail;
23
24 if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || in_interrupt())
25 return __crc32c_le(crc, p, len);
26
27 if ((unsigned long)p & VMX_ALIGN_MASK) {
28 prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK);
29 crc = __crc32c_le(crc, p, prealign);
30 len -= prealign;
31 p += prealign;
32 }
33
34 if (len & ~VMX_ALIGN_MASK) {
35 pagefault_disable();
36 enable_kernel_altivec();
37 crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
38 pagefault_enable();
39 }
40
41 tail = len & VMX_ALIGN_MASK;
42 if (tail) {
43 p += len & ~VMX_ALIGN_MASK;
44 crc = __crc32c_le(crc, p, tail);
45 }
46
47 return crc;
48}
49
50static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
51{
52 u32 *key = crypto_tfm_ctx(tfm);
53
54 *key = 0;
55
56 return 0;
57}
58
59/*
60 * Setting the seed allows arbitrary accumulators and a flexible XOR policy.
61 * If your algorithm starts with ~0, then XOR with ~0 before you set
62 * the seed.
63 */
64static int crc32c_vpmsum_setkey(struct crypto_shash *hash, const u8 *key,
65 unsigned int keylen)
66{
67 u32 *mctx = crypto_shash_ctx(hash);
68
69 if (keylen != sizeof(u32)) {
70 crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
71 return -EINVAL;
72 }
73 *mctx = le32_to_cpup((__le32 *)key);
74 return 0;
75}
76
77static int crc32c_vpmsum_init(struct shash_desc *desc)
78{
79 u32 *mctx = crypto_shash_ctx(desc->tfm);
80 u32 *crcp = shash_desc_ctx(desc);
81
82 *crcp = *mctx;
83
84 return 0;
85}
86
87static int crc32c_vpmsum_update(struct shash_desc *desc, const u8 *data,
88 unsigned int len)
89{
90 u32 *crcp = shash_desc_ctx(desc);
91
92 *crcp = crc32c_vpmsum(*crcp, data, len);
93
94 return 0;
95}
96
97static int __crc32c_vpmsum_finup(u32 *crcp, const u8 *data, unsigned int len,
98 u8 *out)
99{
100 *(__le32 *)out = ~cpu_to_le32(crc32c_vpmsum(*crcp, data, len));
101
102 return 0;
103}
104
105static int crc32c_vpmsum_finup(struct shash_desc *desc, const u8 *data,
106 unsigned int len, u8 *out)
107{
108 return __crc32c_vpmsum_finup(shash_desc_ctx(desc), data, len, out);
109}
110
111static int crc32c_vpmsum_final(struct shash_desc *desc, u8 *out)
112{
113 u32 *crcp = shash_desc_ctx(desc);
114
115 *(__le32 *)out = ~cpu_to_le32p(crcp);
116
117 return 0;
118}
119
120static int crc32c_vpmsum_digest(struct shash_desc *desc, const u8 *data,
121 unsigned int len, u8 *out)
122{
123 return __crc32c_vpmsum_finup(crypto_shash_ctx(desc->tfm), data, len,
124 out);
125}
126
127static struct shash_alg alg = {
128 .setkey = crc32c_vpmsum_setkey,
129 .init = crc32c_vpmsum_init,
130 .update = crc32c_vpmsum_update,
131 .final = crc32c_vpmsum_final,
132 .finup = crc32c_vpmsum_finup,
133 .digest = crc32c_vpmsum_digest,
134 .descsize = sizeof(u32),
135 .digestsize = CHKSUM_DIGEST_SIZE,
136 .base = {
137 .cra_name = "crc32c",
138 .cra_driver_name = "crc32c-vpmsum",
139 .cra_priority = 200,
140 .cra_blocksize = CHKSUM_BLOCK_SIZE,
141 .cra_ctxsize = sizeof(u32),
142 .cra_module = THIS_MODULE,
143 .cra_init = crc32c_vpmsum_cra_init,
144 }
145};
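To show how the registered transform is consumed, here is a minimal sketch of a kernel caller computing a one-shot CRC32C through the shash API. It assumes the conventional ~0 seed used by the generic crc32c driver; the function name and error handling are illustrative, not part of this patch.

#include <crypto/hash.h>
#include <linux/err.h>

static int crc32c_oneshot(const void *data, unsigned int len, __le32 *out)
{
	struct crypto_shash *tfm;
	__le32 seed = cpu_to_le32(~0);		/* standard CRC32C starting value */
	int err;

	tfm = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, (const u8 *)&seed, sizeof(seed));
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		/* out receives the inverted CRC, little endian, as final() produces */
		err = crypto_shash_digest(desc, data, len, (u8 *)out);
	}

	crypto_free_shash(tfm);
	return err;
}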
146
147static int __init crc32c_vpmsum_mod_init(void)
148{
149 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
150 return -ENODEV;
151
152 return crypto_register_shash(&alg);
153}
154
155static void __exit crc32c_vpmsum_mod_fini(void)
156{
157 crypto_unregister_shash(&alg);
158}
159
160module_init(crc32c_vpmsum_mod_init);
161module_exit(crc32c_vpmsum_mod_fini);
162
163MODULE_AUTHOR("Anton Blanchard <anton@samba.org>");
164MODULE_DESCRIPTION("CRC32C using vector polynomial multiply-sum instructions");
165MODULE_LICENSE("GPL");
166MODULE_ALIAS_CRYPTO("crc32c");
167MODULE_ALIAS_CRYPTO("crc32c-vpmsum");
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 1d035c1cc889..49cd8760aa7c 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -174,6 +174,8 @@
174#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff 174#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff
175#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6 175#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
176#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff 176#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff
177#define PPC_INST_MFVSRD 0x7c000066
178#define PPC_INST_MTVSRD 0x7c000166
177#define PPC_INST_SLBFEE 0x7c0007a7 179#define PPC_INST_SLBFEE 0x7c0007a7
178 180
179#define PPC_INST_STRING 0x7c00042a 181#define PPC_INST_STRING 0x7c00042a
@@ -188,6 +190,8 @@
188#define PPC_INST_WAIT 0x7c00007c 190#define PPC_INST_WAIT 0x7c00007c
189#define PPC_INST_TLBIVAX 0x7c000624 191#define PPC_INST_TLBIVAX 0x7c000624
190#define PPC_INST_TLBSRX_DOT 0x7c0006a5 192#define PPC_INST_TLBSRX_DOT 0x7c0006a5
193#define PPC_INST_VPMSUMW 0x10000488
194#define PPC_INST_VPMSUMD 0x100004c8
191#define PPC_INST_XXLOR 0xf0000510 195#define PPC_INST_XXLOR 0xf0000510
192#define PPC_INST_XXSWAPD 0xf0000250 196#define PPC_INST_XXSWAPD 0xf0000250
193#define PPC_INST_XVCPSGNDP 0xf0000780 197#define PPC_INST_XVCPSGNDP 0xf0000780
@@ -359,6 +363,14 @@
359 VSX_XX1((s), a, b)) 363 VSX_XX1((s), a, b))
360#define LXVD2X(s, a, b) stringify_in_c(.long PPC_INST_LXVD2X | \ 364#define LXVD2X(s, a, b) stringify_in_c(.long PPC_INST_LXVD2X | \
361 VSX_XX1((s), a, b)) 365 VSX_XX1((s), a, b))
366#define MFVRD(a, t) stringify_in_c(.long PPC_INST_MFVSRD | \
367 VSX_XX1((t)+32, a, R0))
368#define MTVRD(t, a) stringify_in_c(.long PPC_INST_MTVSRD | \
369 VSX_XX1((t)+32, a, R0))
370#define VPMSUMW(t, a, b) stringify_in_c(.long PPC_INST_VPMSUMW | \
371 VSX_XX3((t), a, b))
372#define VPMSUMD(t, a, b) stringify_in_c(.long PPC_INST_VPMSUMD | \
373 VSX_XX3((t), a, b))
362#define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \ 374#define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \
363 VSX_XX3((t), a, b)) 375 VSX_XX3((t), a, b))
364#define XXSWAPD(t, a) stringify_in_c(.long PPC_INST_XXSWAPD | \ 376#define XXSWAPD(t, a) stringify_in_c(.long PPC_INST_XXSWAPD | \
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 2b31632376a5..051af612a7e1 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -286,6 +286,9 @@ n:
286 286
287#endif 287#endif
288 288
289#define FUNC_START(name) _GLOBAL(name)
290#define FUNC_END(name)
291
289/* 292/*
290 * LOAD_REG_IMMEDIATE(rn, expr) 293 * LOAD_REG_IMMEDIATE(rn, expr)
291 * Loads the value of the constant expression 'expr' into register 'rn' 294 * Loads the value of the constant expression 'expr' into register 'rn'
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
index 12e48d56f771..3963f0b68d52 100644
--- a/arch/powerpc/kernel/iomap.c
+++ b/arch/powerpc/kernel/iomap.c
@@ -38,6 +38,18 @@ EXPORT_SYMBOL(ioread16);
38EXPORT_SYMBOL(ioread16be); 38EXPORT_SYMBOL(ioread16be);
39EXPORT_SYMBOL(ioread32); 39EXPORT_SYMBOL(ioread32);
40EXPORT_SYMBOL(ioread32be); 40EXPORT_SYMBOL(ioread32be);
41#ifdef __powerpc64__
42u64 ioread64(void __iomem *addr)
43{
44 return readq(addr);
45}
46u64 ioread64be(void __iomem *addr)
47{
48 return readq_be(addr);
49}
50EXPORT_SYMBOL(ioread64);
51EXPORT_SYMBOL(ioread64be);
52#endif /* __powerpc64__ */
41 53
42void iowrite8(u8 val, void __iomem *addr) 54void iowrite8(u8 val, void __iomem *addr)
43{ 55{
@@ -64,6 +76,18 @@ EXPORT_SYMBOL(iowrite16);
64EXPORT_SYMBOL(iowrite16be); 76EXPORT_SYMBOL(iowrite16be);
65EXPORT_SYMBOL(iowrite32); 77EXPORT_SYMBOL(iowrite32);
66EXPORT_SYMBOL(iowrite32be); 78EXPORT_SYMBOL(iowrite32be);
79#ifdef __powerpc64__
80void iowrite64(u64 val, void __iomem *addr)
81{
82 writeq(val, addr);
83}
84void iowrite64be(u64 val, void __iomem *addr)
85{
86 writeq_be(val, addr);
87}
88EXPORT_SYMBOL(iowrite64);
89EXPORT_SYMBOL(iowrite64be);
90#endif /* __powerpc64__ */
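A short sketch of how a ppc64 driver would use the new 64-bit accessors, symmetric with the existing 32-bit ioread/iowrite calls. The regs pointer and register offsets are made up for illustration.

#ifdef __powerpc64__
static u64 sample_read_counter(void __iomem *regs)
{
	iowrite64(0, regs + 0x08);	/* hypothetical 64-bit control register */
	return ioread64(regs + 0x10);	/* hypothetical 64-bit counter          */
}
#endif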
67 91
68/* 92/*
69 * These are the "repeat read/write" functions. Note the 93 * These are the "repeat read/write" functions. Note the
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 7554a8bb2adc..2ea18b050309 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -22,6 +22,7 @@
22 22
23#include <crypto/aes.h> 23#include <crypto/aes.h>
24#include <crypto/algapi.h> 24#include <crypto/algapi.h>
25#include <crypto/internal/skcipher.h>
25#include <linux/err.h> 26#include <linux/err.h>
26#include <linux/module.h> 27#include <linux/module.h>
27#include <linux/cpufeature.h> 28#include <linux/cpufeature.h>
@@ -44,7 +45,7 @@ struct s390_aes_ctx {
44 long dec; 45 long dec;
45 int key_len; 46 int key_len;
46 union { 47 union {
47 struct crypto_blkcipher *blk; 48 struct crypto_skcipher *blk;
48 struct crypto_cipher *cip; 49 struct crypto_cipher *cip;
49 } fallback; 50 } fallback;
50}; 51};
@@ -63,7 +64,7 @@ struct s390_xts_ctx {
63 long enc; 64 long enc;
64 long dec; 65 long dec;
65 int key_len; 66 int key_len;
66 struct crypto_blkcipher *fallback; 67 struct crypto_skcipher *fallback;
67}; 68};
68 69
69/* 70/*
@@ -237,16 +238,16 @@ static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
237 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); 238 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
238 unsigned int ret; 239 unsigned int ret;
239 240
240 sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; 241 crypto_skcipher_clear_flags(sctx->fallback.blk, CRYPTO_TFM_REQ_MASK);
241 sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags & 242 crypto_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
242 CRYPTO_TFM_REQ_MASK); 243 CRYPTO_TFM_REQ_MASK);
244
245 ret = crypto_skcipher_setkey(sctx->fallback.blk, key, len);
246
247 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
248 tfm->crt_flags |= crypto_skcipher_get_flags(sctx->fallback.blk) &
249 CRYPTO_TFM_RES_MASK;
243 250
244 ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len);
245 if (ret) {
246 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
247 tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
248 CRYPTO_TFM_RES_MASK);
249 }
250 return ret; 251 return ret;
251} 252}
252 253
@@ -255,15 +256,17 @@ static int fallback_blk_dec(struct blkcipher_desc *desc,
255 unsigned int nbytes) 256 unsigned int nbytes)
256{ 257{
257 unsigned int ret; 258 unsigned int ret;
258 struct crypto_blkcipher *tfm; 259 struct crypto_blkcipher *tfm = desc->tfm;
259 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); 260 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
261 SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
260 262
261 tfm = desc->tfm; 263 skcipher_request_set_tfm(req, sctx->fallback.blk);
262 desc->tfm = sctx->fallback.blk; 264 skcipher_request_set_callback(req, desc->flags, NULL, NULL);
265 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
263 266
264 ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes); 267 ret = crypto_skcipher_decrypt(req);
265 268
266 desc->tfm = tfm; 269 skcipher_request_zero(req);
267 return ret; 270 return ret;
268} 271}
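Pulled out of the side-by-side hunk above for readability, the converted fallback path now looks roughly like this (a sketch, not the exact committed text): an skcipher request is built on the stack, pointed at the fallback tfm, run synchronously, and then wiped.

static int fallback_blk_dec(struct blkcipher_desc *desc,
			    struct scatterlist *dst, struct scatterlist *src,
			    unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
	int ret;

	skcipher_request_set_tfm(req, sctx->fallback.blk);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);	/* wipe key material left on the stack */
	return ret;
}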
269 272
@@ -272,15 +275,15 @@ static int fallback_blk_enc(struct blkcipher_desc *desc,
272 unsigned int nbytes) 275 unsigned int nbytes)
273{ 276{
274 unsigned int ret; 277 unsigned int ret;
275 struct crypto_blkcipher *tfm; 278 struct crypto_blkcipher *tfm = desc->tfm;
276 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); 279 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
280 SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
277 281
278 tfm = desc->tfm; 282 skcipher_request_set_tfm(req, sctx->fallback.blk);
279 desc->tfm = sctx->fallback.blk; 283 skcipher_request_set_callback(req, desc->flags, NULL, NULL);
284 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
280 285
281 ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes); 286 ret = crypto_skcipher_encrypt(req);
282
283 desc->tfm = tfm;
284 return ret; 287 return ret;
285} 288}
286 289
@@ -370,8 +373,9 @@ static int fallback_init_blk(struct crypto_tfm *tfm)
370 const char *name = tfm->__crt_alg->cra_name; 373 const char *name = tfm->__crt_alg->cra_name;
371 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); 374 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
372 375
373 sctx->fallback.blk = crypto_alloc_blkcipher(name, 0, 376 sctx->fallback.blk = crypto_alloc_skcipher(name, 0,
374 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); 377 CRYPTO_ALG_ASYNC |
378 CRYPTO_ALG_NEED_FALLBACK);
375 379
376 if (IS_ERR(sctx->fallback.blk)) { 380 if (IS_ERR(sctx->fallback.blk)) {
377 pr_err("Allocating AES fallback algorithm %s failed\n", 381 pr_err("Allocating AES fallback algorithm %s failed\n",
@@ -386,8 +390,7 @@ static void fallback_exit_blk(struct crypto_tfm *tfm)
386{ 390{
387 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); 391 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
388 392
389 crypto_free_blkcipher(sctx->fallback.blk); 393 crypto_free_skcipher(sctx->fallback.blk);
390 sctx->fallback.blk = NULL;
391} 394}
392 395
393static struct crypto_alg ecb_aes_alg = { 396static struct crypto_alg ecb_aes_alg = {
@@ -536,16 +539,16 @@ static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
536 struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm); 539 struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
537 unsigned int ret; 540 unsigned int ret;
538 541
539 xts_ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; 542 crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
540 xts_ctx->fallback->base.crt_flags |= (tfm->crt_flags & 543 crypto_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
541 CRYPTO_TFM_REQ_MASK); 544 CRYPTO_TFM_REQ_MASK);
545
546 ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len);
547
548 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
549 tfm->crt_flags |= crypto_skcipher_get_flags(xts_ctx->fallback) &
550 CRYPTO_TFM_RES_MASK;
542 551
543 ret = crypto_blkcipher_setkey(xts_ctx->fallback, key, len);
544 if (ret) {
545 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
546 tfm->crt_flags |= (xts_ctx->fallback->base.crt_flags &
547 CRYPTO_TFM_RES_MASK);
548 }
549 return ret; 552 return ret;
550} 553}
551 554
@@ -553,16 +556,18 @@ static int xts_fallback_decrypt(struct blkcipher_desc *desc,
553 struct scatterlist *dst, struct scatterlist *src, 556 struct scatterlist *dst, struct scatterlist *src,
554 unsigned int nbytes) 557 unsigned int nbytes)
555{ 558{
556 struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm); 559 struct crypto_blkcipher *tfm = desc->tfm;
557 struct crypto_blkcipher *tfm; 560 struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
561 SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
558 unsigned int ret; 562 unsigned int ret;
559 563
560 tfm = desc->tfm; 564 skcipher_request_set_tfm(req, xts_ctx->fallback);
561 desc->tfm = xts_ctx->fallback; 565 skcipher_request_set_callback(req, desc->flags, NULL, NULL);
566 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
562 567
563 ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes); 568 ret = crypto_skcipher_decrypt(req);
564 569
565 desc->tfm = tfm; 570 skcipher_request_zero(req);
566 return ret; 571 return ret;
567} 572}
568 573
@@ -570,16 +575,18 @@ static int xts_fallback_encrypt(struct blkcipher_desc *desc,
570 struct scatterlist *dst, struct scatterlist *src, 575 struct scatterlist *dst, struct scatterlist *src,
571 unsigned int nbytes) 576 unsigned int nbytes)
572{ 577{
573 struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm); 578 struct crypto_blkcipher *tfm = desc->tfm;
574 struct crypto_blkcipher *tfm; 579 struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
580 SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
575 unsigned int ret; 581 unsigned int ret;
576 582
577 tfm = desc->tfm; 583 skcipher_request_set_tfm(req, xts_ctx->fallback);
578 desc->tfm = xts_ctx->fallback; 584 skcipher_request_set_callback(req, desc->flags, NULL, NULL);
585 skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
579 586
580 ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes); 587 ret = crypto_skcipher_encrypt(req);
581 588
582 desc->tfm = tfm; 589 skcipher_request_zero(req);
583 return ret; 590 return ret;
584} 591}
585 592
@@ -700,8 +707,9 @@ static int xts_fallback_init(struct crypto_tfm *tfm)
700 const char *name = tfm->__crt_alg->cra_name; 707 const char *name = tfm->__crt_alg->cra_name;
701 struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm); 708 struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
702 709
703 xts_ctx->fallback = crypto_alloc_blkcipher(name, 0, 710 xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
704 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); 711 CRYPTO_ALG_ASYNC |
712 CRYPTO_ALG_NEED_FALLBACK);
705 713
706 if (IS_ERR(xts_ctx->fallback)) { 714 if (IS_ERR(xts_ctx->fallback)) {
707 pr_err("Allocating XTS fallback algorithm %s failed\n", 715 pr_err("Allocating XTS fallback algorithm %s failed\n",
@@ -715,8 +723,7 @@ static void xts_fallback_exit(struct crypto_tfm *tfm)
715{ 723{
716 struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm); 724 struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
717 725
718 crypto_free_blkcipher(xts_ctx->fallback); 726 crypto_free_skcipher(xts_ctx->fallback);
719 xts_ctx->fallback = NULL;
720} 727}
721 728
722static struct crypto_alg xts_aes_alg = { 729static struct crypto_alg xts_aes_alg = {
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index b9b912a44d61..34b3fa2889d1 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -49,7 +49,9 @@ endif
49ifeq ($(avx2_supported),yes) 49ifeq ($(avx2_supported),yes)
50 obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o 50 obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
51 obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o 51 obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
52 obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/ 52 obj-$(CONFIG_CRYPTO_SHA1_MB) += sha1-mb/
53 obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb/
54 obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb/
53endif 55endif
54 56
55aes-i586-y := aes-i586-asm_32.o aes_glue.o 57aes-i586-y := aes-i586-asm_32.o aes_glue.o
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 5b7fa1471007..0ab5ee1c26af 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -59,17 +59,6 @@ struct aesni_rfc4106_gcm_ctx {
59 u8 nonce[4]; 59 u8 nonce[4];
60}; 60};
61 61
62struct aesni_gcm_set_hash_subkey_result {
63 int err;
64 struct completion completion;
65};
66
67struct aesni_hash_subkey_req_data {
68 u8 iv[16];
69 struct aesni_gcm_set_hash_subkey_result result;
70 struct scatterlist sg;
71};
72
73struct aesni_lrw_ctx { 62struct aesni_lrw_ctx {
74 struct lrw_table_ctx lrw_table; 63 struct lrw_table_ctx lrw_table;
75 u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1]; 64 u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
@@ -809,71 +798,28 @@ static void rfc4106_exit(struct crypto_aead *aead)
809 cryptd_free_aead(*ctx); 798 cryptd_free_aead(*ctx);
810} 799}
811 800
812static void
813rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
814{
815 struct aesni_gcm_set_hash_subkey_result *result = req->data;
816
817 if (err == -EINPROGRESS)
818 return;
819 result->err = err;
820 complete(&result->completion);
821}
822
823static int 801static int
824rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len) 802rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
825{ 803{
826 struct crypto_ablkcipher *ctr_tfm; 804 struct crypto_cipher *tfm;
827 struct ablkcipher_request *req; 805 int ret;
828 int ret = -EINVAL;
829 struct aesni_hash_subkey_req_data *req_data;
830 806
831 ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0); 807 tfm = crypto_alloc_cipher("aes", 0, 0);
832 if (IS_ERR(ctr_tfm)) 808 if (IS_ERR(tfm))
833 return PTR_ERR(ctr_tfm); 809 return PTR_ERR(tfm);
834 810
835 ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len); 811 ret = crypto_cipher_setkey(tfm, key, key_len);
836 if (ret) 812 if (ret)
837 goto out_free_ablkcipher; 813 goto out_free_cipher;
838
839 ret = -ENOMEM;
840 req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
841 if (!req)
842 goto out_free_ablkcipher;
843
844 req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
845 if (!req_data)
846 goto out_free_request;
847
848 memset(req_data->iv, 0, sizeof(req_data->iv));
849 814
850 /* Clear the data in the hash sub key container to zero.*/ 815 /* Clear the data in the hash sub key container to zero.*/
851 /* We want to cipher all zeros to create the hash sub key. */ 816 /* We want to cipher all zeros to create the hash sub key. */
852 memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE); 817 memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
853 818
854 init_completion(&req_data->result.completion); 819 crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);
855 sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE); 820
856 ablkcipher_request_set_tfm(req, ctr_tfm); 821out_free_cipher:
857 ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP | 822 crypto_free_cipher(tfm);
858 CRYPTO_TFM_REQ_MAY_BACKLOG,
859 rfc4106_set_hash_subkey_done,
860 &req_data->result);
861
862 ablkcipher_request_set_crypt(req, &req_data->sg,
863 &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
864
865 ret = crypto_ablkcipher_encrypt(req);
866 if (ret == -EINPROGRESS || ret == -EBUSY) {
867 ret = wait_for_completion_interruptible
868 (&req_data->result.completion);
869 if (!ret)
870 ret = req_data->result.err;
871 }
872 kfree(req_data);
873out_free_request:
874 ablkcipher_request_free(req);
875out_free_ablkcipher:
876 crypto_free_ablkcipher(ctr_tfm);
877 return ret; 823 return ret;
878} 824}
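The simplification works because the GHASH hash subkey is just the block cipher applied to an all-zero block, H = E_K(0^128), so a synchronous single-block "aes" cipher is enough and the old ctr(aes) request plus completion machinery can go. A standalone sketch of the same derivation (illustrative names, not the committed function):

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/string.h>

static int derive_hash_subkey(u8 subkey[16], const u8 *key, unsigned int keylen)
{
	struct crypto_cipher *aes;
	int err;

	aes = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(aes))
		return PTR_ERR(aes);

	err = crypto_cipher_setkey(aes, key, keylen);
	if (!err) {
		memset(subkey, 0, 16);
		crypto_cipher_encrypt_one(aes, subkey, subkey);	/* H = E_K(0) */
	}

	crypto_free_cipher(aes);
	return err;
}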
879 825
@@ -1098,9 +1044,12 @@ static int rfc4106_encrypt(struct aead_request *req)
1098 struct cryptd_aead **ctx = crypto_aead_ctx(tfm); 1044 struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
1099 struct cryptd_aead *cryptd_tfm = *ctx; 1045 struct cryptd_aead *cryptd_tfm = *ctx;
1100 1046
1101 aead_request_set_tfm(req, irq_fpu_usable() ? 1047 tfm = &cryptd_tfm->base;
1102 cryptd_aead_child(cryptd_tfm) : 1048 if (irq_fpu_usable() && (!in_atomic() ||
1103 &cryptd_tfm->base); 1049 !cryptd_aead_queued(cryptd_tfm)))
1050 tfm = cryptd_aead_child(cryptd_tfm);
1051
1052 aead_request_set_tfm(req, tfm);
1104 1053
1105 return crypto_aead_encrypt(req); 1054 return crypto_aead_encrypt(req);
1106} 1055}
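The new condition is about ordering as much as about the FPU: dispatching straight to the inner (synchronous) aesni transform while requests are still queued on cryptd would let a later request complete before earlier ones. A sketch of the dispatch rule, factored out for clarity (hypothetical helper, not in the patch):

static struct crypto_aead *rfc4106_pick_tfm(struct cryptd_aead *cryptd_tfm)
{
	/* Use the synchronous child only if the FPU is available and we
	 * cannot overtake requests already queued to cryptd. */
	if (irq_fpu_usable() &&
	    (!in_atomic() || !cryptd_aead_queued(cryptd_tfm)))
		return cryptd_aead_child(cryptd_tfm);

	return &cryptd_tfm->base;
}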
@@ -1111,9 +1060,12 @@ static int rfc4106_decrypt(struct aead_request *req)
1111 struct cryptd_aead **ctx = crypto_aead_ctx(tfm); 1060 struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
1112 struct cryptd_aead *cryptd_tfm = *ctx; 1061 struct cryptd_aead *cryptd_tfm = *ctx;
1113 1062
1114 aead_request_set_tfm(req, irq_fpu_usable() ? 1063 tfm = &cryptd_tfm->base;
1115 cryptd_aead_child(cryptd_tfm) : 1064 if (irq_fpu_usable() && (!in_atomic() ||
1116 &cryptd_tfm->base); 1065 !cryptd_aead_queued(cryptd_tfm)))
1066 tfm = cryptd_aead_child(cryptd_tfm);
1067
1068 aead_request_set_tfm(req, tfm);
1117 1069
1118 return crypto_aead_decrypt(req); 1070 return crypto_aead_decrypt(req);
1119} 1071}
diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
index 2d5c2e0bd939..f910d1d449f0 100644
--- a/arch/x86/crypto/chacha20_glue.c
+++ b/arch/x86/crypto/chacha20_glue.c
@@ -70,7 +70,7 @@ static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst,
70 struct blkcipher_walk walk; 70 struct blkcipher_walk walk;
71 int err; 71 int err;
72 72
73 if (!may_use_simd()) 73 if (nbytes <= CHACHA20_BLOCK_SIZE || !may_use_simd())
74 return crypto_chacha20_crypt(desc, dst, src, nbytes); 74 return crypto_chacha20_crypt(desc, dst, src, nbytes);
75 75
76 state = (u32 *)roundup((uintptr_t)state_buf, CHACHA20_STATE_ALIGN); 76 state = (u32 *)roundup((uintptr_t)state_buf, CHACHA20_STATE_ALIGN);
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index a69321a77783..0420bab19efb 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -168,30 +168,23 @@ static int ghash_async_init(struct ahash_request *req)
168 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); 168 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
169 struct ahash_request *cryptd_req = ahash_request_ctx(req); 169 struct ahash_request *cryptd_req = ahash_request_ctx(req);
170 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; 170 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
171 struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
172 struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
171 173
172 if (!irq_fpu_usable()) { 174 desc->tfm = child;
173 memcpy(cryptd_req, req, sizeof(*req)); 175 desc->flags = req->base.flags;
174 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); 176 return crypto_shash_init(desc);
175 return crypto_ahash_init(cryptd_req);
176 } else {
177 struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
178 struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
179
180 desc->tfm = child;
181 desc->flags = req->base.flags;
182 return crypto_shash_init(desc);
183 }
184} 177}
185 178
186static int ghash_async_update(struct ahash_request *req) 179static int ghash_async_update(struct ahash_request *req)
187{ 180{
188 struct ahash_request *cryptd_req = ahash_request_ctx(req); 181 struct ahash_request *cryptd_req = ahash_request_ctx(req);
182 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
183 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
184 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
189 185
190 if (!irq_fpu_usable()) { 186 if (!irq_fpu_usable() ||
191 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 187 (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
192 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
193 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
194
195 memcpy(cryptd_req, req, sizeof(*req)); 188 memcpy(cryptd_req, req, sizeof(*req));
196 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); 189 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
197 return crypto_ahash_update(cryptd_req); 190 return crypto_ahash_update(cryptd_req);
@@ -204,12 +197,12 @@ static int ghash_async_update(struct ahash_request *req)
204static int ghash_async_final(struct ahash_request *req) 197static int ghash_async_final(struct ahash_request *req)
205{ 198{
206 struct ahash_request *cryptd_req = ahash_request_ctx(req); 199 struct ahash_request *cryptd_req = ahash_request_ctx(req);
200 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
201 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
202 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
207 203
208 if (!irq_fpu_usable()) { 204 if (!irq_fpu_usable() ||
209 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 205 (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
210 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
211 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
212
213 memcpy(cryptd_req, req, sizeof(*req)); 206 memcpy(cryptd_req, req, sizeof(*req));
214 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); 207 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
215 return crypto_ahash_final(cryptd_req); 208 return crypto_ahash_final(cryptd_req);
@@ -249,7 +242,8 @@ static int ghash_async_digest(struct ahash_request *req)
249 struct ahash_request *cryptd_req = ahash_request_ctx(req); 242 struct ahash_request *cryptd_req = ahash_request_ctx(req);
250 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; 243 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
251 244
252 if (!irq_fpu_usable()) { 245 if (!irq_fpu_usable() ||
246 (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
253 memcpy(cryptd_req, req, sizeof(*req)); 247 memcpy(cryptd_req, req, sizeof(*req));
254 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); 248 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
255 return crypto_ahash_digest(cryptd_req); 249 return crypto_ahash_digest(cryptd_req);
diff --git a/arch/x86/crypto/sha-mb/Makefile b/arch/x86/crypto/sha1-mb/Makefile
index 2f8756375df5..2f8756375df5 100644
--- a/arch/x86/crypto/sha-mb/Makefile
+++ b/arch/x86/crypto/sha1-mb/Makefile
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha1-mb/sha1_mb.c
index 9c5af331a956..9e5b67127a09 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha1-mb/sha1_mb.c
@@ -67,7 +67,7 @@
67#include <asm/byteorder.h> 67#include <asm/byteorder.h>
68#include <linux/hardirq.h> 68#include <linux/hardirq.h>
69#include <asm/fpu/api.h> 69#include <asm/fpu/api.h>
70#include "sha_mb_ctx.h" 70#include "sha1_mb_ctx.h"
71 71
72#define FLUSH_INTERVAL 1000 /* in usec */ 72#define FLUSH_INTERVAL 1000 /* in usec */
73 73
@@ -77,30 +77,34 @@ struct sha1_mb_ctx {
77 struct mcryptd_ahash *mcryptd_tfm; 77 struct mcryptd_ahash *mcryptd_tfm;
78}; 78};
79 79
80static inline struct mcryptd_hash_request_ctx *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx) 80static inline struct mcryptd_hash_request_ctx
81 *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
81{ 82{
82 struct shash_desc *desc; 83 struct ahash_request *areq;
83 84
84 desc = container_of((void *) hash_ctx, struct shash_desc, __ctx); 85 areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
85 return container_of(desc, struct mcryptd_hash_request_ctx, desc); 86 return container_of(areq, struct mcryptd_hash_request_ctx, areq);
86} 87}
87 88
88static inline struct ahash_request *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx) 89static inline struct ahash_request
90 *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
89{ 91{
90 return container_of((void *) ctx, struct ahash_request, __ctx); 92 return container_of((void *) ctx, struct ahash_request, __ctx);
91} 93}
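These two casts rely on the request layout: the sha1_hash_ctx sits in the __ctx area of an ahash_request, and that ahash_request is the areq member embedded in mcryptd_hash_request_ctx, which is why the old shash_desc-based container_of chain becomes an ahash_request-based one. A small sketch of walking back up from a completed hash context using only the helpers above (illustrative helper name):

static struct ahash_request *hash_ctx_to_request(struct sha1_hash_ctx *hash_ctx)
{
	struct mcryptd_hash_request_ctx *rctx =
		cast_hash_to_mcryptd_ctx(hash_ctx);

	return cast_mcryptd_ctx_to_req(rctx);
}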
92 94
93static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx, 95static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
94 struct shash_desc *desc) 96 struct ahash_request *areq)
95{ 97{
96 rctx->flag = HASH_UPDATE; 98 rctx->flag = HASH_UPDATE;
97} 99}
98 100
99static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state); 101static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state);
100static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)(struct sha1_mb_mgr *state, 102static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)
101 struct job_sha1 *job); 103 (struct sha1_mb_mgr *state, struct job_sha1 *job);
102static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)(struct sha1_mb_mgr *state); 104static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)
103static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)(struct sha1_mb_mgr *state); 105 (struct sha1_mb_mgr *state);
106static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)
107 (struct sha1_mb_mgr *state);
104 108
105static inline void sha1_init_digest(uint32_t *digest) 109static inline void sha1_init_digest(uint32_t *digest)
106{ 110{
@@ -131,7 +135,8 @@ static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
131 return i >> SHA1_LOG2_BLOCK_SIZE; 135 return i >> SHA1_LOG2_BLOCK_SIZE;
132} 136}
133 137
134static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, struct sha1_hash_ctx *ctx) 138static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr,
139 struct sha1_hash_ctx *ctx)
135{ 140{
136 while (ctx) { 141 while (ctx) {
137 if (ctx->status & HASH_CTX_STS_COMPLETE) { 142 if (ctx->status & HASH_CTX_STS_COMPLETE) {
@@ -177,8 +182,8 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, str
177 182
178 ctx->job.buffer = (uint8_t *) buffer; 183 ctx->job.buffer = (uint8_t *) buffer;
179 ctx->job.len = len; 184 ctx->job.len = len;
180 ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, 185 ctx = (struct sha1_hash_ctx *)sha1_job_mgr_submit(&mgr->mgr,
181 &ctx->job); 186 &ctx->job);
182 continue; 187 continue;
183 } 188 }
184 } 189 }
@@ -191,13 +196,15 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, str
191 if (ctx->status & HASH_CTX_STS_LAST) { 196 if (ctx->status & HASH_CTX_STS_LAST) {
192 197
193 uint8_t *buf = ctx->partial_block_buffer; 198 uint8_t *buf = ctx->partial_block_buffer;
194 uint32_t n_extra_blocks = sha1_pad(buf, ctx->total_length); 199 uint32_t n_extra_blocks =
200 sha1_pad(buf, ctx->total_length);
195 201
196 ctx->status = (HASH_CTX_STS_PROCESSING | 202 ctx->status = (HASH_CTX_STS_PROCESSING |
197 HASH_CTX_STS_COMPLETE); 203 HASH_CTX_STS_COMPLETE);
198 ctx->job.buffer = buf; 204 ctx->job.buffer = buf;
199 ctx->job.len = (uint32_t) n_extra_blocks; 205 ctx->job.len = (uint32_t) n_extra_blocks;
200 ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job); 206 ctx = (struct sha1_hash_ctx *)
207 sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
201 continue; 208 continue;
202 } 209 }
203 210
@@ -208,14 +215,17 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, str
208 return NULL; 215 return NULL;
209} 216}
210 217
211static struct sha1_hash_ctx *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr) 218static struct sha1_hash_ctx
219 *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
212{ 220{
213 /* 221 /*
214 * If get_comp_job returns NULL, there are no jobs complete. 222 * If get_comp_job returns NULL, there are no jobs complete.
215 * If get_comp_job returns a job, verify that it is safe to return to the user. 223 * If get_comp_job returns a job, verify that it is safe to return to
224 * the user.
216 * If it is not ready, resubmit the job to finish processing. 225 * If it is not ready, resubmit the job to finish processing.
217 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned. 226 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned.
218 * Otherwise, all jobs currently being managed by the hash_ctx_mgr still need processing. 227 * Otherwise, all jobs currently being managed by the hash_ctx_mgr
228 * still need processing.
219 */ 229 */
220 struct sha1_hash_ctx *ctx; 230 struct sha1_hash_ctx *ctx;
221 231
@@ -235,7 +245,10 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
235 int flags) 245 int flags)
236{ 246{
237 if (flags & (~HASH_ENTIRE)) { 247 if (flags & (~HASH_ENTIRE)) {
238 /* User should not pass anything other than FIRST, UPDATE, or LAST */ 248 /*
249 * User should not pass anything other than FIRST, UPDATE, or
250 * LAST
251 */
239 ctx->error = HASH_CTX_ERROR_INVALID_FLAGS; 252 ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
240 return ctx; 253 return ctx;
241 } 254 }
@@ -264,14 +277,20 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
264 ctx->partial_block_buffer_length = 0; 277 ctx->partial_block_buffer_length = 0;
265 } 278 }
266 279
267 /* If we made it here, there were no errors during this call to submit */ 280 /*
281 * If we made it here, there were no errors during this call to
282 * submit
283 */
268 ctx->error = HASH_CTX_ERROR_NONE; 284 ctx->error = HASH_CTX_ERROR_NONE;
269 285
270 /* Store buffer ptr info from user */ 286 /* Store buffer ptr info from user */
271 ctx->incoming_buffer = buffer; 287 ctx->incoming_buffer = buffer;
272 ctx->incoming_buffer_length = len; 288 ctx->incoming_buffer_length = len;
273 289
274 /* Store the user's request flags and mark this ctx as currently being processed. */ 290 /*
291 * Store the user's request flags and mark this ctx as currently
292 * being processed.
293 */
275 ctx->status = (flags & HASH_LAST) ? 294 ctx->status = (flags & HASH_LAST) ?
276 (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) : 295 (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
277 HASH_CTX_STS_PROCESSING; 296 HASH_CTX_STS_PROCESSING;
@@ -285,9 +304,13 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
285 * Or if the user's buffer contains less than a whole block, 304 * Or if the user's buffer contains less than a whole block,
286 * append as much as possible to the extra block. 305 * append as much as possible to the extra block.
287 */ 306 */
288 if ((ctx->partial_block_buffer_length) | (len < SHA1_BLOCK_SIZE)) { 307 if (ctx->partial_block_buffer_length || len < SHA1_BLOCK_SIZE) {
289 /* Compute how many bytes to copy from user buffer into extra block */ 308 /*
290 uint32_t copy_len = SHA1_BLOCK_SIZE - ctx->partial_block_buffer_length; 309 * Compute how many bytes to copy from user buffer into
310 * extra block
311 */
312 uint32_t copy_len = SHA1_BLOCK_SIZE -
313 ctx->partial_block_buffer_length;
291 if (len < copy_len) 314 if (len < copy_len)
292 copy_len = len; 315 copy_len = len;
293 316
@@ -297,20 +320,28 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
297 buffer, copy_len); 320 buffer, copy_len);
298 321
299 ctx->partial_block_buffer_length += copy_len; 322 ctx->partial_block_buffer_length += copy_len;
300 ctx->incoming_buffer = (const void *)((const char *)buffer + copy_len); 323 ctx->incoming_buffer = (const void *)
324 ((const char *)buffer + copy_len);
301 ctx->incoming_buffer_length = len - copy_len; 325 ctx->incoming_buffer_length = len - copy_len;
302 } 326 }
303 327
304 /* The extra block should never contain more than 1 block here */ 328 /*
329 * The extra block should never contain more than 1 block
330 * here
331 */
305 assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE); 332 assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE);
306 333
307 /* If the extra block buffer contains exactly 1 block, it can be hashed. */ 334 /*
335 * If the extra block buffer contains exactly 1 block, it can
336 * be hashed.
337 */
308 if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) { 338 if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) {
309 ctx->partial_block_buffer_length = 0; 339 ctx->partial_block_buffer_length = 0;
310 340
311 ctx->job.buffer = ctx->partial_block_buffer; 341 ctx->job.buffer = ctx->partial_block_buffer;
312 ctx->job.len = 1; 342 ctx->job.len = 1;
313 ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job); 343 ctx = (struct sha1_hash_ctx *)
344 sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
314 } 345 }
315 } 346 }
316 347
@@ -329,23 +360,24 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
329 return NULL; 360 return NULL;
330 361
331 /* 362 /*
332 * If flush returned a job, resubmit the job to finish processing. 363 * If flush returned a job, resubmit the job to finish
364 * processing.
333 */ 365 */
334 ctx = sha1_ctx_mgr_resubmit(mgr, ctx); 366 ctx = sha1_ctx_mgr_resubmit(mgr, ctx);
335 367
336 /* 368 /*
337 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned. 369 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be
338 * Otherwise, all jobs currently being managed by the sha1_ctx_mgr 370 * returned. Otherwise, all jobs currently being managed by the
339 * still need processing. Loop. 371 * sha1_ctx_mgr still need processing. Loop.
340 */ 372 */
341 if (ctx) 373 if (ctx)
342 return ctx; 374 return ctx;
343 } 375 }
344} 376}
345 377
346static int sha1_mb_init(struct shash_desc *desc) 378static int sha1_mb_init(struct ahash_request *areq)
347{ 379{
348 struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); 380 struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
349 381
350 hash_ctx_init(sctx); 382 hash_ctx_init(sctx);
351 sctx->job.result_digest[0] = SHA1_H0; 383 sctx->job.result_digest[0] = SHA1_H0;
@@ -363,7 +395,7 @@ static int sha1_mb_init(struct shash_desc *desc)
363static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx) 395static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
364{ 396{
365 int i; 397 int i;
366 struct sha1_hash_ctx *sctx = shash_desc_ctx(&rctx->desc); 398 struct sha1_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
367 __be32 *dst = (__be32 *) rctx->out; 399 __be32 *dst = (__be32 *) rctx->out;
368 400
369 for (i = 0; i < 5; ++i) 401 for (i = 0; i < 5; ++i)
@@ -394,9 +426,11 @@ static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
394 flag |= HASH_LAST; 426 flag |= HASH_LAST;
395 427
396 } 428 }
397 sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(&rctx->desc); 429 sha_ctx = (struct sha1_hash_ctx *)
430 ahash_request_ctx(&rctx->areq);
398 kernel_fpu_begin(); 431 kernel_fpu_begin();
399 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag); 432 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
433 rctx->walk.data, nbytes, flag);
400 if (!sha_ctx) { 434 if (!sha_ctx) {
401 if (flush) 435 if (flush)
402 sha_ctx = sha1_ctx_mgr_flush(cstate->mgr); 436 sha_ctx = sha1_ctx_mgr_flush(cstate->mgr);
@@ -485,11 +519,10 @@ static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
485 mcryptd_arm_flusher(cstate, delay); 519 mcryptd_arm_flusher(cstate, delay);
486} 520}
487 521
488static int sha1_mb_update(struct shash_desc *desc, const u8 *data, 522static int sha1_mb_update(struct ahash_request *areq)
489 unsigned int len)
490{ 523{
491 struct mcryptd_hash_request_ctx *rctx = 524 struct mcryptd_hash_request_ctx *rctx =
492 container_of(desc, struct mcryptd_hash_request_ctx, desc); 525 container_of(areq, struct mcryptd_hash_request_ctx, areq);
493 struct mcryptd_alg_cstate *cstate = 526 struct mcryptd_alg_cstate *cstate =
494 this_cpu_ptr(sha1_mb_alg_state.alg_cstate); 527 this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
495 528
@@ -505,7 +538,7 @@ static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
505 } 538 }
506 539
507 /* need to init context */ 540 /* need to init context */
508 req_ctx_init(rctx, desc); 541 req_ctx_init(rctx, areq);
509 542
510 nbytes = crypto_ahash_walk_first(req, &rctx->walk); 543 nbytes = crypto_ahash_walk_first(req, &rctx->walk);
511 544
@@ -518,10 +551,11 @@ static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
518 rctx->flag |= HASH_DONE; 551 rctx->flag |= HASH_DONE;
519 552
520 /* submit */ 553 /* submit */
521 sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc); 554 sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
522 sha1_mb_add_list(rctx, cstate); 555 sha1_mb_add_list(rctx, cstate);
523 kernel_fpu_begin(); 556 kernel_fpu_begin();
524 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, HASH_UPDATE); 557 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
558 nbytes, HASH_UPDATE);
525 kernel_fpu_end(); 559 kernel_fpu_end();
526 560
527 /* check if anything is returned */ 561 /* check if anything is returned */
@@ -544,11 +578,10 @@ done:
544 return ret; 578 return ret;
545} 579}
546 580
547static int sha1_mb_finup(struct shash_desc *desc, const u8 *data, 581static int sha1_mb_finup(struct ahash_request *areq)
548 unsigned int len, u8 *out)
549{ 582{
550 struct mcryptd_hash_request_ctx *rctx = 583 struct mcryptd_hash_request_ctx *rctx =
551 container_of(desc, struct mcryptd_hash_request_ctx, desc); 584 container_of(areq, struct mcryptd_hash_request_ctx, areq);
552 struct mcryptd_alg_cstate *cstate = 585 struct mcryptd_alg_cstate *cstate =
553 this_cpu_ptr(sha1_mb_alg_state.alg_cstate); 586 this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
554 587
@@ -563,7 +596,7 @@ static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
563 } 596 }
564 597
565 /* need to init context */ 598 /* need to init context */
566 req_ctx_init(rctx, desc); 599 req_ctx_init(rctx, areq);
567 600
568 nbytes = crypto_ahash_walk_first(req, &rctx->walk); 601 nbytes = crypto_ahash_walk_first(req, &rctx->walk);
569 602
@@ -576,15 +609,15 @@ static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
576 rctx->flag |= HASH_DONE; 609 rctx->flag |= HASH_DONE;
577 flag = HASH_LAST; 610 flag = HASH_LAST;
578 } 611 }
579 rctx->out = out;
580 612
581 /* submit */ 613 /* submit */
582 rctx->flag |= HASH_FINAL; 614 rctx->flag |= HASH_FINAL;
583 sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc); 615 sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
584 sha1_mb_add_list(rctx, cstate); 616 sha1_mb_add_list(rctx, cstate);
585 617
586 kernel_fpu_begin(); 618 kernel_fpu_begin();
587 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, nbytes, flag); 619 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
620 nbytes, flag);
588 kernel_fpu_end(); 621 kernel_fpu_end();
589 622
590 /* check if anything is returned */ 623 /* check if anything is returned */
@@ -605,10 +638,10 @@ done:
605 return ret; 638 return ret;
606} 639}
607 640
608static int sha1_mb_final(struct shash_desc *desc, u8 *out) 641static int sha1_mb_final(struct ahash_request *areq)
609{ 642{
610 struct mcryptd_hash_request_ctx *rctx = 643 struct mcryptd_hash_request_ctx *rctx =
611 container_of(desc, struct mcryptd_hash_request_ctx, desc); 644 container_of(areq, struct mcryptd_hash_request_ctx, areq);
612 struct mcryptd_alg_cstate *cstate = 645 struct mcryptd_alg_cstate *cstate =
613 this_cpu_ptr(sha1_mb_alg_state.alg_cstate); 646 this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
614 647
@@ -623,16 +656,16 @@ static int sha1_mb_final(struct shash_desc *desc, u8 *out)
623 } 656 }
624 657
625 /* need to init context */ 658 /* need to init context */
626 req_ctx_init(rctx, desc); 659 req_ctx_init(rctx, areq);
627 660
628 rctx->out = out;
629 rctx->flag |= HASH_DONE | HASH_FINAL; 661 rctx->flag |= HASH_DONE | HASH_FINAL;
630 662
631 sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc); 663 sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
632 /* flag HASH_FINAL and 0 data size */ 664 /* flag HASH_FINAL and 0 data size */
633 sha1_mb_add_list(rctx, cstate); 665 sha1_mb_add_list(rctx, cstate);
634 kernel_fpu_begin(); 666 kernel_fpu_begin();
635 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0, HASH_LAST); 667 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
668 HASH_LAST);
636 kernel_fpu_end(); 669 kernel_fpu_end();
637 670
638 /* check if anything is returned */ 671 /* check if anything is returned */
@@ -654,48 +687,98 @@ done:
654 return ret; 687 return ret;
655} 688}
656 689
657static int sha1_mb_export(struct shash_desc *desc, void *out) 690static int sha1_mb_export(struct ahash_request *areq, void *out)
658{ 691{
659 struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); 692 struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
660 693
661 memcpy(out, sctx, sizeof(*sctx)); 694 memcpy(out, sctx, sizeof(*sctx));
662 695
663 return 0; 696 return 0;
664} 697}
665 698
666static int sha1_mb_import(struct shash_desc *desc, const void *in) 699static int sha1_mb_import(struct ahash_request *areq, const void *in)
667{ 700{
668 struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); 701 struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
669 702
670 memcpy(sctx, in, sizeof(*sctx)); 703 memcpy(sctx, in, sizeof(*sctx));
671 704
672 return 0; 705 return 0;
673} 706}
674 707
708static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
709{
710 struct mcryptd_ahash *mcryptd_tfm;
711 struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
712 struct mcryptd_hash_ctx *mctx;
675 713
676static struct shash_alg sha1_mb_shash_alg = { 714 mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
677 .digestsize = SHA1_DIGEST_SIZE, 715 CRYPTO_ALG_INTERNAL,
716 CRYPTO_ALG_INTERNAL);
717 if (IS_ERR(mcryptd_tfm))
718 return PTR_ERR(mcryptd_tfm);
719 mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
720 mctx->alg_state = &sha1_mb_alg_state;
721 ctx->mcryptd_tfm = mcryptd_tfm;
722 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
723 sizeof(struct ahash_request) +
724 crypto_ahash_reqsize(&mcryptd_tfm->base));
725
726 return 0;
727}
728
729static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
730{
731 struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
732
733 mcryptd_free_ahash(ctx->mcryptd_tfm);
734}
735
736static int sha1_mb_areq_init_tfm(struct crypto_tfm *tfm)
737{
738 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
739 sizeof(struct ahash_request) +
740 sizeof(struct sha1_hash_ctx));
741
742 return 0;
743}
744
745static void sha1_mb_areq_exit_tfm(struct crypto_tfm *tfm)
746{
747 struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
748
749 mcryptd_free_ahash(ctx->mcryptd_tfm);
750}
751
752static struct ahash_alg sha1_mb_areq_alg = {
678 .init = sha1_mb_init, 753 .init = sha1_mb_init,
679 .update = sha1_mb_update, 754 .update = sha1_mb_update,
680 .final = sha1_mb_final, 755 .final = sha1_mb_final,
681 .finup = sha1_mb_finup, 756 .finup = sha1_mb_finup,
682 .export = sha1_mb_export, 757 .export = sha1_mb_export,
683 .import = sha1_mb_import, 758 .import = sha1_mb_import,
684 .descsize = sizeof(struct sha1_hash_ctx), 759 .halg = {
685 .statesize = sizeof(struct sha1_hash_ctx), 760 .digestsize = SHA1_DIGEST_SIZE,
686 .base = { 761 .statesize = sizeof(struct sha1_hash_ctx),
687 .cra_name = "__sha1-mb", 762 .base = {
688 .cra_driver_name = "__intel_sha1-mb", 763 .cra_name = "__sha1-mb",
689 .cra_priority = 100, 764 .cra_driver_name = "__intel_sha1-mb",
690 /* 765 .cra_priority = 100,
691 * use ASYNC flag as some buffers in multi-buffer 766 /*
692 * algo may not have completed before hashing thread sleep 767 * use ASYNC flag as some buffers in multi-buffer
693 */ 768 * algo may not have completed before hashing thread
694 .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_ASYNC | 769 * sleep
695 CRYPTO_ALG_INTERNAL, 770 */
696 .cra_blocksize = SHA1_BLOCK_SIZE, 771 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
697 .cra_module = THIS_MODULE, 772 CRYPTO_ALG_ASYNC |
698 .cra_list = LIST_HEAD_INIT(sha1_mb_shash_alg.base.cra_list), 773 CRYPTO_ALG_INTERNAL,
774 .cra_blocksize = SHA1_BLOCK_SIZE,
775 .cra_module = THIS_MODULE,
776 .cra_list = LIST_HEAD_INIT
777 (sha1_mb_areq_alg.halg.base.cra_list),
778 .cra_init = sha1_mb_areq_init_tfm,
779 .cra_exit = sha1_mb_areq_exit_tfm,
780 .cra_ctxsize = sizeof(struct sha1_hash_ctx),
781 }
699 } 782 }
700}; 783};
701 784
@@ -780,46 +863,20 @@ static int sha1_mb_async_import(struct ahash_request *req, const void *in)
780 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 863 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
781 struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); 864 struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
782 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; 865 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
783 struct crypto_shash *child = mcryptd_ahash_child(mcryptd_tfm); 866 struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
784 struct mcryptd_hash_request_ctx *rctx; 867 struct mcryptd_hash_request_ctx *rctx;
785 struct shash_desc *desc; 868 struct ahash_request *areq;
786 869
787 memcpy(mcryptd_req, req, sizeof(*req)); 870 memcpy(mcryptd_req, req, sizeof(*req));
788 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); 871 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
789 rctx = ahash_request_ctx(mcryptd_req); 872 rctx = ahash_request_ctx(mcryptd_req);
790 desc = &rctx->desc; 873 areq = &rctx->areq;
791 desc->tfm = child;
792 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
793
794 return crypto_ahash_import(mcryptd_req, in);
795}
796
797static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
798{
799 struct mcryptd_ahash *mcryptd_tfm;
800 struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
801 struct mcryptd_hash_ctx *mctx;
802 874
803 mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb", 875 ahash_request_set_tfm(areq, child);
804 CRYPTO_ALG_INTERNAL, 876 ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
805 CRYPTO_ALG_INTERNAL); 877 rctx->complete, req);
806 if (IS_ERR(mcryptd_tfm))
807 return PTR_ERR(mcryptd_tfm);
808 mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
809 mctx->alg_state = &sha1_mb_alg_state;
810 ctx->mcryptd_tfm = mcryptd_tfm;
811 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
812 sizeof(struct ahash_request) +
813 crypto_ahash_reqsize(&mcryptd_tfm->base));
814 878
815 return 0; 879 return crypto_ahash_import(mcryptd_req, in);
816}
817
818static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
819{
820 struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
821
822 mcryptd_free_ahash(ctx->mcryptd_tfm);
823} 880}
824 881
825static struct ahash_alg sha1_mb_async_alg = { 882static struct ahash_alg sha1_mb_async_alg = {
@@ -866,7 +923,8 @@ static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate)
866 if (time_before(cur_time, rctx->tag.expire)) 923 if (time_before(cur_time, rctx->tag.expire))
867 break; 924 break;
868 kernel_fpu_begin(); 925 kernel_fpu_begin();
869 sha_ctx = (struct sha1_hash_ctx *) sha1_ctx_mgr_flush(cstate->mgr); 926 sha_ctx = (struct sha1_hash_ctx *)
927 sha1_ctx_mgr_flush(cstate->mgr);
870 kernel_fpu_end(); 928 kernel_fpu_end();
871 if (!sha_ctx) { 929 if (!sha_ctx) {
872 pr_err("sha1_mb error: nothing got flushed for non-empty list\n"); 930 pr_err("sha1_mb error: nothing got flushed for non-empty list\n");
@@ -927,7 +985,7 @@ static int __init sha1_mb_mod_init(void)
927 } 985 }
928 sha1_mb_alg_state.flusher = &sha1_mb_flusher; 986 sha1_mb_alg_state.flusher = &sha1_mb_flusher;
929 987
930 err = crypto_register_shash(&sha1_mb_shash_alg); 988 err = crypto_register_ahash(&sha1_mb_areq_alg);
931 if (err) 989 if (err)
932 goto err2; 990 goto err2;
933 err = crypto_register_ahash(&sha1_mb_async_alg); 991 err = crypto_register_ahash(&sha1_mb_async_alg);
@@ -937,7 +995,7 @@ static int __init sha1_mb_mod_init(void)
937 995
938 return 0; 996 return 0;
939err1: 997err1:
940 crypto_unregister_shash(&sha1_mb_shash_alg); 998 crypto_unregister_ahash(&sha1_mb_areq_alg);
941err2: 999err2:
942 for_each_possible_cpu(cpu) { 1000 for_each_possible_cpu(cpu) {
943 cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); 1001 cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
@@ -953,7 +1011,7 @@ static void __exit sha1_mb_mod_fini(void)
953 struct mcryptd_alg_cstate *cpu_state; 1011 struct mcryptd_alg_cstate *cpu_state;
954 1012
955 crypto_unregister_ahash(&sha1_mb_async_alg); 1013 crypto_unregister_ahash(&sha1_mb_async_alg);
956 crypto_unregister_shash(&sha1_mb_shash_alg); 1014 crypto_unregister_ahash(&sha1_mb_areq_alg);
957 for_each_possible_cpu(cpu) { 1015 for_each_possible_cpu(cpu) {
958 cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); 1016 cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
959 kfree(cpu_state->mgr); 1017 kfree(cpu_state->mgr);
diff --git a/arch/x86/crypto/sha-mb/sha_mb_ctx.h b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
index e36069d0c1bd..98a35bcc6f4a 100644
--- a/arch/x86/crypto/sha-mb/sha_mb_ctx.h
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
@@ -54,7 +54,7 @@
54#ifndef _SHA_MB_CTX_INTERNAL_H 54#ifndef _SHA_MB_CTX_INTERNAL_H
55#define _SHA_MB_CTX_INTERNAL_H 55#define _SHA_MB_CTX_INTERNAL_H
56 56
57#include "sha_mb_mgr.h" 57#include "sha1_mb_mgr.h"
58 58
59#define HASH_UPDATE 0x00 59#define HASH_UPDATE 0x00
60#define HASH_FIRST 0x01 60#define HASH_FIRST 0x01
diff --git a/arch/x86/crypto/sha-mb/sha_mb_mgr.h b/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h
index 08ad1a9acfd7..08ad1a9acfd7 100644
--- a/arch/x86/crypto/sha-mb/sha_mb_mgr.h
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h
diff --git a/arch/x86/crypto/sha-mb/sha1_mb_mgr_datastruct.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S
index 86688c6e7a25..86688c6e7a25 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb_mgr_datastruct.S
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S
diff --git a/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
index 96df6a39d7e2..96df6a39d7e2 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
diff --git a/arch/x86/crypto/sha-mb/sha1_mb_mgr_init_avx2.c b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c
index 822acb5b464c..d2add0d35f43 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb_mgr_init_avx2.c
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c
@@ -51,7 +51,7 @@
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */ 52 */
53 53
54#include "sha_mb_mgr.h" 54#include "sha1_mb_mgr.h"
55 55
56void sha1_mb_mgr_init_avx2(struct sha1_mb_mgr *state) 56void sha1_mb_mgr_init_avx2(struct sha1_mb_mgr *state)
57{ 57{
diff --git a/arch/x86/crypto/sha-mb/sha1_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
index 63a0d9c8e31f..63a0d9c8e31f 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb_mgr_submit_avx2.S
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
diff --git a/arch/x86/crypto/sha-mb/sha1_x8_avx2.S b/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
index c9dae1cd2919..c9dae1cd2919 100644
--- a/arch/x86/crypto/sha-mb/sha1_x8_avx2.S
+++ b/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index 1024e378a358..fc61739150e7 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -374,3 +374,9 @@ MODULE_LICENSE("GPL");
374MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated"); 374MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");
375 375
376MODULE_ALIAS_CRYPTO("sha1"); 376MODULE_ALIAS_CRYPTO("sha1");
377MODULE_ALIAS_CRYPTO("sha1-ssse3");
378MODULE_ALIAS_CRYPTO("sha1-avx");
379MODULE_ALIAS_CRYPTO("sha1-avx2");
380#ifdef CONFIG_AS_SHA1_NI
381MODULE_ALIAS_CRYPTO("sha1-ni");
382#endif
diff --git a/arch/x86/crypto/sha256-mb/Makefile b/arch/x86/crypto/sha256-mb/Makefile
new file mode 100644
index 000000000000..41089e7c400c
--- /dev/null
+++ b/arch/x86/crypto/sha256-mb/Makefile
@@ -0,0 +1,11 @@
1#
2# Arch-specific CryptoAPI modules.
3#
4
5avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
6 $(comma)4)$(comma)%ymm2,yes,no)
7ifeq ($(avx2_supported),yes)
8 obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb.o
9 sha256-mb-y := sha256_mb.o sha256_mb_mgr_flush_avx2.o \
10 sha256_mb_mgr_init_avx2.o sha256_mb_mgr_submit_avx2.o sha256_x8_avx2.o
11endif
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c
new file mode 100644
index 000000000000..89fa85e8b10c
--- /dev/null
+++ b/arch/x86/crypto/sha256-mb/sha256_mb.c
@@ -0,0 +1,1030 @@
1/*
2 * Multi buffer SHA256 algorithm Glue Code
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
55
56#include <crypto/internal/hash.h>
57#include <linux/init.h>
58#include <linux/module.h>
59#include <linux/mm.h>
60#include <linux/cryptohash.h>
61#include <linux/types.h>
62#include <linux/list.h>
63#include <crypto/scatterwalk.h>
64#include <crypto/sha.h>
65#include <crypto/mcryptd.h>
66#include <crypto/crypto_wq.h>
67#include <asm/byteorder.h>
68#include <linux/hardirq.h>
69#include <asm/fpu/api.h>
70#include "sha256_mb_ctx.h"
71
72#define FLUSH_INTERVAL 1000 /* in usec */
73
74static struct mcryptd_alg_state sha256_mb_alg_state;
75
76struct sha256_mb_ctx {
77 struct mcryptd_ahash *mcryptd_tfm;
78};
79
80static inline struct mcryptd_hash_request_ctx
81 *cast_hash_to_mcryptd_ctx(struct sha256_hash_ctx *hash_ctx)
82{
83 struct ahash_request *areq;
84
85 areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
86 return container_of(areq, struct mcryptd_hash_request_ctx, areq);
87}
88
89static inline struct ahash_request
90 *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
91{
92 return container_of((void *) ctx, struct ahash_request, __ctx);
93}
94
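These casts rely on the sha256_hash_ctx sitting at offset 0 of the request's __ctx area, so a pointer to the hash context can be walked back to the enclosing ahash_request and from there to the mcryptd request context. A minimal user-space sketch of the same container_of pattern, with purely illustrative struct names (not part of this patch):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Stand-in for ahash_request: the driver's context lives at the end. */
    struct fake_request {
        int flags;
        char __ctx[64];
    };

    int main(void)
    {
        struct fake_request req = { .flags = 42 };
        void *ctx = req.__ctx;               /* what ahash_request_ctx() hands out */
        struct fake_request *back =
            container_of(ctx, struct fake_request, __ctx);

        printf("flags = %d\n", back->flags); /* prints 42: same object recovered */
        return 0;
    }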
95static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
96 struct ahash_request *areq)
97{
98 rctx->flag = HASH_UPDATE;
99}
100
101static asmlinkage void (*sha256_job_mgr_init)(struct sha256_mb_mgr *state);
102static asmlinkage struct job_sha256* (*sha256_job_mgr_submit)
103 (struct sha256_mb_mgr *state, struct job_sha256 *job);
104static asmlinkage struct job_sha256* (*sha256_job_mgr_flush)
105 (struct sha256_mb_mgr *state);
106static asmlinkage struct job_sha256* (*sha256_job_mgr_get_comp_job)
107 (struct sha256_mb_mgr *state);
108
109inline void sha256_init_digest(uint32_t *digest)
110{
111 static const uint32_t initial_digest[SHA256_DIGEST_LENGTH] = {
112 SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
113 SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7};
114 memcpy(digest, initial_digest, sizeof(initial_digest));
115}
116
117inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2],
118 uint32_t total_len)
119{
120 uint32_t i = total_len & (SHA256_BLOCK_SIZE - 1);
121
122 memset(&padblock[i], 0, SHA256_BLOCK_SIZE);
123 padblock[i] = 0x80;
124
125 i += ((SHA256_BLOCK_SIZE - 1) &
126 (0 - (total_len + SHA256_PADLENGTHFIELD_SIZE + 1)))
127 + 1 + SHA256_PADLENGTHFIELD_SIZE;
128
129#if SHA256_PADLENGTHFIELD_SIZE == 16
130 *((uint64_t *) &padblock[i - 16]) = 0;
131#endif
132
133 *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);
134
135 /* Number of extra blocks to hash */
136 return i >> SHA256_LOG2_BLOCK_SIZE;
137}
138
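sha256_pad() appends the usual SHA-256 padding (a 0x80 byte, zeros, then the 64-bit message length in bits) to the tail held in padblock and reports how many extra 64-byte blocks that produces. A quick user-space cross-check of that count, assuming the standard 64-byte block and 8-byte length field:

    #include <stdint.h>
    #include <stdio.h>

    /* Extra blocks needed to hold: tail bytes + 0x80 marker + 8-byte length. */
    static uint32_t extra_blocks(uint32_t total_len)
    {
        return ((total_len % 64) + 1 + 8 > 64) ? 2 : 1;
    }

    int main(void)
    {
        uint32_t lens[] = { 0, 1, 55, 56, 63, 64, 120 };
        unsigned int i;

        for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
            printf("total_len=%3u -> %u extra block(s)\n",
                   lens[i], extra_blocks(lens[i]));
        return 0;
    }

Tails of 0..55 bytes fit in one padded block; tails of 56..63 bytes spill into two, matching the i >> SHA256_LOG2_BLOCK_SIZE value returned above.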
139static struct sha256_hash_ctx
140 *sha256_ctx_mgr_resubmit(struct sha256_ctx_mgr *mgr,
141 struct sha256_hash_ctx *ctx)
142{
143 while (ctx) {
144 if (ctx->status & HASH_CTX_STS_COMPLETE) {
145 /* Clear PROCESSING bit */
146 ctx->status = HASH_CTX_STS_COMPLETE;
147 return ctx;
148 }
149
150 /*
151 * If the extra blocks are empty, begin hashing what remains
152 * in the user's buffer.
153 */
154 if (ctx->partial_block_buffer_length == 0 &&
155 ctx->incoming_buffer_length) {
156
157 const void *buffer = ctx->incoming_buffer;
158 uint32_t len = ctx->incoming_buffer_length;
159 uint32_t copy_len;
160
161 /*
162 * Only entire blocks can be hashed.
163 * Copy remainder to extra blocks buffer.
164 */
165 copy_len = len & (SHA256_BLOCK_SIZE-1);
166
167 if (copy_len) {
168 len -= copy_len;
169 memcpy(ctx->partial_block_buffer,
170 ((const char *) buffer + len),
171 copy_len);
172 ctx->partial_block_buffer_length = copy_len;
173 }
174
175 ctx->incoming_buffer_length = 0;
176
177 /* len should be a multiple of the block size now */
178 assert((len % SHA256_BLOCK_SIZE) == 0);
179
180 /* Set len to the number of blocks to be hashed */
181 len >>= SHA256_LOG2_BLOCK_SIZE;
182
183 if (len) {
184
185 ctx->job.buffer = (uint8_t *) buffer;
186 ctx->job.len = len;
187 ctx = (struct sha256_hash_ctx *)
188 sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
189 continue;
190 }
191 }
192
193 /*
194 * If the extra blocks are not empty, then we are
195 * either on the last block(s) or we need more
196 * user input before continuing.
197 */
198 if (ctx->status & HASH_CTX_STS_LAST) {
199
200 uint8_t *buf = ctx->partial_block_buffer;
201 uint32_t n_extra_blocks =
202 sha256_pad(buf, ctx->total_length);
203
204 ctx->status = (HASH_CTX_STS_PROCESSING |
205 HASH_CTX_STS_COMPLETE);
206 ctx->job.buffer = buf;
207 ctx->job.len = (uint32_t) n_extra_blocks;
208 ctx = (struct sha256_hash_ctx *)
209 sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
210 continue;
211 }
212
213 ctx->status = HASH_CTX_STS_IDLE;
214 return ctx;
215 }
216
217 return NULL;
218}
219
220static struct sha256_hash_ctx
221 *sha256_ctx_mgr_get_comp_ctx(struct sha256_ctx_mgr *mgr)
222{
223 /*
224 * If get_comp_job returns NULL, there are no jobs complete.
225 * If get_comp_job returns a job, verify that it is safe to return to
226 * the user. If it is not ready, resubmit the job to finish processing.
227 * If sha256_ctx_mgr_resubmit returned a job, it is ready to be
228 * returned. Otherwise, all jobs currently being managed by the
229 * hash_ctx_mgr still need processing.
230 */
231 struct sha256_hash_ctx *ctx;
232
233 ctx = (struct sha256_hash_ctx *) sha256_job_mgr_get_comp_job(&mgr->mgr);
234 return sha256_ctx_mgr_resubmit(mgr, ctx);
235}
236
237static void sha256_ctx_mgr_init(struct sha256_ctx_mgr *mgr)
238{
239 sha256_job_mgr_init(&mgr->mgr);
240}
241
242static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
243 struct sha256_hash_ctx *ctx,
244 const void *buffer,
245 uint32_t len,
246 int flags)
247{
248 if (flags & (~HASH_ENTIRE)) {
249 /* User should not pass anything other than FIRST, UPDATE
250 * or LAST
251 */
252 ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
253 return ctx;
254 }
255
256 if (ctx->status & HASH_CTX_STS_PROCESSING) {
257 /* Cannot submit to a currently processing job. */
258 ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
259 return ctx;
260 }
261
262 if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
263 /* Cannot update a finished job. */
264 ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
265 return ctx;
266 }
267
268 if (flags & HASH_FIRST) {
269 /* Init digest */
270 sha256_init_digest(ctx->job.result_digest);
271
272 /* Reset byte counter */
273 ctx->total_length = 0;
274
275 /* Clear extra blocks */
276 ctx->partial_block_buffer_length = 0;
277 }
278
279 /* If we made it here, there was no error during this call to submit */
280 ctx->error = HASH_CTX_ERROR_NONE;
281
282 /* Store buffer ptr info from user */
283 ctx->incoming_buffer = buffer;
284 ctx->incoming_buffer_length = len;
285
286 /*
287 * Store the user's request flags and mark this ctx as currently
288 * being processed.
289 */
290 ctx->status = (flags & HASH_LAST) ?
291 (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
292 HASH_CTX_STS_PROCESSING;
293
294 /* Advance byte counter */
295 ctx->total_length += len;
296
297 /*
298 * If there is anything currently buffered in the extra blocks,
299 * append to it until it contains a whole block.
300 * Or if the user's buffer contains less than a whole block,
301 * append as much as possible to the extra block.
302 */
303 if (ctx->partial_block_buffer_length || len < SHA256_BLOCK_SIZE) {
304 /*
305 * Compute how many bytes to copy from user buffer into
306 * extra block
307 */
308 uint32_t copy_len = SHA256_BLOCK_SIZE -
309 ctx->partial_block_buffer_length;
310 if (len < copy_len)
311 copy_len = len;
312
313 if (copy_len) {
314 /* Copy and update relevant pointers and counters */
315 memcpy(
316 &ctx->partial_block_buffer[ctx->partial_block_buffer_length],
317 buffer, copy_len);
318
319 ctx->partial_block_buffer_length += copy_len;
320 ctx->incoming_buffer = (const void *)
321 ((const char *)buffer + copy_len);
322 ctx->incoming_buffer_length = len - copy_len;
323 }
324
325 /* The extra block should never contain more than 1 block */
326 assert(ctx->partial_block_buffer_length <= SHA256_BLOCK_SIZE);
327
328 /*
329 * If the extra block buffer contains exactly 1 block,
330 * it can be hashed.
331 */
332 if (ctx->partial_block_buffer_length >= SHA256_BLOCK_SIZE) {
333 ctx->partial_block_buffer_length = 0;
334
335 ctx->job.buffer = ctx->partial_block_buffer;
336 ctx->job.len = 1;
337 ctx = (struct sha256_hash_ctx *)
338 sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
339 }
340 }
341
342 return sha256_ctx_mgr_resubmit(mgr, ctx);
343}
344
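The submit path above first tops up the one-block staging buffer (partial_block_buffer), hashes it once it holds a full block, and then lets the resubmit loop hash whole blocks straight out of the caller's buffer while stashing the tail for the next call. A toy user-space model of that accounting (64-byte blocks, counters only, no real hashing), purely to illustrate the flow:

    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK 64

    static uint32_t buffered;       /* bytes parked in the staging buffer */
    static uint32_t blocks_hashed;  /* whole blocks handed to the job manager */

    static void update(uint32_t len)
    {
        uint32_t total = len;

        if (buffered || len < BLOCK) {
            uint32_t copy = BLOCK - buffered;

            if (copy > len)
                copy = len;
            buffered += copy;
            len -= copy;
            if (buffered == BLOCK) {        /* staging buffer full: hash it */
                blocks_hashed++;
                buffered = 0;
            }
        }
        blocks_hashed += len / BLOCK;        /* hash whole blocks in place */
        buffered += len % BLOCK;             /* keep the tail for later */
        printf("update(%3u): buffered=%2u, blocks=%u\n",
               total, buffered, blocks_hashed);
    }

    int main(void)
    {
        update(10); update(100); update(64); update(3);
        return 0;
    }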
345static struct sha256_hash_ctx *sha256_ctx_mgr_flush(struct sha256_ctx_mgr *mgr)
346{
347 struct sha256_hash_ctx *ctx;
348
349 while (1) {
350 ctx = (struct sha256_hash_ctx *)
351 sha256_job_mgr_flush(&mgr->mgr);
352
353 /* If flush returned 0, there are no more jobs in flight. */
354 if (!ctx)
355 return NULL;
356
357 /*
358 * If flush returned a job, resubmit the job to finish
359 * processing.
360 */
361 ctx = sha256_ctx_mgr_resubmit(mgr, ctx);
362
363 /*
364 * If sha256_ctx_mgr_resubmit returned a job, it is ready to
365 * be returned. Otherwise, all jobs currently being managed by
366 * the sha256_ctx_mgr still need processing. Loop.
367 */
368 if (ctx)
369 return ctx;
370 }
371}
372
373static int sha256_mb_init(struct ahash_request *areq)
374{
375 struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
376
377 hash_ctx_init(sctx);
378 sctx->job.result_digest[0] = SHA256_H0;
379 sctx->job.result_digest[1] = SHA256_H1;
380 sctx->job.result_digest[2] = SHA256_H2;
381 sctx->job.result_digest[3] = SHA256_H3;
382 sctx->job.result_digest[4] = SHA256_H4;
383 sctx->job.result_digest[5] = SHA256_H5;
384 sctx->job.result_digest[6] = SHA256_H6;
385 sctx->job.result_digest[7] = SHA256_H7;
386 sctx->total_length = 0;
387 sctx->partial_block_buffer_length = 0;
388 sctx->status = HASH_CTX_STS_IDLE;
389
390 return 0;
391}
392
393static int sha256_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
394{
395 int i;
396 struct sha256_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
397 __be32 *dst = (__be32 *) rctx->out;
398
399 for (i = 0; i < 8; ++i)
400 dst[i] = cpu_to_be32(sctx->job.result_digest[i]);
401
402 return 0;
403}
404
405static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
406 struct mcryptd_alg_cstate *cstate, bool flush)
407{
408 int flag = HASH_UPDATE;
409 int nbytes, err = 0;
410 struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
411 struct sha256_hash_ctx *sha_ctx;
412
413 /* more work ? */
414 while (!(rctx->flag & HASH_DONE)) {
415 nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
416 if (nbytes < 0) {
417 err = nbytes;
418 goto out;
419 }
420 /* check if the walk is done */
421 if (crypto_ahash_walk_last(&rctx->walk)) {
422 rctx->flag |= HASH_DONE;
423 if (rctx->flag & HASH_FINAL)
424 flag |= HASH_LAST;
425
426 }
427 sha_ctx = (struct sha256_hash_ctx *)
428 ahash_request_ctx(&rctx->areq);
429 kernel_fpu_begin();
430 sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx,
431 rctx->walk.data, nbytes, flag);
432 if (!sha_ctx) {
433 if (flush)
434 sha_ctx = sha256_ctx_mgr_flush(cstate->mgr);
435 }
436 kernel_fpu_end();
437 if (sha_ctx)
438 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
439 else {
440 rctx = NULL;
441 goto out;
442 }
443 }
444
445 /* copy the results */
446 if (rctx->flag & HASH_FINAL)
447 sha256_mb_set_results(rctx);
448
449out:
450 *ret_rctx = rctx;
451 return err;
452}
453
454static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
455 struct mcryptd_alg_cstate *cstate,
456 int err)
457{
458 struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
459 struct sha256_hash_ctx *sha_ctx;
460 struct mcryptd_hash_request_ctx *req_ctx;
461 int ret;
462
463 /* remove from work list */
464 spin_lock(&cstate->work_lock);
465 list_del(&rctx->waiter);
466 spin_unlock(&cstate->work_lock);
467
468 if (irqs_disabled())
469 rctx->complete(&req->base, err);
470 else {
471 local_bh_disable();
472 rctx->complete(&req->base, err);
473 local_bh_enable();
474 }
475
476 /* check to see if there are other jobs that are done */
477 sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
478 while (sha_ctx) {
479 req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
480 ret = sha_finish_walk(&req_ctx, cstate, false);
481 if (req_ctx) {
482 spin_lock(&cstate->work_lock);
483 list_del(&req_ctx->waiter);
484 spin_unlock(&cstate->work_lock);
485
486 req = cast_mcryptd_ctx_to_req(req_ctx);
487 if (irqs_disabled())
488 rctx->complete(&req->base, ret);
489 else {
490 local_bh_disable();
491 rctx->complete(&req->base, ret);
492 local_bh_enable();
493 }
494 }
495 sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
496 }
497
498 return 0;
499}
500
501static void sha256_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
502 struct mcryptd_alg_cstate *cstate)
503{
504 unsigned long next_flush;
505 unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
506
507 /* initialize tag */
508 rctx->tag.arrival = jiffies; /* tag the arrival time */
509 rctx->tag.seq_num = cstate->next_seq_num++;
510 next_flush = rctx->tag.arrival + delay;
511 rctx->tag.expire = next_flush;
512
513 spin_lock(&cstate->work_lock);
514 list_add_tail(&rctx->waiter, &cstate->work_list);
515 spin_unlock(&cstate->work_lock);
516
517 mcryptd_arm_flusher(cstate, delay);
518}
519
520static int sha256_mb_update(struct ahash_request *areq)
521{
522 struct mcryptd_hash_request_ctx *rctx =
523 container_of(areq, struct mcryptd_hash_request_ctx, areq);
524 struct mcryptd_alg_cstate *cstate =
525 this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
526
527 struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
528 struct sha256_hash_ctx *sha_ctx;
529 int ret = 0, nbytes;
530
531 /* sanity check */
532 if (rctx->tag.cpu != smp_processor_id()) {
533 pr_err("mcryptd error: cpu clash\n");
534 goto done;
535 }
536
537 /* need to init context */
538 req_ctx_init(rctx, areq);
539
540 nbytes = crypto_ahash_walk_first(req, &rctx->walk);
541
542 if (nbytes < 0) {
543 ret = nbytes;
544 goto done;
545 }
546
547 if (crypto_ahash_walk_last(&rctx->walk))
548 rctx->flag |= HASH_DONE;
549
550 /* submit */
551 sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
552 sha256_mb_add_list(rctx, cstate);
553 kernel_fpu_begin();
554 sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
555 nbytes, HASH_UPDATE);
556 kernel_fpu_end();
557
558 /* check if anything is returned */
559 if (!sha_ctx)
560 return -EINPROGRESS;
561
562 if (sha_ctx->error) {
563 ret = sha_ctx->error;
564 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
565 goto done;
566 }
567
568 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
569 ret = sha_finish_walk(&rctx, cstate, false);
570
571 if (!rctx)
572 return -EINPROGRESS;
573done:
574 sha_complete_job(rctx, cstate, ret);
575 return ret;
576}
577
578static int sha256_mb_finup(struct ahash_request *areq)
579{
580 struct mcryptd_hash_request_ctx *rctx =
581 container_of(areq, struct mcryptd_hash_request_ctx, areq);
582 struct mcryptd_alg_cstate *cstate =
583 this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
584
585 struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
586 struct sha256_hash_ctx *sha_ctx;
587 int ret = 0, flag = HASH_UPDATE, nbytes;
588
589 /* sanity check */
590 if (rctx->tag.cpu != smp_processor_id()) {
591 pr_err("mcryptd error: cpu clash\n");
592 goto done;
593 }
594
595 /* need to init context */
596 req_ctx_init(rctx, areq);
597
598 nbytes = crypto_ahash_walk_first(req, &rctx->walk);
599
600 if (nbytes < 0) {
601 ret = nbytes;
602 goto done;
603 }
604
605 if (crypto_ahash_walk_last(&rctx->walk)) {
606 rctx->flag |= HASH_DONE;
607 flag = HASH_LAST;
608 }
609
610 /* submit */
611 rctx->flag |= HASH_FINAL;
612 sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
613 sha256_mb_add_list(rctx, cstate);
614
615 kernel_fpu_begin();
616 sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
617 nbytes, flag);
618 kernel_fpu_end();
619
620 /* check if anything is returned */
621 if (!sha_ctx)
622 return -EINPROGRESS;
623
624 if (sha_ctx->error) {
625 ret = sha_ctx->error;
626 goto done;
627 }
628
629 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
630 ret = sha_finish_walk(&rctx, cstate, false);
631 if (!rctx)
632 return -EINPROGRESS;
633done:
634 sha_complete_job(rctx, cstate, ret);
635 return ret;
636}
637
638static int sha256_mb_final(struct ahash_request *areq)
639{
640 struct mcryptd_hash_request_ctx *rctx =
641 container_of(areq, struct mcryptd_hash_request_ctx,
642 areq);
643 struct mcryptd_alg_cstate *cstate =
644 this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
645
646 struct sha256_hash_ctx *sha_ctx;
647 int ret = 0;
648 u8 data;
649
650 /* sanity check */
651 if (rctx->tag.cpu != smp_processor_id()) {
652 pr_err("mcryptd error: cpu clash\n");
653 goto done;
654 }
655
656 /* need to init context */
657 req_ctx_init(rctx, areq);
658
659 rctx->flag |= HASH_DONE | HASH_FINAL;
660
661 sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
662 /* flag HASH_FINAL and 0 data size */
663 sha256_mb_add_list(rctx, cstate);
664 kernel_fpu_begin();
665 sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
666 HASH_LAST);
667 kernel_fpu_end();
668
669 /* check if anything is returned */
670 if (!sha_ctx)
671 return -EINPROGRESS;
672
673 if (sha_ctx->error) {
674 ret = sha_ctx->error;
675 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
676 goto done;
677 }
678
679 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
680 ret = sha_finish_walk(&rctx, cstate, false);
681 if (!rctx)
682 return -EINPROGRESS;
683done:
684 sha_complete_job(rctx, cstate, ret);
685 return ret;
686}
687
688static int sha256_mb_export(struct ahash_request *areq, void *out)
689{
690 struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
691
692 memcpy(out, sctx, sizeof(*sctx));
693
694 return 0;
695}
696
697static int sha256_mb_import(struct ahash_request *areq, const void *in)
698{
699 struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
700
701 memcpy(sctx, in, sizeof(*sctx));
702
703 return 0;
704}
705
706static int sha256_mb_async_init_tfm(struct crypto_tfm *tfm)
707{
708 struct mcryptd_ahash *mcryptd_tfm;
709 struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
710 struct mcryptd_hash_ctx *mctx;
711
712 mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha256-mb",
713 CRYPTO_ALG_INTERNAL,
714 CRYPTO_ALG_INTERNAL);
715 if (IS_ERR(mcryptd_tfm))
716 return PTR_ERR(mcryptd_tfm);
717 mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
718 mctx->alg_state = &sha256_mb_alg_state;
719 ctx->mcryptd_tfm = mcryptd_tfm;
720 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
721 sizeof(struct ahash_request) +
722 crypto_ahash_reqsize(&mcryptd_tfm->base));
723
724 return 0;
725}
726
727static void sha256_mb_async_exit_tfm(struct crypto_tfm *tfm)
728{
729 struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
730
731 mcryptd_free_ahash(ctx->mcryptd_tfm);
732}
733
734static int sha256_mb_areq_init_tfm(struct crypto_tfm *tfm)
735{
736 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
737 sizeof(struct ahash_request) +
738 sizeof(struct sha256_hash_ctx));
739
740 return 0;
741}
742
743static void sha256_mb_areq_exit_tfm(struct crypto_tfm *tfm)
744{
745 struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
746
747 mcryptd_free_ahash(ctx->mcryptd_tfm);
748}
749
750static struct ahash_alg sha256_mb_areq_alg = {
751 .init = sha256_mb_init,
752 .update = sha256_mb_update,
753 .final = sha256_mb_final,
754 .finup = sha256_mb_finup,
755 .export = sha256_mb_export,
756 .import = sha256_mb_import,
757 .halg = {
758 .digestsize = SHA256_DIGEST_SIZE,
759 .statesize = sizeof(struct sha256_hash_ctx),
760 .base = {
761 .cra_name = "__sha256-mb",
762 .cra_driver_name = "__intel_sha256-mb",
763 .cra_priority = 100,
764 /*
765 * use ASYNC flag as some buffers in multi-buffer
766 * algo may not have completed before hashing thread
767 * sleep
768 */
769 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
770 CRYPTO_ALG_ASYNC |
771 CRYPTO_ALG_INTERNAL,
772 .cra_blocksize = SHA256_BLOCK_SIZE,
773 .cra_module = THIS_MODULE,
774 .cra_list = LIST_HEAD_INIT
775 (sha256_mb_areq_alg.halg.base.cra_list),
776 .cra_init = sha256_mb_areq_init_tfm,
777 .cra_exit = sha256_mb_areq_exit_tfm,
778 .cra_ctxsize = sizeof(struct sha256_hash_ctx),
779 }
780 }
781};
782
783static int sha256_mb_async_init(struct ahash_request *req)
784{
785 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
786 struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
787 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
788 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
789
790 memcpy(mcryptd_req, req, sizeof(*req));
791 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
792 return crypto_ahash_init(mcryptd_req);
793}
794
795static int sha256_mb_async_update(struct ahash_request *req)
796{
797 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
798
799 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
800 struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
801 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
802
803 memcpy(mcryptd_req, req, sizeof(*req));
804 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
805 return crypto_ahash_update(mcryptd_req);
806}
807
808static int sha256_mb_async_finup(struct ahash_request *req)
809{
810 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
811
812 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
813 struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
814 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
815
816 memcpy(mcryptd_req, req, sizeof(*req));
817 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
818 return crypto_ahash_finup(mcryptd_req);
819}
820
821static int sha256_mb_async_final(struct ahash_request *req)
822{
823 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
824
825 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
826 struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
827 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
828
829 memcpy(mcryptd_req, req, sizeof(*req));
830 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
831 return crypto_ahash_final(mcryptd_req);
832}
833
834static int sha256_mb_async_digest(struct ahash_request *req)
835{
836 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
837 struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
838 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
839 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
840
841 memcpy(mcryptd_req, req, sizeof(*req));
842 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
843 return crypto_ahash_digest(mcryptd_req);
844}
845
846static int sha256_mb_async_export(struct ahash_request *req, void *out)
847{
848 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
849 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
850 struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
851 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
852
853 memcpy(mcryptd_req, req, sizeof(*req));
854 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
855 return crypto_ahash_export(mcryptd_req, out);
856}
857
858static int sha256_mb_async_import(struct ahash_request *req, const void *in)
859{
860 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
861 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
862 struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
863 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
864 struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
865 struct mcryptd_hash_request_ctx *rctx;
866 struct ahash_request *areq;
867
868 memcpy(mcryptd_req, req, sizeof(*req));
869 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
870 rctx = ahash_request_ctx(mcryptd_req);
871 areq = &rctx->areq;
872
873 ahash_request_set_tfm(areq, child);
874 ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
875 rctx->complete, req);
876
877 return crypto_ahash_import(mcryptd_req, in);
878}
879
880static struct ahash_alg sha256_mb_async_alg = {
881 .init = sha256_mb_async_init,
882 .update = sha256_mb_async_update,
883 .final = sha256_mb_async_final,
884 .finup = sha256_mb_async_finup,
885 .export = sha256_mb_async_export,
886 .import = sha256_mb_async_import,
887 .digest = sha256_mb_async_digest,
888 .halg = {
889 .digestsize = SHA256_DIGEST_SIZE,
890 .statesize = sizeof(struct sha256_hash_ctx),
891 .base = {
892 .cra_name = "sha256",
893 .cra_driver_name = "sha256_mb",
894 .cra_priority = 200,
895 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
896 CRYPTO_ALG_ASYNC,
897 .cra_blocksize = SHA256_BLOCK_SIZE,
898 .cra_type = &crypto_ahash_type,
899 .cra_module = THIS_MODULE,
900 .cra_list = LIST_HEAD_INIT
901 (sha256_mb_async_alg.halg.base.cra_list),
902 .cra_init = sha256_mb_async_init_tfm,
903 .cra_exit = sha256_mb_async_exit_tfm,
904 .cra_ctxsize = sizeof(struct sha256_mb_ctx),
905 .cra_alignmask = 0,
906 },
907 },
908};
909
910static unsigned long sha256_mb_flusher(struct mcryptd_alg_cstate *cstate)
911{
912 struct mcryptd_hash_request_ctx *rctx;
913 unsigned long cur_time;
914 unsigned long next_flush = 0;
915 struct sha256_hash_ctx *sha_ctx;
916
917
918 cur_time = jiffies;
919
920 while (!list_empty(&cstate->work_list)) {
921 rctx = list_entry(cstate->work_list.next,
922 struct mcryptd_hash_request_ctx, waiter);
923 if (time_before(cur_time, rctx->tag.expire))
924 break;
925 kernel_fpu_begin();
926 sha_ctx = (struct sha256_hash_ctx *)
927 sha256_ctx_mgr_flush(cstate->mgr);
928 kernel_fpu_end();
929 if (!sha_ctx) {
930 pr_err("sha256_mb error: nothing got"
931 " flushed for non-empty list\n");
932 break;
933 }
934 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
935 sha_finish_walk(&rctx, cstate, true);
936 sha_complete_job(rctx, cstate, 0);
937 }
938
939 if (!list_empty(&cstate->work_list)) {
940 rctx = list_entry(cstate->work_list.next,
941 struct mcryptd_hash_request_ctx, waiter);
942 /* get the hash context and then flush time */
943 next_flush = rctx->tag.expire;
944 mcryptd_arm_flusher(cstate, get_delay(next_flush));
945 }
946 return next_flush;
947}
948
949static int __init sha256_mb_mod_init(void)
950{
951
952 int cpu;
953 int err;
954 struct mcryptd_alg_cstate *cpu_state;
955
956 /* check for dependent cpu features */
957 if (!boot_cpu_has(X86_FEATURE_AVX2) ||
958 !boot_cpu_has(X86_FEATURE_BMI2))
959 return -ENODEV;
960
961 /* initialize multibuffer structures */
962 sha256_mb_alg_state.alg_cstate = alloc_percpu
963 (struct mcryptd_alg_cstate);
964
965 sha256_job_mgr_init = sha256_mb_mgr_init_avx2;
966 sha256_job_mgr_submit = sha256_mb_mgr_submit_avx2;
967 sha256_job_mgr_flush = sha256_mb_mgr_flush_avx2;
968 sha256_job_mgr_get_comp_job = sha256_mb_mgr_get_comp_job_avx2;
969
970 if (!sha256_mb_alg_state.alg_cstate)
971 return -ENOMEM;
972 for_each_possible_cpu(cpu) {
973 cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
974 cpu_state->next_flush = 0;
975 cpu_state->next_seq_num = 0;
976 cpu_state->flusher_engaged = false;
977 INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
978 cpu_state->cpu = cpu;
979 cpu_state->alg_state = &sha256_mb_alg_state;
980 cpu_state->mgr = kzalloc(sizeof(struct sha256_ctx_mgr),
981 GFP_KERNEL);
982 if (!cpu_state->mgr)
983 goto err2;
984 sha256_ctx_mgr_init(cpu_state->mgr);
985 INIT_LIST_HEAD(&cpu_state->work_list);
986 spin_lock_init(&cpu_state->work_lock);
987 }
988 sha256_mb_alg_state.flusher = &sha256_mb_flusher;
989
990 err = crypto_register_ahash(&sha256_mb_areq_alg);
991 if (err)
992 goto err2;
993 err = crypto_register_ahash(&sha256_mb_async_alg);
994 if (err)
995 goto err1;
996
997
998 return 0;
999err1:
1000 crypto_unregister_ahash(&sha256_mb_areq_alg);
1001err2:
1002 for_each_possible_cpu(cpu) {
1003 cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
1004 kfree(cpu_state->mgr);
1005 }
1006 free_percpu(sha256_mb_alg_state.alg_cstate);
1007 return -ENODEV;
1008}
1009
1010static void __exit sha256_mb_mod_fini(void)
1011{
1012 int cpu;
1013 struct mcryptd_alg_cstate *cpu_state;
1014
1015 crypto_unregister_ahash(&sha256_mb_async_alg);
1016 crypto_unregister_ahash(&sha256_mb_areq_alg);
1017 for_each_possible_cpu(cpu) {
1018 cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
1019 kfree(cpu_state->mgr);
1020 }
1021 free_percpu(sha256_mb_alg_state.alg_cstate);
1022}
1023
1024module_init(sha256_mb_mod_init);
1025module_exit(sha256_mb_mod_fini);
1026
1027MODULE_LICENSE("GPL");
1028MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, multi buffer accelerated");
1029
1030MODULE_ALIAS_CRYPTO("sha256");
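The inner "__sha256-mb" algorithm is CRYPTO_ALG_INTERNAL and is not meant to be used directly; other kernel code reaches the multibuffer path through the outer "sha256" ahash registered above. A minimal sketch of such a caller using the generic ahash API (the helper name, wait structure and error handling here are illustrative, not part of this patch); because the algorithm is ASYNC, the request may complete later through the callback:

    #include <crypto/hash.h>
    #include <crypto/sha.h>
    #include <linux/completion.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    struct mb_wait {
        struct completion done;
        int err;
    };

    static void mb_req_done(struct crypto_async_request *areq, int err)
    {
        struct mb_wait *wait = areq->data;

        if (err == -EINPROGRESS)
            return;                 /* backlog notification, keep waiting */
        wait->err = err;
        complete(&wait->done);
    }

    /* Hash len bytes at buf (buf must be addressable by a scatterlist). */
    static int sha256_mb_digest_example(const u8 *buf, unsigned int len,
                                        u8 digest[SHA256_DIGEST_SIZE])
    {
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        struct mb_wait wait;
        int ret;

        tfm = crypto_alloc_ahash("sha256", 0, 0);
        if (IS_ERR(tfm))
            return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
            crypto_free_ahash(tfm);
            return -ENOMEM;
        }

        init_completion(&wait.done);
        sg_init_one(&sg, buf, len);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   mb_req_done, &wait);
        ahash_request_set_crypt(req, &sg, digest, len);

        ret = crypto_ahash_digest(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
            wait_for_completion(&wait.done);
            ret = wait.err;
        }

        ahash_request_free(req);
        crypto_free_ahash(tfm);
        return ret;
    }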
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
new file mode 100644
index 000000000000..edd252b73206
--- /dev/null
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
@@ -0,0 +1,136 @@
1/*
2 * Header file for multi buffer SHA256 context
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#ifndef _SHA_MB_CTX_INTERNAL_H
55#define _SHA_MB_CTX_INTERNAL_H
56
57#include "sha256_mb_mgr.h"
58
59#define HASH_UPDATE 0x00
60#define HASH_FIRST 0x01
61#define HASH_LAST 0x02
62#define HASH_ENTIRE 0x03
63#define HASH_DONE 0x04
64#define HASH_FINAL 0x08
65
66#define HASH_CTX_STS_IDLE 0x00
67#define HASH_CTX_STS_PROCESSING 0x01
68#define HASH_CTX_STS_LAST 0x02
69#define HASH_CTX_STS_COMPLETE 0x04
70
71enum hash_ctx_error {
72 HASH_CTX_ERROR_NONE = 0,
73 HASH_CTX_ERROR_INVALID_FLAGS = -1,
74 HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
75 HASH_CTX_ERROR_ALREADY_COMPLETED = -3,
76
77#ifdef HASH_CTX_DEBUG
78 HASH_CTX_ERROR_DEBUG_DIGEST_MISMATCH = -4,
79#endif
80};
81
82
83#define hash_ctx_user_data(ctx) ((ctx)->user_data)
84#define hash_ctx_digest(ctx) ((ctx)->job.result_digest)
85#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
86#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE)
87#define hash_ctx_status(ctx) ((ctx)->status)
88#define hash_ctx_error(ctx) ((ctx)->error)
89#define hash_ctx_init(ctx) \
90 do { \
91 (ctx)->error = HASH_CTX_ERROR_NONE; \
92 (ctx)->status = HASH_CTX_STS_COMPLETE; \
93 } while (0)
94
95
96/* Hash Constants and Typedefs */
97#define SHA256_DIGEST_LENGTH 8
98#define SHA256_LOG2_BLOCK_SIZE 6
99
100#define SHA256_PADLENGTHFIELD_SIZE 8
101
102#ifdef SHA_MB_DEBUG
103#define assert(expr) \
104do { \
105 if (unlikely(!(expr))) { \
106 printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
107 #expr, __FILE__, __func__, __LINE__); \
108 } \
109} while (0)
110#else
111#define assert(expr) do {} while (0)
112#endif
113
114struct sha256_ctx_mgr {
115 struct sha256_mb_mgr mgr;
116};
117
118/* typedef struct sha256_ctx_mgr sha256_ctx_mgr; */
119
120struct sha256_hash_ctx {
121 /* Must be at struct offset 0 */
122 struct job_sha256 job;
123 /* status flag */
124 int status;
125 /* error flag */
126 int error;
127
128 uint32_t total_length;
129 const void *incoming_buffer;
130 uint32_t incoming_buffer_length;
131 uint8_t partial_block_buffer[SHA256_BLOCK_SIZE * 2];
132 uint32_t partial_block_buffer_length;
133 void *user_data;
134};
135
136#endif
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h b/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h
new file mode 100644
index 000000000000..b01ae408c56d
--- /dev/null
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h
@@ -0,0 +1,108 @@
1/*
2 * Header file for multi buffer SHA256 algorithm manager
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53#ifndef __SHA_MB_MGR_H
54#define __SHA_MB_MGR_H
55
56#include <linux/types.h>
57
58#define NUM_SHA256_DIGEST_WORDS 8
59
60enum job_sts { STS_UNKNOWN = 0,
61 STS_BEING_PROCESSED = 1,
62 STS_COMPLETED = 2,
63 STS_INTERNAL_ERROR = 3,
64 STS_ERROR = 4
65};
66
67struct job_sha256 {
68 u8 *buffer;
69 u32 len;
70 u32 result_digest[NUM_SHA256_DIGEST_WORDS] __aligned(32);
71 enum job_sts status;
72 void *user_data;
73};
74
75/* SHA256 out-of-order scheduler */
76
77/* typedef uint32_t sha8_digest_array[8][8]; */
78
79struct sha256_args_x8 {
80 uint32_t digest[8][8];
81 uint8_t *data_ptr[8];
82};
83
84struct sha256_lane_data {
85 struct job_sha256 *job_in_lane;
86};
87
88struct sha256_mb_mgr {
89 struct sha256_args_x8 args;
90
91 uint32_t lens[8];
92
93 /* each byte is index (0...7) of unused lanes */
94 uint64_t unused_lanes;
95 /* byte 4 is set to FF as a flag */
96 struct sha256_lane_data ldata[8];
97};
98
99
100#define SHA256_MB_MGR_NUM_LANES_AVX2 8
101
102void sha256_mb_mgr_init_avx2(struct sha256_mb_mgr *state);
103struct job_sha256 *sha256_mb_mgr_submit_avx2(struct sha256_mb_mgr *state,
104 struct job_sha256 *job);
105struct job_sha256 *sha256_mb_mgr_flush_avx2(struct sha256_mb_mgr *state);
106struct job_sha256 *sha256_mb_mgr_get_comp_job_avx2(struct sha256_mb_mgr *state);
107
108#endif
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S
new file mode 100644
index 000000000000..5c377bac21d0
--- /dev/null
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S
@@ -0,0 +1,304 @@
1/*
2 * Header file for multi buffer SHA256 algorithm data structure
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54# Macros for defining data structures
55
56# Usage example
57
58#START_FIELDS # JOB_AES
59### name size align
60#FIELD _plaintext, 8, 8 # pointer to plaintext
61#FIELD _ciphertext, 8, 8 # pointer to ciphertext
62#FIELD _IV, 16, 8 # IV
63#FIELD _keys, 8, 8 # pointer to keys
64#FIELD _len, 4, 4 # length in bytes
65#FIELD _status, 4, 4 # status enumeration
66#FIELD _user_data, 8, 8 # pointer to user data
67#UNION _union, size1, align1, \
68# size2, align2, \
69# size3, align3, \
70# ...
71#END_FIELDS
72#%assign _JOB_AES_size _FIELD_OFFSET
73#%assign _JOB_AES_align _STRUCT_ALIGN
74
75#########################################################################
76
77# Alternate "struc-like" syntax:
78# STRUCT job_aes2
79# RES_Q .plaintext, 1
80# RES_Q .ciphertext, 1
81# RES_DQ .IV, 1
82# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN
83# RES_U .union, size1, align1, \
84# size2, align2, \
85# ...
86# ENDSTRUCT
87# # Following only needed if nesting
88# %assign job_aes2_size _FIELD_OFFSET
89# %assign job_aes2_align _STRUCT_ALIGN
90#
91# RES_* macros take a name, a count and an optional alignment.
92# The count is in terms of the base size of the macro, and the
93# default alignment is the base size.
94# The macros are:
95# Macro Base size
96# RES_B 1
97# RES_W 2
98# RES_D 4
99# RES_Q 8
100# RES_DQ 16
101# RES_Y 32
102# RES_Z 64
103#
104# RES_U defines a union. Its arguments are a name and two or more
105# pairs of "size, alignment"
106#
107# The two assigns are only needed if this structure is being nested
108# within another. Even if the assigns are not done, one can still use
109# STRUCT_NAME_size as the size of the structure.
110#
111# Note that for nesting, you still need to assign to STRUCT_NAME_size.
112#
113# The differences between this and using "struc" directly are that each
114# type is implicitly aligned to its natural length (although this can be
115# over-ridden with an explicit third parameter), and that the structure
116# is padded at the end to its overall alignment.
117#
118
119#########################################################################
120
121#ifndef _DATASTRUCT_ASM_
122#define _DATASTRUCT_ASM_
123
124#define SZ8 8*SHA256_DIGEST_WORD_SIZE
125#define ROUNDS 64*SZ8
126#define PTR_SZ 8
127#define SHA256_DIGEST_WORD_SIZE 4
128#define MAX_SHA256_LANES 8
129#define SHA256_DIGEST_WORDS 8
130#define SHA256_DIGEST_ROW_SIZE (MAX_SHA256_LANES * SHA256_DIGEST_WORD_SIZE)
131#define SHA256_DIGEST_SIZE (SHA256_DIGEST_ROW_SIZE * SHA256_DIGEST_WORDS)
132#define SHA256_BLK_SZ 64
133
134# START_FIELDS
135.macro START_FIELDS
136 _FIELD_OFFSET = 0
137 _STRUCT_ALIGN = 0
138.endm
139
140# FIELD name size align
141.macro FIELD name size align
142 _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
143 \name = _FIELD_OFFSET
144 _FIELD_OFFSET = _FIELD_OFFSET + (\size)
145.if (\align > _STRUCT_ALIGN)
146 _STRUCT_ALIGN = \align
147.endif
148.endm
149
150# END_FIELDS
151.macro END_FIELDS
152 _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
153.endm
154
155########################################################################
156
157.macro STRUCT p1
158START_FIELDS
159.struc \p1
160.endm
161
162.macro ENDSTRUCT
163 tmp = _FIELD_OFFSET
164 END_FIELDS
165 tmp = (_FIELD_OFFSET - %%tmp)
166.if (tmp > 0)
167 .lcomm tmp
168.endif
169.endstruc
170.endm
171
172## RES_int name size align
173.macro RES_int p1 p2 p3
174 name = \p1
175 size = \p2
176 align = .\p3
177
178 _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
179.align align
180.lcomm name size
181 _FIELD_OFFSET = _FIELD_OFFSET + (size)
182.if (align > _STRUCT_ALIGN)
183 _STRUCT_ALIGN = align
184.endif
185.endm
186
187# macro RES_B name, size [, align]
188.macro RES_B _name, _size, _align=1
189RES_int _name _size _align
190.endm
191
192# macro RES_W name, size [, align]
193.macro RES_W _name, _size, _align=2
194RES_int _name 2*(_size) _align
195.endm
196
197# macro RES_D name, size [, align]
198.macro RES_D _name, _size, _align=4
199RES_int _name 4*(_size) _align
200.endm
201
202# macro RES_Q name, size [, align]
203.macro RES_Q _name, _size, _align=8
204RES_int _name 8*(_size) _align
205.endm
206
207# macro RES_DQ name, size [, align]
208.macro RES_DQ _name, _size, _align=16
209RES_int _name 16*(_size) _align
210.endm
211
212# macro RES_Y name, size [, align]
213.macro RES_Y _name, _size, _align=32
214RES_int _name 32*(_size) _align
215.endm
216
217# macro RES_Z name, size [, align]
218.macro RES_Z _name, _size, _align=64
219RES_int _name 64*(_size) _align
220.endm
221
222#endif
223
224
225########################################################################
226#### Define SHA256 Out Of Order Data Structures
227########################################################################
228
229START_FIELDS # LANE_DATA
230### name size align
231FIELD _job_in_lane, 8, 8 # pointer to job object
232END_FIELDS
233
234 _LANE_DATA_size = _FIELD_OFFSET
235 _LANE_DATA_align = _STRUCT_ALIGN
236
237########################################################################
238
239START_FIELDS # SHA256_ARGS_X4
240### name size align
241FIELD _digest, 4*8*8, 4 # transposed digest
242FIELD _data_ptr, 8*8, 8 # array of pointers to data
243END_FIELDS
244
245 _SHA256_ARGS_X4_size = _FIELD_OFFSET
246 _SHA256_ARGS_X4_align = _STRUCT_ALIGN
247 _SHA256_ARGS_X8_size = _FIELD_OFFSET
248 _SHA256_ARGS_X8_align = _STRUCT_ALIGN
249
250#######################################################################
251
252START_FIELDS # MB_MGR
253### name size align
254FIELD _args, _SHA256_ARGS_X4_size, _SHA256_ARGS_X4_align
255FIELD _lens, 4*8, 8
256FIELD _unused_lanes, 8, 8
257FIELD _ldata, _LANE_DATA_size*8, _LANE_DATA_align
258END_FIELDS
259
260 _MB_MGR_size = _FIELD_OFFSET
261 _MB_MGR_align = _STRUCT_ALIGN
262
263_args_digest = _args + _digest
264_args_data_ptr = _args + _data_ptr
265
266#######################################################################
267
268START_FIELDS #STACK_FRAME
269### name size align
270FIELD _data, 16*SZ8, 1 # transposed message schedule (W[0..15])
271FIELD _digest, 8*SZ8, 1 # saved copy of the working digest (a..h)
272FIELD _ytmp, 4*SZ8, 1
273FIELD _rsp, 8, 1
274END_FIELDS
275
276 _STACK_FRAME_size = _FIELD_OFFSET
277 _STACK_FRAME_align = _STRUCT_ALIGN
278
279#######################################################################
280
281########################################################################
282#### Define constants
283########################################################################
284
285#define STS_UNKNOWN 0
286#define STS_BEING_PROCESSED 1
287#define STS_COMPLETED 2
288
289########################################################################
290#### Define JOB_SHA256 structure
291########################################################################
292
293START_FIELDS # JOB_SHA256
294
295### name size align
296FIELD _buffer, 8, 8 # pointer to buffer
297FIELD _len, 8, 8 # length in blocks
298FIELD _result_digest, 8*4, 32 # Digest (output)
299FIELD _status, 4, 4
300FIELD _user_data, 8, 8
301END_FIELDS
302
303 _JOB_SHA256_size = _FIELD_OFFSET
304 _JOB_SHA256_align = _STRUCT_ALIGN
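The FIELD/END_FIELDS macros above only perform align-then-advance arithmetic: round the running offset up to the field's alignment, record it as the field's offset, advance by the field's size, and finally pad the total to the structure alignment. A standalone sketch of that computation for the MB_MGR layout (the helper name is made up; the printed offsets match the symbols the assembler will define):

#include <stdio.h>

static unsigned long off, struct_align;

static unsigned long field(unsigned long size, unsigned long align)
{
	unsigned long this;

	off = (off + align - 1) & ~(align - 1);		/* FIELD: align */
	if (align > struct_align)
		struct_align = align;
	this = off;
	off += size;					/* FIELD: advance */
	return this;
}

int main(void)
{
	printf("_args         = %lu\n", field(4 * 8 * 8 + 8 * 8, 8)); /* digests + data ptrs */
	printf("_lens         = %lu\n", field(4 * 8, 8));
	printf("_unused_lanes = %lu\n", field(8, 8));
	printf("_ldata        = %lu\n", field(8 * 8, 8));	/* 8 lanes of _LANE_DATA */
	/* END_FIELDS: pad the total size to the structure alignment */
	printf("_MB_MGR_size  = %lu\n", (off + struct_align - 1) & ~(struct_align - 1));
	return 0;
}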
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
new file mode 100644
index 000000000000..b691da981cd9
--- /dev/null
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
@@ -0,0 +1,304 @@
1/*
2 * Flush routine for SHA256 multibuffer
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53#include <linux/linkage.h>
54#include <asm/frame.h>
55#include "sha256_mb_mgr_datastruct.S"
56
57.extern sha256_x8_avx2
58
59#LINUX register definitions
60#define arg1 %rdi
61#define arg2 %rsi
62
63# Common register definitions
64#define state arg1
65#define job arg2
66#define len2 arg2
67
68# idx must be a register not clobberred by sha1_mult
69#define idx %r8
70#define DWORD_idx %r8d
71
72#define unused_lanes %rbx
73#define lane_data %rbx
74#define tmp2 %rbx
75#define tmp2_w %ebx
76
77#define job_rax %rax
78#define tmp1 %rax
79#define size_offset %rax
80#define tmp %rax
81#define start_offset %rax
82
83#define tmp3 %arg1
84
85#define extra_blocks %arg2
86#define p %arg2
87
88.macro LABEL prefix n
89\prefix\n\():
90.endm
91
92.macro JNE_SKIP i
93jne skip_\i
94.endm
95
96.altmacro
97.macro SET_OFFSET _offset
98offset = \_offset
99.endm
100.noaltmacro
101
102# JOB_SHA256* sha256_mb_mgr_flush_avx2(MB_MGR *state)
103# arg 1 : rcx : state
104ENTRY(sha256_mb_mgr_flush_avx2)
105 FRAME_BEGIN
106 push %rbx
107
108 # If bit (32+3) is set, then all lanes are empty
109 mov _unused_lanes(state), unused_lanes
110 bt $32+3, unused_lanes
111 jc return_null
112
113 # find a lane with a non-null job
114 xor idx, idx
115 offset = (_ldata + 1 * _LANE_DATA_size + _job_in_lane)
116 cmpq $0, offset(state)
117 cmovne one(%rip), idx
118 offset = (_ldata + 2 * _LANE_DATA_size + _job_in_lane)
119 cmpq $0, offset(state)
120 cmovne two(%rip), idx
121 offset = (_ldata + 3 * _LANE_DATA_size + _job_in_lane)
122 cmpq $0, offset(state)
123 cmovne three(%rip), idx
124 offset = (_ldata + 4 * _LANE_DATA_size + _job_in_lane)
125 cmpq $0, offset(state)
126 cmovne four(%rip), idx
127 offset = (_ldata + 5 * _LANE_DATA_size + _job_in_lane)
128 cmpq $0, offset(state)
129 cmovne five(%rip), idx
130 offset = (_ldata + 6 * _LANE_DATA_size + _job_in_lane)
131 cmpq $0, offset(state)
132 cmovne six(%rip), idx
133 offset = (_ldata + 7 * _LANE_DATA_size + _job_in_lane)
134 cmpq $0, offset(state)
135 cmovne seven(%rip), idx
136
137 # copy idx to empty lanes
138copy_lane_data:
139 offset = (_args + _data_ptr)
140 mov offset(state,idx,8), tmp
141
142 I = 0
143.rep 8
144 offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
145 cmpq $0, offset(state)
146.altmacro
147 JNE_SKIP %I
148 offset = (_args + _data_ptr + 8*I)
149 mov tmp, offset(state)
150 offset = (_lens + 4*I)
151 movl $0xFFFFFFFF, offset(state)
152LABEL skip_ %I
153 I = (I+1)
154.noaltmacro
155.endr
156
157 # Find min length
158 vmovdqa _lens+0*16(state), %xmm0
159 vmovdqa _lens+1*16(state), %xmm1
160
161 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
162 vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
163 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
164 vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
165 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
166
167 vmovd %xmm2, DWORD_idx
168 mov idx, len2
169 and $0xF, idx
170 shr $4, len2
171 jz len_is_0
172
173 vpand clear_low_nibble(%rip), %xmm2, %xmm2
174 vpshufd $0, %xmm2, %xmm2
175
176 vpsubd %xmm2, %xmm0, %xmm0
177 vpsubd %xmm2, %xmm1, %xmm1
178
179 vmovdqa %xmm0, _lens+0*16(state)
180 vmovdqa %xmm1, _lens+1*16(state)
181
182 # "state" and "args" are the same address, arg1
183 # len is arg2
184 call sha256_x8_avx2
185 # state and idx are intact
186
187len_is_0:
188 # process completed job "idx"
189 imul $_LANE_DATA_size, idx, lane_data
190 lea _ldata(state, lane_data), lane_data
191
192 mov _job_in_lane(lane_data), job_rax
193 movq $0, _job_in_lane(lane_data)
194 movl $STS_COMPLETED, _status(job_rax)
195 mov _unused_lanes(state), unused_lanes
196 shl $4, unused_lanes
197 or idx, unused_lanes
198
199 mov unused_lanes, _unused_lanes(state)
200 movl $0xFFFFFFFF, _lens(state,idx,4)
201
202 vmovd _args_digest(state , idx, 4) , %xmm0
203 vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
204 vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
205 vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
206 vmovd _args_digest+4*32(state, idx, 4), %xmm1
207 vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
208 vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
209 vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
210
211 vmovdqu %xmm0, _result_digest(job_rax)
212 offset = (_result_digest + 1*16)
213 vmovdqu %xmm1, offset(job_rax)
214
215return:
216 pop %rbx
217 FRAME_END
218 ret
219
220return_null:
221 xor job_rax, job_rax
222 jmp return
223ENDPROC(sha256_mb_mgr_flush_avx2)
224
225##############################################################################
226
227.align 16
228ENTRY(sha256_mb_mgr_get_comp_job_avx2)
229 push %rbx
230
231 ## if bit 32+3 is set, then all lanes are empty
232 mov _unused_lanes(state), unused_lanes
233 bt $(32+3), unused_lanes
234 jc .return_null
235
236 # Find min length
237 vmovdqa _lens(state), %xmm0
238 vmovdqa _lens+1*16(state), %xmm1
239
240 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
241 vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
242 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
243 vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
244 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
245
246 vmovd %xmm2, DWORD_idx
247 test $~0xF, idx
248 jnz .return_null
249
250 # process completed job "idx"
251 imul $_LANE_DATA_size, idx, lane_data
252 lea _ldata(state, lane_data), lane_data
253
254 mov _job_in_lane(lane_data), job_rax
255 movq $0, _job_in_lane(lane_data)
256 movl $STS_COMPLETED, _status(job_rax)
257 mov _unused_lanes(state), unused_lanes
258 shl $4, unused_lanes
259 or idx, unused_lanes
260 mov unused_lanes, _unused_lanes(state)
261
262 movl $0xFFFFFFFF, _lens(state, idx, 4)
263
264 vmovd _args_digest(state, idx, 4), %xmm0
265 vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
266 vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
267 vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
268 movl _args_digest+4*32(state, idx, 4), tmp2_w
269 vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
270 vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
271 vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
272
273 vmovdqu %xmm0, _result_digest(job_rax)
274 movl tmp2_w, _result_digest+1*16(job_rax)
275
276 pop %rbx
277
278 ret
279
280.return_null:
281 xor job_rax, job_rax
282 pop %rbx
283 ret
284ENDPROC(sha256_mb_mgr_get_comp_job_avx2)
285
286.data
287
288.align 16
289clear_low_nibble:
290.octa 0x000000000000000000000000FFFFFFF0
291one:
292.quad 1
293two:
294.quad 2
295three:
296.quad 3
297four:
298.quad 4
299five:
300.quad 5
301six:
302.quad 6
303seven:
304.quad 7
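Both routines above pick the next lane to run with the same trick: each lens[] slot holds the packed value (blocks << 4) | lane, and idle lanes are set to 0xFFFFFFFF, so an unsigned minimum over the eight slots yields both the shortest outstanding length and the lane it belongs to. A scalar C sketch of what the vpminud/vpalignr ladder computes:

#include <stdint.h>

/* scalar equivalent of the vpminud reduction followed by "and $0xF" / "shr $4" */
static void sha256_mb_find_min_lane(const uint32_t lens[8],
				    uint32_t *lane, uint32_t *blocks)
{
	uint32_t min = lens[0];
	int i;

	for (i = 1; i < 8; i++)
		if (lens[i] < min)
			min = lens[i];

	*lane = min & 0xF;	/* which lane holds the shortest job */
	*blocks = min >> 4;	/* how many blocks every active lane can advance */
}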
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c
new file mode 100644
index 000000000000..b0c498371e67
--- /dev/null
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c
@@ -0,0 +1,65 @@
1/*
2 * Initialization code for multi buffer SHA256 algorithm for AVX2
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#include "sha256_mb_mgr.h"
55
56void sha256_mb_mgr_init_avx2(struct sha256_mb_mgr *state)
57{
58 unsigned int j;
59
60 state->unused_lanes = 0xF76543210ULL;
61 for (j = 0; j < 8; j++) {
62 state->lens[j] = 0xFFFFFFFF;
63 state->ldata[j].job_in_lane = NULL;
64 }
65}
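The 0xF76543210 value written above encodes the free lanes as a stack of 4-bit indices with 0xF on top as a sentinel: submit pops the low nibble to claim a lane, completion shifts the freed index back in, and the sentinel's position tells the manager whether every lane is busy or idle. A small sketch of that bookkeeping (the helper names are illustrative):

#include <stdint.h>

#define SHA256_MB_FREE_LANES	0xF76543210ULL	/* as set by sha256_mb_mgr_init_avx2 */

static int lane_pop(uint64_t *unused_lanes)	/* submit: claim the next free lane */
{
	int lane = *unused_lanes & 0xF;

	*unused_lanes >>= 4;
	return lane;
}

static void lane_push(uint64_t *unused_lanes, int lane)	/* completion: return a lane */
{
	*unused_lanes = (*unused_lanes << 4) | lane;
}

static int all_lanes_busy(uint64_t unused_lanes)	/* submit: only the sentinel left */
{
	return unused_lanes == 0xF;
}

static int all_lanes_idle(uint64_t unused_lanes)	/* flush: sentinel back at bit 32+3 */
{
	return (unused_lanes >> (32 + 3)) & 1;
}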
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
new file mode 100644
index 000000000000..7ea670e25acc
--- /dev/null
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
@@ -0,0 +1,215 @@
1/*
2 * Buffer submit code for multi buffer SHA256 algorithm
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#include <linux/linkage.h>
55#include <asm/frame.h>
56#include "sha256_mb_mgr_datastruct.S"
57
58.extern sha256_x8_avx2
59
60# LINUX register definitions
61arg1 = %rdi
62arg2 = %rsi
63size_offset = %rcx
64tmp2 = %rcx
65extra_blocks = %rdx
66
67# Common definitions
68#define state arg1
69#define job %rsi
70#define len2 arg2
71#define p2 arg2
72
73# idx must be a register not clobbered by sha256_x8_avx2
74idx = %r8
75DWORD_idx = %r8d
76last_len = %r8
77
78p = %r11
79start_offset = %r11
80
81unused_lanes = %rbx
82BYTE_unused_lanes = %bl
83
84job_rax = %rax
85len = %rax
86DWORD_len = %eax
87
88lane = %r12
89tmp3 = %r12
90
91tmp = %r9
92DWORD_tmp = %r9d
93
94lane_data = %r10
95
96# JOB* sha256_mb_mgr_submit_avx2(MB_MGR *state, JOB_SHA256 *job)
97# arg 1 : rcx : state
98# arg 2 : rdx : job
99ENTRY(sha256_mb_mgr_submit_avx2)
100 FRAME_BEGIN
101 push %rbx
102 push %r12
103
104 mov _unused_lanes(state), unused_lanes
105 mov unused_lanes, lane
106 and $0xF, lane
107 shr $4, unused_lanes
108 imul $_LANE_DATA_size, lane, lane_data
109 movl $STS_BEING_PROCESSED, _status(job)
110 lea _ldata(state, lane_data), lane_data
111 mov unused_lanes, _unused_lanes(state)
112 movl _len(job), DWORD_len
113
114 mov job, _job_in_lane(lane_data)
115 shl $4, len
116 or lane, len
117
118 movl DWORD_len, _lens(state , lane, 4)
119
120 # Load digest words from result_digest
121 vmovdqu _result_digest(job), %xmm0
122 vmovdqu _result_digest+1*16(job), %xmm1
123 vmovd %xmm0, _args_digest(state, lane, 4)
124 vpextrd $1, %xmm0, _args_digest+1*32(state , lane, 4)
125 vpextrd $2, %xmm0, _args_digest+2*32(state , lane, 4)
126 vpextrd $3, %xmm0, _args_digest+3*32(state , lane, 4)
127 vmovd %xmm1, _args_digest+4*32(state , lane, 4)
128
129 vpextrd $1, %xmm1, _args_digest+5*32(state , lane, 4)
130 vpextrd $2, %xmm1, _args_digest+6*32(state , lane, 4)
131 vpextrd $3, %xmm1, _args_digest+7*32(state , lane, 4)
132
133 mov _buffer(job), p
134 mov p, _args_data_ptr(state, lane, 8)
135
136 cmp $0xF, unused_lanes
137 jne return_null
138
139start_loop:
140 # Find min length
141 vmovdqa _lens(state), %xmm0
142 vmovdqa _lens+1*16(state), %xmm1
143
144 vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
145 vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
146 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
147 vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
148 vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
149
150 vmovd %xmm2, DWORD_idx
151 mov idx, len2
152 and $0xF, idx
153 shr $4, len2
154 jz len_is_0
155
156 vpand clear_low_nibble(%rip), %xmm2, %xmm2
157 vpshufd $0, %xmm2, %xmm2
158
159 vpsubd %xmm2, %xmm0, %xmm0
160 vpsubd %xmm2, %xmm1, %xmm1
161
162 vmovdqa %xmm0, _lens + 0*16(state)
163 vmovdqa %xmm1, _lens + 1*16(state)
164
165 # "state" and "args" are the same address, arg1
166 # len is arg2
167 call sha256_x8_avx2
168
169 # state and idx are intact
170
171len_is_0:
172 # process completed job "idx"
173 imul $_LANE_DATA_size, idx, lane_data
174 lea _ldata(state, lane_data), lane_data
175
176 mov _job_in_lane(lane_data), job_rax
177 mov _unused_lanes(state), unused_lanes
178 movq $0, _job_in_lane(lane_data)
179 movl $STS_COMPLETED, _status(job_rax)
180 shl $4, unused_lanes
181 or idx, unused_lanes
182 mov unused_lanes, _unused_lanes(state)
183
184 movl $0xFFFFFFFF, _lens(state,idx,4)
185
186 vmovd _args_digest(state, idx, 4), %xmm0
187 vpinsrd $1, _args_digest+1*32(state , idx, 4), %xmm0, %xmm0
188 vpinsrd $2, _args_digest+2*32(state , idx, 4), %xmm0, %xmm0
189 vpinsrd $3, _args_digest+3*32(state , idx, 4), %xmm0, %xmm0
190 vmovd _args_digest+4*32(state, idx, 4), %xmm1
191
192 vpinsrd $1, _args_digest+5*32(state , idx, 4), %xmm1, %xmm1
193 vpinsrd $2, _args_digest+6*32(state , idx, 4), %xmm1, %xmm1
194 vpinsrd $3, _args_digest+7*32(state , idx, 4), %xmm1, %xmm1
195
196 vmovdqu %xmm0, _result_digest(job_rax)
197 vmovdqu %xmm1, _result_digest+1*16(job_rax)
198
199return:
200 pop %r12
201 pop %rbx
202 FRAME_END
203 ret
204
205return_null:
206 xor job_rax, job_rax
207 jmp return
208
209ENDPROC(sha256_mb_mgr_submit_avx2)
210
211.data
212
213.align 16
214clear_low_nibble:
215 .octa 0x000000000000000000000000FFFFFFF0
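The vmovd/vpextrd scatter above (and the matching gather on completion) exists because _args_digest is stored transposed: digest word w of lane l lives at offset w*32 + l*4, i.e. eight lanes side by side in each 32-byte row, which is exactly the layout sha256_x8_avx2 wants for loading one ymm register per digest word. In C terms the copy is simply:

#include <stdint.h>

/* args_digest[word][lane]: one 8-lane row per digest word, as used by the asm */
static void scatter_digest(uint32_t args_digest[8][8],
			   const uint32_t result_digest[8], int lane)
{
	int w;

	for (w = 0; w < 8; w++)				/* submit: job -> lane */
		args_digest[w][lane] = result_digest[w];
}

static void gather_digest(const uint32_t args_digest[8][8],
			  uint32_t result_digest[8], int lane)
{
	int w;

	for (w = 0; w < 8; w++)				/* completion: lane -> job */
		result_digest[w] = args_digest[w][lane];
}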
diff --git a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S b/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
new file mode 100644
index 000000000000..aa21aea4c722
--- /dev/null
+++ b/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
@@ -0,0 +1,593 @@
1/*
2 * Multi-buffer SHA256 algorithm hash compute routine
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#include <linux/linkage.h>
55#include "sha256_mb_mgr_datastruct.S"
56
57## code to compute oct (eight-lane) SHA256 using AVX2
58## outer calling routine takes care of save and restore of XMM registers
59## Logic designed/laid out by JDG
60
61## Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; %ymm0-15
62## Linux clobbers: rax rbx rcx rdx rsi r9 r10 r11 r12 r13 r14 r15
63## Linux preserves: rdi rbp r8
64##
65## clobbers %ymm0-15
66
67arg1 = %rdi
68arg2 = %rsi
69reg3 = %rcx
70reg4 = %rdx
71
72# Common definitions
73STATE = arg1
74INP_SIZE = arg2
75
76IDX = %rax
77ROUND = %rbx
78TBL = reg3
79
80inp0 = %r9
81inp1 = %r10
82inp2 = %r11
83inp3 = %r12
84inp4 = %r13
85inp5 = %r14
86inp6 = %r15
87inp7 = reg4
88
89a = %ymm0
90b = %ymm1
91c = %ymm2
92d = %ymm3
93e = %ymm4
94f = %ymm5
95g = %ymm6
96h = %ymm7
97
98T1 = %ymm8
99
100a0 = %ymm12
101a1 = %ymm13
102a2 = %ymm14
103TMP = %ymm15
104TMP0 = %ymm6
105TMP1 = %ymm7
106
107TT0 = %ymm8
108TT1 = %ymm9
109TT2 = %ymm10
110TT3 = %ymm11
111TT4 = %ymm12
112TT5 = %ymm13
113TT6 = %ymm14
114TT7 = %ymm15
115
116# Define stack usage
117
118# Assume stack aligned to 32 bytes before call
119# Therefore FRAMESZ mod 32 must be 32-8 = 24
120
121#define FRAMESZ 0x388
122
123#define VMOVPS vmovups
124
125# TRANSPOSE8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1
126# "transpose" data in {r0...r7} using temps {t0...t1}
127# Input looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
128# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
129# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
130# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
131# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
132# r4 = {e7 e6 e5 e4 e3 e2 e1 e0}
133# r5 = {f7 f6 f5 f4 f3 f2 f1 f0}
134# r6 = {g7 g6 g5 g4 g3 g2 g1 g0}
135# r7 = {h7 h6 h5 h4 h3 h2 h1 h0}
136#
137# Output looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
138# r0 = {h0 g0 f0 e0 d0 c0 b0 a0}
139# r1 = {h1 g1 f1 e1 d1 c1 b1 a1}
140# r2 = {h2 g2 f2 e2 d2 c2 b2 a2}
141# r3 = {h3 g3 f3 e3 d3 c3 b3 a3}
142# r4 = {h4 g4 f4 e4 d4 c4 b4 a4}
143# r5 = {h5 g5 f5 e5 d5 c5 b5 a5}
144# r6 = {h6 g6 f6 e6 d6 c6 b6 a6}
145# r7 = {h7 g7 f7 e7 d7 c7 b7 a7}
146#
147
148.macro TRANSPOSE8 r0 r1 r2 r3 r4 r5 r6 r7 t0 t1
149 # process top half (r0..r3) {a...d}
150 vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
151 vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
152 vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
153 vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
154 vshufps $0xDD, \t1, \t0, \r3 # r3 = {d5 c5 b5 a5 d1 c1 b1 a1}
155 vshufps $0x88, \r2, \r0, \r1 # r1 = {d6 c6 b6 a6 d2 c2 b2 a2}
156 vshufps $0xDD, \r2, \r0, \r0 # r0 = {d7 c7 b7 a7 d3 c3 b3 a3}
157 vshufps $0x88, \t1, \t0, \t0 # t0 = {d4 c4 b4 a4 d0 c0 b0 a0}
158
159 # use r2 in place of t0
160 # process bottom half (r4..r7) {e...h}
161 vshufps $0x44, \r5, \r4, \r2 # r2 = {f5 f4 e5 e4 f1 f0 e1 e0}
162 vshufps $0xEE, \r5, \r4, \r4 # r4 = {f7 f6 e7 e6 f3 f2 e3 e2}
163 vshufps $0x44, \r7, \r6, \t1 # t1 = {h5 h4 g5 g4 h1 h0 g1 g0}
164 vshufps $0xEE, \r7, \r6, \r6 # r6 = {h7 h6 g7 g6 h3 h2 g3 g2}
165 vshufps $0xDD, \t1, \r2, \r7 # r7 = {h5 g5 f5 e5 h1 g1 f1 e1}
166 vshufps $0x88, \r6, \r4, \r5 # r5 = {h6 g6 f6 e6 h2 g2 f2 e2}
167 vshufps $0xDD, \r6, \r4, \r4 # r4 = {h7 g7 f7 e7 h3 g3 f3 e3}
168 vshufps $0x88, \t1, \r2, \t1 # t1 = {h4 g4 f4 e4 h0 g0 f0 e0}
169
170 vperm2f128 $0x13, \r1, \r5, \r6 # h6...a6
171 vperm2f128 $0x02, \r1, \r5, \r2 # h2...a2
172 vperm2f128 $0x13, \r3, \r7, \r5 # h5...a5
173 vperm2f128 $0x02, \r3, \r7, \r1 # h1...a1
174 vperm2f128 $0x13, \r0, \r4, \r7 # h7...a7
175 vperm2f128 $0x02, \r0, \r4, \r3 # h3...a3
176 vperm2f128 $0x13, \t0, \t1, \r4 # h4...a4
177 vperm2f128 $0x02, \t0, \t1, \r0 # h0...a0
178
179.endm
180
181.macro ROTATE_ARGS
182TMP_ = h
183h = g
184g = f
185f = e
186e = d
187d = c
188c = b
189b = a
190a = TMP_
191.endm
192
193.macro _PRORD reg imm tmp
194 vpslld $(32-\imm),\reg,\tmp
195 vpsrld $\imm,\reg, \reg
196 vpor \tmp,\reg, \reg
197.endm
198
199# PRORD_nd reg, imm, tmp, src
200.macro _PRORD_nd reg imm tmp src
201 vpslld $(32-\imm), \src, \tmp
202 vpsrld $\imm, \src, \reg
203 vpor \tmp, \reg, \reg
204.endm
205
206# PRORD dst/src, amt
207.macro PRORD reg imm
208 _PRORD \reg,\imm,TMP
209.endm
210
211# PRORD_nd dst, src, amt
212.macro PRORD_nd reg tmp imm
213 _PRORD_nd \reg, \imm, TMP, \tmp
214.endm
215
216# arguments passed implicitly in preprocessor symbols i, a...h
217.macro ROUND_00_15 _T1 i
218 PRORD_nd a0,e,5 # sig1: a0 = (e >> 5)
219
220 vpxor g, f, a2 # ch: a2 = f^g
221 vpand e,a2, a2 # ch: a2 = (f^g)&e
222 vpxor g, a2, a2 # a2 = ch
223
224 PRORD_nd a1,e,25 # sig1: a1 = (e >> 25)
225
226 vmovdqu \_T1,(SZ8*(\i & 0xf))(%rsp)
227 vpaddd (TBL,ROUND,1), \_T1, \_T1 # T1 = W + K
228 vpxor e,a0, a0 # sig1: a0 = e ^ (e >> 5)
229 PRORD a0, 6 # sig1: a0 = (e >> 6) ^ (e >> 11)
230 vpaddd a2, h, h # h = h + ch
231 PRORD_nd a2,a,11 # sig0: a2 = (a >> 11)
232 vpaddd \_T1,h, h # h = h + ch + W + K
233 vpxor a1, a0, a0 # a0 = sigma1
234 PRORD_nd a1,a,22 # sig0: a1 = (a >> 22)
235 vpxor c, a, \_T1 # maj: T1 = a^c
236 add $SZ8, ROUND # ROUND++
237 vpand b, \_T1, \_T1 # maj: T1 = (a^c)&b
238 vpaddd a0, h, h
239 vpaddd h, d, d
240 vpxor a, a2, a2 # sig0: a2 = a ^ (a >> 11)
241 PRORD a2,2 # sig0: a2 = (a >> 2) ^ (a >> 13)
242 vpxor a1, a2, a2 # a2 = sig0
243 vpand c, a, a1 # maj: a1 = a&c
244 vpor \_T1, a1, a1 # a1 = maj
245 vpaddd a1, h, h # h = h + ch + W + K + maj
246 vpaddd a2, h, h # h = h + ch + W + K + maj + sigma0
247 ROTATE_ARGS
248.endm
249
250# arguments passed implicitly in preprocessor symbols i, a...h
251.macro ROUND_16_XX _T1 i
252 vmovdqu (SZ8*((\i-15)&0xf))(%rsp), \_T1
253 vmovdqu (SZ8*((\i-2)&0xf))(%rsp), a1
254 vmovdqu \_T1, a0
255 PRORD \_T1,11
256 vmovdqu a1, a2
257 PRORD a1,2
258 vpxor a0, \_T1, \_T1
259 PRORD \_T1, 7
260 vpxor a2, a1, a1
261 PRORD a1, 17
262 vpsrld $3, a0, a0
263 vpxor a0, \_T1, \_T1
264 vpsrld $10, a2, a2
265 vpxor a2, a1, a1
266 vpaddd (SZ8*((\i-16)&0xf))(%rsp), \_T1, \_T1
267 vpaddd (SZ8*((\i-7)&0xf))(%rsp), a1, a1
268 vpaddd a1, \_T1, \_T1
269
270 ROUND_00_15 \_T1,\i
271.endm
272
273# SHA256_ARGS:
274# UINT128 digest[8]; // transposed digests
275# UINT8 *data_ptr[4];
276
277# void sha256_x8_avx2(SHA256_ARGS *args, UINT64 bytes);
278# arg 1 : STATE : pointer to array of pointers to input data
279# arg 2 : INP_SIZE : size of input in blocks
280 # general registers preserved in outer calling routine
281 # outer calling routine saves all the XMM registers
282 # save rsp, allocate 32-byte aligned for local variables
283ENTRY(sha256_x8_avx2)
284
285 # save callee-saved clobbered registers to comply with C function ABI
286 push %r12
287 push %r13
288 push %r14
289 push %r15
290
291 mov %rsp, IDX
292 sub $FRAMESZ, %rsp
293 and $~0x1F, %rsp
294 mov IDX, _rsp(%rsp)
295
296 # Load the pre-transposed incoming digest.
297 vmovdqu 0*SHA256_DIGEST_ROW_SIZE(STATE),a
298 vmovdqu 1*SHA256_DIGEST_ROW_SIZE(STATE),b
299 vmovdqu 2*SHA256_DIGEST_ROW_SIZE(STATE),c
300 vmovdqu 3*SHA256_DIGEST_ROW_SIZE(STATE),d
301 vmovdqu 4*SHA256_DIGEST_ROW_SIZE(STATE),e
302 vmovdqu 5*SHA256_DIGEST_ROW_SIZE(STATE),f
303 vmovdqu 6*SHA256_DIGEST_ROW_SIZE(STATE),g
304 vmovdqu 7*SHA256_DIGEST_ROW_SIZE(STATE),h
305
306 lea K256_8(%rip),TBL
307
308 # load the address of each of the 4 message lanes
309 # getting ready to transpose input onto stack
310 mov _args_data_ptr+0*PTR_SZ(STATE),inp0
311 mov _args_data_ptr+1*PTR_SZ(STATE),inp1
312 mov _args_data_ptr+2*PTR_SZ(STATE),inp2
313 mov _args_data_ptr+3*PTR_SZ(STATE),inp3
314 mov _args_data_ptr+4*PTR_SZ(STATE),inp4
315 mov _args_data_ptr+5*PTR_SZ(STATE),inp5
316 mov _args_data_ptr+6*PTR_SZ(STATE),inp6
317 mov _args_data_ptr+7*PTR_SZ(STATE),inp7
318
319 xor IDX, IDX
320lloop:
321 xor ROUND, ROUND
322
323 # save old digest
324 vmovdqu a, _digest(%rsp)
325 vmovdqu b, _digest+1*SZ8(%rsp)
326 vmovdqu c, _digest+2*SZ8(%rsp)
327 vmovdqu d, _digest+3*SZ8(%rsp)
328 vmovdqu e, _digest+4*SZ8(%rsp)
329 vmovdqu f, _digest+5*SZ8(%rsp)
330 vmovdqu g, _digest+6*SZ8(%rsp)
331 vmovdqu h, _digest+7*SZ8(%rsp)
332 i = 0
333.rep 2
334 VMOVPS i*32(inp0, IDX), TT0
335 VMOVPS i*32(inp1, IDX), TT1
336 VMOVPS i*32(inp2, IDX), TT2
337 VMOVPS i*32(inp3, IDX), TT3
338 VMOVPS i*32(inp4, IDX), TT4
339 VMOVPS i*32(inp5, IDX), TT5
340 VMOVPS i*32(inp6, IDX), TT6
341 VMOVPS i*32(inp7, IDX), TT7
342 vmovdqu g, _ytmp(%rsp)
343 vmovdqu h, _ytmp+1*SZ8(%rsp)
344 TRANSPOSE8 TT0, TT1, TT2, TT3, TT4, TT5, TT6, TT7, TMP0, TMP1
345 vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP1
346 vmovdqu _ytmp(%rsp), g
347 vpshufb TMP1, TT0, TT0
348 vpshufb TMP1, TT1, TT1
349 vpshufb TMP1, TT2, TT2
350 vpshufb TMP1, TT3, TT3
351 vpshufb TMP1, TT4, TT4
352 vpshufb TMP1, TT5, TT5
353 vpshufb TMP1, TT6, TT6
354 vpshufb TMP1, TT7, TT7
355 vmovdqu _ytmp+1*SZ8(%rsp), h
356 vmovdqu TT4, _ytmp(%rsp)
357 vmovdqu TT5, _ytmp+1*SZ8(%rsp)
358 vmovdqu TT6, _ytmp+2*SZ8(%rsp)
359 vmovdqu TT7, _ytmp+3*SZ8(%rsp)
360 ROUND_00_15 TT0,(i*8+0)
361 vmovdqu _ytmp(%rsp), TT0
362 ROUND_00_15 TT1,(i*8+1)
363 vmovdqu _ytmp+1*SZ8(%rsp), TT1
364 ROUND_00_15 TT2,(i*8+2)
365 vmovdqu _ytmp+2*SZ8(%rsp), TT2
366 ROUND_00_15 TT3,(i*8+3)
367 vmovdqu _ytmp+3*SZ8(%rsp), TT3
368 ROUND_00_15 TT0,(i*8+4)
369 ROUND_00_15 TT1,(i*8+5)
370 ROUND_00_15 TT2,(i*8+6)
371 ROUND_00_15 TT3,(i*8+7)
372 i = (i+1)
373.endr
374 add $64, IDX
375 i = (i*8)
376
377 jmp Lrounds_16_xx
378.align 16
379Lrounds_16_xx:
380.rep 16
381 ROUND_16_XX T1, i
382 i = (i+1)
383.endr
384
385 cmp $ROUNDS,ROUND
386 jb Lrounds_16_xx
387
388 # add old digest
389 vpaddd _digest+0*SZ8(%rsp), a, a
390 vpaddd _digest+1*SZ8(%rsp), b, b
391 vpaddd _digest+2*SZ8(%rsp), c, c
392 vpaddd _digest+3*SZ8(%rsp), d, d
393 vpaddd _digest+4*SZ8(%rsp), e, e
394 vpaddd _digest+5*SZ8(%rsp), f, f
395 vpaddd _digest+6*SZ8(%rsp), g, g
396 vpaddd _digest+7*SZ8(%rsp), h, h
397
398 sub $1, INP_SIZE # unit is blocks
399 jne lloop
400
401 # write back to memory (state object) the transposed digest
402 vmovdqu a, 0*SHA256_DIGEST_ROW_SIZE(STATE)
403 vmovdqu b, 1*SHA256_DIGEST_ROW_SIZE(STATE)
404 vmovdqu c, 2*SHA256_DIGEST_ROW_SIZE(STATE)
405 vmovdqu d, 3*SHA256_DIGEST_ROW_SIZE(STATE)
406 vmovdqu e, 4*SHA256_DIGEST_ROW_SIZE(STATE)
407 vmovdqu f, 5*SHA256_DIGEST_ROW_SIZE(STATE)
408 vmovdqu g, 6*SHA256_DIGEST_ROW_SIZE(STATE)
409 vmovdqu h, 7*SHA256_DIGEST_ROW_SIZE(STATE)
410
411 # update input pointers
412 add IDX, inp0
413 mov inp0, _args_data_ptr+0*8(STATE)
414 add IDX, inp1
415 mov inp1, _args_data_ptr+1*8(STATE)
416 add IDX, inp2
417 mov inp2, _args_data_ptr+2*8(STATE)
418 add IDX, inp3
419 mov inp3, _args_data_ptr+3*8(STATE)
420 add IDX, inp4
421 mov inp4, _args_data_ptr+4*8(STATE)
422 add IDX, inp5
423 mov inp5, _args_data_ptr+5*8(STATE)
424 add IDX, inp6
425 mov inp6, _args_data_ptr+6*8(STATE)
426 add IDX, inp7
427 mov inp7, _args_data_ptr+7*8(STATE)
428
429 # Postamble
430 mov _rsp(%rsp), %rsp
431
432 # restore callee-saved clobbered registers
433 pop %r15
434 pop %r14
435 pop %r13
436 pop %r12
437
438 ret
439ENDPROC(sha256_x8_avx2)
440.data
441.align 64
442K256_8:
443 .octa 0x428a2f98428a2f98428a2f98428a2f98
444 .octa 0x428a2f98428a2f98428a2f98428a2f98
445 .octa 0x71374491713744917137449171374491
446 .octa 0x71374491713744917137449171374491
447 .octa 0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf
448 .octa 0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf
449 .octa 0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5
450 .octa 0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5
451 .octa 0x3956c25b3956c25b3956c25b3956c25b
452 .octa 0x3956c25b3956c25b3956c25b3956c25b
453 .octa 0x59f111f159f111f159f111f159f111f1
454 .octa 0x59f111f159f111f159f111f159f111f1
455 .octa 0x923f82a4923f82a4923f82a4923f82a4
456 .octa 0x923f82a4923f82a4923f82a4923f82a4
457 .octa 0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5
458 .octa 0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5
459 .octa 0xd807aa98d807aa98d807aa98d807aa98
460 .octa 0xd807aa98d807aa98d807aa98d807aa98
461 .octa 0x12835b0112835b0112835b0112835b01
462 .octa 0x12835b0112835b0112835b0112835b01
463 .octa 0x243185be243185be243185be243185be
464 .octa 0x243185be243185be243185be243185be
465 .octa 0x550c7dc3550c7dc3550c7dc3550c7dc3
466 .octa 0x550c7dc3550c7dc3550c7dc3550c7dc3
467 .octa 0x72be5d7472be5d7472be5d7472be5d74
468 .octa 0x72be5d7472be5d7472be5d7472be5d74
469 .octa 0x80deb1fe80deb1fe80deb1fe80deb1fe
470 .octa 0x80deb1fe80deb1fe80deb1fe80deb1fe
471 .octa 0x9bdc06a79bdc06a79bdc06a79bdc06a7
472 .octa 0x9bdc06a79bdc06a79bdc06a79bdc06a7
473 .octa 0xc19bf174c19bf174c19bf174c19bf174
474 .octa 0xc19bf174c19bf174c19bf174c19bf174
475 .octa 0xe49b69c1e49b69c1e49b69c1e49b69c1
476 .octa 0xe49b69c1e49b69c1e49b69c1e49b69c1
477 .octa 0xefbe4786efbe4786efbe4786efbe4786
478 .octa 0xefbe4786efbe4786efbe4786efbe4786
479 .octa 0x0fc19dc60fc19dc60fc19dc60fc19dc6
480 .octa 0x0fc19dc60fc19dc60fc19dc60fc19dc6
481 .octa 0x240ca1cc240ca1cc240ca1cc240ca1cc
482 .octa 0x240ca1cc240ca1cc240ca1cc240ca1cc
483 .octa 0x2de92c6f2de92c6f2de92c6f2de92c6f
484 .octa 0x2de92c6f2de92c6f2de92c6f2de92c6f
485 .octa 0x4a7484aa4a7484aa4a7484aa4a7484aa
486 .octa 0x4a7484aa4a7484aa4a7484aa4a7484aa
487 .octa 0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc
488 .octa 0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc
489 .octa 0x76f988da76f988da76f988da76f988da
490 .octa 0x76f988da76f988da76f988da76f988da
491 .octa 0x983e5152983e5152983e5152983e5152
492 .octa 0x983e5152983e5152983e5152983e5152
493 .octa 0xa831c66da831c66da831c66da831c66d
494 .octa 0xa831c66da831c66da831c66da831c66d
495 .octa 0xb00327c8b00327c8b00327c8b00327c8
496 .octa 0xb00327c8b00327c8b00327c8b00327c8
497 .octa 0xbf597fc7bf597fc7bf597fc7bf597fc7
498 .octa 0xbf597fc7bf597fc7bf597fc7bf597fc7
499 .octa 0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3
500 .octa 0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3
501 .octa 0xd5a79147d5a79147d5a79147d5a79147
502 .octa 0xd5a79147d5a79147d5a79147d5a79147
503 .octa 0x06ca635106ca635106ca635106ca6351
504 .octa 0x06ca635106ca635106ca635106ca6351
505 .octa 0x14292967142929671429296714292967
506 .octa 0x14292967142929671429296714292967
507 .octa 0x27b70a8527b70a8527b70a8527b70a85
508 .octa 0x27b70a8527b70a8527b70a8527b70a85
509 .octa 0x2e1b21382e1b21382e1b21382e1b2138
510 .octa 0x2e1b21382e1b21382e1b21382e1b2138
511 .octa 0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc
512 .octa 0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc
513 .octa 0x53380d1353380d1353380d1353380d13
514 .octa 0x53380d1353380d1353380d1353380d13
515 .octa 0x650a7354650a7354650a7354650a7354
516 .octa 0x650a7354650a7354650a7354650a7354
517 .octa 0x766a0abb766a0abb766a0abb766a0abb
518 .octa 0x766a0abb766a0abb766a0abb766a0abb
519 .octa 0x81c2c92e81c2c92e81c2c92e81c2c92e
520 .octa 0x81c2c92e81c2c92e81c2c92e81c2c92e
521 .octa 0x92722c8592722c8592722c8592722c85
522 .octa 0x92722c8592722c8592722c8592722c85
523 .octa 0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1
524 .octa 0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1
525 .octa 0xa81a664ba81a664ba81a664ba81a664b
526 .octa 0xa81a664ba81a664ba81a664ba81a664b
527 .octa 0xc24b8b70c24b8b70c24b8b70c24b8b70
528 .octa 0xc24b8b70c24b8b70c24b8b70c24b8b70
529 .octa 0xc76c51a3c76c51a3c76c51a3c76c51a3
530 .octa 0xc76c51a3c76c51a3c76c51a3c76c51a3
531 .octa 0xd192e819d192e819d192e819d192e819
532 .octa 0xd192e819d192e819d192e819d192e819
533 .octa 0xd6990624d6990624d6990624d6990624
534 .octa 0xd6990624d6990624d6990624d6990624
535 .octa 0xf40e3585f40e3585f40e3585f40e3585
536 .octa 0xf40e3585f40e3585f40e3585f40e3585
537 .octa 0x106aa070106aa070106aa070106aa070
538 .octa 0x106aa070106aa070106aa070106aa070
539 .octa 0x19a4c11619a4c11619a4c11619a4c116
540 .octa 0x19a4c11619a4c11619a4c11619a4c116
541 .octa 0x1e376c081e376c081e376c081e376c08
542 .octa 0x1e376c081e376c081e376c081e376c08
543 .octa 0x2748774c2748774c2748774c2748774c
544 .octa 0x2748774c2748774c2748774c2748774c
545 .octa 0x34b0bcb534b0bcb534b0bcb534b0bcb5
546 .octa 0x34b0bcb534b0bcb534b0bcb534b0bcb5
547 .octa 0x391c0cb3391c0cb3391c0cb3391c0cb3
548 .octa 0x391c0cb3391c0cb3391c0cb3391c0cb3
549 .octa 0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a
550 .octa 0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a
551 .octa 0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f
552 .octa 0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f
553 .octa 0x682e6ff3682e6ff3682e6ff3682e6ff3
554 .octa 0x682e6ff3682e6ff3682e6ff3682e6ff3
555 .octa 0x748f82ee748f82ee748f82ee748f82ee
556 .octa 0x748f82ee748f82ee748f82ee748f82ee
557 .octa 0x78a5636f78a5636f78a5636f78a5636f
558 .octa 0x78a5636f78a5636f78a5636f78a5636f
559 .octa 0x84c8781484c8781484c8781484c87814
560 .octa 0x84c8781484c8781484c8781484c87814
561 .octa 0x8cc702088cc702088cc702088cc70208
562 .octa 0x8cc702088cc702088cc702088cc70208
563 .octa 0x90befffa90befffa90befffa90befffa
564 .octa 0x90befffa90befffa90befffa90befffa
565 .octa 0xa4506ceba4506ceba4506ceba4506ceb
566 .octa 0xa4506ceba4506ceba4506ceba4506ceb
567 .octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7
568 .octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7
569 .octa 0xc67178f2c67178f2c67178f2c67178f2
570 .octa 0xc67178f2c67178f2c67178f2c67178f2
571PSHUFFLE_BYTE_FLIP_MASK:
572.octa 0x0c0d0e0f08090a0b0405060700010203
573.octa 0x0c0d0e0f08090a0b0405060700010203
574
575.align 64
576.global K256
577K256:
578 .int 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
579 .int 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
580 .int 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
581 .int 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
582 .int 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
583 .int 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
584 .int 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
585 .int 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
586 .int 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
587 .int 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
588 .int 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
589 .int 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
590 .int 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
591 .int 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
592 .int 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
593 .int 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
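Two details of the tables above: K256_8 is just the scalar K256 constants with each 32-bit word replicated eight times (two .octa entries per round), so a single vpaddd adds the same round constant to all eight lanes, and PSHUFFLE_BYTE_FLIP_MASK byte-swaps each 32-bit word so that the little-endian loads become the big-endian message words SHA-256 is defined over. A sketch of both relationships in C:

#include <stdint.h>

extern const uint32_t K256[64];			/* the scalar table at the end of the file */

static void build_k256_x8(uint32_t k256_8[64][8])
{
	int t, lane;

	for (t = 0; t < 64; t++)
		for (lane = 0; lane < 8; lane++)
			k256_8[t][lane] = K256[t];	/* one constant per round, 8 copies */
}

static uint32_t load_be32(const uint8_t *p)	/* effect of the byte-flip shuffle */
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}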
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 3ae0f43ebd37..9e79baf03a4b 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -427,4 +427,14 @@ MODULE_LICENSE("GPL");
427MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");
428
429MODULE_ALIAS_CRYPTO("sha256");
430MODULE_ALIAS_CRYPTO("sha256-ssse3");
431MODULE_ALIAS_CRYPTO("sha256-avx");
432MODULE_ALIAS_CRYPTO("sha256-avx2");
433MODULE_ALIAS_CRYPTO("sha224");
434MODULE_ALIAS_CRYPTO("sha224-ssse3");
435MODULE_ALIAS_CRYPTO("sha224-avx");
436MODULE_ALIAS_CRYPTO("sha224-avx2");
437#ifdef CONFIG_AS_SHA256_NI
438MODULE_ALIAS_CRYPTO("sha256-ni");
439MODULE_ALIAS_CRYPTO("sha224-ni");
440#endif
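The new aliases let the crypto API autoload sha256_ssse3.ko when one of these specific driver names is requested, not only the generic "sha256"/"sha224" names. A hedged sketch of such a request from another kernel module:

#include <linux/err.h>
#include <crypto/hash.h>

/* Illustrative only: ask for the AVX2 driver by name; the new alias makes
 * module autoloading work for this spelling as well. */
static int request_sha256_avx2(void)
{
	struct crypto_shash *tfm = crypto_alloc_shash("sha256-avx2", 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_free_shash(tfm);
	return 0;
}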
diff --git a/arch/x86/crypto/sha512-mb/Makefile b/arch/x86/crypto/sha512-mb/Makefile
new file mode 100644
index 000000000000..0a57e2103980
--- /dev/null
+++ b/arch/x86/crypto/sha512-mb/Makefile
@@ -0,0 +1,11 @@
1#
2# Arch-specific CryptoAPI modules.
3#
4
5avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
6 $(comma)4)$(comma)%ymm2,yes,no)
7ifeq ($(avx2_supported),yes)
8 obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb.o
9 sha512-mb-y := sha512_mb.o sha512_mb_mgr_flush_avx2.o \
10 sha512_mb_mgr_init_avx2.o sha512_mb_mgr_submit_avx2.o sha512_x4_avx2.o
11endif
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c
new file mode 100644
index 000000000000..f4cf5b78fd36
--- /dev/null
+++ b/arch/x86/crypto/sha512-mb/sha512_mb.c
@@ -0,0 +1,1046 @@
1/*
2 * Multi buffer SHA512 algorithm Glue Code
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
55
56#include <crypto/internal/hash.h>
57#include <linux/init.h>
58#include <linux/module.h>
59#include <linux/mm.h>
60#include <linux/cryptohash.h>
61#include <linux/types.h>
62#include <linux/list.h>
63#include <crypto/scatterwalk.h>
64#include <crypto/sha.h>
65#include <crypto/mcryptd.h>
66#include <crypto/crypto_wq.h>
67#include <asm/byteorder.h>
68#include <linux/hardirq.h>
69#include <asm/fpu/api.h>
70#include "sha512_mb_ctx.h"
71
72#define FLUSH_INTERVAL 1000 /* in usec */
73
74static struct mcryptd_alg_state sha512_mb_alg_state;
75
76struct sha512_mb_ctx {
77 struct mcryptd_ahash *mcryptd_tfm;
78};
79
80static inline struct mcryptd_hash_request_ctx
81 *cast_hash_to_mcryptd_ctx(struct sha512_hash_ctx *hash_ctx)
82{
83 struct ahash_request *areq;
84
85 areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
86 return container_of(areq, struct mcryptd_hash_request_ctx, areq);
87}
88
89static inline struct ahash_request
90 *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
91{
92 return container_of((void *) ctx, struct ahash_request, __ctx);
93}
94
95static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
96 struct ahash_request *areq)
97{
98 rctx->flag = HASH_UPDATE;
99}
100
101static asmlinkage void (*sha512_job_mgr_init)(struct sha512_mb_mgr *state);
102static asmlinkage struct job_sha512* (*sha512_job_mgr_submit)
103 (struct sha512_mb_mgr *state,
104 struct job_sha512 *job);
105static asmlinkage struct job_sha512* (*sha512_job_mgr_flush)
106 (struct sha512_mb_mgr *state);
107static asmlinkage struct job_sha512* (*sha512_job_mgr_get_comp_job)
108 (struct sha512_mb_mgr *state);
109
110inline void sha512_init_digest(uint64_t *digest)
111{
112 static const uint64_t initial_digest[SHA512_DIGEST_LENGTH] = {
113 SHA512_H0, SHA512_H1, SHA512_H2,
114 SHA512_H3, SHA512_H4, SHA512_H5,
115 SHA512_H6, SHA512_H7 };
116 memcpy(digest, initial_digest, sizeof(initial_digest));
117}
118
119inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2],
120 uint32_t total_len)
121{
122 uint32_t i = total_len & (SHA512_BLOCK_SIZE - 1);
123
124 memset(&padblock[i], 0, SHA512_BLOCK_SIZE);
125 padblock[i] = 0x80;
126
127 i += ((SHA512_BLOCK_SIZE - 1) &
128 (0 - (total_len + SHA512_PADLENGTHFIELD_SIZE + 1)))
129 + 1 + SHA512_PADLENGTHFIELD_SIZE;
130
131#if SHA512_PADLENGTHFIELD_SIZE == 16
132 *((uint64_t *) &padblock[i - 16]) = 0;
133#endif
134
135 *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);
136
137 /* Number of extra blocks to hash */
138 return i >> SHA512_LOG2_BLOCK_SIZE;
139}
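A standalone check of the padding arithmetic in sha512_pad(), assuming SHA512_BLOCK_SIZE is 128 and a 16-byte length field as in this driver: the leftover bytes, the 0x80 marker and the length field fit in one extra 128-byte block when total_len % 128 <= 111, otherwise two blocks are produced.

#include <assert.h>
#include <stdint.h>

static uint32_t sha512_extra_blocks(uint32_t total_len)	/* mirrors sha512_pad()'s math */
{
	uint32_t i = total_len & (128 - 1);

	i += ((128 - 1) & (0 - (total_len + 16 + 1))) + 1 + 16;
	return i >> 7;					/* number of 128-byte blocks */
}

static void sha512_pad_check(void)
{
	assert(sha512_extra_blocks(200) == 1);	/* 72 leftover bytes  -> one block  */
	assert(sha512_extra_blocks(111) == 1);	/* 111 + 1 + 16 = 128 -> still one  */
	assert(sha512_extra_blocks(112) == 2);	/* 112 + 1 + 16 > 128 -> two blocks */
	assert(sha512_extra_blocks(120) == 2);
}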
140
141static struct sha512_hash_ctx *sha512_ctx_mgr_resubmit
142 (struct sha512_ctx_mgr *mgr, struct sha512_hash_ctx *ctx)
143{
144 while (ctx) {
145 if (ctx->status & HASH_CTX_STS_COMPLETE) {
146 /* Clear PROCESSING bit */
147 ctx->status = HASH_CTX_STS_COMPLETE;
148 return ctx;
149 }
150
151 /*
152 * If the extra blocks are empty, begin hashing what remains
153 * in the user's buffer.
154 */
155 if (ctx->partial_block_buffer_length == 0 &&
156 ctx->incoming_buffer_length) {
157
158 const void *buffer = ctx->incoming_buffer;
159 uint32_t len = ctx->incoming_buffer_length;
160 uint32_t copy_len;
161
162 /*
163 * Only entire blocks can be hashed.
164 * Copy remainder to extra blocks buffer.
165 */
166 copy_len = len & (SHA512_BLOCK_SIZE-1);
167
168 if (copy_len) {
169 len -= copy_len;
170 memcpy(ctx->partial_block_buffer,
171 ((const char *) buffer + len),
172 copy_len);
173 ctx->partial_block_buffer_length = copy_len;
174 }
175
176 ctx->incoming_buffer_length = 0;
177
178 /* len should be a multiple of the block size now */
179 assert((len % SHA512_BLOCK_SIZE) == 0);
180
181 /* Set len to the number of blocks to be hashed */
182 len >>= SHA512_LOG2_BLOCK_SIZE;
183
184 if (len) {
185
186 ctx->job.buffer = (uint8_t *) buffer;
187 ctx->job.len = len;
188 ctx = (struct sha512_hash_ctx *)
189 sha512_job_mgr_submit(&mgr->mgr,
190 &ctx->job);
191 continue;
192 }
193 }
194
195 /*
196 * If the extra blocks are not empty, then we are
197 * either on the last block(s) or we need more
198 * user input before continuing.
199 */
200 if (ctx->status & HASH_CTX_STS_LAST) {
201
202 uint8_t *buf = ctx->partial_block_buffer;
203 uint32_t n_extra_blocks =
204 sha512_pad(buf, ctx->total_length);
205
206 ctx->status = (HASH_CTX_STS_PROCESSING |
207 HASH_CTX_STS_COMPLETE);
208 ctx->job.buffer = buf;
209 ctx->job.len = (uint32_t) n_extra_blocks;
210 ctx = (struct sha512_hash_ctx *)
211 sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
212 continue;
213 }
214
215 if (ctx)
216 ctx->status = HASH_CTX_STS_IDLE;
217 return ctx;
218 }
219
220 return NULL;
221}
222
223static struct sha512_hash_ctx
224 *sha512_ctx_mgr_get_comp_ctx(struct sha512_ctx_mgr *mgr)
225{
226 /*
227 * If get_comp_job returns NULL, there are no jobs complete.
228 * If get_comp_job returns a job, verify that it is safe to return to
229 * the user.
230 * If it is not ready, resubmit the job to finish processing.
231 * If sha512_ctx_mgr_resubmit returned a job, it is ready to be
232 * returned.
233 * Otherwise, all jobs currently being managed by the hash_ctx_mgr
234 * still need processing.
235 */
236 struct sha512_hash_ctx *ctx;
237
238 ctx = (struct sha512_hash_ctx *)
239 sha512_job_mgr_get_comp_job(&mgr->mgr);
240 return sha512_ctx_mgr_resubmit(mgr, ctx);
241}
242
243static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
244{
245 sha512_job_mgr_init(&mgr->mgr);
246}
247
248static struct sha512_hash_ctx
249 *sha512_ctx_mgr_submit(struct sha512_ctx_mgr *mgr,
250 struct sha512_hash_ctx *ctx,
251 const void *buffer,
252 uint32_t len,
253 int flags)
254{
255 if (flags & (~HASH_ENTIRE)) {
256 /*
257 * User should not pass anything other than FIRST, UPDATE, or
258 * LAST
259 */
260 ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
261 return ctx;
262 }
263
264 if (ctx->status & HASH_CTX_STS_PROCESSING) {
265 /* Cannot submit to a currently processing job. */
266 ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
267 return ctx;
268 }
269
270 if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
271 /* Cannot update a finished job. */
272 ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
273 return ctx;
274 }
275
276
277 if (flags & HASH_FIRST) {
278 /* Init digest */
279 sha512_init_digest(ctx->job.result_digest);
280
281 /* Reset byte counter */
282 ctx->total_length = 0;
283
284 /* Clear extra blocks */
285 ctx->partial_block_buffer_length = 0;
286 }
287
288 /*
289 * If we made it here, there were no errors during this call to
290 * submit
291 */
292 ctx->error = HASH_CTX_ERROR_NONE;
293
294 /* Store buffer ptr info from user */
295 ctx->incoming_buffer = buffer;
296 ctx->incoming_buffer_length = len;
297
298 /*
299 * Store the user's request flags and mark this ctx as currently being
300 * processed.
301 */
302 ctx->status = (flags & HASH_LAST) ?
303 (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
304 HASH_CTX_STS_PROCESSING;
305
306 /* Advance byte counter */
307 ctx->total_length += len;
308
309 /*
310 * If there is anything currently buffered in the extra blocks,
311 * append to it until it contains a whole block.
312 * Or if the user's buffer contains less than a whole block,
313 * append as much as possible to the extra block.
314 */
315 if (ctx->partial_block_buffer_length || len < SHA512_BLOCK_SIZE) {
316 /* Compute how many bytes to copy from user buffer into extra
317 * block
318 */
319 uint32_t copy_len = SHA512_BLOCK_SIZE -
320 ctx->partial_block_buffer_length;
321 if (len < copy_len)
322 copy_len = len;
323
324 if (copy_len) {
325 /* Copy and update relevant pointers and counters */
326 memcpy
327 (&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
328 buffer, copy_len);
329
330 ctx->partial_block_buffer_length += copy_len;
331 ctx->incoming_buffer = (const void *)
332 ((const char *)buffer + copy_len);
333 ctx->incoming_buffer_length = len - copy_len;
334 }
335
 336 /* The extra block buffer should never contain more than
 337 * one block of data at this point.
 338 */
339 assert(ctx->partial_block_buffer_length <= SHA512_BLOCK_SIZE);
340
341 /* If the extra block buffer contains exactly 1 block, it can
342 * be hashed.
343 */
344 if (ctx->partial_block_buffer_length >= SHA512_BLOCK_SIZE) {
345 ctx->partial_block_buffer_length = 0;
346
347 ctx->job.buffer = ctx->partial_block_buffer;
348 ctx->job.len = 1;
349 ctx = (struct sha512_hash_ctx *)
350 sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
351 }
352 }
353
354 return sha512_ctx_mgr_resubmit(mgr, ctx);
355}
356
357static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct sha512_ctx_mgr *mgr)
358{
359 struct sha512_hash_ctx *ctx;
360
361 while (1) {
362 ctx = (struct sha512_hash_ctx *)
363 sha512_job_mgr_flush(&mgr->mgr);
364
365 /* If flush returned 0, there are no more jobs in flight. */
366 if (!ctx)
367 return NULL;
368
369 /*
370 * If flush returned a job, resubmit the job to finish
371 * processing.
372 */
373 ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
374
375 /*
376 * If sha512_ctx_mgr_resubmit returned a job, it is ready to
377 * be returned. Otherwise, all jobs currently being managed by
378 * the sha512_ctx_mgr still need processing. Loop.
379 */
380 if (ctx)
381 return ctx;
382 }
383}
384
385static int sha512_mb_init(struct ahash_request *areq)
386{
387 struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
388
389 hash_ctx_init(sctx);
390 sctx->job.result_digest[0] = SHA512_H0;
391 sctx->job.result_digest[1] = SHA512_H1;
392 sctx->job.result_digest[2] = SHA512_H2;
393 sctx->job.result_digest[3] = SHA512_H3;
394 sctx->job.result_digest[4] = SHA512_H4;
395 sctx->job.result_digest[5] = SHA512_H5;
396 sctx->job.result_digest[6] = SHA512_H6;
397 sctx->job.result_digest[7] = SHA512_H7;
398 sctx->total_length = 0;
399 sctx->partial_block_buffer_length = 0;
400 sctx->status = HASH_CTX_STS_IDLE;
401
402 return 0;
403}
404
405static int sha512_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
406{
407 int i;
408 struct sha512_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
409 __be64 *dst = (__be64 *) rctx->out;
410
411 for (i = 0; i < 8; ++i)
412 dst[i] = cpu_to_be64(sctx->job.result_digest[i]);
413
414 return 0;
415}
416
417static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
418 struct mcryptd_alg_cstate *cstate, bool flush)
419{
420 int flag = HASH_UPDATE;
421 int nbytes, err = 0;
422 struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
423 struct sha512_hash_ctx *sha_ctx;
424
425 /* more work ? */
426 while (!(rctx->flag & HASH_DONE)) {
427 nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
428 if (nbytes < 0) {
429 err = nbytes;
430 goto out;
431 }
432 /* check if the walk is done */
433 if (crypto_ahash_walk_last(&rctx->walk)) {
434 rctx->flag |= HASH_DONE;
435 if (rctx->flag & HASH_FINAL)
436 flag |= HASH_LAST;
437
438 }
439 sha_ctx = (struct sha512_hash_ctx *)
440 ahash_request_ctx(&rctx->areq);
441 kernel_fpu_begin();
442 sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx,
443 rctx->walk.data, nbytes, flag);
444 if (!sha_ctx) {
445 if (flush)
446 sha_ctx = sha512_ctx_mgr_flush(cstate->mgr);
447 }
448 kernel_fpu_end();
449 if (sha_ctx)
450 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
451 else {
452 rctx = NULL;
453 goto out;
454 }
455 }
456
457 /* copy the results */
458 if (rctx->flag & HASH_FINAL)
459 sha512_mb_set_results(rctx);
460
461out:
462 *ret_rctx = rctx;
463 return err;
464}
465
466static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
467 struct mcryptd_alg_cstate *cstate,
468 int err)
469{
470 struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
471 struct sha512_hash_ctx *sha_ctx;
472 struct mcryptd_hash_request_ctx *req_ctx;
473 int ret;
474
475 /* remove from work list */
476 spin_lock(&cstate->work_lock);
477 list_del(&rctx->waiter);
478 spin_unlock(&cstate->work_lock);
479
480 if (irqs_disabled())
481 rctx->complete(&req->base, err);
482 else {
483 local_bh_disable();
484 rctx->complete(&req->base, err);
485 local_bh_enable();
486 }
487
488 /* check to see if there are other jobs that are done */
489 sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate->mgr);
490 while (sha_ctx) {
491 req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
492 ret = sha_finish_walk(&req_ctx, cstate, false);
493 if (req_ctx) {
494 spin_lock(&cstate->work_lock);
495 list_del(&req_ctx->waiter);
496 spin_unlock(&cstate->work_lock);
497
498 req = cast_mcryptd_ctx_to_req(req_ctx);
499 if (irqs_disabled())
500 rctx->complete(&req->base, ret);
501 else {
502 local_bh_disable();
503 rctx->complete(&req->base, ret);
504 local_bh_enable();
505 }
506 }
507 sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate->mgr);
508 }
509
510 return 0;
511}
512
513static void sha512_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
514 struct mcryptd_alg_cstate *cstate)
515{
516 unsigned long next_flush;
517 unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
518
519 /* initialize tag */
520 rctx->tag.arrival = jiffies; /* tag the arrival time */
521 rctx->tag.seq_num = cstate->next_seq_num++;
522 next_flush = rctx->tag.arrival + delay;
523 rctx->tag.expire = next_flush;
524
525 spin_lock(&cstate->work_lock);
526 list_add_tail(&rctx->waiter, &cstate->work_list);
527 spin_unlock(&cstate->work_lock);
528
529 mcryptd_arm_flusher(cstate, delay);
530}
531
532static int sha512_mb_update(struct ahash_request *areq)
533{
534 struct mcryptd_hash_request_ctx *rctx =
535 container_of(areq, struct mcryptd_hash_request_ctx,
536 areq);
537 struct mcryptd_alg_cstate *cstate =
538 this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
539
540 struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
541 struct sha512_hash_ctx *sha_ctx;
542 int ret = 0, nbytes;
543
544
545 /* sanity check */
546 if (rctx->tag.cpu != smp_processor_id()) {
547 pr_err("mcryptd error: cpu clash\n");
548 goto done;
549 }
550
551 /* need to init context */
552 req_ctx_init(rctx, areq);
553
554 nbytes = crypto_ahash_walk_first(req, &rctx->walk);
555
556 if (nbytes < 0) {
557 ret = nbytes;
558 goto done;
559 }
560
561 if (crypto_ahash_walk_last(&rctx->walk))
562 rctx->flag |= HASH_DONE;
563
564 /* submit */
565 sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
566 sha512_mb_add_list(rctx, cstate);
567 kernel_fpu_begin();
568 sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
569 nbytes, HASH_UPDATE);
570 kernel_fpu_end();
571
572 /* check if anything is returned */
573 if (!sha_ctx)
574 return -EINPROGRESS;
575
576 if (sha_ctx->error) {
577 ret = sha_ctx->error;
578 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
579 goto done;
580 }
581
582 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
583 ret = sha_finish_walk(&rctx, cstate, false);
584
585 if (!rctx)
586 return -EINPROGRESS;
587done:
588 sha_complete_job(rctx, cstate, ret);
589 return ret;
590}
591
592static int sha512_mb_finup(struct ahash_request *areq)
593{
594 struct mcryptd_hash_request_ctx *rctx =
595 container_of(areq, struct mcryptd_hash_request_ctx,
596 areq);
597 struct mcryptd_alg_cstate *cstate =
598 this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
599
600 struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
601 struct sha512_hash_ctx *sha_ctx;
602 int ret = 0, flag = HASH_UPDATE, nbytes;
603
604 /* sanity check */
605 if (rctx->tag.cpu != smp_processor_id()) {
606 pr_err("mcryptd error: cpu clash\n");
607 goto done;
608 }
609
610 /* need to init context */
611 req_ctx_init(rctx, areq);
612
613 nbytes = crypto_ahash_walk_first(req, &rctx->walk);
614
615 if (nbytes < 0) {
616 ret = nbytes;
617 goto done;
618 }
619
620 if (crypto_ahash_walk_last(&rctx->walk)) {
621 rctx->flag |= HASH_DONE;
622 flag = HASH_LAST;
623 }
624
625 /* submit */
626 rctx->flag |= HASH_FINAL;
627 sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
628 sha512_mb_add_list(rctx, cstate);
629
630 kernel_fpu_begin();
631 sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
632 nbytes, flag);
633 kernel_fpu_end();
634
635 /* check if anything is returned */
636 if (!sha_ctx)
637 return -EINPROGRESS;
638
639 if (sha_ctx->error) {
640 ret = sha_ctx->error;
641 goto done;
642 }
643
644 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
645 ret = sha_finish_walk(&rctx, cstate, false);
646 if (!rctx)
647 return -EINPROGRESS;
648done:
649 sha_complete_job(rctx, cstate, ret);
650 return ret;
651}
652
653static int sha512_mb_final(struct ahash_request *areq)
654{
655 struct mcryptd_hash_request_ctx *rctx =
656 container_of(areq, struct mcryptd_hash_request_ctx,
657 areq);
658 struct mcryptd_alg_cstate *cstate =
659 this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
660
661 struct sha512_hash_ctx *sha_ctx;
662 int ret = 0;
663 u8 data;
664
665 /* sanity check */
666 if (rctx->tag.cpu != smp_processor_id()) {
667 pr_err("mcryptd error: cpu clash\n");
668 goto done;
669 }
670
671 /* need to init context */
672 req_ctx_init(rctx, areq);
673
674 rctx->flag |= HASH_DONE | HASH_FINAL;
675
676 sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
 677 /* submit the final block: HASH_LAST with zero data size */
678 sha512_mb_add_list(rctx, cstate);
679 kernel_fpu_begin();
680 sha_ctx = sha512_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
681 HASH_LAST);
682 kernel_fpu_end();
683
684 /* check if anything is returned */
685 if (!sha_ctx)
686 return -EINPROGRESS;
687
688 if (sha_ctx->error) {
689 ret = sha_ctx->error;
690 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
691 goto done;
692 }
693
694 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
695 ret = sha_finish_walk(&rctx, cstate, false);
696 if (!rctx)
697 return -EINPROGRESS;
698done:
699 sha_complete_job(rctx, cstate, ret);
700 return ret;
701}
702
703static int sha512_mb_export(struct ahash_request *areq, void *out)
704{
705 struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
706
707 memcpy(out, sctx, sizeof(*sctx));
708
709 return 0;
710}
711
712static int sha512_mb_import(struct ahash_request *areq, const void *in)
713{
714 struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
715
716 memcpy(sctx, in, sizeof(*sctx));
717
718 return 0;
719}
720
721static int sha512_mb_async_init_tfm(struct crypto_tfm *tfm)
722{
723 struct mcryptd_ahash *mcryptd_tfm;
724 struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
725 struct mcryptd_hash_ctx *mctx;
726
727 mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha512-mb",
728 CRYPTO_ALG_INTERNAL,
729 CRYPTO_ALG_INTERNAL);
730 if (IS_ERR(mcryptd_tfm))
731 return PTR_ERR(mcryptd_tfm);
732 mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
733 mctx->alg_state = &sha512_mb_alg_state;
734 ctx->mcryptd_tfm = mcryptd_tfm;
735 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
736 sizeof(struct ahash_request) +
737 crypto_ahash_reqsize(&mcryptd_tfm->base));
738
739 return 0;
740}
741
742static void sha512_mb_async_exit_tfm(struct crypto_tfm *tfm)
743{
744 struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
745
746 mcryptd_free_ahash(ctx->mcryptd_tfm);
747}
748
749static int sha512_mb_areq_init_tfm(struct crypto_tfm *tfm)
750{
751 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
752 sizeof(struct ahash_request) +
753 sizeof(struct sha512_hash_ctx));
754
755 return 0;
756}
757
758static void sha512_mb_areq_exit_tfm(struct crypto_tfm *tfm)
759{
760 struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
761
762 mcryptd_free_ahash(ctx->mcryptd_tfm);
763}
764
765static struct ahash_alg sha512_mb_areq_alg = {
766 .init = sha512_mb_init,
767 .update = sha512_mb_update,
768 .final = sha512_mb_final,
769 .finup = sha512_mb_finup,
770 .export = sha512_mb_export,
771 .import = sha512_mb_import,
772 .halg = {
773 .digestsize = SHA512_DIGEST_SIZE,
774 .statesize = sizeof(struct sha512_hash_ctx),
775 .base = {
776 .cra_name = "__sha512-mb",
777 .cra_driver_name = "__intel_sha512-mb",
778 .cra_priority = 100,
 779 /*
 780 * Use the ASYNC flag as some buffers in the multi-buffer
 781 * algorithm may not have been completed before the hashing
 782 * thread sleeps.
 783 */
784 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
785 CRYPTO_ALG_ASYNC |
786 CRYPTO_ALG_INTERNAL,
787 .cra_blocksize = SHA512_BLOCK_SIZE,
788 .cra_module = THIS_MODULE,
789 .cra_list = LIST_HEAD_INIT
790 (sha512_mb_areq_alg.halg.base.cra_list),
791 .cra_init = sha512_mb_areq_init_tfm,
792 .cra_exit = sha512_mb_areq_exit_tfm,
793 .cra_ctxsize = sizeof(struct sha512_hash_ctx),
794 }
795 }
796};
797
798static int sha512_mb_async_init(struct ahash_request *req)
799{
800 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
801 struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
802 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
803 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
804
805 memcpy(mcryptd_req, req, sizeof(*req));
806 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
807 return crypto_ahash_init(mcryptd_req);
808}
809
810static int sha512_mb_async_update(struct ahash_request *req)
811{
812 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
813
814 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
815 struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
816 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
817
818 memcpy(mcryptd_req, req, sizeof(*req));
819 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
820 return crypto_ahash_update(mcryptd_req);
821}
822
823static int sha512_mb_async_finup(struct ahash_request *req)
824{
825 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
826
827 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
828 struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
829 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
830
831 memcpy(mcryptd_req, req, sizeof(*req));
832 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
833 return crypto_ahash_finup(mcryptd_req);
834}
835
836static int sha512_mb_async_final(struct ahash_request *req)
837{
838 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
839
840 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
841 struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
842 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
843
844 memcpy(mcryptd_req, req, sizeof(*req));
845 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
846 return crypto_ahash_final(mcryptd_req);
847}
848
849static int sha512_mb_async_digest(struct ahash_request *req)
850{
851 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
852 struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
853 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
854 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
855
856 memcpy(mcryptd_req, req, sizeof(*req));
857 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
858 return crypto_ahash_digest(mcryptd_req);
859}
860
861static int sha512_mb_async_export(struct ahash_request *req, void *out)
862{
863 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
864 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
865 struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
866 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
867
868 memcpy(mcryptd_req, req, sizeof(*req));
869 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
870 return crypto_ahash_export(mcryptd_req, out);
871}
872
873static int sha512_mb_async_import(struct ahash_request *req, const void *in)
874{
875 struct ahash_request *mcryptd_req = ahash_request_ctx(req);
876 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
877 struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
878 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
879 struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
880 struct mcryptd_hash_request_ctx *rctx;
881 struct ahash_request *areq;
882
883 memcpy(mcryptd_req, req, sizeof(*req));
884 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
885 rctx = ahash_request_ctx(mcryptd_req);
886
887 areq = &rctx->areq;
888
889 ahash_request_set_tfm(areq, child);
890 ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
891 rctx->complete, req);
892
893 return crypto_ahash_import(mcryptd_req, in);
894}
895
896static struct ahash_alg sha512_mb_async_alg = {
897 .init = sha512_mb_async_init,
898 .update = sha512_mb_async_update,
899 .final = sha512_mb_async_final,
900 .finup = sha512_mb_async_finup,
901 .digest = sha512_mb_async_digest,
902 .export = sha512_mb_async_export,
903 .import = sha512_mb_async_import,
904 .halg = {
905 .digestsize = SHA512_DIGEST_SIZE,
906 .statesize = sizeof(struct sha512_hash_ctx),
907 .base = {
908 .cra_name = "sha512",
909 .cra_driver_name = "sha512_mb",
910 .cra_priority = 200,
911 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
912 CRYPTO_ALG_ASYNC,
913 .cra_blocksize = SHA512_BLOCK_SIZE,
914 .cra_type = &crypto_ahash_type,
915 .cra_module = THIS_MODULE,
916 .cra_list = LIST_HEAD_INIT
917 (sha512_mb_async_alg.halg.base.cra_list),
918 .cra_init = sha512_mb_async_init_tfm,
919 .cra_exit = sha512_mb_async_exit_tfm,
920 .cra_ctxsize = sizeof(struct sha512_mb_ctx),
921 .cra_alignmask = 0,
922 },
923 },
924};
925
926static unsigned long sha512_mb_flusher(struct mcryptd_alg_cstate *cstate)
927{
928 struct mcryptd_hash_request_ctx *rctx;
929 unsigned long cur_time;
930 unsigned long next_flush = 0;
931 struct sha512_hash_ctx *sha_ctx;
932
933
934 cur_time = jiffies;
935
936 while (!list_empty(&cstate->work_list)) {
937 rctx = list_entry(cstate->work_list.next,
938 struct mcryptd_hash_request_ctx, waiter);
 939 if (time_before(cur_time, rctx->tag.expire))
940 break;
941 kernel_fpu_begin();
942 sha_ctx = (struct sha512_hash_ctx *)
943 sha512_ctx_mgr_flush(cstate->mgr);
944 kernel_fpu_end();
945 if (!sha_ctx) {
946 pr_err("sha512_mb error: nothing got flushed for"
947 " non-empty list\n");
948 break;
949 }
950 rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
951 sha_finish_walk(&rctx, cstate, true);
952 sha_complete_job(rctx, cstate, 0);
953 }
954
955 if (!list_empty(&cstate->work_list)) {
956 rctx = list_entry(cstate->work_list.next,
957 struct mcryptd_hash_request_ctx, waiter);
 958 /* take the next pending request's expiry time and re-arm the flusher */
959 next_flush = rctx->tag.expire;
960 mcryptd_arm_flusher(cstate, get_delay(next_flush));
961 }
962 return next_flush;
963}
964
965static int __init sha512_mb_mod_init(void)
966{
967
968 int cpu;
969 int err;
970 struct mcryptd_alg_cstate *cpu_state;
971
972 /* check for dependent cpu features */
973 if (!boot_cpu_has(X86_FEATURE_AVX2) ||
974 !boot_cpu_has(X86_FEATURE_BMI2))
975 return -ENODEV;
976
977 /* initialize multibuffer structures */
978 sha512_mb_alg_state.alg_cstate =
979 alloc_percpu(struct mcryptd_alg_cstate);
980
981 sha512_job_mgr_init = sha512_mb_mgr_init_avx2;
982 sha512_job_mgr_submit = sha512_mb_mgr_submit_avx2;
983 sha512_job_mgr_flush = sha512_mb_mgr_flush_avx2;
984 sha512_job_mgr_get_comp_job = sha512_mb_mgr_get_comp_job_avx2;
985
986 if (!sha512_mb_alg_state.alg_cstate)
987 return -ENOMEM;
988 for_each_possible_cpu(cpu) {
989 cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
990 cpu_state->next_flush = 0;
991 cpu_state->next_seq_num = 0;
992 cpu_state->flusher_engaged = false;
993 INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
994 cpu_state->cpu = cpu;
995 cpu_state->alg_state = &sha512_mb_alg_state;
996 cpu_state->mgr = kzalloc(sizeof(struct sha512_ctx_mgr),
997 GFP_KERNEL);
998 if (!cpu_state->mgr)
999 goto err2;
1000 sha512_ctx_mgr_init(cpu_state->mgr);
1001 INIT_LIST_HEAD(&cpu_state->work_list);
1002 spin_lock_init(&cpu_state->work_lock);
1003 }
1004 sha512_mb_alg_state.flusher = &sha512_mb_flusher;
1005
1006 err = crypto_register_ahash(&sha512_mb_areq_alg);
1007 if (err)
1008 goto err2;
1009 err = crypto_register_ahash(&sha512_mb_async_alg);
1010 if (err)
1011 goto err1;
1012
1013
1014 return 0;
1015err1:
1016 crypto_unregister_ahash(&sha512_mb_areq_alg);
1017err2:
1018 for_each_possible_cpu(cpu) {
1019 cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
1020 kfree(cpu_state->mgr);
1021 }
1022 free_percpu(sha512_mb_alg_state.alg_cstate);
1023 return -ENODEV;
1024}
1025
1026static void __exit sha512_mb_mod_fini(void)
1027{
1028 int cpu;
1029 struct mcryptd_alg_cstate *cpu_state;
1030
1031 crypto_unregister_ahash(&sha512_mb_async_alg);
1032 crypto_unregister_ahash(&sha512_mb_areq_alg);
1033 for_each_possible_cpu(cpu) {
1034 cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
1035 kfree(cpu_state->mgr);
1036 }
1037 free_percpu(sha512_mb_alg_state.alg_cstate);
1038}
1039
1040module_init(sha512_mb_mod_init);
1041module_exit(sha512_mb_mod_fini);
1042
1043MODULE_LICENSE("GPL");
1044MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, multi buffer accelerated");
1045
1046MODULE_ALIAS("sha512");
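
For context, this is how a kernel caller would typically reach the algorithm registered above: allocating an ahash transform for "sha512" resolves to the highest-priority provider, which is this driver (cra_priority 200) when the module is loaded and its AVX2/BMI2 requirements are met. The sketch below is a hedged example of the standard asynchronous ahash pattern, not code from this patch; the completion-based wait is the caller's own plumbing.

/*
 * Hedged example of a one-shot SHA-512 digest through the ahash API.
 * Any asynchronous provider (such as sha512_mb) may service it.
 */
#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct sha512_wait {
	struct completion done;
	int err;
};

static void sha512_req_done(struct crypto_async_request *req, int err)
{
	struct sha512_wait *w = req->data;

	if (err == -EINPROGRESS)
		return;		/* backlogged request was accepted; keep waiting */
	w->err = err;
	complete(&w->done);
}

static int sha512_digest_example(const void *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct sha512_wait wait = { .err = 0 };
	int ret;

	init_completion(&wait.done);

	tfm = crypto_alloc_ahash("sha512", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   sha512_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		/* multi-buffer algorithms complete from the mcryptd thread */
		wait_for_completion(&wait.done);
		ret = wait.err;
	}

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}
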
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
new file mode 100644
index 000000000000..9d4b2c8208d5
--- /dev/null
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
@@ -0,0 +1,130 @@
1/*
2 * Header file for multi buffer SHA512 context
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#ifndef _SHA_MB_CTX_INTERNAL_H
55#define _SHA_MB_CTX_INTERNAL_H
56
57#include "sha512_mb_mgr.h"
58
59#define HASH_UPDATE 0x00
60#define HASH_FIRST 0x01
61#define HASH_LAST 0x02
62#define HASH_ENTIRE 0x03
63#define HASH_DONE 0x04
64#define HASH_FINAL 0x08
65
66#define HASH_CTX_STS_IDLE 0x00
67#define HASH_CTX_STS_PROCESSING 0x01
68#define HASH_CTX_STS_LAST 0x02
69#define HASH_CTX_STS_COMPLETE 0x04
70
71enum hash_ctx_error {
72 HASH_CTX_ERROR_NONE = 0,
73 HASH_CTX_ERROR_INVALID_FLAGS = -1,
74 HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
75 HASH_CTX_ERROR_ALREADY_COMPLETED = -3,
76};
77
78#define hash_ctx_user_data(ctx) ((ctx)->user_data)
79#define hash_ctx_digest(ctx) ((ctx)->job.result_digest)
80#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
81#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE)
82#define hash_ctx_status(ctx) ((ctx)->status)
83#define hash_ctx_error(ctx) ((ctx)->error)
84#define hash_ctx_init(ctx) \
85 do { \
86 (ctx)->error = HASH_CTX_ERROR_NONE; \
87 (ctx)->status = HASH_CTX_STS_COMPLETE; \
88 } while (0)
89
90/* Hash Constants and Typedefs */
91#define SHA512_DIGEST_LENGTH 8
92#define SHA512_LOG2_BLOCK_SIZE 7
93
94#define SHA512_PADLENGTHFIELD_SIZE 16
95
96#ifdef SHA_MB_DEBUG
97#define assert(expr) \
98do { \
99 if (unlikely(!(expr))) { \
100 printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
101 #expr, __FILE__, __func__, __LINE__); \
102 } \
103} while (0)
104#else
105#define assert(expr) do {} while (0)
106#endif
107
108struct sha512_ctx_mgr {
109 struct sha512_mb_mgr mgr;
110};
111
112/* typedef struct sha512_ctx_mgr sha512_ctx_mgr; */
113
114struct sha512_hash_ctx {
115 /* Must be at struct offset 0 */
116 struct job_sha512 job;
117 /* status flag */
118 int status;
119 /* error flag */
120 int error;
121
122 uint32_t total_length;
123 const void *incoming_buffer;
124 uint32_t incoming_buffer_length;
125 uint8_t partial_block_buffer[SHA512_BLOCK_SIZE * 2];
126 uint32_t partial_block_buffer_length;
127 void *user_data;
128};
129
130#endif
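
These flags and status bits define the contract between sha512_mb.c and the context manager: a stream is fed in with HASH_FIRST, any number of HASH_UPDATE chunks, and a final HASH_LAST (HASH_ENTIRE == HASH_FIRST | HASH_LAST for one-shot use); anything outside that mask trips HASH_CTX_ERROR_INVALID_FLAGS, and resubmitting a context still marked HASH_CTX_STS_PROCESSING trips HASH_CTX_ERROR_ALREADY_PROCESSING. A minimal sketch of that calling convention follows; it assumes it sits alongside the static helpers in sha512_mb.c, that the caller already holds FPU context (kernel_fpu_begin()), and it is illustrative rather than part of the patch.

/*
 * Illustrative only: hash one buffer through the context manager.
 * Assumes the surrounding sha512_mb.c scope and an active FPU section.
 */
static void sha512_mb_ctx_sketch(struct sha512_ctx_mgr *mgr,
				 struct sha512_hash_ctx *ctx,
				 const void *data, u32 len)
{
	hash_ctx_init(ctx);		/* error = NONE, status = COMPLETE */

	/* one-shot submission: HASH_ENTIRE == HASH_FIRST | HASH_LAST */
	sha512_ctx_mgr_submit(mgr, ctx, data, len, HASH_ENTIRE);

	/*
	 * With fewer than four streams in flight the job usually stays
	 * queued in its lane; flush until this context reports complete.
	 */
	while (!hash_ctx_complete(ctx))
		if (!sha512_ctx_mgr_flush(mgr))
			break;

	if (hash_ctx_error(ctx))
		pr_warn("sha512-mb sketch: submit error %d\n",
			hash_ctx_error(ctx));
	/* otherwise the digest is in hash_ctx_digest(ctx)[0..7] (native u64) */
}
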
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h b/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h
new file mode 100644
index 000000000000..178f17eef382
--- /dev/null
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h
@@ -0,0 +1,104 @@
1/*
2 * Header file for multi buffer SHA512 algorithm manager
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#ifndef __SHA_MB_MGR_H
55#define __SHA_MB_MGR_H
56
57#include <linux/types.h>
58
59#define NUM_SHA512_DIGEST_WORDS 8
60
61enum job_sts {STS_UNKNOWN = 0,
62 STS_BEING_PROCESSED = 1,
63 STS_COMPLETED = 2,
64 STS_INTERNAL_ERROR = 3,
65 STS_ERROR = 4
66};
67
68struct job_sha512 {
69 u8 *buffer;
70 u64 len;
71 u64 result_digest[NUM_SHA512_DIGEST_WORDS] __aligned(32);
72 enum job_sts status;
73 void *user_data;
74};
75
76struct sha512_args_x4 {
77 uint64_t digest[8][4];
78 uint8_t *data_ptr[4];
79};
80
81struct sha512_lane_data {
82 struct job_sha512 *job_in_lane;
83};
84
85struct sha512_mb_mgr {
86 struct sha512_args_x4 args;
87
88 uint64_t lens[4];
89
 90 /* each byte is the index (0...3) of an unused lane */
91 uint64_t unused_lanes;
92 /* byte 4 is set to FF as a flag */
93 struct sha512_lane_data ldata[4];
94};
95
96#define SHA512_MB_MGR_NUM_LANES_AVX2 4
97
98void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state);
99struct job_sha512 *sha512_mb_mgr_submit_avx2(struct sha512_mb_mgr *state,
100 struct job_sha512 *job);
101struct job_sha512 *sha512_mb_mgr_flush_avx2(struct sha512_mb_mgr *state);
102struct job_sha512 *sha512_mb_mgr_get_comp_job_avx2(struct sha512_mb_mgr *state);
103
104#endif
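
struct sha512_args_x4 keeps the four lanes' digests transposed: digest[w][lane] is word w of that lane's state, so the AVX2 kernel can load the same word of all four lanes into one register, and the manager code addresses single words as _args_digest + w*32 + lane*8. A small sketch, assuming only this header, of how one lane's digest is gathered out of and scattered back into that layout:

/*
 * Sketch: gather/scatter one lane's digest from the transposed
 * layout of struct sha512_args_x4 (digest[word][lane]).
 */
#include <linux/types.h>
#include "sha512_mb_mgr.h"

static void sha512_lane_digest_read(const struct sha512_mb_mgr *state,
				    unsigned int lane, uint64_t out[8])
{
	unsigned int w;

	for (w = 0; w < NUM_SHA512_DIGEST_WORDS; w++)
		out[w] = state->args.digest[w][lane];
}

static void sha512_lane_digest_write(struct sha512_mb_mgr *state,
				     unsigned int lane, const uint64_t in[8])
{
	unsigned int w;

	for (w = 0; w < NUM_SHA512_DIGEST_WORDS; w++)
		state->args.digest[w][lane] = in[w];
}
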
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S
new file mode 100644
index 000000000000..cf2636d4c9ba
--- /dev/null
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S
@@ -0,0 +1,281 @@
1/*
 2 * Header file for multi buffer SHA512 algorithm data structure
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54# Macros for defining data structures
55
56# Usage example
57
58#START_FIELDS # JOB_AES
59### name size align
60#FIELD _plaintext, 8, 8 # pointer to plaintext
61#FIELD _ciphertext, 8, 8 # pointer to ciphertext
62#FIELD _IV, 16, 8 # IV
63#FIELD _keys, 8, 8 # pointer to keys
64#FIELD _len, 4, 4 # length in bytes
65#FIELD _status, 4, 4 # status enumeration
66#FIELD _user_data, 8, 8 # pointer to user data
67#UNION _union, size1, align1, \
68# size2, align2, \
69# size3, align3, \
70# ...
71#END_FIELDS
72#%assign _JOB_AES_size _FIELD_OFFSET
73#%assign _JOB_AES_align _STRUCT_ALIGN
74
75#########################################################################
76
77# Alternate "struc-like" syntax:
78# STRUCT job_aes2
79# RES_Q .plaintext, 1
80# RES_Q .ciphertext, 1
81# RES_DQ .IV, 1
82# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN
83# RES_U .union, size1, align1, \
84# size2, align2, \
85# ...
86# ENDSTRUCT
87# # Following only needed if nesting
88# %assign job_aes2_size _FIELD_OFFSET
89# %assign job_aes2_align _STRUCT_ALIGN
90#
91# RES_* macros take a name, a count and an optional alignment.
 92# The count is in terms of the base size of the macro, and the
93# default alignment is the base size.
94# The macros are:
95# Macro Base size
96# RES_B 1
97# RES_W 2
98# RES_D 4
99# RES_Q 8
100# RES_DQ 16
101# RES_Y 32
102# RES_Z 64
103#
 104# RES_U defines a union. Its arguments are a name and two or more
105# pairs of "size, alignment"
106#
107# The two assigns are only needed if this structure is being nested
108# within another. Even if the assigns are not done, one can still use
109# STRUCT_NAME_size as the size of the structure.
110#
111# Note that for nesting, you still need to assign to STRUCT_NAME_size.
112#
113# The differences between this and using "struc" directly are that each
114# type is implicitly aligned to its natural length (although this can be
 115# overridden with an explicit third parameter), and that the structure
116# is padded at the end to its overall alignment.
117#
118
119#########################################################################
120
121#ifndef _DATASTRUCT_ASM_
122#define _DATASTRUCT_ASM_
123
124#define PTR_SZ 8
125#define SHA512_DIGEST_WORD_SIZE 8
126#define SHA512_MB_MGR_NUM_LANES_AVX2 4
127#define NUM_SHA512_DIGEST_WORDS 8
128#define SZ4 4*SHA512_DIGEST_WORD_SIZE
129#define ROUNDS 80*SZ4
130#define SHA512_DIGEST_ROW_SIZE (SHA512_MB_MGR_NUM_LANES_AVX2 * 8)
131
132# START_FIELDS
133.macro START_FIELDS
134 _FIELD_OFFSET = 0
135 _STRUCT_ALIGN = 0
136.endm
137
138# FIELD name size align
139.macro FIELD name size align
140 _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
141 \name = _FIELD_OFFSET
142 _FIELD_OFFSET = _FIELD_OFFSET + (\size)
143.if (\align > _STRUCT_ALIGN)
144 _STRUCT_ALIGN = \align
145.endif
146.endm
147
148# END_FIELDS
149.macro END_FIELDS
150 _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
151.endm
152
153.macro STRUCT p1
154START_FIELDS
155.struc \p1
156.endm
157
158.macro ENDSTRUCT
159 tmp = _FIELD_OFFSET
160 END_FIELDS
161 tmp = (_FIELD_OFFSET - ##tmp)
162.if (tmp > 0)
163 .lcomm tmp
164.endm
165
166## RES_int name size align
167.macro RES_int p1 p2 p3
168 name = \p1
169 size = \p2
 170 align = \p3
171
172 _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
173.align align
174.lcomm name size
175 _FIELD_OFFSET = _FIELD_OFFSET + (size)
176.if (align > _STRUCT_ALIGN)
177 _STRUCT_ALIGN = align
178.endif
179.endm
180
181# macro RES_B name, size [, align]
182.macro RES_B _name, _size, _align=1
183RES_int _name _size _align
184.endm
185
186# macro RES_W name, size [, align]
187.macro RES_W _name, _size, _align=2
188RES_int _name 2*(_size) _align
189.endm
190
191# macro RES_D name, size [, align]
192.macro RES_D _name, _size, _align=4
193RES_int _name 4*(_size) _align
194.endm
195
196# macro RES_Q name, size [, align]
197.macro RES_Q _name, _size, _align=8
198RES_int _name 8*(_size) _align
199.endm
200
201# macro RES_DQ name, size [, align]
202.macro RES_DQ _name, _size, _align=16
203RES_int _name 16*(_size) _align
204.endm
205
206# macro RES_Y name, size [, align]
207.macro RES_Y _name, _size, _align=32
208RES_int _name 32*(_size) _align
209.endm
210
211# macro RES_Z name, size [, align]
212.macro RES_Z _name, _size, _align=64
213RES_int _name 64*(_size) _align
214.endm
215
216#endif
217
218###################################################################
219### Define SHA512 Out Of Order Data Structures
220###################################################################
221
222START_FIELDS # LANE_DATA
223### name size align
224FIELD _job_in_lane, 8, 8 # pointer to job object
225END_FIELDS
226
227 _LANE_DATA_size = _FIELD_OFFSET
228 _LANE_DATA_align = _STRUCT_ALIGN
229
230####################################################################
231
232START_FIELDS # SHA512_ARGS_X4
233### name size align
234FIELD _digest, 8*8*4, 4 # transposed digest
235FIELD _data_ptr, 8*4, 8 # array of pointers to data
236END_FIELDS
237
238 _SHA512_ARGS_X4_size = _FIELD_OFFSET
239 _SHA512_ARGS_X4_align = _STRUCT_ALIGN
240
241#####################################################################
242
243START_FIELDS # MB_MGR
244### name size align
245FIELD _args, _SHA512_ARGS_X4_size, _SHA512_ARGS_X4_align
246FIELD _lens, 8*4, 8
247FIELD _unused_lanes, 8, 8
248FIELD _ldata, _LANE_DATA_size*4, _LANE_DATA_align
249END_FIELDS
250
251 _MB_MGR_size = _FIELD_OFFSET
252 _MB_MGR_align = _STRUCT_ALIGN
253
254_args_digest = _args + _digest
255_args_data_ptr = _args + _data_ptr
256
257#######################################################################
258
259#######################################################################
260#### Define constants
261#######################################################################
262
263#define STS_UNKNOWN 0
264#define STS_BEING_PROCESSED 1
265#define STS_COMPLETED 2
266
267#######################################################################
268#### Define JOB_SHA512 structure
269#######################################################################
270
271START_FIELDS # JOB_SHA512
272### name size align
273FIELD _buffer, 8, 8 # pointer to buffer
274FIELD _len, 8, 8 # length in bytes
275FIELD _result_digest, 8*8, 32 # Digest (output)
276FIELD _status, 4, 4
277FIELD _user_data, 8, 8
278END_FIELDS
279
280 _JOB_SHA512_size = _FIELD_OFFSET
281 _JOB_SHA512_align = _STRUCT_ALIGN
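
The FIELD/END_FIELDS macros above compute every offset by rounding the running offset up to the field's alignment and remember the largest alignment for the final size. The stand-alone C sketch below (user space, not part of the patch) replays that arithmetic for the JOB_SHA512 layout; the numbers it prints match the layout of struct job_sha512 in sha512_mb_mgr.h.

/* Stand-alone replay of the FIELD/END_FIELDS offset arithmetic. */
#include <stdio.h>

static unsigned long offset, struct_align;

static unsigned long field(unsigned long size, unsigned long align)
{
	unsigned long off;

	/* FIELD: round the running offset up to this field's alignment */
	offset = (offset + align - 1) & ~(align - 1);
	off = offset;
	offset += size;
	if (align > struct_align)
		struct_align = align;
	return off;
}

int main(void)
{
	unsigned long buf  = field(8, 8);	/* _buffer        */
	unsigned long len  = field(8, 8);	/* _len           */
	unsigned long dig  = field(8 * 8, 32);	/* _result_digest */
	unsigned long sts  = field(4, 4);	/* _status        */
	unsigned long priv = field(8, 8);	/* _user_data     */
	/* END_FIELDS: pad the total size to the overall alignment */
	unsigned long size = (offset + struct_align - 1) & ~(struct_align - 1);

	printf("buffer=%lu len=%lu digest=%lu status=%lu user_data=%lu size=%lu\n",
	       buf, len, dig, sts, priv, size);
	/* expected: 0 8 32 96 104 128 */
	return 0;
}
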
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
new file mode 100644
index 000000000000..3ddba19a0db6
--- /dev/null
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
@@ -0,0 +1,291 @@
1/*
2 * Flush routine for SHA512 multibuffer
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#include <linux/linkage.h>
55#include <asm/frame.h>
56#include "sha512_mb_mgr_datastruct.S"
57
58.extern sha512_x4_avx2
59
60# LINUX register definitions
61#define arg1 %rdi
62#define arg2 %rsi
63
64# idx needs to be other than arg1, arg2, rbx, r12
65#define idx %rdx
66
67# Common definitions
68#define state arg1
69#define job arg2
70#define len2 arg2
71
72#define unused_lanes %rbx
73#define lane_data %rbx
74#define tmp2 %rbx
75
76#define job_rax %rax
77#define tmp1 %rax
78#define size_offset %rax
79#define tmp %rax
80#define start_offset %rax
81
82#define tmp3 arg1
83
84#define extra_blocks arg2
85#define p arg2
86
87#define tmp4 %r8
88#define lens0 %r8
89
90#define lens1 %r9
91#define lens2 %r10
92#define lens3 %r11
93
94.macro LABEL prefix n
95\prefix\n\():
96.endm
97
98.macro JNE_SKIP i
99jne skip_\i
100.endm
101
102.altmacro
103.macro SET_OFFSET _offset
104offset = \_offset
105.endm
106.noaltmacro
107
108# JOB* sha512_mb_mgr_flush_avx2(MB_MGR *state)
109# arg 1 : rcx : state
110ENTRY(sha512_mb_mgr_flush_avx2)
111 FRAME_BEGIN
112 push %rbx
113
 114 # If bit (32+7) is set, then all lanes are empty
115 mov _unused_lanes(state), unused_lanes
116 bt $32+7, unused_lanes
117 jc return_null
118
119 # find a lane with a non-null job
120 xor idx, idx
121 offset = (_ldata + 1*_LANE_DATA_size + _job_in_lane)
122 cmpq $0, offset(state)
123 cmovne one(%rip), idx
124 offset = (_ldata + 2*_LANE_DATA_size + _job_in_lane)
125 cmpq $0, offset(state)
126 cmovne two(%rip), idx
127 offset = (_ldata + 3*_LANE_DATA_size + _job_in_lane)
128 cmpq $0, offset(state)
129 cmovne three(%rip), idx
130
131 # copy idx to empty lanes
132copy_lane_data:
133 offset = (_args + _data_ptr)
134 mov offset(state,idx,8), tmp
135
136 I = 0
137.rep 4
138 offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
139 cmpq $0, offset(state)
140.altmacro
141 JNE_SKIP %I
142 offset = (_args + _data_ptr + 8*I)
143 mov tmp, offset(state)
144 offset = (_lens + 8*I +4)
145 movl $0xFFFFFFFF, offset(state)
146LABEL skip_ %I
147 I = (I+1)
148.noaltmacro
149.endr
150
151 # Find min length
152 mov _lens + 0*8(state),lens0
153 mov lens0,idx
154 mov _lens + 1*8(state),lens1
155 cmp idx,lens1
156 cmovb lens1,idx
157 mov _lens + 2*8(state),lens2
158 cmp idx,lens2
159 cmovb lens2,idx
160 mov _lens + 3*8(state),lens3
161 cmp idx,lens3
162 cmovb lens3,idx
163 mov idx,len2
164 and $0xF,idx
165 and $~0xFF,len2
166 jz len_is_0
167
168 sub len2, lens0
169 sub len2, lens1
170 sub len2, lens2
171 sub len2, lens3
172 shr $32,len2
173 mov lens0, _lens + 0*8(state)
174 mov lens1, _lens + 1*8(state)
175 mov lens2, _lens + 2*8(state)
176 mov lens3, _lens + 3*8(state)
177
178 # "state" and "args" are the same address, arg1
179 # len is arg2
180 call sha512_x4_avx2
181 # state and idx are intact
182
183len_is_0:
184 # process completed job "idx"
185 imul $_LANE_DATA_size, idx, lane_data
186 lea _ldata(state, lane_data), lane_data
187
188 mov _job_in_lane(lane_data), job_rax
189 movq $0, _job_in_lane(lane_data)
190 movl $STS_COMPLETED, _status(job_rax)
191 mov _unused_lanes(state), unused_lanes
192 shl $8, unused_lanes
193 or idx, unused_lanes
194 mov unused_lanes, _unused_lanes(state)
195
196 movl $0xFFFFFFFF, _lens+4(state, idx, 8)
197
198 vmovq _args_digest+0*32(state, idx, 8), %xmm0
199 vpinsrq $1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0
200 vmovq _args_digest+2*32(state, idx, 8), %xmm1
201 vpinsrq $1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1
202 vmovq _args_digest+4*32(state, idx, 8), %xmm2
203 vpinsrq $1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2
204 vmovq _args_digest+6*32(state, idx, 8), %xmm3
205 vpinsrq $1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3
206
207 vmovdqu %xmm0, _result_digest(job_rax)
208 vmovdqu %xmm1, _result_digest+1*16(job_rax)
209 vmovdqu %xmm2, _result_digest+2*16(job_rax)
210 vmovdqu %xmm3, _result_digest+3*16(job_rax)
211
212return:
213 pop %rbx
214 FRAME_END
215 ret
216
217return_null:
218 xor job_rax, job_rax
219 jmp return
220ENDPROC(sha512_mb_mgr_flush_avx2)
221.align 16
222
223ENTRY(sha512_mb_mgr_get_comp_job_avx2)
224 push %rbx
225
226 mov _unused_lanes(state), unused_lanes
227 bt $(32+7), unused_lanes
228 jc .return_null
229
230 # Find min length
231 mov _lens(state),lens0
232 mov lens0,idx
233 mov _lens+1*8(state),lens1
234 cmp idx,lens1
235 cmovb lens1,idx
236 mov _lens+2*8(state),lens2
237 cmp idx,lens2
238 cmovb lens2,idx
239 mov _lens+3*8(state),lens3
240 cmp idx,lens3
241 cmovb lens3,idx
242 test $~0xF,idx
243 jnz .return_null
244 and $0xF,idx
245
246 #process completed job "idx"
247 imul $_LANE_DATA_size, idx, lane_data
248 lea _ldata(state, lane_data), lane_data
249
250 mov _job_in_lane(lane_data), job_rax
251 movq $0, _job_in_lane(lane_data)
252 movl $STS_COMPLETED, _status(job_rax)
253 mov _unused_lanes(state), unused_lanes
254 shl $8, unused_lanes
255 or idx, unused_lanes
256 mov unused_lanes, _unused_lanes(state)
257
258 movl $0xFFFFFFFF, _lens+4(state, idx, 8)
259
260 vmovq _args_digest(state, idx, 8), %xmm0
261 vpinsrq $1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0
262 vmovq _args_digest+2*32(state, idx, 8), %xmm1
263 vpinsrq $1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1
264 vmovq _args_digest+4*32(state, idx, 8), %xmm2
265 vpinsrq $1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2
266 vmovq _args_digest+6*32(state, idx, 8), %xmm3
267 vpinsrq $1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3
268
269 vmovdqu %xmm0, _result_digest+0*16(job_rax)
270 vmovdqu %xmm1, _result_digest+1*16(job_rax)
271 vmovdqu %xmm2, _result_digest+2*16(job_rax)
272 vmovdqu %xmm3, _result_digest+3*16(job_rax)
273
274 pop %rbx
275
276 ret
277
278.return_null:
279 xor job_rax, job_rax
280 pop %rbx
281 ret
282ENDPROC(sha512_mb_mgr_get_comp_job_avx2)
283.data
284
285.align 16
286one:
287.quad 1
288two:
289.quad 2
290three:
291.quad 3
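
Both entry points above schedule work with the same "find min length" idiom: each _lens word appears to pack the lane index in its low nibble and the pending block count in the upper 32 bits, so the minimum over the four lanes names both the lane that will finish first and how many blocks every lane can be advanced by one sha512_x4_avx2 call. A plain C restatement of that step, derived from the assembly here rather than from any C source in the patch:

/*
 * Sketch of the "Find min length" step.  lens[i] packs
 * (blocks << 32) | lane_index; in the flush path idle lanes are
 * parked at 0xFFFFFFFF blocks so they never win the comparison.
 */
#include <stdint.h>

struct min_lane {
	unsigned int idx;	/* lane that completes first            */
	uint32_t blocks;	/* blocks to run through sha512_x4_avx2 */
};

static struct min_lane find_min_lane(uint64_t lens[4])
{
	uint64_t min = lens[0];
	struct min_lane r;
	int i;

	for (i = 1; i < 4; i++)
		if (lens[i] < min)
			min = lens[i];

	r.idx = min & 0xF;		/* and $0xF, idx    */
	min &= ~0xFFULL;		/* and $~0xFF, len2 */
	r.blocks = min >> 32;		/* shr $32, len2    */

	if (r.blocks)			/* jz len_is_0      */
		for (i = 0; i < 4; i++)
			lens[i] -= min;	/* credit all lanes */

	return r;
}
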
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
new file mode 100644
index 000000000000..36870b26067a
--- /dev/null
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
@@ -0,0 +1,67 @@
1/*
 2 * Initialization code for multi buffer SHA512 algorithm for AVX2
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#include "sha512_mb_mgr.h"
55
56void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state)
57{
58 unsigned int j;
59
60 state->lens[0] = 0;
61 state->lens[1] = 1;
62 state->lens[2] = 2;
63 state->lens[3] = 3;
64 state->unused_lanes = 0xFF03020100;
65 for (j = 0; j < 4; j++)
66 state->ldata[j].job_in_lane = NULL;
67}
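
The unused_lanes word initialised here behaves like a byte-wide stack of free lane indices with 0xFF as the bottom-of-stack sentinel: the submit path pops the low byte and shifts right, the completion paths shift left and OR the freed index back in, and "all lanes free" shows up as bit 39 once the sentinel is back in byte 4 (the `bt $(32+7)` tests in the flush code). A small self-checking sketch of that encoding, written from the assembly rather than from any shared header:

/* Sketch of the unused_lanes byte-stack encoding (0xFF03020100 when idle). */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static unsigned int lane_pop(uint64_t *unused_lanes)
{
	unsigned int lane = *unused_lanes & 0xFF;	/* movzb %bl, lane */

	*unused_lanes >>= 8;				/* shr $8          */
	return lane;
}

static void lane_push(uint64_t *unused_lanes, unsigned int lane)
{
	*unused_lanes = (*unused_lanes << 8) | lane;	/* shl $8; or idx  */
}

static bool all_lanes_free(uint64_t unused_lanes)
{
	return unused_lanes & (1ULL << 39);		/* bt $(32+7)      */
}

int main(void)
{
	uint64_t ul = 0xFF03020100ULL;		/* state after ..._init_avx2() */

	assert(all_lanes_free(ul));
	assert(lane_pop(&ul) == 0 && lane_pop(&ul) == 1);
	assert(!all_lanes_free(ul));
	lane_push(&ul, 1);
	lane_push(&ul, 0);
	assert(ul == 0xFF03020100ULL && all_lanes_free(ul));
	return 0;
}
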
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
new file mode 100644
index 000000000000..815f07bdd1f8
--- /dev/null
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
@@ -0,0 +1,222 @@
1/*
2 * Buffer submit code for multi buffer SHA512 algorithm
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#include <linux/linkage.h>
55#include <asm/frame.h>
56#include "sha512_mb_mgr_datastruct.S"
57
58.extern sha512_x4_avx2
59
60#define arg1 %rdi
61#define arg2 %rsi
62
63#define idx %rdx
64#define last_len %rdx
65
66#define size_offset %rcx
67#define tmp2 %rcx
68
69# Common definitions
70#define state arg1
71#define job arg2
72#define len2 arg2
73#define p2 arg2
74
75#define p %r11
76#define start_offset %r11
77
78#define unused_lanes %rbx
79
80#define job_rax %rax
81#define len %rax
82
83#define lane %r12
84#define tmp3 %r12
85#define lens3 %r12
86
87#define extra_blocks %r8
88#define lens0 %r8
89
90#define tmp %r9
91#define lens1 %r9
92
93#define lane_data %r10
94#define lens2 %r10
95
96#define DWORD_len %eax
97
98# JOB* sha512_mb_mgr_submit_avx2(MB_MGR *state, JOB *job)
99# arg 1 : rcx : state
100# arg 2 : rdx : job
101ENTRY(sha512_mb_mgr_submit_avx2)
102 FRAME_BEGIN
103 push %rbx
104 push %r12
105
106 mov _unused_lanes(state), unused_lanes
107 movzb %bl,lane
108 shr $8, unused_lanes
109 imul $_LANE_DATA_size, lane,lane_data
110 movl $STS_BEING_PROCESSED, _status(job)
111 lea _ldata(state, lane_data), lane_data
112 mov unused_lanes, _unused_lanes(state)
113 movl _len(job), DWORD_len
114
115 mov job, _job_in_lane(lane_data)
116 movl DWORD_len,_lens+4(state , lane, 8)
117
118 # Load digest words from result_digest
119 vmovdqu _result_digest+0*16(job), %xmm0
120 vmovdqu _result_digest+1*16(job), %xmm1
121 vmovdqu _result_digest+2*16(job), %xmm2
122 vmovdqu _result_digest+3*16(job), %xmm3
123
124 vmovq %xmm0, _args_digest(state, lane, 8)
125 vpextrq $1, %xmm0, _args_digest+1*32(state , lane, 8)
126 vmovq %xmm1, _args_digest+2*32(state , lane, 8)
127 vpextrq $1, %xmm1, _args_digest+3*32(state , lane, 8)
128 vmovq %xmm2, _args_digest+4*32(state , lane, 8)
129 vpextrq $1, %xmm2, _args_digest+5*32(state , lane, 8)
130 vmovq %xmm3, _args_digest+6*32(state , lane, 8)
131 vpextrq $1, %xmm3, _args_digest+7*32(state , lane, 8)
132
133 mov _buffer(job), p
134 mov p, _args_data_ptr(state, lane, 8)
135
136 cmp $0xFF, unused_lanes
137 jne return_null
138
139start_loop:
140
141 # Find min length
142 mov _lens+0*8(state),lens0
143 mov lens0,idx
144 mov _lens+1*8(state),lens1
145 cmp idx,lens1
146 cmovb lens1, idx
147 mov _lens+2*8(state),lens2
148 cmp idx,lens2
149 cmovb lens2,idx
150 mov _lens+3*8(state),lens3
151 cmp idx,lens3
152 cmovb lens3,idx
153 mov idx,len2
154 and $0xF,idx
155 and $~0xFF,len2
156 jz len_is_0
157
158 sub len2,lens0
159 sub len2,lens1
160 sub len2,lens2
161 sub len2,lens3
162 shr $32,len2
163 mov lens0, _lens + 0*8(state)
164 mov lens1, _lens + 1*8(state)
165 mov lens2, _lens + 2*8(state)
166 mov lens3, _lens + 3*8(state)
167
168 # "state" and "args" are the same address, arg1
169 # len is arg2
170 call sha512_x4_avx2
171 # state and idx are intact
172
173len_is_0:
174
175 # process completed job "idx"
176 imul $_LANE_DATA_size, idx, lane_data
177 lea _ldata(state, lane_data), lane_data
178
179 mov _job_in_lane(lane_data), job_rax
180 mov _unused_lanes(state), unused_lanes
181 movq $0, _job_in_lane(lane_data)
182 movl $STS_COMPLETED, _status(job_rax)
183 shl $8, unused_lanes
184 or idx, unused_lanes
185 mov unused_lanes, _unused_lanes(state)
186
187 movl $0xFFFFFFFF,_lens+4(state,idx,8)
188 vmovq _args_digest+0*32(state , idx, 8), %xmm0
189 vpinsrq $1, _args_digest+1*32(state , idx, 8), %xmm0, %xmm0
190 vmovq _args_digest+2*32(state , idx, 8), %xmm1
191 vpinsrq $1, _args_digest+3*32(state , idx, 8), %xmm1, %xmm1
192 vmovq _args_digest+4*32(state , idx, 8), %xmm2
193 vpinsrq $1, _args_digest+5*32(state , idx, 8), %xmm2, %xmm2
194 vmovq _args_digest+6*32(state , idx, 8), %xmm3
195 vpinsrq $1, _args_digest+7*32(state , idx, 8), %xmm3, %xmm3
196
197 vmovdqu %xmm0, _result_digest + 0*16(job_rax)
198 vmovdqu %xmm1, _result_digest + 1*16(job_rax)
199 vmovdqu %xmm2, _result_digest + 2*16(job_rax)
200 vmovdqu %xmm3, _result_digest + 3*16(job_rax)
201
202return:
203 pop %r12
204 pop %rbx
205 FRAME_END
206 ret
207
208return_null:
209 xor job_rax, job_rax
210 jmp return
211ENDPROC(sha512_mb_mgr_submit_avx2)
212.data
213
214.align 16
215H0: .int 0x6a09e667
216H1: .int 0xbb67ae85
217H2: .int 0x3c6ef372
218H3: .int 0xa54ff53a
219H4: .int 0x510e527f
220H5: .int 0x9b05688c
221H6: .int 0x1f83d9ab
222H7: .int 0x5be0cd19
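
The submit routine above parks each job in a free lane and only starts hashing once all four lanes are occupied: it scans the per-lane length words, picks the minimum, subtracts that amount from every lane, runs sha512_x4_avx2 for that many blocks, and then copies the finished lane's digest back into its job. As a rough C sketch of just the min-length selection step (the array layout and helper name are illustrative, not the kernel's actual sha512_mb_mgr types):

#include <stdint.h>

#define SHA512_MB_LANES 4

/*
 * Illustrative only: find the lane whose job finishes first.  The assembly
 * packs the lane number into the low nibble of each length word and keeps
 * the block count in the upper 32 bits, so the minimum value identifies
 * both the lane and how many blocks every lane can safely process.
 */
static unsigned int pick_min_lane(const uint64_t lens[SHA512_MB_LANES],
				  uint64_t *blocks_out)
{
	unsigned int i, idx = 0;
	uint64_t min = lens[0];

	for (i = 1; i < SHA512_MB_LANES; i++) {
		if (lens[i] < min) {
			min = lens[i];
			idx = i;
		}
	}

	*blocks_out = min >> 32;	/* whole blocks common to all lanes */
	return idx;			/* the asm instead reads min & 0xf */
}
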
diff --git a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S b/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
new file mode 100644
index 000000000000..31ab1eff6413
--- /dev/null
+++ b/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
@@ -0,0 +1,529 @@
1/*
2 * Multi-buffer SHA512 algorithm hash compute routine
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Intel Corporation.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * Contact Information:
21 * Megha Dey <megha.dey@linux.intel.com>
22 *
23 * BSD LICENSE
24 *
25 * Copyright(c) 2016 Intel Corporation.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 *
31 * * Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * * Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in
35 * the documentation and/or other materials provided with the
36 * distribution.
37 * * Neither the name of Intel Corporation nor the names of its
38 * contributors may be used to endorse or promote products derived
39 * from this software without specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
45 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
46 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
47 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54# code to compute quad SHA512 using AVX2
55# use YMMs to tackle the larger digest size
56# outer calling routine takes care of save and restore of XMM registers
57# Logic designed/laid out by JDG
58
59# Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; ymm0-15
60# Stack must be aligned to 32 bytes before call
61# Linux clobbers: rax rbx rcx rsi r8 r9 r10 r11 r12
62# Linux preserves: rcx rdx rdi rbp r13 r14 r15
63# clobbers ymm0-15
64
65#include <linux/linkage.h>
66#include "sha512_mb_mgr_datastruct.S"
67
68arg1 = %rdi
69arg2 = %rsi
70
71# Common definitions
72STATE = arg1
73INP_SIZE = arg2
74
75IDX = %rax
76ROUND = %rbx
77TBL = %r8
78
79inp0 = %r9
80inp1 = %r10
81inp2 = %r11
82inp3 = %r12
83
84a = %ymm0
85b = %ymm1
86c = %ymm2
87d = %ymm3
88e = %ymm4
89f = %ymm5
90g = %ymm6
91h = %ymm7
92
93a0 = %ymm8
94a1 = %ymm9
95a2 = %ymm10
96
97TT0 = %ymm14
98TT1 = %ymm13
99TT2 = %ymm12
100TT3 = %ymm11
101TT4 = %ymm10
102TT5 = %ymm9
103
104T1 = %ymm14
105TMP = %ymm15
106
107# Define stack usage
108STACK_SPACE1 = SZ4*16 + NUM_SHA512_DIGEST_WORDS*SZ4 + 24
109
110#define VMOVPD vmovupd
111_digest = SZ4*16
112
113# transpose r0, r1, r2, r3, t0, t1
114# "transpose" data in {r0..r3} using temps {t0..t3}
115# Input looks like: {r0 r1 r2 r3}
116# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
117# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
118# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
119# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
120#
121# output looks like: {t0 r1 r0 r3}
122# t0 = {d1 d0 c1 c0 b1 b0 a1 a0}
123# r1 = {d3 d2 c3 c2 b3 b2 a3 a2}
124# r0 = {d5 d4 c5 c4 b5 b4 a5 a4}
125# r3 = {d7 d6 c7 c6 b7 b6 a7 a6}
126
127.macro TRANSPOSE r0 r1 r2 r3 t0 t1
128 vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
129 vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
130 vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
131 vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
132
133 vperm2f128 $0x20, \r2, \r0, \r1 # h6...a6
134 vperm2f128 $0x31, \r2, \r0, \r3 # h2...a2
135 vperm2f128 $0x31, \t1, \t0, \r0 # h5...a5
136 vperm2f128 $0x20, \t1, \t0, \t0 # h1...a1
137.endm
138
139.macro ROTATE_ARGS
140TMP_ = h
141h = g
142g = f
143f = e
144e = d
145d = c
146c = b
147b = a
148a = TMP_
149.endm
150
151# PRORQ reg, imm, tmp
152# packed-rotate-right-double
153# does a rotate by doing two shifts and an or
154.macro _PRORQ reg imm tmp
155 vpsllq $(64-\imm),\reg,\tmp
156 vpsrlq $\imm,\reg, \reg
157 vpor \tmp,\reg, \reg
158.endm
159
160# non-destructive
161# PRORQ_nd reg, imm, tmp, src
162.macro _PRORQ_nd reg imm tmp src
163 vpsllq $(64-\imm), \src, \tmp
164 vpsrlq $\imm, \src, \reg
165 vpor \tmp, \reg, \reg
166.endm
167
168# PRORQ dst/src, amt
169.macro PRORQ reg imm
170 _PRORQ \reg, \imm, TMP
171.endm
172
173# PRORQ_nd dst, src, amt
174.macro PRORQ_nd reg tmp imm
175 _PRORQ_nd \reg, \imm, TMP, \tmp
176.endm
177
178#; arguments passed implicitly in preprocessor symbols i, a...h
179.macro ROUND_00_15 _T1 i
180 PRORQ_nd a0, e, (18-14) # sig1: a0 = (e >> 4)
181
182 vpxor g, f, a2 # ch: a2 = f^g
183 vpand e,a2, a2 # ch: a2 = (f^g)&e
184 vpxor g, a2, a2 # a2 = ch
185
186	PRORQ_nd a1,e,41	# sig1: a1 = (e >> 41)
187
188 offset = SZ4*(\i & 0xf)
189 vmovdqu \_T1,offset(%rsp)
190 vpaddq (TBL,ROUND,1), \_T1, \_T1 # T1 = W + K
191	vpxor	e,a0, a0	# sig1: a0 = e ^ (e >> 4)
192	PRORQ	a0, 14		# sig1: a0 = (e >> 14) ^ (e >> 18)
193	vpaddq	a2, h, h	# h = h + ch
194	PRORQ_nd	a2,a,6	# sig0: a2 = (a >> 6)
195 vpaddq \_T1,h, h # h = h + ch + W + K
196 vpxor a1, a0, a0 # a0 = sigma1
197 vmovdqu a,\_T1
198	PRORQ_nd	a1,a,39	# sig0: a1 = (a >> 39)
199 vpxor c, \_T1, \_T1 # maj: T1 = a^c
200 add $SZ4, ROUND # ROUND++
201 vpand b, \_T1, \_T1 # maj: T1 = (a^c)&b
202 vpaddq a0, h, h
203 vpaddq h, d, d
204	vpxor	a, a2, a2	# sig0: a2 = a ^ (a >> 6)
205	PRORQ	a2,28		# sig0: a2 = (a >> 28) ^ (a >> 34)
206 vpxor a1, a2, a2 # a2 = sig0
207 vpand c, a, a1 # maj: a1 = a&c
208 vpor \_T1, a1, a1 # a1 = maj
209 vpaddq a1, h, h # h = h + ch + W + K + maj
210 vpaddq a2, h, h # h = h + ch + W + K + maj + sigma0
211 ROTATE_ARGS
212.endm
213
214
215#; arguments passed implicitly in preprocessor symbols i, a...h
216.macro ROUND_16_XX _T1 i
217 vmovdqu SZ4*((\i-15)&0xf)(%rsp), \_T1
218 vmovdqu SZ4*((\i-2)&0xf)(%rsp), a1
219 vmovdqu \_T1, a0
220 PRORQ \_T1,7
221 vmovdqu a1, a2
222 PRORQ a1,42
223 vpxor a0, \_T1, \_T1
224 PRORQ \_T1, 1
225 vpxor a2, a1, a1
226 PRORQ a1, 19
227 vpsrlq $7, a0, a0
228 vpxor a0, \_T1, \_T1
229 vpsrlq $6, a2, a2
230 vpxor a2, a1, a1
231 vpaddq SZ4*((\i-16)&0xf)(%rsp), \_T1, \_T1
232 vpaddq SZ4*((\i-7)&0xf)(%rsp), a1, a1
233 vpaddq a1, \_T1, \_T1
234
235 ROUND_00_15 \_T1,\i
236.endm
237
238
239# void sha512_x4_avx2(void *STATE, const int INP_SIZE)
240# arg 1 : STATE : pointer to args (digests and data pointers)
241# arg 2 : INP_SIZE : size of data in blocks (assumed >= 1)
242ENTRY(sha512_x4_avx2)
243 # general registers preserved in outer calling routine
244 # outer calling routine saves all the XMM registers
245 # save callee-saved clobbered registers to comply with C function ABI
246 push %r12
247 push %r13
248 push %r14
249 push %r15
250
251 sub $STACK_SPACE1, %rsp
252
253 # Load the pre-transposed incoming digest.
254 vmovdqu 0*SHA512_DIGEST_ROW_SIZE(STATE),a
255 vmovdqu 1*SHA512_DIGEST_ROW_SIZE(STATE),b
256 vmovdqu 2*SHA512_DIGEST_ROW_SIZE(STATE),c
257 vmovdqu 3*SHA512_DIGEST_ROW_SIZE(STATE),d
258 vmovdqu 4*SHA512_DIGEST_ROW_SIZE(STATE),e
259 vmovdqu 5*SHA512_DIGEST_ROW_SIZE(STATE),f
260 vmovdqu 6*SHA512_DIGEST_ROW_SIZE(STATE),g
261 vmovdqu 7*SHA512_DIGEST_ROW_SIZE(STATE),h
262
263 lea K512_4(%rip),TBL
264
265 # load the address of each of the 4 message lanes
266 # getting ready to transpose input onto stack
267 mov _data_ptr+0*PTR_SZ(STATE),inp0
268 mov _data_ptr+1*PTR_SZ(STATE),inp1
269 mov _data_ptr+2*PTR_SZ(STATE),inp2
270 mov _data_ptr+3*PTR_SZ(STATE),inp3
271
272 xor IDX, IDX
273lloop:
274 xor ROUND, ROUND
275
276 # save old digest
277 vmovdqu a, _digest(%rsp)
278 vmovdqu b, _digest+1*SZ4(%rsp)
279 vmovdqu c, _digest+2*SZ4(%rsp)
280 vmovdqu d, _digest+3*SZ4(%rsp)
281 vmovdqu e, _digest+4*SZ4(%rsp)
282 vmovdqu f, _digest+5*SZ4(%rsp)
283 vmovdqu g, _digest+6*SZ4(%rsp)
284 vmovdqu h, _digest+7*SZ4(%rsp)
285 i = 0
286.rep 4
287 vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP
288 VMOVPD i*32(inp0, IDX), TT2
289 VMOVPD i*32(inp1, IDX), TT1
290 VMOVPD i*32(inp2, IDX), TT4
291 VMOVPD i*32(inp3, IDX), TT3
292 TRANSPOSE TT2, TT1, TT4, TT3, TT0, TT5
293 vpshufb TMP, TT0, TT0
294 vpshufb TMP, TT1, TT1
295 vpshufb TMP, TT2, TT2
296 vpshufb TMP, TT3, TT3
297 ROUND_00_15 TT0,(i*4+0)
298 ROUND_00_15 TT1,(i*4+1)
299 ROUND_00_15 TT2,(i*4+2)
300 ROUND_00_15 TT3,(i*4+3)
301 i = (i+1)
302.endr
303 add $128, IDX
304
305 i = (i*4)
306
307 jmp Lrounds_16_xx
308.align 16
309Lrounds_16_xx:
310.rep 16
311 ROUND_16_XX T1, i
312 i = (i+1)
313.endr
314 cmp $0xa00,ROUND
315 jb Lrounds_16_xx
316
317 # add old digest
318 vpaddq _digest(%rsp), a, a
319 vpaddq _digest+1*SZ4(%rsp), b, b
320 vpaddq _digest+2*SZ4(%rsp), c, c
321 vpaddq _digest+3*SZ4(%rsp), d, d
322 vpaddq _digest+4*SZ4(%rsp), e, e
323 vpaddq _digest+5*SZ4(%rsp), f, f
324 vpaddq _digest+6*SZ4(%rsp), g, g
325 vpaddq _digest+7*SZ4(%rsp), h, h
326
327 sub $1, INP_SIZE # unit is blocks
328 jne lloop
329
330 # write back to memory (state object) the transposed digest
331 vmovdqu a, 0*SHA512_DIGEST_ROW_SIZE(STATE)
332 vmovdqu b, 1*SHA512_DIGEST_ROW_SIZE(STATE)
333 vmovdqu c, 2*SHA512_DIGEST_ROW_SIZE(STATE)
334 vmovdqu d, 3*SHA512_DIGEST_ROW_SIZE(STATE)
335 vmovdqu e, 4*SHA512_DIGEST_ROW_SIZE(STATE)
336 vmovdqu f, 5*SHA512_DIGEST_ROW_SIZE(STATE)
337 vmovdqu g, 6*SHA512_DIGEST_ROW_SIZE(STATE)
338 vmovdqu h, 7*SHA512_DIGEST_ROW_SIZE(STATE)
339
340 # update input data pointers
341 add IDX, inp0
342 mov inp0, _data_ptr+0*PTR_SZ(STATE)
343 add IDX, inp1
344 mov inp1, _data_ptr+1*PTR_SZ(STATE)
345 add IDX, inp2
346 mov inp2, _data_ptr+2*PTR_SZ(STATE)
347 add IDX, inp3
348 mov inp3, _data_ptr+3*PTR_SZ(STATE)
349
350 #;;;;;;;;;;;;;;;
351 #; Postamble
352 add $STACK_SPACE1, %rsp
353 # restore callee-saved clobbered registers
354
355 pop %r15
356 pop %r14
357 pop %r13
358 pop %r12
359
360 # outer calling routine restores XMM and other GP registers
361 ret
362ENDPROC(sha512_x4_avx2)
363
364.data
365.align 64
366K512_4:
367 .octa 0x428a2f98d728ae22428a2f98d728ae22,\
368 0x428a2f98d728ae22428a2f98d728ae22
369 .octa 0x7137449123ef65cd7137449123ef65cd,\
370 0x7137449123ef65cd7137449123ef65cd
371 .octa 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f,\
372 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f
373 .octa 0xe9b5dba58189dbbce9b5dba58189dbbc,\
374 0xe9b5dba58189dbbce9b5dba58189dbbc
375 .octa 0x3956c25bf348b5383956c25bf348b538,\
376 0x3956c25bf348b5383956c25bf348b538
377 .octa 0x59f111f1b605d01959f111f1b605d019,\
378 0x59f111f1b605d01959f111f1b605d019
379 .octa 0x923f82a4af194f9b923f82a4af194f9b,\
380 0x923f82a4af194f9b923f82a4af194f9b
381 .octa 0xab1c5ed5da6d8118ab1c5ed5da6d8118,\
382 0xab1c5ed5da6d8118ab1c5ed5da6d8118
383 .octa 0xd807aa98a3030242d807aa98a3030242,\
384 0xd807aa98a3030242d807aa98a3030242
385 .octa 0x12835b0145706fbe12835b0145706fbe,\
386 0x12835b0145706fbe12835b0145706fbe
387 .octa 0x243185be4ee4b28c243185be4ee4b28c,\
388 0x243185be4ee4b28c243185be4ee4b28c
389 .octa 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2,\
390 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2
391 .octa 0x72be5d74f27b896f72be5d74f27b896f,\
392 0x72be5d74f27b896f72be5d74f27b896f
393 .octa 0x80deb1fe3b1696b180deb1fe3b1696b1,\
394 0x80deb1fe3b1696b180deb1fe3b1696b1
395 .octa 0x9bdc06a725c712359bdc06a725c71235,\
396 0x9bdc06a725c712359bdc06a725c71235
397 .octa 0xc19bf174cf692694c19bf174cf692694,\
398 0xc19bf174cf692694c19bf174cf692694
399 .octa 0xe49b69c19ef14ad2e49b69c19ef14ad2,\
400 0xe49b69c19ef14ad2e49b69c19ef14ad2
401 .octa 0xefbe4786384f25e3efbe4786384f25e3,\
402 0xefbe4786384f25e3efbe4786384f25e3
403 .octa 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5,\
404 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5
405 .octa 0x240ca1cc77ac9c65240ca1cc77ac9c65,\
406 0x240ca1cc77ac9c65240ca1cc77ac9c65
407 .octa 0x2de92c6f592b02752de92c6f592b0275,\
408 0x2de92c6f592b02752de92c6f592b0275
409 .octa 0x4a7484aa6ea6e4834a7484aa6ea6e483,\
410 0x4a7484aa6ea6e4834a7484aa6ea6e483
411 .octa 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4,\
412 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4
413 .octa 0x76f988da831153b576f988da831153b5,\
414 0x76f988da831153b576f988da831153b5
415 .octa 0x983e5152ee66dfab983e5152ee66dfab,\
416 0x983e5152ee66dfab983e5152ee66dfab
417 .octa 0xa831c66d2db43210a831c66d2db43210,\
418 0xa831c66d2db43210a831c66d2db43210
419 .octa 0xb00327c898fb213fb00327c898fb213f,\
420 0xb00327c898fb213fb00327c898fb213f
421 .octa 0xbf597fc7beef0ee4bf597fc7beef0ee4,\
422 0xbf597fc7beef0ee4bf597fc7beef0ee4
423 .octa 0xc6e00bf33da88fc2c6e00bf33da88fc2,\
424 0xc6e00bf33da88fc2c6e00bf33da88fc2
425 .octa 0xd5a79147930aa725d5a79147930aa725,\
426 0xd5a79147930aa725d5a79147930aa725
427 .octa 0x06ca6351e003826f06ca6351e003826f,\
428 0x06ca6351e003826f06ca6351e003826f
429 .octa 0x142929670a0e6e70142929670a0e6e70,\
430 0x142929670a0e6e70142929670a0e6e70
431 .octa 0x27b70a8546d22ffc27b70a8546d22ffc,\
432 0x27b70a8546d22ffc27b70a8546d22ffc
433 .octa 0x2e1b21385c26c9262e1b21385c26c926,\
434 0x2e1b21385c26c9262e1b21385c26c926
435 .octa 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed,\
436 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed
437 .octa 0x53380d139d95b3df53380d139d95b3df,\
438 0x53380d139d95b3df53380d139d95b3df
439 .octa 0x650a73548baf63de650a73548baf63de,\
440 0x650a73548baf63de650a73548baf63de
441 .octa 0x766a0abb3c77b2a8766a0abb3c77b2a8,\
442 0x766a0abb3c77b2a8766a0abb3c77b2a8
443 .octa 0x81c2c92e47edaee681c2c92e47edaee6,\
444 0x81c2c92e47edaee681c2c92e47edaee6
445 .octa 0x92722c851482353b92722c851482353b,\
446 0x92722c851482353b92722c851482353b
447 .octa 0xa2bfe8a14cf10364a2bfe8a14cf10364,\
448 0xa2bfe8a14cf10364a2bfe8a14cf10364
449 .octa 0xa81a664bbc423001a81a664bbc423001,\
450 0xa81a664bbc423001a81a664bbc423001
451 .octa 0xc24b8b70d0f89791c24b8b70d0f89791,\
452 0xc24b8b70d0f89791c24b8b70d0f89791
453 .octa 0xc76c51a30654be30c76c51a30654be30,\
454 0xc76c51a30654be30c76c51a30654be30
455 .octa 0xd192e819d6ef5218d192e819d6ef5218,\
456 0xd192e819d6ef5218d192e819d6ef5218
457 .octa 0xd69906245565a910d69906245565a910,\
458 0xd69906245565a910d69906245565a910
459 .octa 0xf40e35855771202af40e35855771202a,\
460 0xf40e35855771202af40e35855771202a
461 .octa 0x106aa07032bbd1b8106aa07032bbd1b8,\
462 0x106aa07032bbd1b8106aa07032bbd1b8
463 .octa 0x19a4c116b8d2d0c819a4c116b8d2d0c8,\
464 0x19a4c116b8d2d0c819a4c116b8d2d0c8
465 .octa 0x1e376c085141ab531e376c085141ab53,\
466 0x1e376c085141ab531e376c085141ab53
467 .octa 0x2748774cdf8eeb992748774cdf8eeb99,\
468 0x2748774cdf8eeb992748774cdf8eeb99
469 .octa 0x34b0bcb5e19b48a834b0bcb5e19b48a8,\
470 0x34b0bcb5e19b48a834b0bcb5e19b48a8
471 .octa 0x391c0cb3c5c95a63391c0cb3c5c95a63,\
472 0x391c0cb3c5c95a63391c0cb3c5c95a63
473 .octa 0x4ed8aa4ae3418acb4ed8aa4ae3418acb,\
474 0x4ed8aa4ae3418acb4ed8aa4ae3418acb
475 .octa 0x5b9cca4f7763e3735b9cca4f7763e373,\
476 0x5b9cca4f7763e3735b9cca4f7763e373
477 .octa 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3,\
478 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3
479 .octa 0x748f82ee5defb2fc748f82ee5defb2fc,\
480 0x748f82ee5defb2fc748f82ee5defb2fc
481 .octa 0x78a5636f43172f6078a5636f43172f60,\
482 0x78a5636f43172f6078a5636f43172f60
483 .octa 0x84c87814a1f0ab7284c87814a1f0ab72,\
484 0x84c87814a1f0ab7284c87814a1f0ab72
485 .octa 0x8cc702081a6439ec8cc702081a6439ec,\
486 0x8cc702081a6439ec8cc702081a6439ec
487 .octa 0x90befffa23631e2890befffa23631e28,\
488 0x90befffa23631e2890befffa23631e28
489 .octa 0xa4506cebde82bde9a4506cebde82bde9,\
490 0xa4506cebde82bde9a4506cebde82bde9
491 .octa 0xbef9a3f7b2c67915bef9a3f7b2c67915,\
492 0xbef9a3f7b2c67915bef9a3f7b2c67915
493 .octa 0xc67178f2e372532bc67178f2e372532b,\
494 0xc67178f2e372532bc67178f2e372532b
495 .octa 0xca273eceea26619cca273eceea26619c,\
496 0xca273eceea26619cca273eceea26619c
497 .octa 0xd186b8c721c0c207d186b8c721c0c207,\
498 0xd186b8c721c0c207d186b8c721c0c207
499 .octa 0xeada7dd6cde0eb1eeada7dd6cde0eb1e,\
500 0xeada7dd6cde0eb1eeada7dd6cde0eb1e
501 .octa 0xf57d4f7fee6ed178f57d4f7fee6ed178,\
502 0xf57d4f7fee6ed178f57d4f7fee6ed178
503 .octa 0x06f067aa72176fba06f067aa72176fba,\
504 0x06f067aa72176fba06f067aa72176fba
505 .octa 0x0a637dc5a2c898a60a637dc5a2c898a6,\
506 0x0a637dc5a2c898a60a637dc5a2c898a6
507 .octa 0x113f9804bef90dae113f9804bef90dae,\
508 0x113f9804bef90dae113f9804bef90dae
509 .octa 0x1b710b35131c471b1b710b35131c471b,\
510 0x1b710b35131c471b1b710b35131c471b
511 .octa 0x28db77f523047d8428db77f523047d84,\
512 0x28db77f523047d8428db77f523047d84
513 .octa 0x32caab7b40c7249332caab7b40c72493,\
514 0x32caab7b40c7249332caab7b40c72493
515 .octa 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc,\
516 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc
517 .octa 0x431d67c49c100d4c431d67c49c100d4c,\
518 0x431d67c49c100d4c431d67c49c100d4c
519 .octa 0x4cc5d4becb3e42b64cc5d4becb3e42b6,\
520 0x4cc5d4becb3e42b64cc5d4becb3e42b6
521 .octa 0x597f299cfc657e2a597f299cfc657e2a,\
522 0x597f299cfc657e2a597f299cfc657e2a
523 .octa 0x5fcb6fab3ad6faec5fcb6fab3ad6faec,\
524 0x5fcb6fab3ad6faec5fcb6fab3ad6faec
525 .octa 0x6c44198c4a4758176c44198c4a475817,\
526 0x6c44198c4a4758176c44198c4a475817
527
528PSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607
529 .octa 0x18191a1b1c1d1e1f1011121314151617
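
The PRORQ sequences in ROUND_00_15 above compose the SHA-512 big-sigma functions out of rotates and xors (rotating by 4 and then 14 yields the 14/18 pair, and likewise for 6 and 28). For reference, a scalar C sketch of the same functions with the FIPS 180-4 rotation amounts; this is a plain restatement of the standard, not code taken from this patch:

#include <stdint.h>

/* Scalar equivalent of the PRORQ macro: rotate right, valid for 0 < n < 64. */
static inline uint64_t rotr64(uint64_t x, unsigned int n)
{
	return (x >> n) | (x << (64 - n));
}

/* Big-sigma1, applied to e each round (built above from rotates 4+14 and 41). */
static inline uint64_t sha512_sigma1(uint64_t e)
{
	return rotr64(e, 14) ^ rotr64(e, 18) ^ rotr64(e, 41);
}

/* Big-sigma0, applied to a each round (built above from rotates 6+28 and 39). */
static inline uint64_t sha512_sigma0(uint64_t a)
{
	return rotr64(a, 28) ^ rotr64(a, 34) ^ rotr64(a, 39);
}
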
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index 0b17c83d027d..2b0e2a6825f3 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -346,4 +346,10 @@ MODULE_LICENSE("GPL");
346MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated"); 346MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated");
347 347
348MODULE_ALIAS_CRYPTO("sha512"); 348MODULE_ALIAS_CRYPTO("sha512");
349MODULE_ALIAS_CRYPTO("sha512-ssse3");
350MODULE_ALIAS_CRYPTO("sha512-avx");
351MODULE_ALIAS_CRYPTO("sha512-avx2");
349MODULE_ALIAS_CRYPTO("sha384"); 352MODULE_ALIAS_CRYPTO("sha384");
353MODULE_ALIAS_CRYPTO("sha384-ssse3");
354MODULE_ALIAS_CRYPTO("sha384-avx");
355MODULE_ALIAS_CRYPTO("sha384-avx2");
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 1d33beb6a1ae..a9377bef25e3 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -93,6 +93,15 @@ config CRYPTO_AKCIPHER
93 select CRYPTO_AKCIPHER2 93 select CRYPTO_AKCIPHER2
94 select CRYPTO_ALGAPI 94 select CRYPTO_ALGAPI
95 95
96config CRYPTO_KPP2
97 tristate
98 select CRYPTO_ALGAPI2
99
100config CRYPTO_KPP
101 tristate
102 select CRYPTO_ALGAPI
103 select CRYPTO_KPP2
104
96config CRYPTO_RSA 105config CRYPTO_RSA
97 tristate "RSA algorithm" 106 tristate "RSA algorithm"
98 select CRYPTO_AKCIPHER 107 select CRYPTO_AKCIPHER
@@ -102,6 +111,19 @@ config CRYPTO_RSA
102 help 111 help
103 Generic implementation of the RSA public key algorithm. 112 Generic implementation of the RSA public key algorithm.
104 113
114config CRYPTO_DH
115 tristate "Diffie-Hellman algorithm"
116 select CRYPTO_KPP
117 select MPILIB
118 help
119 Generic implementation of the Diffie-Hellman algorithm.
120
121config CRYPTO_ECDH
122 tristate "ECDH algorithm"
123	select CRYPTO_KPP
124 help
125 Generic implementation of the ECDH algorithm
126
105config CRYPTO_MANAGER 127config CRYPTO_MANAGER
106 tristate "Cryptographic algorithm manager" 128 tristate "Cryptographic algorithm manager"
107 select CRYPTO_MANAGER2 129 select CRYPTO_MANAGER2
@@ -115,6 +137,7 @@ config CRYPTO_MANAGER2
115 select CRYPTO_HASH2 137 select CRYPTO_HASH2
116 select CRYPTO_BLKCIPHER2 138 select CRYPTO_BLKCIPHER2
117 select CRYPTO_AKCIPHER2 139 select CRYPTO_AKCIPHER2
140 select CRYPTO_KPP2
118 141
119config CRYPTO_USER 142config CRYPTO_USER
120 tristate "Userspace cryptographic algorithm configuration" 143 tristate "Userspace cryptographic algorithm configuration"
@@ -414,6 +437,17 @@ config CRYPTO_CRC32C_INTEL
414 gain performance compared with software implementation. 437 gain performance compared with software implementation.
415 Module will be crc32c-intel. 438 Module will be crc32c-intel.
416 439
440config CRYPT_CRC32C_VPMSUM
441 tristate "CRC32c CRC algorithm (powerpc64)"
442 depends on PPC64
443 select CRYPTO_HASH
444 select CRC32
445 help
446 CRC32c algorithm implemented using vector polynomial multiply-sum
447 (vpmsum) instructions, introduced in POWER8. Enable on POWER8
448 and newer processors for improved performance.
449
450
417config CRYPTO_CRC32C_SPARC64 451config CRYPTO_CRC32C_SPARC64
418 tristate "CRC32c CRC algorithm (SPARC64)" 452 tristate "CRC32c CRC algorithm (SPARC64)"
419 depends on SPARC64 453 depends on SPARC64
@@ -681,6 +715,38 @@ config CRYPTO_SHA1_MB
681 lanes remain unfilled, a flush operation will be initiated to 715 lanes remain unfilled, a flush operation will be initiated to
682 process the crypto jobs, adding a slight latency. 716 process the crypto jobs, adding a slight latency.
683 717
718config CRYPTO_SHA256_MB
719 tristate "SHA256 digest algorithm (x86_64 Multi-Buffer, Experimental)"
720 depends on X86 && 64BIT
721 select CRYPTO_SHA256
722 select CRYPTO_HASH
723 select CRYPTO_MCRYPTD
724 help
725 SHA-256 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
726 using multi-buffer technique. This algorithm computes on
727 multiple data lanes concurrently with SIMD instructions for
728 better throughput. It should not be enabled by default but
729	  used when there is a significant amount of work to keep
730	  the data lanes filled to get a performance benefit. If the data
731 lanes remain unfilled, a flush operation will be initiated to
732 process the crypto jobs, adding a slight latency.
733
734config CRYPTO_SHA512_MB
735 tristate "SHA512 digest algorithm (x86_64 Multi-Buffer, Experimental)"
736 depends on X86 && 64BIT
737 select CRYPTO_SHA512
738 select CRYPTO_HASH
739 select CRYPTO_MCRYPTD
740 help
741 SHA-512 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
742 using multi-buffer technique. This algorithm computes on
743 multiple data lanes concurrently with SIMD instructions for
744 better throughput. It should not be enabled by default but
745	  used when there is a significant amount of work to keep
746	  the data lanes filled to get a performance benefit. If the data
747 lanes remain unfilled, a flush operation will be initiated to
748 process the crypto jobs, adding a slight latency.
749
684config CRYPTO_SHA256 750config CRYPTO_SHA256
685 tristate "SHA224 and SHA256 digest algorithm" 751 tristate "SHA224 and SHA256 digest algorithm"
686 select CRYPTO_HASH 752 select CRYPTO_HASH
@@ -750,6 +816,16 @@ config CRYPTO_SHA512_SPARC64
750 SHA-512 secure hash standard (DFIPS 180-2) implemented 816 SHA-512 secure hash standard (DFIPS 180-2) implemented
751 using sparc64 crypto instructions, when available. 817 using sparc64 crypto instructions, when available.
752 818
819config CRYPTO_SHA3
820 tristate "SHA3 digest algorithm"
821 select CRYPTO_HASH
822 help
823	  SHA-3 secure hash standard (DFIPS 202). It is based on the
824 cryptographic sponge function family called Keccak.
825
826 References:
827 http://keccak.noekeon.org/
828
753config CRYPTO_TGR192 829config CRYPTO_TGR192
754 tristate "Tiger digest algorithms" 830 tristate "Tiger digest algorithms"
755 select CRYPTO_HASH 831 select CRYPTO_HASH
@@ -1567,6 +1643,7 @@ config CRYPTO_DRBG_HASH
1567config CRYPTO_DRBG_CTR 1643config CRYPTO_DRBG_CTR
1568 bool "Enable CTR DRBG" 1644 bool "Enable CTR DRBG"
1569 select CRYPTO_AES 1645 select CRYPTO_AES
1646 depends on CRYPTO_CTR
1570 help 1647 help
1571 Enable the CTR DRBG variant as defined in NIST SP800-90A. 1648 Enable the CTR DRBG variant as defined in NIST SP800-90A.
1572 1649
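
The new CRYPTO_KPP and CRYPTO_DH options above expose Diffie-Hellman through the key-agreement (KPP) interface added in this series. A minimal, heavily abridged sketch of how a kernel caller might drive it; error paths, async completion and the parameter packing (the series adds a helper for encoding p, g and the private key) are glossed over, and dh_generate_public() is a made-up wrapper, not an existing kernel function:

#include <crypto/kpp.h>
#include <linux/scatterlist.h>

/* Hypothetical wrapper: packed_params must already be in the format the
 * "dh" implementation expects (see the dh_helper encoding added here). */
static int dh_generate_public(void *packed_params, unsigned int params_len,
			      void *pub, unsigned int pub_len)
{
	struct crypto_kpp *tfm;
	struct kpp_request *req;
	struct scatterlist dst;
	int err;

	tfm = crypto_alloc_kpp("dh", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_kpp_set_secret(tfm, packed_params, params_len);
	if (err)
		goto out_free_tfm;

	req = kpp_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&dst, pub, pub_len);
	kpp_request_set_input(req, NULL, 0);	/* no input: generate public key */
	kpp_request_set_output(req, &dst, pub_len);
	kpp_request_set_callback(req, 0, NULL, NULL);

	err = crypto_kpp_generate_public_key(req);

	kpp_request_free(req);
out_free_tfm:
	crypto_free_kpp(tfm);
	return err;
}
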
diff --git a/crypto/Makefile b/crypto/Makefile
index 4f4ef7eaae3f..99cc64ac70ef 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -20,8 +20,6 @@ crypto_blkcipher-y := ablkcipher.o
20crypto_blkcipher-y += blkcipher.o 20crypto_blkcipher-y += blkcipher.o
21crypto_blkcipher-y += skcipher.o 21crypto_blkcipher-y += skcipher.o
22obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o 22obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
23obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
24obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
25obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o 23obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
26obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o 24obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o
27 25
@@ -30,6 +28,15 @@ crypto_hash-y += shash.o
30obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o 28obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
31 29
32obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o 30obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
31obj-$(CONFIG_CRYPTO_KPP2) += kpp.o
32
33dh_generic-y := dh.o
34dh_generic-y += dh_helper.o
35obj-$(CONFIG_CRYPTO_DH) += dh_generic.o
36ecdh_generic-y := ecc.o
37ecdh_generic-y += ecdh.o
38ecdh_generic-y += ecdh_helper.o
39obj-$(CONFIG_CRYPTO_ECDH) += ecdh_generic.o
33 40
34$(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h 41$(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
35$(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h 42$(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h
@@ -61,6 +68,7 @@ obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o
61obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o 68obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
62obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o 69obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
63obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o 70obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
71obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
64obj-$(CONFIG_CRYPTO_WP512) += wp512.o 72obj-$(CONFIG_CRYPTO_WP512) += wp512.o
65obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o 73obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
66obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o 74obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c
index e1fcf53bb931..1441f07d0a19 100644
--- a/crypto/ablk_helper.c
+++ b/crypto/ablk_helper.c
@@ -71,7 +71,8 @@ int ablk_encrypt(struct ablkcipher_request *req)
71 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 71 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
72 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); 72 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
73 73
74 if (!may_use_simd()) { 74 if (!may_use_simd() ||
75 (in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm))) {
75 struct ablkcipher_request *cryptd_req = 76 struct ablkcipher_request *cryptd_req =
76 ablkcipher_request_ctx(req); 77 ablkcipher_request_ctx(req);
77 78
@@ -90,7 +91,8 @@ int ablk_decrypt(struct ablkcipher_request *req)
90 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 91 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
91 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); 92 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
92 93
93 if (!may_use_simd()) { 94 if (!may_use_simd() ||
95 (in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm))) {
94 struct ablkcipher_request *cryptd_req = 96 struct ablkcipher_request *cryptd_req =
95 ablkcipher_request_ctx(req); 97 ablkcipher_request_ctx(req);
96 98
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index e5b5721809e2..d676fc59521a 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -14,11 +14,8 @@
14 */ 14 */
15 15
16#include <crypto/internal/skcipher.h> 16#include <crypto/internal/skcipher.h>
17#include <linux/cpumask.h>
18#include <linux/err.h> 17#include <linux/err.h>
19#include <linux/kernel.h> 18#include <linux/kernel.h>
20#include <linux/rtnetlink.h>
21#include <linux/sched.h>
22#include <linux/slab.h> 19#include <linux/slab.h>
23#include <linux/seq_file.h> 20#include <linux/seq_file.h>
24#include <linux/cryptouser.h> 21#include <linux/cryptouser.h>
@@ -349,16 +346,6 @@ static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
349 return alg->cra_ctxsize; 346 return alg->cra_ctxsize;
350} 347}
351 348
352int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
353{
354 return crypto_ablkcipher_encrypt(&req->creq);
355}
356
357int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
358{
359 return crypto_ablkcipher_decrypt(&req->creq);
360}
361
362static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type, 349static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
363 u32 mask) 350 u32 mask)
364{ 351{
@@ -371,10 +358,6 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
371 crt->setkey = setkey; 358 crt->setkey = setkey;
372 crt->encrypt = alg->encrypt; 359 crt->encrypt = alg->encrypt;
373 crt->decrypt = alg->decrypt; 360 crt->decrypt = alg->decrypt;
374 if (!alg->ivsize) {
375 crt->givencrypt = skcipher_null_givencrypt;
376 crt->givdecrypt = skcipher_null_givdecrypt;
377 }
378 crt->base = __crypto_ablkcipher_cast(tfm); 361 crt->base = __crypto_ablkcipher_cast(tfm);
379 crt->ivsize = alg->ivsize; 362 crt->ivsize = alg->ivsize;
380 363
@@ -436,11 +419,6 @@ const struct crypto_type crypto_ablkcipher_type = {
436}; 419};
437EXPORT_SYMBOL_GPL(crypto_ablkcipher_type); 420EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
438 421
439static int no_givdecrypt(struct skcipher_givcrypt_request *req)
440{
441 return -ENOSYS;
442}
443
444static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type, 422static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
445 u32 mask) 423 u32 mask)
446{ 424{
@@ -454,8 +432,6 @@ static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
454 alg->setkey : setkey; 432 alg->setkey : setkey;
455 crt->encrypt = alg->encrypt; 433 crt->encrypt = alg->encrypt;
456 crt->decrypt = alg->decrypt; 434 crt->decrypt = alg->decrypt;
457 crt->givencrypt = alg->givencrypt ?: no_givdecrypt;
458 crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
459 crt->base = __crypto_ablkcipher_cast(tfm); 435 crt->base = __crypto_ablkcipher_cast(tfm);
460 crt->ivsize = alg->ivsize; 436 crt->ivsize = alg->ivsize;
461 437
@@ -516,202 +492,3 @@ const struct crypto_type crypto_givcipher_type = {
516 .report = crypto_givcipher_report, 492 .report = crypto_givcipher_report,
517}; 493};
518EXPORT_SYMBOL_GPL(crypto_givcipher_type); 494EXPORT_SYMBOL_GPL(crypto_givcipher_type);
519
520const char *crypto_default_geniv(const struct crypto_alg *alg)
521{
522 if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
523 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
524 alg->cra_ablkcipher.ivsize) !=
525 alg->cra_blocksize)
526 return "chainiv";
527
528 return "eseqiv";
529}
530
531static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
532{
533 struct rtattr *tb[3];
534 struct {
535 struct rtattr attr;
536 struct crypto_attr_type data;
537 } ptype;
538 struct {
539 struct rtattr attr;
540 struct crypto_attr_alg data;
541 } palg;
542 struct crypto_template *tmpl;
543 struct crypto_instance *inst;
544 struct crypto_alg *larval;
545 const char *geniv;
546 int err;
547
548 larval = crypto_larval_lookup(alg->cra_driver_name,
549 (type & ~CRYPTO_ALG_TYPE_MASK) |
550 CRYPTO_ALG_TYPE_GIVCIPHER,
551 mask | CRYPTO_ALG_TYPE_MASK);
552 err = PTR_ERR(larval);
553 if (IS_ERR(larval))
554 goto out;
555
556 err = -EAGAIN;
557 if (!crypto_is_larval(larval))
558 goto drop_larval;
559
560 ptype.attr.rta_len = sizeof(ptype);
561 ptype.attr.rta_type = CRYPTOA_TYPE;
562 ptype.data.type = type | CRYPTO_ALG_GENIV;
563 /* GENIV tells the template that we're making a default geniv. */
564 ptype.data.mask = mask | CRYPTO_ALG_GENIV;
565 tb[0] = &ptype.attr;
566
567 palg.attr.rta_len = sizeof(palg);
568 palg.attr.rta_type = CRYPTOA_ALG;
569 /* Must use the exact name to locate ourselves. */
570 memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
571 tb[1] = &palg.attr;
572
573 tb[2] = NULL;
574
575 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
576 CRYPTO_ALG_TYPE_BLKCIPHER)
577 geniv = alg->cra_blkcipher.geniv;
578 else
579 geniv = alg->cra_ablkcipher.geniv;
580
581 if (!geniv)
582 geniv = crypto_default_geniv(alg);
583
584 tmpl = crypto_lookup_template(geniv);
585 err = -ENOENT;
586 if (!tmpl)
587 goto kill_larval;
588
589 if (tmpl->create) {
590 err = tmpl->create(tmpl, tb);
591 if (err)
592 goto put_tmpl;
593 goto ok;
594 }
595
596 inst = tmpl->alloc(tb);
597 err = PTR_ERR(inst);
598 if (IS_ERR(inst))
599 goto put_tmpl;
600
601 err = crypto_register_instance(tmpl, inst);
602 if (err) {
603 tmpl->free(inst);
604 goto put_tmpl;
605 }
606
607ok:
608 /* Redo the lookup to use the instance we just registered. */
609 err = -EAGAIN;
610
611put_tmpl:
612 crypto_tmpl_put(tmpl);
613kill_larval:
614 crypto_larval_kill(larval);
615drop_larval:
616 crypto_mod_put(larval);
617out:
618 crypto_mod_put(alg);
619 return err;
620}
621
622struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
623{
624 struct crypto_alg *alg;
625
626 alg = crypto_alg_mod_lookup(name, type, mask);
627 if (IS_ERR(alg))
628 return alg;
629
630 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
631 CRYPTO_ALG_TYPE_GIVCIPHER)
632 return alg;
633
634 if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
635 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
636 alg->cra_ablkcipher.ivsize))
637 return alg;
638
639 crypto_mod_put(alg);
640 alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
641 mask & ~CRYPTO_ALG_TESTED);
642 if (IS_ERR(alg))
643 return alg;
644
645 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
646 CRYPTO_ALG_TYPE_GIVCIPHER) {
647 if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) {
648 crypto_mod_put(alg);
649 alg = ERR_PTR(-ENOENT);
650 }
651 return alg;
652 }
653
654 BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
655 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
656 alg->cra_ablkcipher.ivsize));
657
658 return ERR_PTR(crypto_givcipher_default(alg, type, mask));
659}
660EXPORT_SYMBOL_GPL(crypto_lookup_skcipher);
661
662int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
663 u32 type, u32 mask)
664{
665 struct crypto_alg *alg;
666 int err;
667
668 type = crypto_skcipher_type(type);
669 mask = crypto_skcipher_mask(mask);
670
671 alg = crypto_lookup_skcipher(name, type, mask);
672 if (IS_ERR(alg))
673 return PTR_ERR(alg);
674
675 err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
676 crypto_mod_put(alg);
677 return err;
678}
679EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
680
681struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
682 u32 type, u32 mask)
683{
684 struct crypto_tfm *tfm;
685 int err;
686
687 type = crypto_skcipher_type(type);
688 mask = crypto_skcipher_mask(mask);
689
690 for (;;) {
691 struct crypto_alg *alg;
692
693 alg = crypto_lookup_skcipher(alg_name, type, mask);
694 if (IS_ERR(alg)) {
695 err = PTR_ERR(alg);
696 goto err;
697 }
698
699 tfm = __crypto_alloc_tfm(alg, type, mask);
700 if (!IS_ERR(tfm))
701 return __crypto_ablkcipher_cast(tfm);
702
703 crypto_mod_put(alg);
704 err = PTR_ERR(tfm);
705
706err:
707 if (err != -EAGAIN)
708 break;
709 if (fatal_signal_pending(current)) {
710 err = -EINTR;
711 break;
712 }
713 }
714
715 return ERR_PTR(err);
716}
717EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
diff --git a/crypto/aead.c b/crypto/aead.c
index 9b18a1e40d6a..3f5c5ff004ab 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -294,9 +294,9 @@ int aead_init_geniv(struct crypto_aead *aead)
294 if (err) 294 if (err)
295 goto out; 295 goto out;
296 296
297 ctx->null = crypto_get_default_null_skcipher(); 297 ctx->sknull = crypto_get_default_null_skcipher2();
298 err = PTR_ERR(ctx->null); 298 err = PTR_ERR(ctx->sknull);
299 if (IS_ERR(ctx->null)) 299 if (IS_ERR(ctx->sknull))
300 goto out; 300 goto out;
301 301
302 child = crypto_spawn_aead(aead_instance_ctx(inst)); 302 child = crypto_spawn_aead(aead_instance_ctx(inst));
@@ -314,7 +314,7 @@ out:
314 return err; 314 return err;
315 315
316drop_null: 316drop_null:
317 crypto_put_default_null_skcipher(); 317 crypto_put_default_null_skcipher2();
318 goto out; 318 goto out;
319} 319}
320EXPORT_SYMBOL_GPL(aead_init_geniv); 320EXPORT_SYMBOL_GPL(aead_init_geniv);
@@ -324,7 +324,7 @@ void aead_exit_geniv(struct crypto_aead *tfm)
324 struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm); 324 struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
325 325
326 crypto_free_aead(ctx->child); 326 crypto_free_aead(ctx->child);
327 crypto_put_default_null_skcipher(); 327 crypto_put_default_null_skcipher2();
328} 328}
329EXPORT_SYMBOL_GPL(aead_exit_geniv); 329EXPORT_SYMBOL_GPL(aead_exit_geniv);
330 330
@@ -346,9 +346,13 @@ static int aead_prepare_alg(struct aead_alg *alg)
346{ 346{
347 struct crypto_alg *base = &alg->base; 347 struct crypto_alg *base = &alg->base;
348 348
349 if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8) 349 if (max3(alg->maxauthsize, alg->ivsize, alg->chunksize) >
350 PAGE_SIZE / 8)
350 return -EINVAL; 351 return -EINVAL;
351 352
353 if (!alg->chunksize)
354 alg->chunksize = base->cra_blocksize;
355
352 base->cra_type = &crypto_aead_type; 356 base->cra_type = &crypto_aead_type;
353 base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; 357 base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
354 base->cra_flags |= CRYPTO_ALG_TYPE_AEAD; 358 base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 3887a98abcc3..2ce8bcb9049c 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -461,10 +461,10 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
461 461
462static unsigned int crypto_ahash_extsize(struct crypto_alg *alg) 462static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
463{ 463{
464 if (alg->cra_type == &crypto_ahash_type) 464 if (alg->cra_type != &crypto_ahash_type)
465 return alg->cra_ctxsize; 465 return sizeof(struct crypto_shash *);
466 466
467 return sizeof(struct crypto_shash *); 467 return crypto_alg_extsize(alg);
468} 468}
469 469
470#ifdef CONFIG_NET 470#ifdef CONFIG_NET
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 731255a6104f..df939b54b09f 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -811,6 +811,21 @@ int crypto_attr_u32(struct rtattr *rta, u32 *num)
811} 811}
812EXPORT_SYMBOL_GPL(crypto_attr_u32); 812EXPORT_SYMBOL_GPL(crypto_attr_u32);
813 813
814int crypto_inst_setname(struct crypto_instance *inst, const char *name,
815 struct crypto_alg *alg)
816{
817 if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
818 alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
819 return -ENAMETOOLONG;
820
821 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
822 name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
823 return -ENAMETOOLONG;
824
825 return 0;
826}
827EXPORT_SYMBOL_GPL(crypto_inst_setname);
828
814void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, 829void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
815 unsigned int head) 830 unsigned int head)
816{ 831{
@@ -825,13 +840,8 @@ void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
825 840
826 inst = (void *)(p + head); 841 inst = (void *)(p + head);
827 842
828 err = -ENAMETOOLONG; 843 err = crypto_inst_setname(inst, name, alg);
829 if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name, 844 if (err)
830 alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
831 goto err_free_inst;
832
833 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
834 name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
835 goto err_free_inst; 845 goto err_free_inst;
836 846
837 return p; 847 return p;
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 55a354d57251..a7e1ac786c5d 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -32,8 +32,8 @@ struct authenc_instance_ctx {
32 32
33struct crypto_authenc_ctx { 33struct crypto_authenc_ctx {
34 struct crypto_ahash *auth; 34 struct crypto_ahash *auth;
35 struct crypto_ablkcipher *enc; 35 struct crypto_skcipher *enc;
36 struct crypto_blkcipher *null; 36 struct crypto_skcipher *null;
37}; 37};
38 38
39struct authenc_request_ctx { 39struct authenc_request_ctx {
@@ -83,7 +83,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
83{ 83{
84 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); 84 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
85 struct crypto_ahash *auth = ctx->auth; 85 struct crypto_ahash *auth = ctx->auth;
86 struct crypto_ablkcipher *enc = ctx->enc; 86 struct crypto_skcipher *enc = ctx->enc;
87 struct crypto_authenc_keys keys; 87 struct crypto_authenc_keys keys;
88 int err = -EINVAL; 88 int err = -EINVAL;
89 89
@@ -100,11 +100,11 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
100 if (err) 100 if (err)
101 goto out; 101 goto out;
102 102
103 crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); 103 crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
104 crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) & 104 crypto_skcipher_set_flags(enc, crypto_aead_get_flags(authenc) &
105 CRYPTO_TFM_REQ_MASK); 105 CRYPTO_TFM_REQ_MASK);
106 err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen); 106 err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
107 crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) & 107 crypto_aead_set_flags(authenc, crypto_skcipher_get_flags(enc) &
108 CRYPTO_TFM_RES_MASK); 108 CRYPTO_TFM_RES_MASK);
109 109
110out: 110out:
@@ -184,12 +184,15 @@ static int crypto_authenc_copy_assoc(struct aead_request *req)
184{ 184{
185 struct crypto_aead *authenc = crypto_aead_reqtfm(req); 185 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
186 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); 186 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
187 struct blkcipher_desc desc = { 187 SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
188 .tfm = ctx->null,
189 };
190 188
191 return crypto_blkcipher_encrypt(&desc, req->dst, req->src, 189 skcipher_request_set_tfm(skreq, ctx->null);
192 req->assoclen); 190 skcipher_request_set_callback(skreq, aead_request_flags(req),
191 NULL, NULL);
192 skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
193 NULL);
194
195 return crypto_skcipher_encrypt(skreq);
193} 196}
194 197
195static int crypto_authenc_encrypt(struct aead_request *req) 198static int crypto_authenc_encrypt(struct aead_request *req)
@@ -199,14 +202,13 @@ static int crypto_authenc_encrypt(struct aead_request *req)
199 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); 202 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
200 struct authenc_instance_ctx *ictx = aead_instance_ctx(inst); 203 struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
201 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); 204 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
202 struct crypto_ablkcipher *enc = ctx->enc; 205 struct crypto_skcipher *enc = ctx->enc;
203 unsigned int cryptlen = req->cryptlen; 206 unsigned int cryptlen = req->cryptlen;
204 struct ablkcipher_request *abreq = (void *)(areq_ctx->tail + 207 struct skcipher_request *skreq = (void *)(areq_ctx->tail +
205 ictx->reqoff); 208 ictx->reqoff);
206 struct scatterlist *src, *dst; 209 struct scatterlist *src, *dst;
207 int err; 210 int err;
208 211
209 sg_init_table(areq_ctx->src, 2);
210 src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen); 212 src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
211 dst = src; 213 dst = src;
212 214
@@ -215,16 +217,15 @@ static int crypto_authenc_encrypt(struct aead_request *req)
215 if (err) 217 if (err)
216 return err; 218 return err;
217 219
218 sg_init_table(areq_ctx->dst, 2);
219 dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); 220 dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
220 } 221 }
221 222
222 ablkcipher_request_set_tfm(abreq, enc); 223 skcipher_request_set_tfm(skreq, enc);
223 ablkcipher_request_set_callback(abreq, aead_request_flags(req), 224 skcipher_request_set_callback(skreq, aead_request_flags(req),
224 crypto_authenc_encrypt_done, req); 225 crypto_authenc_encrypt_done, req);
225 ablkcipher_request_set_crypt(abreq, src, dst, cryptlen, req->iv); 226 skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
226 227
227 err = crypto_ablkcipher_encrypt(abreq); 228 err = crypto_skcipher_encrypt(skreq);
228 if (err) 229 if (err)
229 return err; 230 return err;
230 231
@@ -240,8 +241,8 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req,
240 struct authenc_instance_ctx *ictx = aead_instance_ctx(inst); 241 struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
241 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); 242 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
242 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); 243 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
243 struct ablkcipher_request *abreq = (void *)(areq_ctx->tail + 244 struct skcipher_request *skreq = (void *)(areq_ctx->tail +
244 ictx->reqoff); 245 ictx->reqoff);
245 unsigned int authsize = crypto_aead_authsize(authenc); 246 unsigned int authsize = crypto_aead_authsize(authenc);
246 u8 *ihash = ahreq->result + authsize; 247 u8 *ihash = ahreq->result + authsize;
247 struct scatterlist *src, *dst; 248 struct scatterlist *src, *dst;
@@ -251,22 +252,19 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req,
251 if (crypto_memneq(ihash, ahreq->result, authsize)) 252 if (crypto_memneq(ihash, ahreq->result, authsize))
252 return -EBADMSG; 253 return -EBADMSG;
253 254
254 sg_init_table(areq_ctx->src, 2);
255 src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen); 255 src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
256 dst = src; 256 dst = src;
257 257
258 if (req->src != req->dst) { 258 if (req->src != req->dst)
259 sg_init_table(areq_ctx->dst, 2);
260 dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); 259 dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
261 }
262 260
263 ablkcipher_request_set_tfm(abreq, ctx->enc); 261 skcipher_request_set_tfm(skreq, ctx->enc);
264 ablkcipher_request_set_callback(abreq, aead_request_flags(req), 262 skcipher_request_set_callback(skreq, aead_request_flags(req),
265 req->base.complete, req->base.data); 263 req->base.complete, req->base.data);
266 ablkcipher_request_set_crypt(abreq, src, dst, 264 skcipher_request_set_crypt(skreq, src, dst,
267 req->cryptlen - authsize, req->iv); 265 req->cryptlen - authsize, req->iv);
268 266
269 return crypto_ablkcipher_decrypt(abreq); 267 return crypto_skcipher_decrypt(skreq);
270} 268}
271 269
272static void authenc_verify_ahash_done(struct crypto_async_request *areq, 270static void authenc_verify_ahash_done(struct crypto_async_request *areq,
@@ -318,20 +316,20 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
318 struct authenc_instance_ctx *ictx = aead_instance_ctx(inst); 316 struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
319 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm); 317 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
320 struct crypto_ahash *auth; 318 struct crypto_ahash *auth;
321 struct crypto_ablkcipher *enc; 319 struct crypto_skcipher *enc;
322 struct crypto_blkcipher *null; 320 struct crypto_skcipher *null;
323 int err; 321 int err;
324 322
325 auth = crypto_spawn_ahash(&ictx->auth); 323 auth = crypto_spawn_ahash(&ictx->auth);
326 if (IS_ERR(auth)) 324 if (IS_ERR(auth))
327 return PTR_ERR(auth); 325 return PTR_ERR(auth);
328 326
329 enc = crypto_spawn_skcipher(&ictx->enc); 327 enc = crypto_spawn_skcipher2(&ictx->enc);
330 err = PTR_ERR(enc); 328 err = PTR_ERR(enc);
331 if (IS_ERR(enc)) 329 if (IS_ERR(enc))
332 goto err_free_ahash; 330 goto err_free_ahash;
333 331
334 null = crypto_get_default_null_skcipher(); 332 null = crypto_get_default_null_skcipher2();
335 err = PTR_ERR(null); 333 err = PTR_ERR(null);
336 if (IS_ERR(null)) 334 if (IS_ERR(null))
337 goto err_free_skcipher; 335 goto err_free_skcipher;
@@ -347,13 +345,13 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
347 max_t(unsigned int, 345 max_t(unsigned int,
348 crypto_ahash_reqsize(auth) + 346 crypto_ahash_reqsize(auth) +
349 sizeof(struct ahash_request), 347 sizeof(struct ahash_request),
350 sizeof(struct ablkcipher_request) + 348 sizeof(struct skcipher_request) +
351 crypto_ablkcipher_reqsize(enc))); 349 crypto_skcipher_reqsize(enc)));
352 350
353 return 0; 351 return 0;
354 352
355err_free_skcipher: 353err_free_skcipher:
356 crypto_free_ablkcipher(enc); 354 crypto_free_skcipher(enc);
357err_free_ahash: 355err_free_ahash:
358 crypto_free_ahash(auth); 356 crypto_free_ahash(auth);
359 return err; 357 return err;
@@ -364,8 +362,8 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
364 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm); 362 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
365 363
366 crypto_free_ahash(ctx->auth); 364 crypto_free_ahash(ctx->auth);
367 crypto_free_ablkcipher(ctx->enc); 365 crypto_free_skcipher(ctx->enc);
368 crypto_put_default_null_skcipher(); 366 crypto_put_default_null_skcipher2();
369} 367}
370 368
371static void crypto_authenc_free(struct aead_instance *inst) 369static void crypto_authenc_free(struct aead_instance *inst)
@@ -384,7 +382,7 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
384 struct aead_instance *inst; 382 struct aead_instance *inst;
385 struct hash_alg_common *auth; 383 struct hash_alg_common *auth;
386 struct crypto_alg *auth_base; 384 struct crypto_alg *auth_base;
387 struct crypto_alg *enc; 385 struct skcipher_alg *enc;
388 struct authenc_instance_ctx *ctx; 386 struct authenc_instance_ctx *ctx;
389 const char *enc_name; 387 const char *enc_name;
390 int err; 388 int err;
@@ -397,7 +395,8 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
397 return -EINVAL; 395 return -EINVAL;
398 396
399 auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, 397 auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
400 CRYPTO_ALG_TYPE_AHASH_MASK); 398 CRYPTO_ALG_TYPE_AHASH_MASK |
399 crypto_requires_sync(algt->type, algt->mask));
401 if (IS_ERR(auth)) 400 if (IS_ERR(auth))
402 return PTR_ERR(auth); 401 return PTR_ERR(auth);
403 402
@@ -421,37 +420,40 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
421 goto err_free_inst; 420 goto err_free_inst;
422 421
423 crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst)); 422 crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
424 err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, 423 err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0,
425 crypto_requires_sync(algt->type, 424 crypto_requires_sync(algt->type,
426 algt->mask)); 425 algt->mask));
427 if (err) 426 if (err)
428 goto err_drop_auth; 427 goto err_drop_auth;
429 428
430 enc = crypto_skcipher_spawn_alg(&ctx->enc); 429 enc = crypto_spawn_skcipher_alg(&ctx->enc);
431 430
432 ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask, 431 ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask,
433 auth_base->cra_alignmask + 1); 432 auth_base->cra_alignmask + 1);
434 433
435 err = -ENAMETOOLONG; 434 err = -ENAMETOOLONG;
436 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, 435 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
437 "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >= 436 "authenc(%s,%s)", auth_base->cra_name,
437 enc->base.cra_name) >=
438 CRYPTO_MAX_ALG_NAME) 438 CRYPTO_MAX_ALG_NAME)
439 goto err_drop_enc; 439 goto err_drop_enc;
440 440
441 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, 441 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
442 "authenc(%s,%s)", auth_base->cra_driver_name, 442 "authenc(%s,%s)", auth_base->cra_driver_name,
443 enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) 443 enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
444 goto err_drop_enc; 444 goto err_drop_enc;
445 445
446 inst->alg.base.cra_flags = enc->cra_flags & CRYPTO_ALG_ASYNC; 446 inst->alg.base.cra_flags = (auth_base->cra_flags |
447 inst->alg.base.cra_priority = enc->cra_priority * 10 + 447 enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
448 inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
448 auth_base->cra_priority; 449 auth_base->cra_priority;
449 inst->alg.base.cra_blocksize = enc->cra_blocksize; 450 inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
450 inst->alg.base.cra_alignmask = auth_base->cra_alignmask | 451 inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
451 enc->cra_alignmask; 452 enc->base.cra_alignmask;
452 inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx); 453 inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
453 454
454 inst->alg.ivsize = enc->cra_ablkcipher.ivsize; 455 inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
456 inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
455 inst->alg.maxauthsize = auth->digestsize; 457 inst->alg.maxauthsize = auth->digestsize;
456 458
457 inst->alg.init = crypto_authenc_init_tfm; 459 inst->alg.init = crypto_authenc_init_tfm;
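
The authenc conversion above swaps the old blkcipher_desc null-cipher copy for the on-stack skcipher request pattern. Stripped of the surrounding context, that pattern looks roughly like this; a sketch for a synchronous tfm, with the helper name invented for illustration:

#include <crypto/skcipher.h>

/* Copy len bytes from src to dst via ecb(cipher_null); sketch only. */
static int null_copy(struct crypto_skcipher *null_tfm,
		     struct scatterlist *src, struct scatterlist *dst,
		     unsigned int len)
{
	SKCIPHER_REQUEST_ON_STACK(req, null_tfm);

	skcipher_request_set_tfm(req, null_tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, len, NULL);	/* no IV */

	return crypto_skcipher_encrypt(req);
}
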
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 0c0468869e25..121010ac9962 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -35,8 +35,8 @@ struct authenc_esn_instance_ctx {
35struct crypto_authenc_esn_ctx { 35struct crypto_authenc_esn_ctx {
36 unsigned int reqoff; 36 unsigned int reqoff;
37 struct crypto_ahash *auth; 37 struct crypto_ahash *auth;
38 struct crypto_ablkcipher *enc; 38 struct crypto_skcipher *enc;
39 struct crypto_blkcipher *null; 39 struct crypto_skcipher *null;
40}; 40};
41 41
42struct authenc_esn_request_ctx { 42struct authenc_esn_request_ctx {
@@ -65,7 +65,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
65{ 65{
66 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); 66 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
67 struct crypto_ahash *auth = ctx->auth; 67 struct crypto_ahash *auth = ctx->auth;
68 struct crypto_ablkcipher *enc = ctx->enc; 68 struct crypto_skcipher *enc = ctx->enc;
69 struct crypto_authenc_keys keys; 69 struct crypto_authenc_keys keys;
70 int err = -EINVAL; 70 int err = -EINVAL;
71 71
@@ -82,11 +82,11 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
82 if (err) 82 if (err)
83 goto out; 83 goto out;
84 84
85 crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); 85 crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
86 crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) & 86 crypto_skcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
87 CRYPTO_TFM_REQ_MASK); 87 CRYPTO_TFM_REQ_MASK);
88 err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen); 88 err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
89 crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) & 89 crypto_aead_set_flags(authenc_esn, crypto_skcipher_get_flags(enc) &
90 CRYPTO_TFM_RES_MASK); 90 CRYPTO_TFM_RES_MASK);
91 91
92out: 92out:
@@ -182,11 +182,14 @@ static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
182{ 182{
183 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); 183 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
184 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); 184 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
185 struct blkcipher_desc desc = { 185 SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
186 .tfm = ctx->null,
187 };
188 186
189 return crypto_blkcipher_encrypt(&desc, req->dst, req->src, len); 187 skcipher_request_set_tfm(skreq, ctx->null);
188 skcipher_request_set_callback(skreq, aead_request_flags(req),
189 NULL, NULL);
190 skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL);
191
192 return crypto_skcipher_encrypt(skreq);
190} 193}
191 194
192static int crypto_authenc_esn_encrypt(struct aead_request *req) 195static int crypto_authenc_esn_encrypt(struct aead_request *req)
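
One detail worth calling out in the hunk above: the on-stack blkcipher_desc is replaced by SKCIPHER_REQUEST_ON_STACK, which reserves room for the transform's request context on the stack. That is safe here because the default null skcipher ("ecb(cipher_null)") obtained in init_tfm is synchronous. A condensed sketch of the same copy-through-null-cipher idiom (the helper name is illustrative):

    /* requires <crypto/skcipher.h> */
    static int null_copy(struct crypto_skcipher *null_tfm,
                         struct scatterlist *src, struct scatterlist *dst,
                         unsigned int len, u32 flags)
    {
            SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);

            skcipher_request_set_tfm(skreq, null_tfm);
            skcipher_request_set_callback(skreq, flags, NULL, NULL);
            skcipher_request_set_crypt(skreq, src, dst, len, NULL);

            /* "encrypting" with the null cipher is just a copy */
            return crypto_skcipher_encrypt(skreq);
    }
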
@@ -194,9 +197,9 @@ static int crypto_authenc_esn_encrypt(struct aead_request *req)
194 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); 197 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
195 struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); 198 struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
196 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); 199 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
197 struct ablkcipher_request *abreq = (void *)(areq_ctx->tail 200 struct skcipher_request *skreq = (void *)(areq_ctx->tail +
198 + ctx->reqoff); 201 ctx->reqoff);
199 struct crypto_ablkcipher *enc = ctx->enc; 202 struct crypto_skcipher *enc = ctx->enc;
200 unsigned int assoclen = req->assoclen; 203 unsigned int assoclen = req->assoclen;
201 unsigned int cryptlen = req->cryptlen; 204 unsigned int cryptlen = req->cryptlen;
202 struct scatterlist *src, *dst; 205 struct scatterlist *src, *dst;
@@ -215,12 +218,12 @@ static int crypto_authenc_esn_encrypt(struct aead_request *req)
215 dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen); 218 dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen);
216 } 219 }
217 220
218 ablkcipher_request_set_tfm(abreq, enc); 221 skcipher_request_set_tfm(skreq, enc);
219 ablkcipher_request_set_callback(abreq, aead_request_flags(req), 222 skcipher_request_set_callback(skreq, aead_request_flags(req),
220 crypto_authenc_esn_encrypt_done, req); 223 crypto_authenc_esn_encrypt_done, req);
221 ablkcipher_request_set_crypt(abreq, src, dst, cryptlen, req->iv); 224 skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
222 225
223 err = crypto_ablkcipher_encrypt(abreq); 226 err = crypto_skcipher_encrypt(skreq);
224 if (err) 227 if (err)
225 return err; 228 return err;
226 229
@@ -234,8 +237,8 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
234 unsigned int authsize = crypto_aead_authsize(authenc_esn); 237 unsigned int authsize = crypto_aead_authsize(authenc_esn);
235 struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); 238 struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
236 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); 239 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
237 struct ablkcipher_request *abreq = (void *)(areq_ctx->tail 240 struct skcipher_request *skreq = (void *)(areq_ctx->tail +
238 + ctx->reqoff); 241 ctx->reqoff);
239 struct crypto_ahash *auth = ctx->auth; 242 struct crypto_ahash *auth = ctx->auth;
240 u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail, 243 u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail,
241 crypto_ahash_alignmask(auth) + 1); 244 crypto_ahash_alignmask(auth) + 1);
@@ -256,12 +259,12 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
256 sg_init_table(areq_ctx->dst, 2); 259 sg_init_table(areq_ctx->dst, 2);
257 dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen); 260 dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
258 261
259 ablkcipher_request_set_tfm(abreq, ctx->enc); 262 skcipher_request_set_tfm(skreq, ctx->enc);
260 ablkcipher_request_set_callback(abreq, flags, 263 skcipher_request_set_callback(skreq, flags,
261 req->base.complete, req->base.data); 264 req->base.complete, req->base.data);
262 ablkcipher_request_set_crypt(abreq, dst, dst, cryptlen, req->iv); 265 skcipher_request_set_crypt(skreq, dst, dst, cryptlen, req->iv);
263 266
264 return crypto_ablkcipher_decrypt(abreq); 267 return crypto_skcipher_decrypt(skreq);
265} 268}
266 269
267static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, 270static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
@@ -331,20 +334,20 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
331 struct authenc_esn_instance_ctx *ictx = aead_instance_ctx(inst); 334 struct authenc_esn_instance_ctx *ictx = aead_instance_ctx(inst);
332 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm); 335 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
333 struct crypto_ahash *auth; 336 struct crypto_ahash *auth;
334 struct crypto_ablkcipher *enc; 337 struct crypto_skcipher *enc;
335 struct crypto_blkcipher *null; 338 struct crypto_skcipher *null;
336 int err; 339 int err;
337 340
338 auth = crypto_spawn_ahash(&ictx->auth); 341 auth = crypto_spawn_ahash(&ictx->auth);
339 if (IS_ERR(auth)) 342 if (IS_ERR(auth))
340 return PTR_ERR(auth); 343 return PTR_ERR(auth);
341 344
342 enc = crypto_spawn_skcipher(&ictx->enc); 345 enc = crypto_spawn_skcipher2(&ictx->enc);
343 err = PTR_ERR(enc); 346 err = PTR_ERR(enc);
344 if (IS_ERR(enc)) 347 if (IS_ERR(enc))
345 goto err_free_ahash; 348 goto err_free_ahash;
346 349
347 null = crypto_get_default_null_skcipher(); 350 null = crypto_get_default_null_skcipher2();
348 err = PTR_ERR(null); 351 err = PTR_ERR(null);
349 if (IS_ERR(null)) 352 if (IS_ERR(null))
350 goto err_free_skcipher; 353 goto err_free_skcipher;
@@ -361,15 +364,15 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
361 sizeof(struct authenc_esn_request_ctx) + 364 sizeof(struct authenc_esn_request_ctx) +
362 ctx->reqoff + 365 ctx->reqoff +
363 max_t(unsigned int, 366 max_t(unsigned int,
364 crypto_ahash_reqsize(auth) + 367 crypto_ahash_reqsize(auth) +
365 sizeof(struct ahash_request), 368 sizeof(struct ahash_request),
366 sizeof(struct skcipher_givcrypt_request) + 369 sizeof(struct skcipher_request) +
367 crypto_ablkcipher_reqsize(enc))); 370 crypto_skcipher_reqsize(enc)));
368 371
369 return 0; 372 return 0;
370 373
371err_free_skcipher: 374err_free_skcipher:
372 crypto_free_ablkcipher(enc); 375 crypto_free_skcipher(enc);
373err_free_ahash: 376err_free_ahash:
374 crypto_free_ahash(auth); 377 crypto_free_ahash(auth);
375 return err; 378 return err;
@@ -380,8 +383,8 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm)
380 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm); 383 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
381 384
382 crypto_free_ahash(ctx->auth); 385 crypto_free_ahash(ctx->auth);
383 crypto_free_ablkcipher(ctx->enc); 386 crypto_free_skcipher(ctx->enc);
384 crypto_put_default_null_skcipher(); 387 crypto_put_default_null_skcipher2();
385} 388}
386 389
387static void crypto_authenc_esn_free(struct aead_instance *inst) 390static void crypto_authenc_esn_free(struct aead_instance *inst)
@@ -400,7 +403,7 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
400 struct aead_instance *inst; 403 struct aead_instance *inst;
401 struct hash_alg_common *auth; 404 struct hash_alg_common *auth;
402 struct crypto_alg *auth_base; 405 struct crypto_alg *auth_base;
403 struct crypto_alg *enc; 406 struct skcipher_alg *enc;
404 struct authenc_esn_instance_ctx *ctx; 407 struct authenc_esn_instance_ctx *ctx;
405 const char *enc_name; 408 const char *enc_name;
406 int err; 409 int err;
@@ -413,7 +416,8 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
413 return -EINVAL; 416 return -EINVAL;
414 417
415 auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, 418 auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
416 CRYPTO_ALG_TYPE_AHASH_MASK); 419 CRYPTO_ALG_TYPE_AHASH_MASK |
420 crypto_requires_sync(algt->type, algt->mask));
417 if (IS_ERR(auth)) 421 if (IS_ERR(auth))
418 return PTR_ERR(auth); 422 return PTR_ERR(auth);
419 423
@@ -437,34 +441,36 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
437 goto err_free_inst; 441 goto err_free_inst;
438 442
439 crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst)); 443 crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
440 err = crypto_grab_skcipher(&ctx->enc, enc_name, 0, 444 err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0,
441 crypto_requires_sync(algt->type, 445 crypto_requires_sync(algt->type,
442 algt->mask)); 446 algt->mask));
443 if (err) 447 if (err)
444 goto err_drop_auth; 448 goto err_drop_auth;
445 449
446 enc = crypto_skcipher_spawn_alg(&ctx->enc); 450 enc = crypto_spawn_skcipher_alg(&ctx->enc);
447 451
448 err = -ENAMETOOLONG; 452 err = -ENAMETOOLONG;
449 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, 453 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
450 "authencesn(%s,%s)", auth_base->cra_name, 454 "authencesn(%s,%s)", auth_base->cra_name,
451 enc->cra_name) >= CRYPTO_MAX_ALG_NAME) 455 enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
452 goto err_drop_enc; 456 goto err_drop_enc;
453 457
454 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, 458 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
455 "authencesn(%s,%s)", auth_base->cra_driver_name, 459 "authencesn(%s,%s)", auth_base->cra_driver_name,
456 enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) 460 enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
457 goto err_drop_enc; 461 goto err_drop_enc;
458 462
459 inst->alg.base.cra_flags = enc->cra_flags & CRYPTO_ALG_ASYNC; 463 inst->alg.base.cra_flags = (auth_base->cra_flags |
460 inst->alg.base.cra_priority = enc->cra_priority * 10 + 464 enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
465 inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
461 auth_base->cra_priority; 466 auth_base->cra_priority;
462 inst->alg.base.cra_blocksize = enc->cra_blocksize; 467 inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
463 inst->alg.base.cra_alignmask = auth_base->cra_alignmask | 468 inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
464 enc->cra_alignmask; 469 enc->base.cra_alignmask;
465 inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx); 470 inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx);
466 471
467 inst->alg.ivsize = enc->cra_ablkcipher.ivsize; 472 inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
473 inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
468 inst->alg.maxauthsize = auth->digestsize; 474 inst->alg.maxauthsize = auth->digestsize;
469 475
470 inst->alg.init = crypto_authenc_esn_init_tfm; 476 inst->alg.init = crypto_authenc_esn_init_tfm;
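
Two details of the instance setup above are easy to miss. First, the instance is now flagged asynchronous if either the hash or the cipher is asynchronous; the old code consulted only the cipher's flags. Second, the tail area reserved by crypto_aead_set_reqsize() is shared: the ahash and skcipher sub-requests are issued in different phases of an operation, so only the larger of the two footprints needs to fit after ctx->reqoff. Pulled out of the init_tfm change for reference:

    /* shared tail: holds either the hash sub-request or the cipher
     * sub-request, whichever is in flight for the current phase */
    crypto_aead_set_reqsize(tfm,
            sizeof(struct authenc_esn_request_ctx) + ctx->reqoff +
            max_t(unsigned int,
                  crypto_ahash_reqsize(auth) + sizeof(struct ahash_request),
                  sizeof(struct skcipher_request) +
                  crypto_skcipher_reqsize(enc)));
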
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 8cc1622b2ee0..369999530108 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -21,7 +21,6 @@
21#include <linux/hardirq.h> 21#include <linux/hardirq.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/scatterlist.h>
25#include <linux/seq_file.h> 24#include <linux/seq_file.h>
26#include <linux/slab.h> 25#include <linux/slab.h>
27#include <linux/string.h> 26#include <linux/string.h>
@@ -466,10 +465,6 @@ static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
466 crt->setkey = async_setkey; 465 crt->setkey = async_setkey;
467 crt->encrypt = async_encrypt; 466 crt->encrypt = async_encrypt;
468 crt->decrypt = async_decrypt; 467 crt->decrypt = async_decrypt;
469 if (!alg->ivsize) {
470 crt->givencrypt = skcipher_null_givencrypt;
471 crt->givdecrypt = skcipher_null_givdecrypt;
472 }
473 crt->base = __crypto_ablkcipher_cast(tfm); 468 crt->base = __crypto_ablkcipher_cast(tfm);
474 crt->ivsize = alg->ivsize; 469 crt->ivsize = alg->ivsize;
475 470
@@ -560,185 +555,5 @@ const struct crypto_type crypto_blkcipher_type = {
560}; 555};
561EXPORT_SYMBOL_GPL(crypto_blkcipher_type); 556EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
562 557
563static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
564 const char *name, u32 type, u32 mask)
565{
566 struct crypto_alg *alg;
567 int err;
568
569 type = crypto_skcipher_type(type);
570 mask = crypto_skcipher_mask(mask)| CRYPTO_ALG_GENIV;
571
572 alg = crypto_alg_mod_lookup(name, type, mask);
573 if (IS_ERR(alg))
574 return PTR_ERR(alg);
575
576 err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
577 crypto_mod_put(alg);
578 return err;
579}
580
581struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
582 struct rtattr **tb, u32 type,
583 u32 mask)
584{
585 struct {
586 int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
587 unsigned int keylen);
588 int (*encrypt)(struct ablkcipher_request *req);
589 int (*decrypt)(struct ablkcipher_request *req);
590
591 unsigned int min_keysize;
592 unsigned int max_keysize;
593 unsigned int ivsize;
594
595 const char *geniv;
596 } balg;
597 const char *name;
598 struct crypto_skcipher_spawn *spawn;
599 struct crypto_attr_type *algt;
600 struct crypto_instance *inst;
601 struct crypto_alg *alg;
602 int err;
603
604 algt = crypto_get_attr_type(tb);
605 if (IS_ERR(algt))
606 return ERR_CAST(algt);
607
608 if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
609 algt->mask)
610 return ERR_PTR(-EINVAL);
611
612 name = crypto_attr_alg_name(tb[1]);
613 if (IS_ERR(name))
614 return ERR_CAST(name);
615
616 inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
617 if (!inst)
618 return ERR_PTR(-ENOMEM);
619
620 spawn = crypto_instance_ctx(inst);
621
622 /* Ignore async algorithms if necessary. */
623 mask |= crypto_requires_sync(algt->type, algt->mask);
624
625 crypto_set_skcipher_spawn(spawn, inst);
626 err = crypto_grab_nivcipher(spawn, name, type, mask);
627 if (err)
628 goto err_free_inst;
629
630 alg = crypto_skcipher_spawn_alg(spawn);
631
632 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
633 CRYPTO_ALG_TYPE_BLKCIPHER) {
634 balg.ivsize = alg->cra_blkcipher.ivsize;
635 balg.min_keysize = alg->cra_blkcipher.min_keysize;
636 balg.max_keysize = alg->cra_blkcipher.max_keysize;
637
638 balg.setkey = async_setkey;
639 balg.encrypt = async_encrypt;
640 balg.decrypt = async_decrypt;
641
642 balg.geniv = alg->cra_blkcipher.geniv;
643 } else {
644 balg.ivsize = alg->cra_ablkcipher.ivsize;
645 balg.min_keysize = alg->cra_ablkcipher.min_keysize;
646 balg.max_keysize = alg->cra_ablkcipher.max_keysize;
647
648 balg.setkey = alg->cra_ablkcipher.setkey;
649 balg.encrypt = alg->cra_ablkcipher.encrypt;
650 balg.decrypt = alg->cra_ablkcipher.decrypt;
651
652 balg.geniv = alg->cra_ablkcipher.geniv;
653 }
654
655 err = -EINVAL;
656 if (!balg.ivsize)
657 goto err_drop_alg;
658
659 /*
660 * This is only true if we're constructing an algorithm with its
661 * default IV generator. For the default generator we elide the
662 * template name and double-check the IV generator.
663 */
664 if (algt->mask & CRYPTO_ALG_GENIV) {
665 if (!balg.geniv)
666 balg.geniv = crypto_default_geniv(alg);
667 err = -EAGAIN;
668 if (strcmp(tmpl->name, balg.geniv))
669 goto err_drop_alg;
670
671 memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
672 memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
673 CRYPTO_MAX_ALG_NAME);
674 } else {
675 err = -ENAMETOOLONG;
676 if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
677 "%s(%s)", tmpl->name, alg->cra_name) >=
678 CRYPTO_MAX_ALG_NAME)
679 goto err_drop_alg;
680 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
681 "%s(%s)", tmpl->name, alg->cra_driver_name) >=
682 CRYPTO_MAX_ALG_NAME)
683 goto err_drop_alg;
684 }
685
686 inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
687 inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
688 inst->alg.cra_priority = alg->cra_priority;
689 inst->alg.cra_blocksize = alg->cra_blocksize;
690 inst->alg.cra_alignmask = alg->cra_alignmask;
691 inst->alg.cra_type = &crypto_givcipher_type;
692
693 inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
694 inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
695 inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
696 inst->alg.cra_ablkcipher.geniv = balg.geniv;
697
698 inst->alg.cra_ablkcipher.setkey = balg.setkey;
699 inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
700 inst->alg.cra_ablkcipher.decrypt = balg.decrypt;
701
702out:
703 return inst;
704
705err_drop_alg:
706 crypto_drop_skcipher(spawn);
707err_free_inst:
708 kfree(inst);
709 inst = ERR_PTR(err);
710 goto out;
711}
712EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);
713
714void skcipher_geniv_free(struct crypto_instance *inst)
715{
716 crypto_drop_skcipher(crypto_instance_ctx(inst));
717 kfree(inst);
718}
719EXPORT_SYMBOL_GPL(skcipher_geniv_free);
720
721int skcipher_geniv_init(struct crypto_tfm *tfm)
722{
723 struct crypto_instance *inst = (void *)tfm->__crt_alg;
724 struct crypto_ablkcipher *cipher;
725
726 cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
727 if (IS_ERR(cipher))
728 return PTR_ERR(cipher);
729
730 tfm->crt_ablkcipher.base = cipher;
731 tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);
732
733 return 0;
734}
735EXPORT_SYMBOL_GPL(skcipher_geniv_init);
736
737void skcipher_geniv_exit(struct crypto_tfm *tfm)
738{
739 crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
740}
741EXPORT_SYMBOL_GPL(skcipher_geniv_exit);
742
743MODULE_LICENSE("GPL"); 558MODULE_LICENSE("GPL");
744MODULE_DESCRIPTION("Generic block chaining cipher type"); 559MODULE_DESCRIPTION("Generic block chaining cipher type");
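
With the last givcipher users converted, the geniv construction machinery in blkcipher.c can go: skcipher_geniv_alloc() and its helpers existed only to build "given IV" instances on top of (a)blkcipher algorithms. Callers of the skcipher API now pass an IV explicitly with every request. A minimal sketch of that usage, not taken from this patch (error paths trimmed; CRYPTO_ALG_ASYNC is masked out so the transform is synchronous, which is what SKCIPHER_REQUEST_ON_STACK expects):

    /* requires <crypto/skcipher.h> and <linux/err.h> */
    static int one_shot_cbc_encrypt(const u8 *key, unsigned int keylen,
                                    u8 *iv, struct scatterlist *src,
                                    struct scatterlist *dst,
                                    unsigned int nbytes)
    {
            struct crypto_skcipher *tfm;
            int err;

            tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_skcipher_setkey(tfm, key, keylen);
            if (!err) {
                    SKCIPHER_REQUEST_ON_STACK(req, tfm);

                    skcipher_request_set_tfm(req, tfm);
                    skcipher_request_set_callback(req, 0, NULL, NULL);
                    /* the caller owns IV generation and passes it per request */
                    skcipher_request_set_crypt(req, src, dst, nbytes, iv);
                    err = crypto_skcipher_encrypt(req);
            }

            crypto_free_skcipher(tfm);
            return err;
    }
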
diff --git a/crypto/ccm.c b/crypto/ccm.c
index cc31ea4335bf..006d8575ef5c 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -28,7 +28,7 @@ struct ccm_instance_ctx {
28 28
29struct crypto_ccm_ctx { 29struct crypto_ccm_ctx {
30 struct crypto_cipher *cipher; 30 struct crypto_cipher *cipher;
31 struct crypto_ablkcipher *ctr; 31 struct crypto_skcipher *ctr;
32}; 32};
33 33
34struct crypto_rfc4309_ctx { 34struct crypto_rfc4309_ctx {
@@ -50,7 +50,7 @@ struct crypto_ccm_req_priv_ctx {
50 u32 flags; 50 u32 flags;
51 struct scatterlist src[3]; 51 struct scatterlist src[3];
52 struct scatterlist dst[3]; 52 struct scatterlist dst[3];
53 struct ablkcipher_request abreq; 53 struct skcipher_request skreq;
54}; 54};
55 55
56static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx( 56static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx(
@@ -83,15 +83,15 @@ static int crypto_ccm_setkey(struct crypto_aead *aead, const u8 *key,
83 unsigned int keylen) 83 unsigned int keylen)
84{ 84{
85 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); 85 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
86 struct crypto_ablkcipher *ctr = ctx->ctr; 86 struct crypto_skcipher *ctr = ctx->ctr;
87 struct crypto_cipher *tfm = ctx->cipher; 87 struct crypto_cipher *tfm = ctx->cipher;
88 int err = 0; 88 int err = 0;
89 89
90 crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); 90 crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
91 crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) & 91 crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
92 CRYPTO_TFM_REQ_MASK); 92 CRYPTO_TFM_REQ_MASK);
93 err = crypto_ablkcipher_setkey(ctr, key, keylen); 93 err = crypto_skcipher_setkey(ctr, key, keylen);
94 crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) & 94 crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
95 CRYPTO_TFM_RES_MASK); 95 CRYPTO_TFM_RES_MASK);
96 if (err) 96 if (err)
97 goto out; 97 goto out;
@@ -347,7 +347,7 @@ static int crypto_ccm_encrypt(struct aead_request *req)
347 struct crypto_aead *aead = crypto_aead_reqtfm(req); 347 struct crypto_aead *aead = crypto_aead_reqtfm(req);
348 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); 348 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
349 struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); 349 struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
350 struct ablkcipher_request *abreq = &pctx->abreq; 350 struct skcipher_request *skreq = &pctx->skreq;
351 struct scatterlist *dst; 351 struct scatterlist *dst;
352 unsigned int cryptlen = req->cryptlen; 352 unsigned int cryptlen = req->cryptlen;
353 u8 *odata = pctx->odata; 353 u8 *odata = pctx->odata;
@@ -366,11 +366,11 @@ static int crypto_ccm_encrypt(struct aead_request *req)
366 if (req->src != req->dst) 366 if (req->src != req->dst)
367 dst = pctx->dst; 367 dst = pctx->dst;
368 368
369 ablkcipher_request_set_tfm(abreq, ctx->ctr); 369 skcipher_request_set_tfm(skreq, ctx->ctr);
370 ablkcipher_request_set_callback(abreq, pctx->flags, 370 skcipher_request_set_callback(skreq, pctx->flags,
371 crypto_ccm_encrypt_done, req); 371 crypto_ccm_encrypt_done, req);
372 ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv); 372 skcipher_request_set_crypt(skreq, pctx->src, dst, cryptlen + 16, iv);
373 err = crypto_ablkcipher_encrypt(abreq); 373 err = crypto_skcipher_encrypt(skreq);
374 if (err) 374 if (err)
375 return err; 375 return err;
376 376
@@ -407,7 +407,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
407 struct crypto_aead *aead = crypto_aead_reqtfm(req); 407 struct crypto_aead *aead = crypto_aead_reqtfm(req);
408 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead); 408 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
409 struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); 409 struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
410 struct ablkcipher_request *abreq = &pctx->abreq; 410 struct skcipher_request *skreq = &pctx->skreq;
411 struct scatterlist *dst; 411 struct scatterlist *dst;
412 unsigned int authsize = crypto_aead_authsize(aead); 412 unsigned int authsize = crypto_aead_authsize(aead);
413 unsigned int cryptlen = req->cryptlen; 413 unsigned int cryptlen = req->cryptlen;
@@ -429,11 +429,11 @@ static int crypto_ccm_decrypt(struct aead_request *req)
429 if (req->src != req->dst) 429 if (req->src != req->dst)
430 dst = pctx->dst; 430 dst = pctx->dst;
431 431
432 ablkcipher_request_set_tfm(abreq, ctx->ctr); 432 skcipher_request_set_tfm(skreq, ctx->ctr);
433 ablkcipher_request_set_callback(abreq, pctx->flags, 433 skcipher_request_set_callback(skreq, pctx->flags,
434 crypto_ccm_decrypt_done, req); 434 crypto_ccm_decrypt_done, req);
435 ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv); 435 skcipher_request_set_crypt(skreq, pctx->src, dst, cryptlen + 16, iv);
436 err = crypto_ablkcipher_decrypt(abreq); 436 err = crypto_skcipher_decrypt(skreq);
437 if (err) 437 if (err)
438 return err; 438 return err;
439 439
@@ -454,7 +454,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
454 struct ccm_instance_ctx *ictx = aead_instance_ctx(inst); 454 struct ccm_instance_ctx *ictx = aead_instance_ctx(inst);
455 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm); 455 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);
456 struct crypto_cipher *cipher; 456 struct crypto_cipher *cipher;
457 struct crypto_ablkcipher *ctr; 457 struct crypto_skcipher *ctr;
458 unsigned long align; 458 unsigned long align;
459 int err; 459 int err;
460 460
@@ -462,7 +462,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
462 if (IS_ERR(cipher)) 462 if (IS_ERR(cipher))
463 return PTR_ERR(cipher); 463 return PTR_ERR(cipher);
464 464
465 ctr = crypto_spawn_skcipher(&ictx->ctr); 465 ctr = crypto_spawn_skcipher2(&ictx->ctr);
466 err = PTR_ERR(ctr); 466 err = PTR_ERR(ctr);
467 if (IS_ERR(ctr)) 467 if (IS_ERR(ctr))
468 goto err_free_cipher; 468 goto err_free_cipher;
@@ -475,7 +475,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
475 crypto_aead_set_reqsize( 475 crypto_aead_set_reqsize(
476 tfm, 476 tfm,
477 align + sizeof(struct crypto_ccm_req_priv_ctx) + 477 align + sizeof(struct crypto_ccm_req_priv_ctx) +
478 crypto_ablkcipher_reqsize(ctr)); 478 crypto_skcipher_reqsize(ctr));
479 479
480 return 0; 480 return 0;
481 481
@@ -489,7 +489,7 @@ static void crypto_ccm_exit_tfm(struct crypto_aead *tfm)
489 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm); 489 struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);
490 490
491 crypto_free_cipher(ctx->cipher); 491 crypto_free_cipher(ctx->cipher);
492 crypto_free_ablkcipher(ctx->ctr); 492 crypto_free_skcipher(ctx->ctr);
493} 493}
494 494
495static void crypto_ccm_free(struct aead_instance *inst) 495static void crypto_ccm_free(struct aead_instance *inst)
@@ -509,7 +509,7 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
509{ 509{
510 struct crypto_attr_type *algt; 510 struct crypto_attr_type *algt;
511 struct aead_instance *inst; 511 struct aead_instance *inst;
512 struct crypto_alg *ctr; 512 struct skcipher_alg *ctr;
513 struct crypto_alg *cipher; 513 struct crypto_alg *cipher;
514 struct ccm_instance_ctx *ictx; 514 struct ccm_instance_ctx *ictx;
515 int err; 515 int err;
@@ -544,39 +544,40 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
544 goto err_free_inst; 544 goto err_free_inst;
545 545
546 crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst)); 546 crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst));
547 err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0, 547 err = crypto_grab_skcipher2(&ictx->ctr, ctr_name, 0,
548 crypto_requires_sync(algt->type, 548 crypto_requires_sync(algt->type,
549 algt->mask)); 549 algt->mask));
550 if (err) 550 if (err)
551 goto err_drop_cipher; 551 goto err_drop_cipher;
552 552
553 ctr = crypto_skcipher_spawn_alg(&ictx->ctr); 553 ctr = crypto_spawn_skcipher_alg(&ictx->ctr);
554 554
555 /* Not a stream cipher? */ 555 /* Not a stream cipher? */
556 err = -EINVAL; 556 err = -EINVAL;
557 if (ctr->cra_blocksize != 1) 557 if (ctr->base.cra_blocksize != 1)
558 goto err_drop_ctr; 558 goto err_drop_ctr;
559 559
560 /* We want the real thing! */ 560 /* We want the real thing! */
561 if (ctr->cra_ablkcipher.ivsize != 16) 561 if (crypto_skcipher_alg_ivsize(ctr) != 16)
562 goto err_drop_ctr; 562 goto err_drop_ctr;
563 563
564 err = -ENAMETOOLONG; 564 err = -ENAMETOOLONG;
565 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, 565 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
566 "ccm_base(%s,%s)", ctr->cra_driver_name, 566 "ccm_base(%s,%s)", ctr->base.cra_driver_name,
567 cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) 567 cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
568 goto err_drop_ctr; 568 goto err_drop_ctr;
569 569
570 memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME); 570 memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
571 571
572 inst->alg.base.cra_flags = ctr->cra_flags & CRYPTO_ALG_ASYNC; 572 inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC;
573 inst->alg.base.cra_priority = (cipher->cra_priority + 573 inst->alg.base.cra_priority = (cipher->cra_priority +
574 ctr->cra_priority) / 2; 574 ctr->base.cra_priority) / 2;
575 inst->alg.base.cra_blocksize = 1; 575 inst->alg.base.cra_blocksize = 1;
576 inst->alg.base.cra_alignmask = cipher->cra_alignmask | 576 inst->alg.base.cra_alignmask = cipher->cra_alignmask |
577 ctr->cra_alignmask | 577 ctr->base.cra_alignmask |
578 (__alignof__(u32) - 1); 578 (__alignof__(u32) - 1);
579 inst->alg.ivsize = 16; 579 inst->alg.ivsize = 16;
580 inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
580 inst->alg.maxauthsize = 16; 581 inst->alg.maxauthsize = 16;
581 inst->alg.base.cra_ctxsize = sizeof(struct crypto_ccm_ctx); 582 inst->alg.base.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
582 inst->alg.init = crypto_ccm_init_tfm; 583 inst->alg.init = crypto_ccm_init_tfm;
@@ -863,6 +864,7 @@ static int crypto_rfc4309_create(struct crypto_template *tmpl,
863 inst->alg.base.cra_alignmask = alg->base.cra_alignmask; 864 inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
864 865
865 inst->alg.ivsize = 8; 866 inst->alg.ivsize = 8;
867 inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
866 inst->alg.maxauthsize = 16; 868 inst->alg.maxauthsize = 16;
867 869
868 inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx); 870 inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);
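
The ccm_base sanity checks keep their intent but move to the new helpers: the wrapped ctr mode must present itself as a stream cipher (block size 1) with a full 16-byte counter block as its IV, and the new chunksize field is propagated so wrappers can see the granularity the stream cipher reports (for ctr this is meant to be the underlying block size rather than the 1-byte blocksize). In isolation:

    err = -EINVAL;
    /* ctr must behave as a stream cipher ... */
    if (ctr->base.cra_blocksize != 1)
            goto err_drop_ctr;
    /* ... and take a full 16-byte counter block as its IV */
    if (crypto_skcipher_alg_ivsize(ctr) != 16)
            goto err_drop_ctr;
    inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
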
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index 7b6b935cef23..e899ef51dc8e 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -31,7 +31,7 @@ struct chachapoly_instance_ctx {
31}; 31};
32 32
33struct chachapoly_ctx { 33struct chachapoly_ctx {
34 struct crypto_ablkcipher *chacha; 34 struct crypto_skcipher *chacha;
35 struct crypto_ahash *poly; 35 struct crypto_ahash *poly;
36 /* key bytes we use for the ChaCha20 IV */ 36 /* key bytes we use for the ChaCha20 IV */
37 unsigned int saltlen; 37 unsigned int saltlen;
@@ -53,7 +53,7 @@ struct poly_req {
53struct chacha_req { 53struct chacha_req {
54 u8 iv[CHACHA20_IV_SIZE]; 54 u8 iv[CHACHA20_IV_SIZE];
55 struct scatterlist src[1]; 55 struct scatterlist src[1];
56 struct ablkcipher_request req; /* must be last member */ 56 struct skcipher_request req; /* must be last member */
57}; 57};
58 58
59struct chachapoly_req_ctx { 59struct chachapoly_req_ctx {
@@ -144,12 +144,12 @@ static int chacha_decrypt(struct aead_request *req)
144 dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen); 144 dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
145 } 145 }
146 146
147 ablkcipher_request_set_callback(&creq->req, aead_request_flags(req), 147 skcipher_request_set_callback(&creq->req, aead_request_flags(req),
148 chacha_decrypt_done, req); 148 chacha_decrypt_done, req);
149 ablkcipher_request_set_tfm(&creq->req, ctx->chacha); 149 skcipher_request_set_tfm(&creq->req, ctx->chacha);
150 ablkcipher_request_set_crypt(&creq->req, src, dst, 150 skcipher_request_set_crypt(&creq->req, src, dst,
151 rctx->cryptlen, creq->iv); 151 rctx->cryptlen, creq->iv);
152 err = crypto_ablkcipher_decrypt(&creq->req); 152 err = crypto_skcipher_decrypt(&creq->req);
153 if (err) 153 if (err)
154 return err; 154 return err;
155 155
@@ -393,13 +393,13 @@ static int poly_genkey(struct aead_request *req)
393 393
394 chacha_iv(creq->iv, req, 0); 394 chacha_iv(creq->iv, req, 0);
395 395
396 ablkcipher_request_set_callback(&creq->req, aead_request_flags(req), 396 skcipher_request_set_callback(&creq->req, aead_request_flags(req),
397 poly_genkey_done, req); 397 poly_genkey_done, req);
398 ablkcipher_request_set_tfm(&creq->req, ctx->chacha); 398 skcipher_request_set_tfm(&creq->req, ctx->chacha);
399 ablkcipher_request_set_crypt(&creq->req, creq->src, creq->src, 399 skcipher_request_set_crypt(&creq->req, creq->src, creq->src,
400 POLY1305_KEY_SIZE, creq->iv); 400 POLY1305_KEY_SIZE, creq->iv);
401 401
402 err = crypto_ablkcipher_decrypt(&creq->req); 402 err = crypto_skcipher_decrypt(&creq->req);
403 if (err) 403 if (err)
404 return err; 404 return err;
405 405
@@ -433,12 +433,12 @@ static int chacha_encrypt(struct aead_request *req)
433 dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen); 433 dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
434 } 434 }
435 435
436 ablkcipher_request_set_callback(&creq->req, aead_request_flags(req), 436 skcipher_request_set_callback(&creq->req, aead_request_flags(req),
437 chacha_encrypt_done, req); 437 chacha_encrypt_done, req);
438 ablkcipher_request_set_tfm(&creq->req, ctx->chacha); 438 skcipher_request_set_tfm(&creq->req, ctx->chacha);
439 ablkcipher_request_set_crypt(&creq->req, src, dst, 439 skcipher_request_set_crypt(&creq->req, src, dst,
440 req->cryptlen, creq->iv); 440 req->cryptlen, creq->iv);
441 err = crypto_ablkcipher_encrypt(&creq->req); 441 err = crypto_skcipher_encrypt(&creq->req);
442 if (err) 442 if (err)
443 return err; 443 return err;
444 444
@@ -500,13 +500,13 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
500 keylen -= ctx->saltlen; 500 keylen -= ctx->saltlen;
501 memcpy(ctx->salt, key + keylen, ctx->saltlen); 501 memcpy(ctx->salt, key + keylen, ctx->saltlen);
502 502
503 crypto_ablkcipher_clear_flags(ctx->chacha, CRYPTO_TFM_REQ_MASK); 503 crypto_skcipher_clear_flags(ctx->chacha, CRYPTO_TFM_REQ_MASK);
504 crypto_ablkcipher_set_flags(ctx->chacha, crypto_aead_get_flags(aead) & 504 crypto_skcipher_set_flags(ctx->chacha, crypto_aead_get_flags(aead) &
505 CRYPTO_TFM_REQ_MASK); 505 CRYPTO_TFM_REQ_MASK);
506 506
507 err = crypto_ablkcipher_setkey(ctx->chacha, key, keylen); 507 err = crypto_skcipher_setkey(ctx->chacha, key, keylen);
508 crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctx->chacha) & 508 crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctx->chacha) &
509 CRYPTO_TFM_RES_MASK); 509 CRYPTO_TFM_RES_MASK);
510 return err; 510 return err;
511} 511}
512 512
@@ -524,7 +524,7 @@ static int chachapoly_init(struct crypto_aead *tfm)
524 struct aead_instance *inst = aead_alg_instance(tfm); 524 struct aead_instance *inst = aead_alg_instance(tfm);
525 struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst); 525 struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst);
526 struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); 526 struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
527 struct crypto_ablkcipher *chacha; 527 struct crypto_skcipher *chacha;
528 struct crypto_ahash *poly; 528 struct crypto_ahash *poly;
529 unsigned long align; 529 unsigned long align;
530 530
@@ -532,7 +532,7 @@ static int chachapoly_init(struct crypto_aead *tfm)
532 if (IS_ERR(poly)) 532 if (IS_ERR(poly))
533 return PTR_ERR(poly); 533 return PTR_ERR(poly);
534 534
535 chacha = crypto_spawn_skcipher(&ictx->chacha); 535 chacha = crypto_spawn_skcipher2(&ictx->chacha);
536 if (IS_ERR(chacha)) { 536 if (IS_ERR(chacha)) {
537 crypto_free_ahash(poly); 537 crypto_free_ahash(poly);
538 return PTR_ERR(chacha); 538 return PTR_ERR(chacha);
@@ -548,8 +548,8 @@ static int chachapoly_init(struct crypto_aead *tfm)
548 tfm, 548 tfm,
549 align + offsetof(struct chachapoly_req_ctx, u) + 549 align + offsetof(struct chachapoly_req_ctx, u) +
550 max(offsetof(struct chacha_req, req) + 550 max(offsetof(struct chacha_req, req) +
551 sizeof(struct ablkcipher_request) + 551 sizeof(struct skcipher_request) +
552 crypto_ablkcipher_reqsize(chacha), 552 crypto_skcipher_reqsize(chacha),
553 offsetof(struct poly_req, req) + 553 offsetof(struct poly_req, req) +
554 sizeof(struct ahash_request) + 554 sizeof(struct ahash_request) +
555 crypto_ahash_reqsize(poly))); 555 crypto_ahash_reqsize(poly)));
@@ -562,7 +562,7 @@ static void chachapoly_exit(struct crypto_aead *tfm)
562 struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); 562 struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
563 563
564 crypto_free_ahash(ctx->poly); 564 crypto_free_ahash(ctx->poly);
565 crypto_free_ablkcipher(ctx->chacha); 565 crypto_free_skcipher(ctx->chacha);
566} 566}
567 567
568static void chachapoly_free(struct aead_instance *inst) 568static void chachapoly_free(struct aead_instance *inst)
@@ -579,7 +579,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
579{ 579{
580 struct crypto_attr_type *algt; 580 struct crypto_attr_type *algt;
581 struct aead_instance *inst; 581 struct aead_instance *inst;
582 struct crypto_alg *chacha; 582 struct skcipher_alg *chacha;
583 struct crypto_alg *poly; 583 struct crypto_alg *poly;
584 struct hash_alg_common *poly_hash; 584 struct hash_alg_common *poly_hash;
585 struct chachapoly_instance_ctx *ctx; 585 struct chachapoly_instance_ctx *ctx;
@@ -605,7 +605,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
605 605
606 poly = crypto_find_alg(poly_name, &crypto_ahash_type, 606 poly = crypto_find_alg(poly_name, &crypto_ahash_type,
607 CRYPTO_ALG_TYPE_HASH, 607 CRYPTO_ALG_TYPE_HASH,
608 CRYPTO_ALG_TYPE_AHASH_MASK); 608 CRYPTO_ALG_TYPE_AHASH_MASK |
609 crypto_requires_sync(algt->type,
610 algt->mask));
609 if (IS_ERR(poly)) 611 if (IS_ERR(poly))
610 return PTR_ERR(poly); 612 return PTR_ERR(poly);
611 613
@@ -623,20 +625,20 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
623 goto err_free_inst; 625 goto err_free_inst;
624 626
625 crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst)); 627 crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst));
626 err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0, 628 err = crypto_grab_skcipher2(&ctx->chacha, chacha_name, 0,
627 crypto_requires_sync(algt->type, 629 crypto_requires_sync(algt->type,
628 algt->mask)); 630 algt->mask));
629 if (err) 631 if (err)
630 goto err_drop_poly; 632 goto err_drop_poly;
631 633
632 chacha = crypto_skcipher_spawn_alg(&ctx->chacha); 634 chacha = crypto_spawn_skcipher_alg(&ctx->chacha);
633 635
634 err = -EINVAL; 636 err = -EINVAL;
635 /* Need 16-byte IV size, including Initial Block Counter value */ 637 /* Need 16-byte IV size, including Initial Block Counter value */
636 if (chacha->cra_ablkcipher.ivsize != CHACHA20_IV_SIZE) 638 if (crypto_skcipher_alg_ivsize(chacha) != CHACHA20_IV_SIZE)
637 goto out_drop_chacha; 639 goto out_drop_chacha;
638 /* Not a stream cipher? */ 640 /* Not a stream cipher? */
639 if (chacha->cra_blocksize != 1) 641 if (chacha->base.cra_blocksize != 1)
640 goto out_drop_chacha; 642 goto out_drop_chacha;
641 643
642 err = -ENAMETOOLONG; 644 err = -ENAMETOOLONG;
@@ -645,20 +647,21 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
645 poly_name) >= CRYPTO_MAX_ALG_NAME) 647 poly_name) >= CRYPTO_MAX_ALG_NAME)
646 goto out_drop_chacha; 648 goto out_drop_chacha;
647 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, 649 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
648 "%s(%s,%s)", name, chacha->cra_driver_name, 650 "%s(%s,%s)", name, chacha->base.cra_driver_name,
649 poly->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) 651 poly->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
650 goto out_drop_chacha; 652 goto out_drop_chacha;
651 653
652 inst->alg.base.cra_flags = (chacha->cra_flags | poly->cra_flags) & 654 inst->alg.base.cra_flags = (chacha->base.cra_flags | poly->cra_flags) &
653 CRYPTO_ALG_ASYNC; 655 CRYPTO_ALG_ASYNC;
654 inst->alg.base.cra_priority = (chacha->cra_priority + 656 inst->alg.base.cra_priority = (chacha->base.cra_priority +
655 poly->cra_priority) / 2; 657 poly->cra_priority) / 2;
656 inst->alg.base.cra_blocksize = 1; 658 inst->alg.base.cra_blocksize = 1;
657 inst->alg.base.cra_alignmask = chacha->cra_alignmask | 659 inst->alg.base.cra_alignmask = chacha->base.cra_alignmask |
658 poly->cra_alignmask; 660 poly->cra_alignmask;
659 inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) + 661 inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) +
660 ctx->saltlen; 662 ctx->saltlen;
661 inst->alg.ivsize = ivsize; 663 inst->alg.ivsize = ivsize;
664 inst->alg.chunksize = crypto_skcipher_alg_chunksize(chacha);
662 inst->alg.maxauthsize = POLY1305_DIGEST_SIZE; 665 inst->alg.maxauthsize = POLY1305_DIGEST_SIZE;
663 inst->alg.init = chachapoly_init; 666 inst->alg.init = chachapoly_init;
664 inst->alg.exit = chachapoly_exit; 667 inst->alg.exit = chachapoly_exit;
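
The same layout rule applies here as in the other converted templates: struct skcipher_request must remain the last member of chacha_req because the transform's variable-size request context is laid out directly behind it, and the AEAD request size only reserves the larger of the chacha and poly sub-request footprints since the two run in separate phases. Collected from the hunks above:

    struct chacha_req {
            u8 iv[CHACHA20_IV_SIZE];
            struct scatterlist src[1];
            struct skcipher_request req;    /* must be last member */
    };

    crypto_aead_set_reqsize(tfm,
            align + offsetof(struct chachapoly_req_ctx, u) +
            max(offsetof(struct chacha_req, req) +
                sizeof(struct skcipher_request) +
                crypto_skcipher_reqsize(chacha),
                offsetof(struct poly_req, req) +
                sizeof(struct ahash_request) +
                crypto_ahash_reqsize(poly)));
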
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
deleted file mode 100644
index b4340018c8d4..000000000000
--- a/crypto/chainiv.c
+++ /dev/null
@@ -1,317 +0,0 @@
1/*
2 * chainiv: Chain IV Generator
3 *
4 * Generate IVs simply by using the last block of the previous encryption.
5 * This is mainly useful for CBC with a synchronous algorithm.
6 *
7 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 */
15
16#include <crypto/internal/skcipher.h>
17#include <crypto/rng.h>
18#include <crypto/crypto_wq.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/spinlock.h>
24#include <linux/string.h>
25#include <linux/workqueue.h>
26
27enum {
28 CHAINIV_STATE_INUSE = 0,
29};
30
31struct chainiv_ctx {
32 spinlock_t lock;
33 char iv[];
34};
35
36struct async_chainiv_ctx {
37 unsigned long state;
38
39 spinlock_t lock;
40 int err;
41
42 struct crypto_queue queue;
43 struct work_struct postponed;
44
45 char iv[];
46};
47
48static int chainiv_givencrypt(struct skcipher_givcrypt_request *req)
49{
50 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
51 struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
52 struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
53 unsigned int ivsize;
54 int err;
55
56 ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
57 ablkcipher_request_set_callback(subreq, req->creq.base.flags &
58 ~CRYPTO_TFM_REQ_MAY_SLEEP,
59 req->creq.base.complete,
60 req->creq.base.data);
61 ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
62 req->creq.nbytes, req->creq.info);
63
64 spin_lock_bh(&ctx->lock);
65
66 ivsize = crypto_ablkcipher_ivsize(geniv);
67
68 memcpy(req->giv, ctx->iv, ivsize);
69 memcpy(subreq->info, ctx->iv, ivsize);
70
71 err = crypto_ablkcipher_encrypt(subreq);
72 if (err)
73 goto unlock;
74
75 memcpy(ctx->iv, subreq->info, ivsize);
76
77unlock:
78 spin_unlock_bh(&ctx->lock);
79
80 return err;
81}
82
83static int chainiv_init_common(struct crypto_tfm *tfm, char iv[])
84{
85 struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
86 int err = 0;
87
88 tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
89
90 if (iv) {
91 err = crypto_rng_get_bytes(crypto_default_rng, iv,
92 crypto_ablkcipher_ivsize(geniv));
93 crypto_put_default_rng();
94 }
95
96 return err ?: skcipher_geniv_init(tfm);
97}
98
99static int chainiv_init(struct crypto_tfm *tfm)
100{
101 struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
102 struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
103 char *iv;
104
105 spin_lock_init(&ctx->lock);
106
107 iv = NULL;
108 if (!crypto_get_default_rng()) {
109 crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
110 iv = ctx->iv;
111 }
112
113 return chainiv_init_common(tfm, iv);
114}
115
116static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
117{
118 int queued;
119 int err = ctx->err;
120
121 if (!ctx->queue.qlen) {
122 smp_mb__before_atomic();
123 clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
124
125 if (!ctx->queue.qlen ||
126 test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
127 goto out;
128 }
129
130 queued = queue_work(kcrypto_wq, &ctx->postponed);
131 BUG_ON(!queued);
132
133out:
134 return err;
135}
136
137static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
138{
139 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
140 struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
141 int err;
142
143 spin_lock_bh(&ctx->lock);
144 err = skcipher_enqueue_givcrypt(&ctx->queue, req);
145 spin_unlock_bh(&ctx->lock);
146
147 if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
148 return err;
149
150 ctx->err = err;
151 return async_chainiv_schedule_work(ctx);
152}
153
154static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req)
155{
156 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
157 struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
158 struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
159 unsigned int ivsize = crypto_ablkcipher_ivsize(geniv);
160
161 memcpy(req->giv, ctx->iv, ivsize);
162 memcpy(subreq->info, ctx->iv, ivsize);
163
164 ctx->err = crypto_ablkcipher_encrypt(subreq);
165 if (ctx->err)
166 goto out;
167
168 memcpy(ctx->iv, subreq->info, ivsize);
169
170out:
171 return async_chainiv_schedule_work(ctx);
172}
173
174static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req)
175{
176 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
177 struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
178 struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
179
180 ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
181 ablkcipher_request_set_callback(subreq, req->creq.base.flags,
182 req->creq.base.complete,
183 req->creq.base.data);
184 ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
185 req->creq.nbytes, req->creq.info);
186
187 if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
188 goto postpone;
189
190 if (ctx->queue.qlen) {
191 clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
192 goto postpone;
193 }
194
195 return async_chainiv_givencrypt_tail(req);
196
197postpone:
198 return async_chainiv_postpone_request(req);
199}
200
201static void async_chainiv_do_postponed(struct work_struct *work)
202{
203 struct async_chainiv_ctx *ctx = container_of(work,
204 struct async_chainiv_ctx,
205 postponed);
206 struct skcipher_givcrypt_request *req;
207 struct ablkcipher_request *subreq;
208 int err;
209
210 /* Only handle one request at a time to avoid hogging keventd. */
211 spin_lock_bh(&ctx->lock);
212 req = skcipher_dequeue_givcrypt(&ctx->queue);
213 spin_unlock_bh(&ctx->lock);
214
215 if (!req) {
216 async_chainiv_schedule_work(ctx);
217 return;
218 }
219
220 subreq = skcipher_givcrypt_reqctx(req);
221 subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
222
223 err = async_chainiv_givencrypt_tail(req);
224
225 local_bh_disable();
226 skcipher_givcrypt_complete(req, err);
227 local_bh_enable();
228}
229
230static int async_chainiv_init(struct crypto_tfm *tfm)
231{
232 struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
233 struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
234 char *iv;
235
236 spin_lock_init(&ctx->lock);
237
238 crypto_init_queue(&ctx->queue, 100);
239 INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);
240
241 iv = NULL;
242 if (!crypto_get_default_rng()) {
243 crypto_ablkcipher_crt(geniv)->givencrypt =
244 async_chainiv_givencrypt;
245 iv = ctx->iv;
246 }
247
248 return chainiv_init_common(tfm, iv);
249}
250
251static void async_chainiv_exit(struct crypto_tfm *tfm)
252{
253 struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
254
255 BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen);
256
257 skcipher_geniv_exit(tfm);
258}
259
260static struct crypto_template chainiv_tmpl;
261
262static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
263{
264 struct crypto_attr_type *algt;
265 struct crypto_instance *inst;
266
267 algt = crypto_get_attr_type(tb);
268 if (IS_ERR(algt))
269 return ERR_CAST(algt);
270
271 inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0);
272 if (IS_ERR(inst))
273 goto out;
274
275 inst->alg.cra_init = chainiv_init;
276 inst->alg.cra_exit = skcipher_geniv_exit;
277
278 inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx);
279
280 if (!crypto_requires_sync(algt->type, algt->mask)) {
281 inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;
282
283 inst->alg.cra_init = async_chainiv_init;
284 inst->alg.cra_exit = async_chainiv_exit;
285
286 inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx);
287 }
288
289 inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
290
291out:
292 return inst;
293}
294
295static struct crypto_template chainiv_tmpl = {
296 .name = "chainiv",
297 .alloc = chainiv_alloc,
298 .free = skcipher_geniv_free,
299 .module = THIS_MODULE,
300};
301
302static int __init chainiv_module_init(void)
303{
304 return crypto_register_template(&chainiv_tmpl);
305}
306
307static void chainiv_module_exit(void)
308{
309 crypto_unregister_template(&chainiv_tmpl);
310}
311
312module_init(chainiv_module_init);
313module_exit(chainiv_module_exit);
314
315MODULE_LICENSE("GPL");
316MODULE_DESCRIPTION("Chain IV Generator");
317MODULE_ALIAS_CRYPTO("chainiv");
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 7921251cdb13..cf8037a87b2d 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -22,6 +22,7 @@
22#include <crypto/internal/aead.h> 22#include <crypto/internal/aead.h>
23#include <crypto/cryptd.h> 23#include <crypto/cryptd.h>
24#include <crypto/crypto_wq.h> 24#include <crypto/crypto_wq.h>
25#include <linux/atomic.h>
25#include <linux/err.h> 26#include <linux/err.h>
26#include <linux/init.h> 27#include <linux/init.h>
27#include <linux/kernel.h> 28#include <linux/kernel.h>
@@ -31,7 +32,7 @@
31#include <linux/sched.h> 32#include <linux/sched.h>
32#include <linux/slab.h> 33#include <linux/slab.h>
33 34
34#define CRYPTD_MAX_CPU_QLEN 100 35#define CRYPTD_MAX_CPU_QLEN 1000
35 36
36struct cryptd_cpu_queue { 37struct cryptd_cpu_queue {
37 struct crypto_queue queue; 38 struct crypto_queue queue;
@@ -58,6 +59,7 @@ struct aead_instance_ctx {
58}; 59};
59 60
60struct cryptd_blkcipher_ctx { 61struct cryptd_blkcipher_ctx {
62 atomic_t refcnt;
61 struct crypto_blkcipher *child; 63 struct crypto_blkcipher *child;
62}; 64};
63 65
@@ -66,6 +68,7 @@ struct cryptd_blkcipher_request_ctx {
66}; 68};
67 69
68struct cryptd_hash_ctx { 70struct cryptd_hash_ctx {
71 atomic_t refcnt;
69 struct crypto_shash *child; 72 struct crypto_shash *child;
70}; 73};
71 74
@@ -75,6 +78,7 @@ struct cryptd_hash_request_ctx {
75}; 78};
76 79
77struct cryptd_aead_ctx { 80struct cryptd_aead_ctx {
81 atomic_t refcnt;
78 struct crypto_aead *child; 82 struct crypto_aead *child;
79}; 83};
80 84
@@ -118,11 +122,29 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
118{ 122{
119 int cpu, err; 123 int cpu, err;
120 struct cryptd_cpu_queue *cpu_queue; 124 struct cryptd_cpu_queue *cpu_queue;
125 struct crypto_tfm *tfm;
126 atomic_t *refcnt;
127 bool may_backlog;
121 128
122 cpu = get_cpu(); 129 cpu = get_cpu();
123 cpu_queue = this_cpu_ptr(queue->cpu_queue); 130 cpu_queue = this_cpu_ptr(queue->cpu_queue);
124 err = crypto_enqueue_request(&cpu_queue->queue, request); 131 err = crypto_enqueue_request(&cpu_queue->queue, request);
132
133 refcnt = crypto_tfm_ctx(request->tfm);
134 may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
135
136 if (err == -EBUSY && !may_backlog)
137 goto out_put_cpu;
138
125 queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); 139 queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
140
141 if (!atomic_read(refcnt))
142 goto out_put_cpu;
143
144 tfm = request->tfm;
145 atomic_inc(refcnt);
146
147out_put_cpu:
126 put_cpu(); 148 put_cpu();
127 149
128 return err; 150 return err;
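
The refcount introduced in the hunk above is what lets a cryptd transform be released while requests are still queued: cryptd_alloc_*() starts the count at 1, each request that actually lands on the queue takes an extra reference (refcnt is deliberately the first member of every cryptd context struct, which is why crypto_tfm_ctx() can be read as an atomic_t pointer here), and the completion paths drop it again. The hash-side completion helper added further down in this file shows the pairing end to end; reproduced here with comments:

    static void cryptd_hash_complete(struct ahash_request *req, int err)
    {
            struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
            struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
            struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
            int refcnt = atomic_read(&ctx->refcnt);

            local_bh_disable();
            rctx->complete(&req->base, err);
            local_bh_enable();

            /* drop the reference taken in cryptd_enqueue_request(); the
             * child tfm goes away only after cryptd_free_ahash() has
             * released the initial reference and the last queued request
             * has completed */
            if (err != -EINPROGRESS && refcnt &&
                atomic_dec_and_test(&ctx->refcnt))
                    crypto_free_ahash(tfm);
    }
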
@@ -206,7 +228,10 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
206 unsigned int len)) 228 unsigned int len))
207{ 229{
208 struct cryptd_blkcipher_request_ctx *rctx; 230 struct cryptd_blkcipher_request_ctx *rctx;
231 struct cryptd_blkcipher_ctx *ctx;
232 struct crypto_ablkcipher *tfm;
209 struct blkcipher_desc desc; 233 struct blkcipher_desc desc;
234 int refcnt;
210 235
211 rctx = ablkcipher_request_ctx(req); 236 rctx = ablkcipher_request_ctx(req);
212 237
@@ -222,9 +247,16 @@ static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
222 req->base.complete = rctx->complete; 247 req->base.complete = rctx->complete;
223 248
224out: 249out:
250 tfm = crypto_ablkcipher_reqtfm(req);
251 ctx = crypto_ablkcipher_ctx(tfm);
252 refcnt = atomic_read(&ctx->refcnt);
253
225 local_bh_disable(); 254 local_bh_disable();
226 rctx->complete(&req->base, err); 255 rctx->complete(&req->base, err);
227 local_bh_enable(); 256 local_bh_enable();
257
258 if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
259 crypto_free_ablkcipher(tfm);
228} 260}
229 261
230static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err) 262static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
@@ -456,6 +488,21 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
456 return cryptd_enqueue_request(queue, &req->base); 488 return cryptd_enqueue_request(queue, &req->base);
457} 489}
458 490
491static void cryptd_hash_complete(struct ahash_request *req, int err)
492{
493 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
494 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
495 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
496 int refcnt = atomic_read(&ctx->refcnt);
497
498 local_bh_disable();
499 rctx->complete(&req->base, err);
500 local_bh_enable();
501
502 if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
503 crypto_free_ahash(tfm);
504}
505
459static void cryptd_hash_init(struct crypto_async_request *req_async, int err) 506static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
460{ 507{
461 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); 508 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
@@ -475,9 +522,7 @@ static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
475 req->base.complete = rctx->complete; 522 req->base.complete = rctx->complete;
476 523
477out: 524out:
478 local_bh_disable(); 525 cryptd_hash_complete(req, err);
479 rctx->complete(&req->base, err);
480 local_bh_enable();
481} 526}
482 527
483static int cryptd_hash_init_enqueue(struct ahash_request *req) 528static int cryptd_hash_init_enqueue(struct ahash_request *req)
@@ -500,9 +545,7 @@ static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
500 req->base.complete = rctx->complete; 545 req->base.complete = rctx->complete;
501 546
502out: 547out:
503 local_bh_disable(); 548 cryptd_hash_complete(req, err);
504 rctx->complete(&req->base, err);
505 local_bh_enable();
506} 549}
507 550
508static int cryptd_hash_update_enqueue(struct ahash_request *req) 551static int cryptd_hash_update_enqueue(struct ahash_request *req)
@@ -523,9 +566,7 @@ static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
523 req->base.complete = rctx->complete; 566 req->base.complete = rctx->complete;
524 567
525out: 568out:
526 local_bh_disable(); 569 cryptd_hash_complete(req, err);
527 rctx->complete(&req->base, err);
528 local_bh_enable();
529} 570}
530 571
531static int cryptd_hash_final_enqueue(struct ahash_request *req) 572static int cryptd_hash_final_enqueue(struct ahash_request *req)
@@ -546,9 +587,7 @@ static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
546 req->base.complete = rctx->complete; 587 req->base.complete = rctx->complete;
547 588
548out: 589out:
549 local_bh_disable(); 590 cryptd_hash_complete(req, err);
550 rctx->complete(&req->base, err);
551 local_bh_enable();
552} 591}
553 592
554static int cryptd_hash_finup_enqueue(struct ahash_request *req) 593static int cryptd_hash_finup_enqueue(struct ahash_request *req)
@@ -575,9 +614,7 @@ static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
575 req->base.complete = rctx->complete; 614 req->base.complete = rctx->complete;
576 615
577out: 616out:
578 local_bh_disable(); 617 cryptd_hash_complete(req, err);
579 rctx->complete(&req->base, err);
580 local_bh_enable();
581} 618}
582 619
583static int cryptd_hash_digest_enqueue(struct ahash_request *req) 620static int cryptd_hash_digest_enqueue(struct ahash_request *req)
@@ -688,7 +725,10 @@ static void cryptd_aead_crypt(struct aead_request *req,
688 int (*crypt)(struct aead_request *req)) 725 int (*crypt)(struct aead_request *req))
689{ 726{
690 struct cryptd_aead_request_ctx *rctx; 727 struct cryptd_aead_request_ctx *rctx;
728 struct cryptd_aead_ctx *ctx;
691 crypto_completion_t compl; 729 crypto_completion_t compl;
730 struct crypto_aead *tfm;
731 int refcnt;
692 732
693 rctx = aead_request_ctx(req); 733 rctx = aead_request_ctx(req);
694 compl = rctx->complete; 734 compl = rctx->complete;
@@ -697,10 +737,18 @@ static void cryptd_aead_crypt(struct aead_request *req,
697 goto out; 737 goto out;
698 aead_request_set_tfm(req, child); 738 aead_request_set_tfm(req, child);
699 err = crypt( req ); 739 err = crypt( req );
740
700out: 741out:
742 tfm = crypto_aead_reqtfm(req);
743 ctx = crypto_aead_ctx(tfm);
744 refcnt = atomic_read(&ctx->refcnt);
745
701 local_bh_disable(); 746 local_bh_disable();
702 compl(&req->base, err); 747 compl(&req->base, err);
703 local_bh_enable(); 748 local_bh_enable();
749
750 if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
751 crypto_free_aead(tfm);
704} 752}
705 753
706static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err) 754static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
@@ -883,6 +931,7 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
883 u32 type, u32 mask) 931 u32 type, u32 mask)
884{ 932{
885 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; 933 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
934 struct cryptd_blkcipher_ctx *ctx;
886 struct crypto_tfm *tfm; 935 struct crypto_tfm *tfm;
887 936
888 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, 937 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
@@ -899,6 +948,9 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
899 return ERR_PTR(-EINVAL); 948 return ERR_PTR(-EINVAL);
900 } 949 }
901 950
951 ctx = crypto_tfm_ctx(tfm);
952 atomic_set(&ctx->refcnt, 1);
953
902 return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm)); 954 return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
903} 955}
904EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher); 956EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
@@ -910,9 +962,20 @@ struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
910} 962}
911EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child); 963EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
912 964
965bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
966{
967 struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
968
969 return atomic_read(&ctx->refcnt) - 1;
970}
971EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);
972
913void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm) 973void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
914{ 974{
915 crypto_free_ablkcipher(&tfm->base); 975 struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
976
977 if (atomic_dec_and_test(&ctx->refcnt))
978 crypto_free_ablkcipher(&tfm->base);
916} 979}
917EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); 980EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
918 981
@@ -920,6 +983,7 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
920 u32 type, u32 mask) 983 u32 type, u32 mask)
921{ 984{
922 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; 985 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
986 struct cryptd_hash_ctx *ctx;
923 struct crypto_ahash *tfm; 987 struct crypto_ahash *tfm;
924 988
925 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, 989 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
@@ -933,6 +997,9 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
933 return ERR_PTR(-EINVAL); 997 return ERR_PTR(-EINVAL);
934 } 998 }
935 999
1000 ctx = crypto_ahash_ctx(tfm);
1001 atomic_set(&ctx->refcnt, 1);
1002
936 return __cryptd_ahash_cast(tfm); 1003 return __cryptd_ahash_cast(tfm);
937} 1004}
938EXPORT_SYMBOL_GPL(cryptd_alloc_ahash); 1005EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
@@ -952,9 +1019,20 @@ struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
952} 1019}
953EXPORT_SYMBOL_GPL(cryptd_shash_desc); 1020EXPORT_SYMBOL_GPL(cryptd_shash_desc);
954 1021
1022bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
1023{
1024 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1025
1026 return atomic_read(&ctx->refcnt) - 1;
1027}
1028EXPORT_SYMBOL_GPL(cryptd_ahash_queued);
1029
955void cryptd_free_ahash(struct cryptd_ahash *tfm) 1030void cryptd_free_ahash(struct cryptd_ahash *tfm)
956{ 1031{
957 crypto_free_ahash(&tfm->base); 1032 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1033
1034 if (atomic_dec_and_test(&ctx->refcnt))
1035 crypto_free_ahash(&tfm->base);
958} 1036}
959EXPORT_SYMBOL_GPL(cryptd_free_ahash); 1037EXPORT_SYMBOL_GPL(cryptd_free_ahash);
960 1038
@@ -962,6 +1040,7 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
962 u32 type, u32 mask) 1040 u32 type, u32 mask)
963{ 1041{
964 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; 1042 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
1043 struct cryptd_aead_ctx *ctx;
965 struct crypto_aead *tfm; 1044 struct crypto_aead *tfm;
966 1045
967 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, 1046 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
@@ -974,6 +1053,10 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
974 crypto_free_aead(tfm); 1053 crypto_free_aead(tfm);
975 return ERR_PTR(-EINVAL); 1054 return ERR_PTR(-EINVAL);
976 } 1055 }
1056
1057 ctx = crypto_aead_ctx(tfm);
1058 atomic_set(&ctx->refcnt, 1);
1059
977 return __cryptd_aead_cast(tfm); 1060 return __cryptd_aead_cast(tfm);
978} 1061}
979EXPORT_SYMBOL_GPL(cryptd_alloc_aead); 1062EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
@@ -986,9 +1069,20 @@ struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
986} 1069}
987EXPORT_SYMBOL_GPL(cryptd_aead_child); 1070EXPORT_SYMBOL_GPL(cryptd_aead_child);
988 1071
1072bool cryptd_aead_queued(struct cryptd_aead *tfm)
1073{
1074 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1075
1076 return atomic_read(&ctx->refcnt) - 1;
1077}
1078EXPORT_SYMBOL_GPL(cryptd_aead_queued);
1079
989void cryptd_free_aead(struct cryptd_aead *tfm) 1080void cryptd_free_aead(struct cryptd_aead *tfm)
990{ 1081{
991 crypto_free_aead(&tfm->base); 1082 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1083
1084 if (atomic_dec_and_test(&ctx->refcnt))
1085 crypto_free_aead(&tfm->base);
992} 1086}
993EXPORT_SYMBOL_GPL(cryptd_free_aead); 1087EXPORT_SYMBOL_GPL(cryptd_free_aead);
994 1088
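A minimal sketch of how a converted driver might use the reference counting and the new cryptd_aead_queued() helper added above: requests already queued to cryptd keep their ordering, while an idle wrapper can be bypassed via the child transform. The function and variable names below are hypothetical and not taken from this patch.

/* assumes <crypto/aead.h> and <crypto/cryptd.h> */
static int example_aead_encrypt(struct cryptd_aead *cryptd_tfm,
				struct aead_request *req)
{
	if (!cryptd_aead_queued(cryptd_tfm)) {
		/* nothing pending in cryptd: run on the child directly */
		aead_request_set_tfm(req, cryptd_aead_child(cryptd_tfm));
		return crypto_aead_encrypt(req);
	}

	/* otherwise keep ordering with the already-queued requests */
	aead_request_set_tfm(req, &cryptd_tfm->base);
	return crypto_aead_encrypt(req);
}

With this patch cryptd_free_aead() only drops a reference, so a transform released while requests are still in flight is destroyed by cryptd_aead_crypt() once the last completion has run.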
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 941c9a434d50..20ff2c746e0b 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -26,7 +26,7 @@
26#include <linux/string.h> 26#include <linux/string.h>
27 27
28static DEFINE_MUTEX(crypto_default_null_skcipher_lock); 28static DEFINE_MUTEX(crypto_default_null_skcipher_lock);
29static struct crypto_blkcipher *crypto_default_null_skcipher; 29static struct crypto_skcipher *crypto_default_null_skcipher;
30static int crypto_default_null_skcipher_refcnt; 30static int crypto_default_null_skcipher_refcnt;
31 31
32static int null_compress(struct crypto_tfm *tfm, const u8 *src, 32static int null_compress(struct crypto_tfm *tfm, const u8 *src,
@@ -153,15 +153,16 @@ MODULE_ALIAS_CRYPTO("compress_null");
153MODULE_ALIAS_CRYPTO("digest_null"); 153MODULE_ALIAS_CRYPTO("digest_null");
154MODULE_ALIAS_CRYPTO("cipher_null"); 154MODULE_ALIAS_CRYPTO("cipher_null");
155 155
156struct crypto_blkcipher *crypto_get_default_null_skcipher(void) 156struct crypto_skcipher *crypto_get_default_null_skcipher(void)
157{ 157{
158 struct crypto_blkcipher *tfm; 158 struct crypto_skcipher *tfm;
159 159
160 mutex_lock(&crypto_default_null_skcipher_lock); 160 mutex_lock(&crypto_default_null_skcipher_lock);
161 tfm = crypto_default_null_skcipher; 161 tfm = crypto_default_null_skcipher;
162 162
163 if (!tfm) { 163 if (!tfm) {
164 tfm = crypto_alloc_blkcipher("ecb(cipher_null)", 0, 0); 164 tfm = crypto_alloc_skcipher("ecb(cipher_null)",
165 0, CRYPTO_ALG_ASYNC);
165 if (IS_ERR(tfm)) 166 if (IS_ERR(tfm))
166 goto unlock; 167 goto unlock;
167 168
@@ -181,7 +182,7 @@ void crypto_put_default_null_skcipher(void)
181{ 182{
182 mutex_lock(&crypto_default_null_skcipher_lock); 183 mutex_lock(&crypto_default_null_skcipher_lock);
183 if (!--crypto_default_null_skcipher_refcnt) { 184 if (!--crypto_default_null_skcipher_refcnt) {
184 crypto_free_blkcipher(crypto_default_null_skcipher); 185 crypto_free_skcipher(crypto_default_null_skcipher);
185 crypto_default_null_skcipher = NULL; 186 crypto_default_null_skcipher = NULL;
186 } 187 }
187 mutex_unlock(&crypto_default_null_skcipher_lock); 188 mutex_unlock(&crypto_default_null_skcipher_lock);
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 7097a3395b25..1c5705481c69 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -28,6 +28,7 @@
28#include <crypto/internal/skcipher.h> 28#include <crypto/internal/skcipher.h>
29#include <crypto/internal/rng.h> 29#include <crypto/internal/rng.h>
30#include <crypto/akcipher.h> 30#include <crypto/akcipher.h>
31#include <crypto/kpp.h>
31 32
32#include "internal.h" 33#include "internal.h"
33 34
@@ -126,6 +127,21 @@ nla_put_failure:
126 return -EMSGSIZE; 127 return -EMSGSIZE;
127} 128}
128 129
130static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
131{
132 struct crypto_report_kpp rkpp;
133
134 strncpy(rkpp.type, "kpp", sizeof(rkpp.type));
135
136 if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
137 sizeof(struct crypto_report_kpp), &rkpp))
138 goto nla_put_failure;
139 return 0;
140
141nla_put_failure:
142 return -EMSGSIZE;
143}
144
129static int crypto_report_one(struct crypto_alg *alg, 145static int crypto_report_one(struct crypto_alg *alg,
130 struct crypto_user_alg *ualg, struct sk_buff *skb) 146 struct crypto_user_alg *ualg, struct sk_buff *skb)
131{ 147{
@@ -176,6 +192,10 @@ static int crypto_report_one(struct crypto_alg *alg,
176 goto nla_put_failure; 192 goto nla_put_failure;
177 193
178 break; 194 break;
195 case CRYPTO_ALG_TYPE_KPP:
196 if (crypto_report_kpp(skb, alg))
197 goto nla_put_failure;
198 break;
179 } 199 }
180 200
181out: 201out:
@@ -358,32 +378,6 @@ drop_alg:
358 return err; 378 return err;
359} 379}
360 380
361static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type,
362 u32 mask)
363{
364 int err;
365 struct crypto_alg *alg;
366
367 type = crypto_skcipher_type(type);
368 mask = crypto_skcipher_mask(mask);
369
370 for (;;) {
371 alg = crypto_lookup_skcipher(name, type, mask);
372 if (!IS_ERR(alg))
373 return alg;
374
375 err = PTR_ERR(alg);
376 if (err != -EAGAIN)
377 break;
378 if (fatal_signal_pending(current)) {
379 err = -EINTR;
380 break;
381 }
382 }
383
384 return ERR_PTR(err);
385}
386
387static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh, 381static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
388 struct nlattr **attrs) 382 struct nlattr **attrs)
389{ 383{
@@ -416,16 +410,7 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
416 else 410 else
417 name = p->cru_name; 411 name = p->cru_name;
418 412
419 switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) { 413 alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
420 case CRYPTO_ALG_TYPE_GIVCIPHER:
421 case CRYPTO_ALG_TYPE_BLKCIPHER:
422 case CRYPTO_ALG_TYPE_ABLKCIPHER:
423 alg = crypto_user_skcipher_alg(name, p->cru_type, p->cru_mask);
424 break;
425 default:
426 alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
427 }
428
429 if (IS_ERR(alg)) 414 if (IS_ERR(alg))
430 return PTR_ERR(alg); 415 return PTR_ERR(alg);
431 416
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 2386f7313952..ff4d21eddb83 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -26,13 +26,13 @@ struct crypto_ctr_ctx {
26}; 26};
27 27
28struct crypto_rfc3686_ctx { 28struct crypto_rfc3686_ctx {
29 struct crypto_ablkcipher *child; 29 struct crypto_skcipher *child;
30 u8 nonce[CTR_RFC3686_NONCE_SIZE]; 30 u8 nonce[CTR_RFC3686_NONCE_SIZE];
31}; 31};
32 32
33struct crypto_rfc3686_req_ctx { 33struct crypto_rfc3686_req_ctx {
34 u8 iv[CTR_RFC3686_BLOCK_SIZE]; 34 u8 iv[CTR_RFC3686_BLOCK_SIZE];
35 struct ablkcipher_request subreq CRYPTO_MINALIGN_ATTR; 35 struct skcipher_request subreq CRYPTO_MINALIGN_ATTR;
36}; 36};
37 37
38static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key, 38static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key,
@@ -249,11 +249,11 @@ static struct crypto_template crypto_ctr_tmpl = {
249 .module = THIS_MODULE, 249 .module = THIS_MODULE,
250}; 250};
251 251
252static int crypto_rfc3686_setkey(struct crypto_ablkcipher *parent, 252static int crypto_rfc3686_setkey(struct crypto_skcipher *parent,
253 const u8 *key, unsigned int keylen) 253 const u8 *key, unsigned int keylen)
254{ 254{
255 struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(parent); 255 struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(parent);
256 struct crypto_ablkcipher *child = ctx->child; 256 struct crypto_skcipher *child = ctx->child;
257 int err; 257 int err;
258 258
259 /* the nonce is stored in bytes at end of key */ 259 /* the nonce is stored in bytes at end of key */
@@ -265,173 +265,178 @@ static int crypto_rfc3686_setkey(struct crypto_ablkcipher *parent,
265 265
266 keylen -= CTR_RFC3686_NONCE_SIZE; 266 keylen -= CTR_RFC3686_NONCE_SIZE;
267 267
268 crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); 268 crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
269 crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) & 269 crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
270 CRYPTO_TFM_REQ_MASK); 270 CRYPTO_TFM_REQ_MASK);
271 err = crypto_ablkcipher_setkey(child, key, keylen); 271 err = crypto_skcipher_setkey(child, key, keylen);
272 crypto_ablkcipher_set_flags(parent, crypto_ablkcipher_get_flags(child) & 272 crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
273 CRYPTO_TFM_RES_MASK); 273 CRYPTO_TFM_RES_MASK);
274 274
275 return err; 275 return err;
276} 276}
277 277
278static int crypto_rfc3686_crypt(struct ablkcipher_request *req) 278static int crypto_rfc3686_crypt(struct skcipher_request *req)
279{ 279{
280 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 280 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
281 struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(tfm); 281 struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
282 struct crypto_ablkcipher *child = ctx->child; 282 struct crypto_skcipher *child = ctx->child;
283 unsigned long align = crypto_ablkcipher_alignmask(tfm); 283 unsigned long align = crypto_skcipher_alignmask(tfm);
284 struct crypto_rfc3686_req_ctx *rctx = 284 struct crypto_rfc3686_req_ctx *rctx =
285 (void *)PTR_ALIGN((u8 *)ablkcipher_request_ctx(req), align + 1); 285 (void *)PTR_ALIGN((u8 *)skcipher_request_ctx(req), align + 1);
286 struct ablkcipher_request *subreq = &rctx->subreq; 286 struct skcipher_request *subreq = &rctx->subreq;
287 u8 *iv = rctx->iv; 287 u8 *iv = rctx->iv;
288 288
289 /* set up counter block */ 289 /* set up counter block */
290 memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE); 290 memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
291 memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->info, CTR_RFC3686_IV_SIZE); 291 memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);
292 292
293 /* initialize counter portion of counter block */ 293 /* initialize counter portion of counter block */
294 *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) = 294 *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
295 cpu_to_be32(1); 295 cpu_to_be32(1);
296 296
297 ablkcipher_request_set_tfm(subreq, child); 297 skcipher_request_set_tfm(subreq, child);
298 ablkcipher_request_set_callback(subreq, req->base.flags, 298 skcipher_request_set_callback(subreq, req->base.flags,
299 req->base.complete, req->base.data); 299 req->base.complete, req->base.data);
300 ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->nbytes, 300 skcipher_request_set_crypt(subreq, req->src, req->dst,
301 iv); 301 req->cryptlen, iv);
302 302
303 return crypto_ablkcipher_encrypt(subreq); 303 return crypto_skcipher_encrypt(subreq);
304} 304}
305 305
306static int crypto_rfc3686_init_tfm(struct crypto_tfm *tfm) 306static int crypto_rfc3686_init_tfm(struct crypto_skcipher *tfm)
307{ 307{
308 struct crypto_instance *inst = (void *)tfm->__crt_alg; 308 struct skcipher_instance *inst = skcipher_alg_instance(tfm);
309 struct crypto_skcipher_spawn *spawn = crypto_instance_ctx(inst); 309 struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
310 struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm); 310 struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
311 struct crypto_ablkcipher *cipher; 311 struct crypto_skcipher *cipher;
312 unsigned long align; 312 unsigned long align;
313 unsigned int reqsize;
313 314
314 cipher = crypto_spawn_skcipher(spawn); 315 cipher = crypto_spawn_skcipher2(spawn);
315 if (IS_ERR(cipher)) 316 if (IS_ERR(cipher))
316 return PTR_ERR(cipher); 317 return PTR_ERR(cipher);
317 318
318 ctx->child = cipher; 319 ctx->child = cipher;
319 320
320 align = crypto_tfm_alg_alignmask(tfm); 321 align = crypto_skcipher_alignmask(tfm);
321 align &= ~(crypto_tfm_ctx_alignment() - 1); 322 align &= ~(crypto_tfm_ctx_alignment() - 1);
322 tfm->crt_ablkcipher.reqsize = align + 323 reqsize = align + sizeof(struct crypto_rfc3686_req_ctx) +
323 sizeof(struct crypto_rfc3686_req_ctx) + 324 crypto_skcipher_reqsize(cipher);
324 crypto_ablkcipher_reqsize(cipher); 325 crypto_skcipher_set_reqsize(tfm, reqsize);
325 326
326 return 0; 327 return 0;
327} 328}
328 329
329static void crypto_rfc3686_exit_tfm(struct crypto_tfm *tfm) 330static void crypto_rfc3686_exit_tfm(struct crypto_skcipher *tfm)
330{ 331{
331 struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm); 332 struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
333
334 crypto_free_skcipher(ctx->child);
335}
332 336
333 crypto_free_ablkcipher(ctx->child); 337static void crypto_rfc3686_free(struct skcipher_instance *inst)
338{
339 struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
340
341 crypto_drop_skcipher(spawn);
342 kfree(inst);
334} 343}
335 344
336static struct crypto_instance *crypto_rfc3686_alloc(struct rtattr **tb) 345static int crypto_rfc3686_create(struct crypto_template *tmpl,
346 struct rtattr **tb)
337{ 347{
338 struct crypto_attr_type *algt; 348 struct crypto_attr_type *algt;
339 struct crypto_instance *inst; 349 struct skcipher_instance *inst;
340 struct crypto_alg *alg; 350 struct skcipher_alg *alg;
341 struct crypto_skcipher_spawn *spawn; 351 struct crypto_skcipher_spawn *spawn;
342 const char *cipher_name; 352 const char *cipher_name;
343 int err; 353 int err;
344 354
345 algt = crypto_get_attr_type(tb); 355 algt = crypto_get_attr_type(tb);
346 if (IS_ERR(algt)) 356 if (IS_ERR(algt))
347 return ERR_CAST(algt); 357 return PTR_ERR(algt);
348 358
349 if ((algt->type ^ CRYPTO_ALG_TYPE_BLKCIPHER) & algt->mask) 359 if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
350 return ERR_PTR(-EINVAL); 360 return -EINVAL;
351 361
352 cipher_name = crypto_attr_alg_name(tb[1]); 362 cipher_name = crypto_attr_alg_name(tb[1]);
353 if (IS_ERR(cipher_name)) 363 if (IS_ERR(cipher_name))
354 return ERR_CAST(cipher_name); 364 return PTR_ERR(cipher_name);
355 365
356 inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); 366 inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
357 if (!inst) 367 if (!inst)
358 return ERR_PTR(-ENOMEM); 368 return -ENOMEM;
359 369
360 spawn = crypto_instance_ctx(inst); 370 spawn = skcipher_instance_ctx(inst);
361 371
362 crypto_set_skcipher_spawn(spawn, inst); 372 crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
363 err = crypto_grab_skcipher(spawn, cipher_name, 0, 373 err = crypto_grab_skcipher2(spawn, cipher_name, 0,
364 crypto_requires_sync(algt->type, 374 crypto_requires_sync(algt->type,
365 algt->mask)); 375 algt->mask));
366 if (err) 376 if (err)
367 goto err_free_inst; 377 goto err_free_inst;
368 378
369 alg = crypto_skcipher_spawn_alg(spawn); 379 alg = crypto_spawn_skcipher_alg(spawn);
370 380
371 /* We only support 16-byte blocks. */ 381 /* We only support 16-byte blocks. */
372 err = -EINVAL; 382 err = -EINVAL;
373 if (alg->cra_ablkcipher.ivsize != CTR_RFC3686_BLOCK_SIZE) 383 if (crypto_skcipher_alg_ivsize(alg) != CTR_RFC3686_BLOCK_SIZE)
374 goto err_drop_spawn; 384 goto err_drop_spawn;
375 385
376 /* Not a stream cipher? */ 386 /* Not a stream cipher? */
377 if (alg->cra_blocksize != 1) 387 if (alg->base.cra_blocksize != 1)
378 goto err_drop_spawn; 388 goto err_drop_spawn;
379 389
380 err = -ENAMETOOLONG; 390 err = -ENAMETOOLONG;
381 if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "rfc3686(%s)", 391 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
382 alg->cra_name) >= CRYPTO_MAX_ALG_NAME) 392 "rfc3686(%s)", alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
383 goto err_drop_spawn; 393 goto err_drop_spawn;
384 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, 394 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
385 "rfc3686(%s)", alg->cra_driver_name) >= 395 "rfc3686(%s)", alg->base.cra_driver_name) >=
386 CRYPTO_MAX_ALG_NAME) 396 CRYPTO_MAX_ALG_NAME)
387 goto err_drop_spawn; 397 goto err_drop_spawn;
388 398
389 inst->alg.cra_priority = alg->cra_priority; 399 inst->alg.base.cra_priority = alg->base.cra_priority;
390 inst->alg.cra_blocksize = 1; 400 inst->alg.base.cra_blocksize = 1;
391 inst->alg.cra_alignmask = alg->cra_alignmask; 401 inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
392 402
393 inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 403 inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
394 (alg->cra_flags & CRYPTO_ALG_ASYNC);
395 inst->alg.cra_type = &crypto_ablkcipher_type;
396 404
397 inst->alg.cra_ablkcipher.ivsize = CTR_RFC3686_IV_SIZE; 405 inst->alg.ivsize = CTR_RFC3686_IV_SIZE;
398 inst->alg.cra_ablkcipher.min_keysize = 406 inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
399 alg->cra_ablkcipher.min_keysize + CTR_RFC3686_NONCE_SIZE; 407 inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
400 inst->alg.cra_ablkcipher.max_keysize = 408 CTR_RFC3686_NONCE_SIZE;
401 alg->cra_ablkcipher.max_keysize + CTR_RFC3686_NONCE_SIZE; 409 inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
410 CTR_RFC3686_NONCE_SIZE;
402 411
403 inst->alg.cra_ablkcipher.geniv = "seqiv"; 412 inst->alg.setkey = crypto_rfc3686_setkey;
413 inst->alg.encrypt = crypto_rfc3686_crypt;
414 inst->alg.decrypt = crypto_rfc3686_crypt;
404 415
405 inst->alg.cra_ablkcipher.setkey = crypto_rfc3686_setkey; 416 inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx);
406 inst->alg.cra_ablkcipher.encrypt = crypto_rfc3686_crypt;
407 inst->alg.cra_ablkcipher.decrypt = crypto_rfc3686_crypt;
408 417
409 inst->alg.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx); 418 inst->alg.init = crypto_rfc3686_init_tfm;
419 inst->alg.exit = crypto_rfc3686_exit_tfm;
410 420
411 inst->alg.cra_init = crypto_rfc3686_init_tfm; 421 inst->free = crypto_rfc3686_free;
412 inst->alg.cra_exit = crypto_rfc3686_exit_tfm;
413 422
414 return inst; 423 err = skcipher_register_instance(tmpl, inst);
424 if (err)
425 goto err_drop_spawn;
426
427out:
428 return err;
415 429
416err_drop_spawn: 430err_drop_spawn:
417 crypto_drop_skcipher(spawn); 431 crypto_drop_skcipher(spawn);
418err_free_inst: 432err_free_inst:
419 kfree(inst); 433 kfree(inst);
420 return ERR_PTR(err); 434 goto out;
421}
422
423static void crypto_rfc3686_free(struct crypto_instance *inst)
424{
425 struct crypto_skcipher_spawn *spawn = crypto_instance_ctx(inst);
426
427 crypto_drop_skcipher(spawn);
428 kfree(inst);
429} 435}
430 436
431static struct crypto_template crypto_rfc3686_tmpl = { 437static struct crypto_template crypto_rfc3686_tmpl = {
432 .name = "rfc3686", 438 .name = "rfc3686",
433 .alloc = crypto_rfc3686_alloc, 439 .create = crypto_rfc3686_create,
434 .free = crypto_rfc3686_free,
435 .module = THIS_MODULE, 440 .module = THIS_MODULE,
436}; 441};
437 442
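For reference, the 16-byte counter block that the converted crypto_rfc3686_crypt() assembles keeps the fixed RFC 3686 layout: a 4-byte nonce taken from the tail of the key, the 8-byte per-request IV, and a 4-byte big-endian block counter starting at 1. A small sketch (helper name hypothetical; the sizes are the existing CTR_RFC3686_* constants):

static void rfc3686_build_ctrblk(u8 ctrblk[16], const u8 nonce[4],
				 const u8 iv[8])
{
	memcpy(ctrblk, nonce, 4);                  /* CTR_RFC3686_NONCE_SIZE */
	memcpy(ctrblk + 4, iv, 8);                 /* CTR_RFC3686_IV_SIZE    */
	*(__be32 *)(ctrblk + 12) = cpu_to_be32(1); /* initial block counter  */
}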
diff --git a/crypto/cts.c b/crypto/cts.c
index e467ec0acf9f..51976187b2bf 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -40,7 +40,7 @@
40 * rfc3962 includes errata information in its Appendix A. 40 * rfc3962 includes errata information in its Appendix A.
41 */ 41 */
42 42
43#include <crypto/algapi.h> 43#include <crypto/internal/skcipher.h>
44#include <linux/err.h> 44#include <linux/err.h>
45#include <linux/init.h> 45#include <linux/init.h>
46#include <linux/kernel.h> 46#include <linux/kernel.h>
@@ -51,289 +51,364 @@
51#include <linux/slab.h> 51#include <linux/slab.h>
52 52
53struct crypto_cts_ctx { 53struct crypto_cts_ctx {
54 struct crypto_blkcipher *child; 54 struct crypto_skcipher *child;
55}; 55};
56 56
57static int crypto_cts_setkey(struct crypto_tfm *parent, const u8 *key, 57struct crypto_cts_reqctx {
58 unsigned int keylen) 58 struct scatterlist sg[2];
59 unsigned offset;
60 struct skcipher_request subreq;
61};
62
63static inline u8 *crypto_cts_reqctx_space(struct skcipher_request *req)
59{ 64{
60 struct crypto_cts_ctx *ctx = crypto_tfm_ctx(parent); 65 struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
61 struct crypto_blkcipher *child = ctx->child; 66 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
62 int err; 67 struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
68 struct crypto_skcipher *child = ctx->child;
63 69
64 crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); 70 return PTR_ALIGN((u8 *)(rctx + 1) + crypto_skcipher_reqsize(child),
65 crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) & 71 crypto_skcipher_alignmask(tfm) + 1);
66 CRYPTO_TFM_REQ_MASK);
67 err = crypto_blkcipher_setkey(child, key, keylen);
68 crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) &
69 CRYPTO_TFM_RES_MASK);
70 return err;
71} 72}
72 73
73static int cts_cbc_encrypt(struct crypto_cts_ctx *ctx, 74static int crypto_cts_setkey(struct crypto_skcipher *parent, const u8 *key,
74 struct blkcipher_desc *desc, 75 unsigned int keylen)
75 struct scatterlist *dst,
76 struct scatterlist *src,
77 unsigned int offset,
78 unsigned int nbytes)
79{ 76{
80 int bsize = crypto_blkcipher_blocksize(desc->tfm); 77 struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(parent);
81 u8 tmp[bsize], tmp2[bsize]; 78 struct crypto_skcipher *child = ctx->child;
82 struct blkcipher_desc lcldesc;
83 struct scatterlist sgsrc[1], sgdst[1];
84 int lastn = nbytes - bsize;
85 u8 iv[bsize];
86 u8 s[bsize * 2], d[bsize * 2];
87 int err; 79 int err;
88 80
89 if (lastn < 0) 81 crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
90 return -EINVAL; 82 crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
83 CRYPTO_TFM_REQ_MASK);
84 err = crypto_skcipher_setkey(child, key, keylen);
85 crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
86 CRYPTO_TFM_RES_MASK);
87 return err;
88}
91 89
92 sg_init_table(sgsrc, 1); 90static void cts_cbc_crypt_done(struct crypto_async_request *areq, int err)
93 sg_init_table(sgdst, 1); 91{
92 struct skcipher_request *req = areq->data;
94 93
95 memset(s, 0, sizeof(s)); 94 if (err == -EINPROGRESS)
96 scatterwalk_map_and_copy(s, src, offset, nbytes, 0); 95 return;
97 96
98 memcpy(iv, desc->info, bsize); 97 skcipher_request_complete(req, err);
98}
99 99
100 lcldesc.tfm = ctx->child; 100static int cts_cbc_encrypt(struct skcipher_request *req)
101 lcldesc.info = iv; 101{
102 lcldesc.flags = desc->flags; 102 struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
103 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
104 struct skcipher_request *subreq = &rctx->subreq;
105 int bsize = crypto_skcipher_blocksize(tfm);
106 u8 d[bsize * 2] __attribute__ ((aligned(__alignof__(u32))));
107 struct scatterlist *sg;
108 unsigned int offset;
109 int lastn;
110
111 offset = rctx->offset;
112 lastn = req->cryptlen - offset;
113
114 sg = scatterwalk_ffwd(rctx->sg, req->dst, offset - bsize);
115 scatterwalk_map_and_copy(d + bsize, sg, 0, bsize, 0);
116
117 memset(d, 0, bsize);
118 scatterwalk_map_and_copy(d, req->src, offset, lastn, 0);
119
120 scatterwalk_map_and_copy(d, sg, 0, bsize + lastn, 1);
121 memzero_explicit(d, sizeof(d));
122
123 skcipher_request_set_callback(subreq, req->base.flags &
124 CRYPTO_TFM_REQ_MAY_BACKLOG,
125 cts_cbc_crypt_done, req);
126 skcipher_request_set_crypt(subreq, sg, sg, bsize, req->iv);
127 return crypto_skcipher_encrypt(subreq);
128}
103 129
104 sg_set_buf(&sgsrc[0], s, bsize); 130static void crypto_cts_encrypt_done(struct crypto_async_request *areq, int err)
105 sg_set_buf(&sgdst[0], tmp, bsize); 131{
106 err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize); 132 struct skcipher_request *req = areq->data;
107 133
108 memcpy(d + bsize, tmp, lastn); 134 if (err)
135 goto out;
109 136
110 lcldesc.info = tmp; 137 err = cts_cbc_encrypt(req);
138 if (err == -EINPROGRESS ||
139 (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
140 return;
111 141
112 sg_set_buf(&sgsrc[0], s + bsize, bsize); 142out:
113 sg_set_buf(&sgdst[0], tmp2, bsize); 143 skcipher_request_complete(req, err);
114 err = crypto_blkcipher_encrypt_iv(&lcldesc, sgdst, sgsrc, bsize); 144}
115 145
116 memcpy(d, tmp2, bsize); 146static int crypto_cts_encrypt(struct skcipher_request *req)
147{
148 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
149 struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
150 struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
151 struct skcipher_request *subreq = &rctx->subreq;
152 int bsize = crypto_skcipher_blocksize(tfm);
153 unsigned int nbytes = req->cryptlen;
154 int cbc_blocks = (nbytes + bsize - 1) / bsize - 1;
155 unsigned int offset;
156
157 skcipher_request_set_tfm(subreq, ctx->child);
158
159 if (cbc_blocks <= 0) {
160 skcipher_request_set_callback(subreq, req->base.flags,
161 req->base.complete,
162 req->base.data);
163 skcipher_request_set_crypt(subreq, req->src, req->dst, nbytes,
164 req->iv);
165 return crypto_skcipher_encrypt(subreq);
166 }
117 167
118 scatterwalk_map_and_copy(d, dst, offset, nbytes, 1); 168 offset = cbc_blocks * bsize;
169 rctx->offset = offset;
119 170
120 memcpy(desc->info, tmp2, bsize); 171 skcipher_request_set_callback(subreq, req->base.flags,
172 crypto_cts_encrypt_done, req);
173 skcipher_request_set_crypt(subreq, req->src, req->dst,
174 offset, req->iv);
121 175
122 return err; 176 return crypto_skcipher_encrypt(subreq) ?:
177 cts_cbc_encrypt(req);
123} 178}
124 179
125static int crypto_cts_encrypt(struct blkcipher_desc *desc, 180static int cts_cbc_decrypt(struct skcipher_request *req)
126 struct scatterlist *dst, struct scatterlist *src,
127 unsigned int nbytes)
128{ 181{
129 struct crypto_cts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 182 struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
130 int bsize = crypto_blkcipher_blocksize(desc->tfm); 183 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
131 int tot_blocks = (nbytes + bsize - 1) / bsize; 184 struct skcipher_request *subreq = &rctx->subreq;
132 int cbc_blocks = tot_blocks > 2 ? tot_blocks - 2 : 0; 185 int bsize = crypto_skcipher_blocksize(tfm);
133 struct blkcipher_desc lcldesc; 186 u8 d[bsize * 2] __attribute__ ((aligned(__alignof__(u32))));
134 int err; 187 struct scatterlist *sg;
188 unsigned int offset;
189 u8 *space;
190 int lastn;
191
192 offset = rctx->offset;
193 lastn = req->cryptlen - offset;
194
195 sg = scatterwalk_ffwd(rctx->sg, req->dst, offset - bsize);
196
197 /* 1. Decrypt Cn-1 (s) to create Dn */
198 scatterwalk_map_and_copy(d + bsize, sg, 0, bsize, 0);
199 space = crypto_cts_reqctx_space(req);
200 crypto_xor(d + bsize, space, bsize);
201 /* 2. Pad Cn with zeros at the end to create C of length BB */
202 memset(d, 0, bsize);
203 scatterwalk_map_and_copy(d, req->src, offset, lastn, 0);
204 /* 3. Exclusive-or Dn with C to create Xn */
205 /* 4. Select the first Ln bytes of Xn to create Pn */
206 crypto_xor(d + bsize, d, lastn);
207
208 /* 5. Append the tail (BB - Ln) bytes of Xn to Cn to create En */
209 memcpy(d + lastn, d + bsize + lastn, bsize - lastn);
210 /* 6. Decrypt En to create Pn-1 */
135 211
136 lcldesc.tfm = ctx->child; 212 scatterwalk_map_and_copy(d, sg, 0, bsize + lastn, 1);
137 lcldesc.info = desc->info; 213 memzero_explicit(d, sizeof(d));
138 lcldesc.flags = desc->flags;
139
140 if (tot_blocks == 1) {
141 err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src, bsize);
142 } else if (nbytes <= bsize * 2) {
143 err = cts_cbc_encrypt(ctx, desc, dst, src, 0, nbytes);
144 } else {
145 /* do normal function for tot_blocks - 2 */
146 err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src,
147 cbc_blocks * bsize);
148 if (err == 0) {
149 /* do cts for final two blocks */
150 err = cts_cbc_encrypt(ctx, desc, dst, src,
151 cbc_blocks * bsize,
152 nbytes - (cbc_blocks * bsize));
153 }
154 }
155 214
156 return err; 215 skcipher_request_set_callback(subreq, req->base.flags &
216 CRYPTO_TFM_REQ_MAY_BACKLOG,
217 cts_cbc_crypt_done, req);
218
219 skcipher_request_set_crypt(subreq, sg, sg, bsize, space);
220 return crypto_skcipher_decrypt(subreq);
157} 221}
158 222
159static int cts_cbc_decrypt(struct crypto_cts_ctx *ctx, 223static void crypto_cts_decrypt_done(struct crypto_async_request *areq, int err)
160 struct blkcipher_desc *desc,
161 struct scatterlist *dst,
162 struct scatterlist *src,
163 unsigned int offset,
164 unsigned int nbytes)
165{ 224{
166 int bsize = crypto_blkcipher_blocksize(desc->tfm); 225 struct skcipher_request *req = areq->data;
167 u8 tmp[bsize];
168 struct blkcipher_desc lcldesc;
169 struct scatterlist sgsrc[1], sgdst[1];
170 int lastn = nbytes - bsize;
171 u8 iv[bsize];
172 u8 s[bsize * 2], d[bsize * 2];
173 int err;
174
175 if (lastn < 0)
176 return -EINVAL;
177 226
178 sg_init_table(sgsrc, 1); 227 if (err)
179 sg_init_table(sgdst, 1); 228 goto out;
180 229
181 scatterwalk_map_and_copy(s, src, offset, nbytes, 0); 230 err = cts_cbc_decrypt(req);
231 if (err == -EINPROGRESS ||
232 (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
233 return;
182 234
183 lcldesc.tfm = ctx->child; 235out:
184 lcldesc.info = iv; 236 skcipher_request_complete(req, err);
185 lcldesc.flags = desc->flags; 237}
186 238
187 /* 1. Decrypt Cn-1 (s) to create Dn (tmp)*/ 239static int crypto_cts_decrypt(struct skcipher_request *req)
188 memset(iv, 0, sizeof(iv)); 240{
189 sg_set_buf(&sgsrc[0], s, bsize); 241 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
190 sg_set_buf(&sgdst[0], tmp, bsize); 242 struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
191 err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize); 243 struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
192 if (err) 244 struct skcipher_request *subreq = &rctx->subreq;
193 return err; 245 int bsize = crypto_skcipher_blocksize(tfm);
194 /* 2. Pad Cn with zeros at the end to create C of length BB */ 246 unsigned int nbytes = req->cryptlen;
195 memset(iv, 0, sizeof(iv)); 247 int cbc_blocks = (nbytes + bsize - 1) / bsize - 1;
196 memcpy(iv, s + bsize, lastn); 248 unsigned int offset;
197 /* 3. Exclusive-or Dn (tmp) with C (iv) to create Xn (tmp) */ 249 u8 *space;
198 crypto_xor(tmp, iv, bsize); 250
199 /* 4. Select the first Ln bytes of Xn (tmp) to create Pn */ 251 skcipher_request_set_tfm(subreq, ctx->child);
200 memcpy(d + bsize, tmp, lastn); 252
201 253 if (cbc_blocks <= 0) {
202 /* 5. Append the tail (BB - Ln) bytes of Xn (tmp) to Cn to create En */ 254 skcipher_request_set_callback(subreq, req->base.flags,
203 memcpy(s + bsize + lastn, tmp + lastn, bsize - lastn); 255 req->base.complete,
204 /* 6. Decrypt En to create Pn-1 */ 256 req->base.data);
205 memzero_explicit(iv, sizeof(iv)); 257 skcipher_request_set_crypt(subreq, req->src, req->dst, nbytes,
258 req->iv);
259 return crypto_skcipher_decrypt(subreq);
260 }
206 261
207 sg_set_buf(&sgsrc[0], s + bsize, bsize); 262 skcipher_request_set_callback(subreq, req->base.flags,
208 sg_set_buf(&sgdst[0], d, bsize); 263 crypto_cts_decrypt_done, req);
209 err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);
210 264
211 /* XOR with previous block */ 265 space = crypto_cts_reqctx_space(req);
212 crypto_xor(d, desc->info, bsize);
213 266
214 scatterwalk_map_and_copy(d, dst, offset, nbytes, 1); 267 offset = cbc_blocks * bsize;
268 rctx->offset = offset;
215 269
216 memcpy(desc->info, s, bsize); 270 if (cbc_blocks <= 1)
217 return err; 271 memcpy(space, req->iv, bsize);
218} 272 else
273 scatterwalk_map_and_copy(space, req->src, offset - 2 * bsize,
274 bsize, 0);
219 275
220static int crypto_cts_decrypt(struct blkcipher_desc *desc, 276 skcipher_request_set_crypt(subreq, req->src, req->dst,
221 struct scatterlist *dst, struct scatterlist *src, 277 offset, req->iv);
222 unsigned int nbytes)
223{
224 struct crypto_cts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
225 int bsize = crypto_blkcipher_blocksize(desc->tfm);
226 int tot_blocks = (nbytes + bsize - 1) / bsize;
227 int cbc_blocks = tot_blocks > 2 ? tot_blocks - 2 : 0;
228 struct blkcipher_desc lcldesc;
229 int err;
230 278
231 lcldesc.tfm = ctx->child; 279 return crypto_skcipher_decrypt(subreq) ?:
232 lcldesc.info = desc->info; 280 cts_cbc_decrypt(req);
233 lcldesc.flags = desc->flags;
234
235 if (tot_blocks == 1) {
236 err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src, bsize);
237 } else if (nbytes <= bsize * 2) {
238 err = cts_cbc_decrypt(ctx, desc, dst, src, 0, nbytes);
239 } else {
240 /* do normal function for tot_blocks - 2 */
241 err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src,
242 cbc_blocks * bsize);
243 if (err == 0) {
244 /* do cts for final two blocks */
245 err = cts_cbc_decrypt(ctx, desc, dst, src,
246 cbc_blocks * bsize,
247 nbytes - (cbc_blocks * bsize));
248 }
249 }
250 return err;
251} 281}
252 282
253static int crypto_cts_init_tfm(struct crypto_tfm *tfm) 283static int crypto_cts_init_tfm(struct crypto_skcipher *tfm)
254{ 284{
255 struct crypto_instance *inst = (void *)tfm->__crt_alg; 285 struct skcipher_instance *inst = skcipher_alg_instance(tfm);
256 struct crypto_spawn *spawn = crypto_instance_ctx(inst); 286 struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
257 struct crypto_cts_ctx *ctx = crypto_tfm_ctx(tfm); 287 struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
258 struct crypto_blkcipher *cipher; 288 struct crypto_skcipher *cipher;
259 289 unsigned reqsize;
260 cipher = crypto_spawn_blkcipher(spawn); 290 unsigned bsize;
291 unsigned align;
292
293 cipher = crypto_spawn_skcipher2(spawn);
261 if (IS_ERR(cipher)) 294 if (IS_ERR(cipher))
262 return PTR_ERR(cipher); 295 return PTR_ERR(cipher);
263 296
264 ctx->child = cipher; 297 ctx->child = cipher;
298
299 align = crypto_skcipher_alignmask(tfm);
300 bsize = crypto_skcipher_blocksize(cipher);
301 reqsize = ALIGN(sizeof(struct crypto_cts_reqctx) +
302 crypto_skcipher_reqsize(cipher),
303 crypto_tfm_ctx_alignment()) +
304 (align & ~(crypto_tfm_ctx_alignment() - 1)) + bsize;
305
306 crypto_skcipher_set_reqsize(tfm, reqsize);
307
265 return 0; 308 return 0;
266} 309}
267 310
268static void crypto_cts_exit_tfm(struct crypto_tfm *tfm) 311static void crypto_cts_exit_tfm(struct crypto_skcipher *tfm)
269{ 312{
270 struct crypto_cts_ctx *ctx = crypto_tfm_ctx(tfm); 313 struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
271 crypto_free_blkcipher(ctx->child); 314
315 crypto_free_skcipher(ctx->child);
272} 316}
273 317
274static struct crypto_instance *crypto_cts_alloc(struct rtattr **tb) 318static void crypto_cts_free(struct skcipher_instance *inst)
275{ 319{
276 struct crypto_instance *inst; 320 crypto_drop_skcipher(skcipher_instance_ctx(inst));
277 struct crypto_alg *alg; 321 kfree(inst);
322}
323
324static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
325{
326 struct crypto_skcipher_spawn *spawn;
327 struct skcipher_instance *inst;
328 struct crypto_attr_type *algt;
329 struct skcipher_alg *alg;
330 const char *cipher_name;
278 int err; 331 int err;
279 332
280 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); 333 algt = crypto_get_attr_type(tb);
334 if (IS_ERR(algt))
335 return PTR_ERR(algt);
336
337 if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
338 return -EINVAL;
339
340 cipher_name = crypto_attr_alg_name(tb[1]);
341 if (IS_ERR(cipher_name))
342 return PTR_ERR(cipher_name);
343
344 inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
345 if (!inst)
346 return -ENOMEM;
347
348 spawn = skcipher_instance_ctx(inst);
349
350 crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
351 err = crypto_grab_skcipher2(spawn, cipher_name, 0,
352 crypto_requires_sync(algt->type,
353 algt->mask));
281 if (err) 354 if (err)
282 return ERR_PTR(err); 355 goto err_free_inst;
283 356
284 alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_BLKCIPHER, 357 alg = crypto_spawn_skcipher_alg(spawn);
285 CRYPTO_ALG_TYPE_MASK);
286 if (IS_ERR(alg))
287 return ERR_CAST(alg);
288 358
289 inst = ERR_PTR(-EINVAL); 359 err = -EINVAL;
290 if (!is_power_of_2(alg->cra_blocksize)) 360 if (crypto_skcipher_alg_ivsize(alg) != alg->base.cra_blocksize)
291 goto out_put_alg; 361 goto err_drop_spawn;
292 362
293 if (strncmp(alg->cra_name, "cbc(", 4)) 363 if (strncmp(alg->base.cra_name, "cbc(", 4))
294 goto out_put_alg; 364 goto err_drop_spawn;
295 365
296 inst = crypto_alloc_instance("cts", alg); 366 err = crypto_inst_setname(skcipher_crypto_instance(inst), "cts",
297 if (IS_ERR(inst)) 367 &alg->base);
298 goto out_put_alg; 368 if (err)
369 goto err_drop_spawn;
299 370
300 inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; 371 inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
301 inst->alg.cra_priority = alg->cra_priority; 372 inst->alg.base.cra_priority = alg->base.cra_priority;
302 inst->alg.cra_blocksize = alg->cra_blocksize; 373 inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
303 inst->alg.cra_alignmask = alg->cra_alignmask; 374 inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
304 inst->alg.cra_type = &crypto_blkcipher_type;
305 375
306 /* We access the data as u32s when xoring. */ 376 /* We access the data as u32s when xoring. */
307 inst->alg.cra_alignmask |= __alignof__(u32) - 1; 377 inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
308 378
309 inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; 379 inst->alg.ivsize = alg->base.cra_blocksize;
310 inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize; 380 inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
311 inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize; 381 inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
382 inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
312 383
313 inst->alg.cra_ctxsize = sizeof(struct crypto_cts_ctx); 384 inst->alg.base.cra_ctxsize = sizeof(struct crypto_cts_ctx);
314 385
315 inst->alg.cra_init = crypto_cts_init_tfm; 386 inst->alg.init = crypto_cts_init_tfm;
316 inst->alg.cra_exit = crypto_cts_exit_tfm; 387 inst->alg.exit = crypto_cts_exit_tfm;
317 388
318 inst->alg.cra_blkcipher.setkey = crypto_cts_setkey; 389 inst->alg.setkey = crypto_cts_setkey;
319 inst->alg.cra_blkcipher.encrypt = crypto_cts_encrypt; 390 inst->alg.encrypt = crypto_cts_encrypt;
320 inst->alg.cra_blkcipher.decrypt = crypto_cts_decrypt; 391 inst->alg.decrypt = crypto_cts_decrypt;
321 392
322out_put_alg: 393 inst->free = crypto_cts_free;
323 crypto_mod_put(alg);
324 return inst;
325}
326 394
327static void crypto_cts_free(struct crypto_instance *inst) 395 err = skcipher_register_instance(tmpl, inst);
328{ 396 if (err)
329 crypto_drop_spawn(crypto_instance_ctx(inst)); 397 goto err_drop_spawn;
398
399out:
400 return err;
401
402err_drop_spawn:
403 crypto_drop_skcipher(spawn);
404err_free_inst:
330 kfree(inst); 405 kfree(inst);
406 goto out;
331} 407}
332 408
333static struct crypto_template crypto_cts_tmpl = { 409static struct crypto_template crypto_cts_tmpl = {
334 .name = "cts", 410 .name = "cts",
335 .alloc = crypto_cts_alloc, 411 .create = crypto_cts_create,
336 .free = crypto_cts_free,
337 .module = THIS_MODULE, 412 .module = THIS_MODULE,
338}; 413};
339 414
diff --git a/crypto/dh.c b/crypto/dh.c
new file mode 100644
index 000000000000..9d19360e7189
--- /dev/null
+++ b/crypto/dh.c
@@ -0,0 +1,189 @@
1/* Diffie-Hellman Key Agreement Method [RFC2631]
2 *
3 * Copyright (c) 2016, Intel Corporation
4 * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <crypto/internal/kpp.h>
14#include <crypto/kpp.h>
15#include <crypto/dh.h>
16#include <linux/mpi.h>
17
18struct dh_ctx {
19 MPI p;
20 MPI g;
21 MPI xa;
22};
23
24static inline void dh_clear_params(struct dh_ctx *ctx)
25{
26 mpi_free(ctx->p);
27 mpi_free(ctx->g);
28 ctx->p = NULL;
29 ctx->g = NULL;
30}
31
32static void dh_free_ctx(struct dh_ctx *ctx)
33{
34 dh_clear_params(ctx);
35 mpi_free(ctx->xa);
36 ctx->xa = NULL;
37}
38
39/*
40 * If base is g we compute the public key
41 * ya = g^xa mod p; [RFC2631 sec 2.1.1]
 42 * else if base is the counterpart public key we compute the shared secret
43 * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
44 */
45static int _compute_val(const struct dh_ctx *ctx, MPI base, MPI val)
46{
47 /* val = base^xa mod p */
48 return mpi_powm(val, base, ctx->xa, ctx->p);
49}
50
51static inline struct dh_ctx *dh_get_ctx(struct crypto_kpp *tfm)
52{
53 return kpp_tfm_ctx(tfm);
54}
55
56static int dh_check_params_length(unsigned int p_len)
57{
58 return (p_len < 1536) ? -EINVAL : 0;
59}
60
61static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
62{
63 if (unlikely(!params->p || !params->g))
64 return -EINVAL;
65
66 if (dh_check_params_length(params->p_size << 3))
67 return -EINVAL;
68
69 ctx->p = mpi_read_raw_data(params->p, params->p_size);
70 if (!ctx->p)
71 return -EINVAL;
72
73 ctx->g = mpi_read_raw_data(params->g, params->g_size);
74 if (!ctx->g) {
75 mpi_free(ctx->p);
76 return -EINVAL;
77 }
78
79 return 0;
80}
81
82static int dh_set_secret(struct crypto_kpp *tfm, void *buf, unsigned int len)
83{
84 struct dh_ctx *ctx = dh_get_ctx(tfm);
85 struct dh params;
86
87 if (crypto_dh_decode_key(buf, len, &params) < 0)
88 return -EINVAL;
89
90 if (dh_set_params(ctx, &params) < 0)
91 return -EINVAL;
92
93 ctx->xa = mpi_read_raw_data(params.key, params.key_size);
94 if (!ctx->xa) {
95 dh_clear_params(ctx);
96 return -EINVAL;
97 }
98
99 return 0;
100}
101
102static int dh_compute_value(struct kpp_request *req)
103{
104 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
105 struct dh_ctx *ctx = dh_get_ctx(tfm);
106 MPI base, val = mpi_alloc(0);
107 int ret = 0;
108 int sign;
109
110 if (!val)
111 return -ENOMEM;
112
113 if (unlikely(!ctx->xa)) {
114 ret = -EINVAL;
115 goto err_free_val;
116 }
117
118 if (req->src) {
119 base = mpi_read_raw_from_sgl(req->src, req->src_len);
120 if (!base) {
 121			ret = -EINVAL;
122 goto err_free_val;
123 }
124 } else {
125 base = ctx->g;
126 }
127
128 ret = _compute_val(ctx, base, val);
129 if (ret)
130 goto err_free_base;
131
132 ret = mpi_write_to_sgl(val, req->dst, req->dst_len, &sign);
133 if (ret)
134 goto err_free_base;
135
136 if (sign < 0)
137 ret = -EBADMSG;
138err_free_base:
139 if (req->src)
140 mpi_free(base);
141err_free_val:
142 mpi_free(val);
143 return ret;
144}
145
146static int dh_max_size(struct crypto_kpp *tfm)
147{
148 struct dh_ctx *ctx = dh_get_ctx(tfm);
149
150 return mpi_get_size(ctx->p);
151}
152
153static void dh_exit_tfm(struct crypto_kpp *tfm)
154{
155 struct dh_ctx *ctx = dh_get_ctx(tfm);
156
157 dh_free_ctx(ctx);
158}
159
160static struct kpp_alg dh = {
161 .set_secret = dh_set_secret,
162 .generate_public_key = dh_compute_value,
163 .compute_shared_secret = dh_compute_value,
164 .max_size = dh_max_size,
165 .exit = dh_exit_tfm,
166 .base = {
167 .cra_name = "dh",
168 .cra_driver_name = "dh-generic",
169 .cra_priority = 100,
170 .cra_module = THIS_MODULE,
171 .cra_ctxsize = sizeof(struct dh_ctx),
172 },
173};
174
175static int dh_init(void)
176{
177 return crypto_register_kpp(&dh);
178}
179
180static void dh_exit(void)
181{
182 crypto_unregister_kpp(&dh);
183}
184
185module_init(dh_init);
186module_exit(dh_exit);
187MODULE_ALIAS_CRYPTO("dh");
188MODULE_LICENSE("GPL");
189MODULE_DESCRIPTION("DH generic algorithm");
diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c
new file mode 100644
index 000000000000..02db76b20d00
--- /dev/null
+++ b/crypto/dh_helper.c
@@ -0,0 +1,95 @@
1/*
2 * Copyright (c) 2016, Intel Corporation
3 * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public Licence
7 * as published by the Free Software Foundation; either version
8 * 2 of the Licence, or (at your option) any later version.
9 */
10#include <linux/kernel.h>
11#include <linux/export.h>
12#include <linux/err.h>
13#include <linux/string.h>
14#include <crypto/dh.h>
15#include <crypto/kpp.h>
16
17#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 3 * sizeof(int))
18
19static inline u8 *dh_pack_data(void *dst, const void *src, size_t size)
20{
21 memcpy(dst, src, size);
22 return dst + size;
23}
24
25static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size)
26{
27 memcpy(dst, src, size);
28 return src + size;
29}
30
31static inline int dh_data_size(const struct dh *p)
32{
33 return p->key_size + p->p_size + p->g_size;
34}
35
36int crypto_dh_key_len(const struct dh *p)
37{
38 return DH_KPP_SECRET_MIN_SIZE + dh_data_size(p);
39}
40EXPORT_SYMBOL_GPL(crypto_dh_key_len);
41
42int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params)
43{
44 u8 *ptr = buf;
45 struct kpp_secret secret = {
46 .type = CRYPTO_KPP_SECRET_TYPE_DH,
47 .len = len
48 };
49
50 if (unlikely(!buf))
51 return -EINVAL;
52
53 if (len != crypto_dh_key_len(params))
54 return -EINVAL;
55
56 ptr = dh_pack_data(ptr, &secret, sizeof(secret));
57 ptr = dh_pack_data(ptr, &params->key_size, sizeof(params->key_size));
58 ptr = dh_pack_data(ptr, &params->p_size, sizeof(params->p_size));
59 ptr = dh_pack_data(ptr, &params->g_size, sizeof(params->g_size));
60 ptr = dh_pack_data(ptr, params->key, params->key_size);
61 ptr = dh_pack_data(ptr, params->p, params->p_size);
62 dh_pack_data(ptr, params->g, params->g_size);
63
64 return 0;
65}
66EXPORT_SYMBOL_GPL(crypto_dh_encode_key);
67
68int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
69{
70 const u8 *ptr = buf;
71 struct kpp_secret secret;
72
73 if (unlikely(!buf || len < DH_KPP_SECRET_MIN_SIZE))
74 return -EINVAL;
75
76 ptr = dh_unpack_data(&secret, ptr, sizeof(secret));
77 if (secret.type != CRYPTO_KPP_SECRET_TYPE_DH)
78 return -EINVAL;
79
80 ptr = dh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
81 ptr = dh_unpack_data(&params->p_size, ptr, sizeof(params->p_size));
82 ptr = dh_unpack_data(&params->g_size, ptr, sizeof(params->g_size));
83 if (secret.len != crypto_dh_key_len(params))
84 return -EINVAL;
85
86 /* Don't allocate memory. Set pointers to data within
87 * the given buffer
88 */
89 params->key = (void *)ptr;
90 params->p = (void *)(ptr + params->key_size);
91 params->g = (void *)(ptr + params->key_size + params->p_size);
92
93 return 0;
94}
95EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
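A hedged usage sketch for the helpers above: a KPP user packs its DH parameters with crypto_dh_encode_key() and hands the blob to a "dh" transform via crypto_kpp_set_secret(), whose callback is the dh_set_secret() shown earlier. The transform would come from crypto_alloc_kpp("dh", 0, 0); the function and variable names here are hypothetical and error unwinding is abbreviated.

/* assumes <crypto/dh.h>, <crypto/kpp.h> and <linux/slab.h> */
static int example_dh_load_key(struct crypto_kpp *tfm, const struct dh *params)
{
	unsigned int len = crypto_dh_key_len(params);
	char *buf;
	int err;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	err = crypto_dh_encode_key(buf, len, params);
	if (!err)
		err = crypto_kpp_set_secret(tfm, buf, len);

	kzfree(buf);
	return err;
}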
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 0a3538f6cf22..f752da3a7c75 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -252,10 +252,16 @@ MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes192");
252MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes128"); 252MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes128");
253MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes128"); 253MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes128");
254 254
255static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key, 255static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
256 unsigned char *outval, const struct drbg_string *in); 256 const unsigned char *key);
257static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
258 const struct drbg_string *in);
257static int drbg_init_sym_kernel(struct drbg_state *drbg); 259static int drbg_init_sym_kernel(struct drbg_state *drbg);
258static int drbg_fini_sym_kernel(struct drbg_state *drbg); 260static int drbg_fini_sym_kernel(struct drbg_state *drbg);
261static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
262 u8 *inbuf, u32 inbuflen,
263 u8 *outbuf, u32 outlen);
264#define DRBG_CTR_NULL_LEN 128
259 265
260/* BCC function for CTR DRBG as defined in 10.4.3 */ 266/* BCC function for CTR DRBG as defined in 10.4.3 */
261static int drbg_ctr_bcc(struct drbg_state *drbg, 267static int drbg_ctr_bcc(struct drbg_state *drbg,
@@ -270,6 +276,7 @@ static int drbg_ctr_bcc(struct drbg_state *drbg,
270 drbg_string_fill(&data, out, drbg_blocklen(drbg)); 276 drbg_string_fill(&data, out, drbg_blocklen(drbg));
271 277
272 /* 10.4.3 step 2 / 4 */ 278 /* 10.4.3 step 2 / 4 */
279 drbg_kcapi_symsetkey(drbg, key);
273 list_for_each_entry(curr, in, list) { 280 list_for_each_entry(curr, in, list) {
274 const unsigned char *pos = curr->buf; 281 const unsigned char *pos = curr->buf;
275 size_t len = curr->len; 282 size_t len = curr->len;
@@ -278,7 +285,7 @@ static int drbg_ctr_bcc(struct drbg_state *drbg,
278 /* 10.4.3 step 4.2 */ 285 /* 10.4.3 step 4.2 */
279 if (drbg_blocklen(drbg) == cnt) { 286 if (drbg_blocklen(drbg) == cnt) {
280 cnt = 0; 287 cnt = 0;
281 ret = drbg_kcapi_sym(drbg, key, out, &data); 288 ret = drbg_kcapi_sym(drbg, out, &data);
282 if (ret) 289 if (ret)
283 return ret; 290 return ret;
284 } 291 }
@@ -290,7 +297,7 @@ static int drbg_ctr_bcc(struct drbg_state *drbg,
290 } 297 }
291 /* 10.4.3 step 4.2 for last block */ 298 /* 10.4.3 step 4.2 for last block */
292 if (cnt) 299 if (cnt)
293 ret = drbg_kcapi_sym(drbg, key, out, &data); 300 ret = drbg_kcapi_sym(drbg, out, &data);
294 301
295 return ret; 302 return ret;
296} 303}
@@ -425,6 +432,7 @@ static int drbg_ctr_df(struct drbg_state *drbg,
425 /* 10.4.2 step 12: overwriting of outval is implemented in next step */ 432 /* 10.4.2 step 12: overwriting of outval is implemented in next step */
426 433
427 /* 10.4.2 step 13 */ 434 /* 10.4.2 step 13 */
435 drbg_kcapi_symsetkey(drbg, temp);
428 while (generated_len < bytes_to_return) { 436 while (generated_len < bytes_to_return) {
429 short blocklen = 0; 437 short blocklen = 0;
430 /* 438 /*
@@ -432,7 +440,7 @@ static int drbg_ctr_df(struct drbg_state *drbg,
432 * implicit as the key is only drbg_blocklen in size based on 440 * implicit as the key is only drbg_blocklen in size based on
433 * the implementation of the cipher function callback 441 * the implementation of the cipher function callback
434 */ 442 */
435 ret = drbg_kcapi_sym(drbg, temp, X, &cipherin); 443 ret = drbg_kcapi_sym(drbg, X, &cipherin);
436 if (ret) 444 if (ret)
437 goto out; 445 goto out;
438 blocklen = (drbg_blocklen(drbg) < 446 blocklen = (drbg_blocklen(drbg) <
@@ -476,49 +484,47 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
476 unsigned char *temp = drbg->scratchpad; 484 unsigned char *temp = drbg->scratchpad;
477 unsigned char *df_data = drbg->scratchpad + drbg_statelen(drbg) + 485 unsigned char *df_data = drbg->scratchpad + drbg_statelen(drbg) +
478 drbg_blocklen(drbg); 486 drbg_blocklen(drbg);
479 unsigned char *temp_p, *df_data_p; /* pointer to iterate over buffers */
480 unsigned int len = 0;
481 struct drbg_string cipherin;
482 487
483 if (3 > reseed) 488 if (3 > reseed)
484 memset(df_data, 0, drbg_statelen(drbg)); 489 memset(df_data, 0, drbg_statelen(drbg));
485 490
486 /* 10.2.1.3.2 step 2 and 10.2.1.4.2 step 2 */ 491 if (!reseed) {
487 if (seed) { 492 /*
488 ret = drbg_ctr_df(drbg, df_data, drbg_statelen(drbg), seed); 493 * The DRBG uses the CTR mode of the underlying AES cipher. The
494 * CTR mode increments the counter value after the AES operation
495 * but SP800-90A requires that the counter is incremented before
496 * the AES operation. Hence, we increment it at the time we set
497 * it by one.
498 */
499 crypto_inc(drbg->V, drbg_blocklen(drbg));
500
501 ret = crypto_skcipher_setkey(drbg->ctr_handle, drbg->C,
502 drbg_keylen(drbg));
489 if (ret) 503 if (ret)
490 goto out; 504 goto out;
491 } 505 }
492 506
493 drbg_string_fill(&cipherin, drbg->V, drbg_blocklen(drbg)); 507 /* 10.2.1.3.2 step 2 and 10.2.1.4.2 step 2 */
494 /* 508 if (seed) {
495 * 10.2.1.3.2 steps 2 and 3 are already covered as the allocation 509 ret = drbg_ctr_df(drbg, df_data, drbg_statelen(drbg), seed);
496 * zeroizes all memory during initialization
497 */
498 while (len < (drbg_statelen(drbg))) {
499 /* 10.2.1.2 step 2.1 */
500 crypto_inc(drbg->V, drbg_blocklen(drbg));
501 /*
502 * 10.2.1.2 step 2.2 */
503 ret = drbg_kcapi_sym(drbg, drbg->C, temp + len, &cipherin);
504 if (ret) 510 if (ret)
505 goto out; 511 goto out;
506 /* 10.2.1.2 step 2.3 and 3 */
507 len += drbg_blocklen(drbg);
508 } 512 }
509 513
510 /* 10.2.1.2 step 4 */ 514 ret = drbg_kcapi_sym_ctr(drbg, df_data, drbg_statelen(drbg),
511 temp_p = temp; 515 temp, drbg_statelen(drbg));
512 df_data_p = df_data; 516 if (ret)
513 for (len = 0; len < drbg_statelen(drbg); len++) { 517 return ret;
514 *temp_p ^= *df_data_p;
515 df_data_p++; temp_p++;
516 }
517 518
518 /* 10.2.1.2 step 5 */ 519 /* 10.2.1.2 step 5 */
519 memcpy(drbg->C, temp, drbg_keylen(drbg)); 520 ret = crypto_skcipher_setkey(drbg->ctr_handle, temp,
521 drbg_keylen(drbg));
522 if (ret)
523 goto out;
520 /* 10.2.1.2 step 6 */ 524 /* 10.2.1.2 step 6 */
521 memcpy(drbg->V, temp + drbg_keylen(drbg), drbg_blocklen(drbg)); 525 memcpy(drbg->V, temp + drbg_keylen(drbg), drbg_blocklen(drbg));
526 /* See above: increment counter by one to compensate timing of CTR op */
527 crypto_inc(drbg->V, drbg_blocklen(drbg));
522 ret = 0; 528 ret = 0;
523 529
524out: 530out:
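
The comment added to drbg_ctr_update() above turns on where the counter increment happens relative to the block encryption: the skcipher CTR walk encrypts with the current V and increments afterwards, while SP800-90A 10.2.1.2 step 2.1 increments V before each encryption, so the patch bumps V once when it is set and once more after the state update. A minimal sketch of a byte-wise big-endian increment with the same effect as crypto_inc() (the name ctr_inc_be is made up for illustration, it is not part of this patch):

	static void ctr_inc_be(unsigned char *ctr, unsigned int size)
	{
		while (size--) {
			if (++ctr[size] != 0)
				break;	/* stop once a byte did not wrap to zero */
		}
	}
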
@@ -537,9 +543,8 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
537 unsigned char *buf, unsigned int buflen, 543 unsigned char *buf, unsigned int buflen,
538 struct list_head *addtl) 544 struct list_head *addtl)
539{ 545{
540 int len = 0; 546 int ret;
541 int ret = 0; 547 int len = min_t(int, buflen, INT_MAX);
542 struct drbg_string data;
543 548
544 /* 10.2.1.5.2 step 2 */ 549 /* 10.2.1.5.2 step 2 */
545 if (addtl && !list_empty(addtl)) { 550 if (addtl && !list_empty(addtl)) {
@@ -549,33 +554,16 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
549 } 554 }
550 555
551 /* 10.2.1.5.2 step 4.1 */ 556 /* 10.2.1.5.2 step 4.1 */
552 crypto_inc(drbg->V, drbg_blocklen(drbg)); 557 ret = drbg_kcapi_sym_ctr(drbg, drbg->ctr_null_value, DRBG_CTR_NULL_LEN,
553 drbg_string_fill(&data, drbg->V, drbg_blocklen(drbg)); 558 buf, len);
554 while (len < buflen) { 559 if (ret)
555 int outlen = 0; 560 return ret;
556 /* 10.2.1.5.2 step 4.2 */
557 ret = drbg_kcapi_sym(drbg, drbg->C, drbg->scratchpad, &data);
558 if (ret) {
559 len = ret;
560 goto out;
561 }
562 outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
563 drbg_blocklen(drbg) : (buflen - len);
564 /* 10.2.1.5.2 step 4.3 */
565 memcpy(buf + len, drbg->scratchpad, outlen);
566 len += outlen;
567 /* 10.2.1.5.2 step 6 */
568 if (len < buflen)
569 crypto_inc(drbg->V, drbg_blocklen(drbg));
570 }
571 561
572 /* 10.2.1.5.2 step 6 */ 562 /* 10.2.1.5.2 step 6 */
573 ret = drbg_ctr_update(drbg, NULL, 3); 563 ret = drbg_ctr_update(drbg, NULL, 3);
574 if (ret) 564 if (ret)
575 len = ret; 565 len = ret;
576 566
577out:
578 memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
579 return len; 567 return len;
580} 568}
581 569
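
The rewritten drbg_ctr_generate() above no longer loops over single-block encryptions of V; it CTR-encrypts drbg->ctr_null_value, a persistent all-zero buffer allocated later in this patch in drbg_init_sym_kernel(). Since CTR mode outputs plaintext XOR E_K(counter), an all-zero plaintext leaves the raw keystream,

	buf = E_K(V+1) || E_K(V+2) || ...   (truncated to buflen bytes),

which is exactly the block sequence of 10.2.1.5.2 step 4, with the pre-incremented V compensating for CTR mode incrementing only after each block.
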
@@ -1145,11 +1133,11 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
1145 if (!drbg) 1133 if (!drbg)
1146 return; 1134 return;
1147 kzfree(drbg->V); 1135 kzfree(drbg->V);
1148 drbg->V = NULL; 1136 drbg->Vbuf = NULL;
1149 kzfree(drbg->C); 1137 kzfree(drbg->C);
1150 drbg->C = NULL; 1138 drbg->Cbuf = NULL;
1151 kzfree(drbg->scratchpad); 1139 kzfree(drbg->scratchpadbuf);
1152 drbg->scratchpad = NULL; 1140 drbg->scratchpadbuf = NULL;
1153 drbg->reseed_ctr = 0; 1141 drbg->reseed_ctr = 0;
1154 drbg->d_ops = NULL; 1142 drbg->d_ops = NULL;
1155 drbg->core = NULL; 1143 drbg->core = NULL;
@@ -1185,12 +1173,18 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
1185 goto err; 1173 goto err;
1186 } 1174 }
1187 1175
1188 drbg->V = kmalloc(drbg_statelen(drbg), GFP_KERNEL); 1176 ret = drbg->d_ops->crypto_init(drbg);
1189 if (!drbg->V) 1177 if (ret < 0)
1190 goto err;
1191 drbg->C = kmalloc(drbg_statelen(drbg), GFP_KERNEL);
1192 if (!drbg->C)
1193 goto err; 1178 goto err;
1179
1180 drbg->Vbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
1181 if (!drbg->Vbuf)
1182 goto fini;
1183 drbg->V = PTR_ALIGN(drbg->Vbuf, ret + 1);
1184 drbg->Cbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
1185 if (!drbg->Cbuf)
1186 goto fini;
1187 drbg->C = PTR_ALIGN(drbg->Cbuf, ret + 1);
1194 /* scratchpad is only generated for CTR and Hash */ 1188 /* scratchpad is only generated for CTR and Hash */
1195 if (drbg->core->flags & DRBG_HMAC) 1189 if (drbg->core->flags & DRBG_HMAC)
1196 sb_size = 0; 1190 sb_size = 0;
@@ -1204,13 +1198,16 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
1204 sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg); 1198 sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg);
1205 1199
1206 if (0 < sb_size) { 1200 if (0 < sb_size) {
1207 drbg->scratchpad = kzalloc(sb_size, GFP_KERNEL); 1201 drbg->scratchpadbuf = kzalloc(sb_size + ret, GFP_KERNEL);
1208 if (!drbg->scratchpad) 1202 if (!drbg->scratchpadbuf)
1209 goto err; 1203 goto fini;
1204 drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1);
1210 } 1205 }
1211 1206
1212 return 0; 1207 return 0;
1213 1208
1209fini:
1210 drbg->d_ops->crypto_fini(drbg);
1214err: 1211err:
1215 drbg_dealloc_state(drbg); 1212 drbg_dealloc_state(drbg);
1216 return ret; 1213 return ret;
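
drbg_alloc_state() above now over-allocates V, C and the scratchpad by the alignmask that crypto_init() returns and realigns the working pointers with PTR_ALIGN(). A minimal userspace sketch of what PTR_ALIGN(p, a) does (a is alignmask + 1 here and must be a power of two; ptr_align is an invented name, not the kernel macro itself):

	#include <stdint.h>

	/* round p up to the next 'align'-byte boundary */
	static void *ptr_align(void *p, uintptr_t align)
	{
		return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
	}

Over-allocating by alignmask guarantees that the aligned pointer plus the requested size still lies inside the kmalloc'd buffer.
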
@@ -1478,10 +1475,6 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
1478 if (ret) 1475 if (ret)
1479 goto unlock; 1476 goto unlock;
1480 1477
1481 ret = -EFAULT;
1482 if (drbg->d_ops->crypto_init(drbg))
1483 goto err;
1484
1485 ret = drbg_prepare_hrng(drbg); 1478 ret = drbg_prepare_hrng(drbg);
1486 if (ret) 1479 if (ret)
1487 goto free_everything; 1480 goto free_everything;
@@ -1505,8 +1498,6 @@ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
1505 mutex_unlock(&drbg->drbg_mutex); 1498 mutex_unlock(&drbg->drbg_mutex);
1506 return ret; 1499 return ret;
1507 1500
1508err:
1509 drbg_dealloc_state(drbg);
1510unlock: 1501unlock:
1511 mutex_unlock(&drbg->drbg_mutex); 1502 mutex_unlock(&drbg->drbg_mutex);
1512 return ret; 1503 return ret;
@@ -1591,7 +1582,8 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg)
1591 sdesc->shash.tfm = tfm; 1582 sdesc->shash.tfm = tfm;
1592 sdesc->shash.flags = 0; 1583 sdesc->shash.flags = 0;
1593 drbg->priv_data = sdesc; 1584 drbg->priv_data = sdesc;
1594 return 0; 1585
1586 return crypto_shash_alignmask(tfm);
1595} 1587}
1596 1588
1597static int drbg_fini_hash_kernel(struct drbg_state *drbg) 1589static int drbg_fini_hash_kernel(struct drbg_state *drbg)
@@ -1627,10 +1619,45 @@ static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
1627#endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */ 1619#endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */
1628 1620
1629#ifdef CONFIG_CRYPTO_DRBG_CTR 1621#ifdef CONFIG_CRYPTO_DRBG_CTR
1622static int drbg_fini_sym_kernel(struct drbg_state *drbg)
1623{
1624 struct crypto_cipher *tfm =
1625 (struct crypto_cipher *)drbg->priv_data;
1626 if (tfm)
1627 crypto_free_cipher(tfm);
1628 drbg->priv_data = NULL;
1629
1630 if (drbg->ctr_handle)
1631 crypto_free_skcipher(drbg->ctr_handle);
1632 drbg->ctr_handle = NULL;
1633
1634 if (drbg->ctr_req)
1635 skcipher_request_free(drbg->ctr_req);
1636 drbg->ctr_req = NULL;
1637
1638 kfree(drbg->ctr_null_value_buf);
1639 drbg->ctr_null_value = NULL;
1640
1641 return 0;
1642}
1643
1644static void drbg_skcipher_cb(struct crypto_async_request *req, int error)
1645{
1646 struct drbg_state *drbg = req->data;
1647
1648 if (error == -EINPROGRESS)
1649 return;
1650 drbg->ctr_async_err = error;
1651 complete(&drbg->ctr_completion);
1652}
1653
1630static int drbg_init_sym_kernel(struct drbg_state *drbg) 1654static int drbg_init_sym_kernel(struct drbg_state *drbg)
1631{ 1655{
1632 int ret = 0;
1633 struct crypto_cipher *tfm; 1656 struct crypto_cipher *tfm;
1657 struct crypto_skcipher *sk_tfm;
1658 struct skcipher_request *req;
1659 unsigned int alignmask;
1660 char ctr_name[CRYPTO_MAX_ALG_NAME];
1634 1661
1635 tfm = crypto_alloc_cipher(drbg->core->backend_cra_name, 0, 0); 1662 tfm = crypto_alloc_cipher(drbg->core->backend_cra_name, 0, 0);
1636 if (IS_ERR(tfm)) { 1663 if (IS_ERR(tfm)) {
@@ -1640,31 +1667,103 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
1640 } 1667 }
1641 BUG_ON(drbg_blocklen(drbg) != crypto_cipher_blocksize(tfm)); 1668 BUG_ON(drbg_blocklen(drbg) != crypto_cipher_blocksize(tfm));
1642 drbg->priv_data = tfm; 1669 drbg->priv_data = tfm;
1643 return ret; 1670
1671 if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
1672 drbg->core->backend_cra_name) >= CRYPTO_MAX_ALG_NAME) {
1673 drbg_fini_sym_kernel(drbg);
1674 return -EINVAL;
1675 }
1676 sk_tfm = crypto_alloc_skcipher(ctr_name, 0, 0);
1677 if (IS_ERR(sk_tfm)) {
1678 pr_info("DRBG: could not allocate CTR cipher TFM handle: %s\n",
1679 ctr_name);
1680 drbg_fini_sym_kernel(drbg);
1681 return PTR_ERR(sk_tfm);
1682 }
1683 drbg->ctr_handle = sk_tfm;
1684
1685 req = skcipher_request_alloc(sk_tfm, GFP_KERNEL);
1686 if (!req) {
1687 pr_info("DRBG: could not allocate request queue\n");
1688 drbg_fini_sym_kernel(drbg);
1689 return -ENOMEM;
1690 }
1691 drbg->ctr_req = req;
1692 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1693 drbg_skcipher_cb, drbg);
1694
1695 alignmask = crypto_skcipher_alignmask(sk_tfm);
1696 drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask,
1697 GFP_KERNEL);
1698 if (!drbg->ctr_null_value_buf) {
1699 drbg_fini_sym_kernel(drbg);
1700 return -ENOMEM;
1701 }
1702 drbg->ctr_null_value = (u8 *)PTR_ALIGN(drbg->ctr_null_value_buf,
1703 alignmask + 1);
1704
1705 return alignmask;
1644} 1706}
1645 1707
1646static int drbg_fini_sym_kernel(struct drbg_state *drbg) 1708static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
1709 const unsigned char *key)
1647{ 1710{
1648 struct crypto_cipher *tfm = 1711 struct crypto_cipher *tfm =
1649 (struct crypto_cipher *)drbg->priv_data; 1712 (struct crypto_cipher *)drbg->priv_data;
1650 if (tfm) 1713
1651 crypto_free_cipher(tfm); 1714 crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg)));
1652 drbg->priv_data = NULL;
1653 return 0;
1654} 1715}
1655 1716
1656static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key, 1717static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
1657 unsigned char *outval, const struct drbg_string *in) 1718 const struct drbg_string *in)
1658{ 1719{
1659 struct crypto_cipher *tfm = 1720 struct crypto_cipher *tfm =
1660 (struct crypto_cipher *)drbg->priv_data; 1721 (struct crypto_cipher *)drbg->priv_data;
1661 1722
1662 crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg)));
1663	/* there is only one component in *in */		1723	/* there is only one component in *in */
1664 BUG_ON(in->len < drbg_blocklen(drbg)); 1724 BUG_ON(in->len < drbg_blocklen(drbg));
1665 crypto_cipher_encrypt_one(tfm, outval, in->buf); 1725 crypto_cipher_encrypt_one(tfm, outval, in->buf);
1666 return 0; 1726 return 0;
1667} 1727}
1728
1729static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
1730 u8 *inbuf, u32 inlen,
1731 u8 *outbuf, u32 outlen)
1732{
1733 struct scatterlist sg_in;
1734
1735 sg_init_one(&sg_in, inbuf, inlen);
1736
1737 while (outlen) {
1738 u32 cryptlen = min_t(u32, inlen, outlen);
1739 struct scatterlist sg_out;
1740 int ret;
1741
1742 sg_init_one(&sg_out, outbuf, cryptlen);
1743 skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
1744 cryptlen, drbg->V);
1745 ret = crypto_skcipher_encrypt(drbg->ctr_req);
1746 switch (ret) {
1747 case 0:
1748 break;
1749 case -EINPROGRESS:
1750 case -EBUSY:
1751 ret = wait_for_completion_interruptible(
1752 &drbg->ctr_completion);
1753 if (!ret && !drbg->ctr_async_err) {
1754 reinit_completion(&drbg->ctr_completion);
1755 break;
1756 }
1757 default:
1758 return ret;
1759 }
1760 init_completion(&drbg->ctr_completion);
1761
1762 outlen -= cryptlen;
1763 }
1764
1765 return 0;
1766}
1668#endif /* CONFIG_CRYPTO_DRBG_CTR */ 1767#endif /* CONFIG_CRYPTO_DRBG_CTR */
1669 1768
1670/*************************************************************** 1769/***************************************************************
diff --git a/crypto/ecc.c b/crypto/ecc.c
new file mode 100644
index 000000000000..414c78a9c214
--- /dev/null
+++ b/crypto/ecc.c
@@ -0,0 +1,1018 @@
1/*
2 * Copyright (c) 2013, Kenneth MacKay
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
15 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
16 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
17 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
18 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
20 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <linux/random.h>
28#include <linux/slab.h>
29#include <linux/swab.h>
30#include <linux/fips.h>
31#include <crypto/ecdh.h>
32
33#include "ecc.h"
34#include "ecc_curve_defs.h"
35
36typedef struct {
37 u64 m_low;
38 u64 m_high;
39} uint128_t;
40
41static inline const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
42{
43 switch (curve_id) {
44 /* In FIPS mode only allow P256 and higher */
45 case ECC_CURVE_NIST_P192:
46 return fips_enabled ? NULL : &nist_p192;
47 case ECC_CURVE_NIST_P256:
48 return &nist_p256;
49 default:
50 return NULL;
51 }
52}
53
54static u64 *ecc_alloc_digits_space(unsigned int ndigits)
55{
56 size_t len = ndigits * sizeof(u64);
57
58 if (!len)
59 return NULL;
60
61 return kmalloc(len, GFP_KERNEL);
62}
63
64static void ecc_free_digits_space(u64 *space)
65{
66 kzfree(space);
67}
68
69static struct ecc_point *ecc_alloc_point(unsigned int ndigits)
70{
71 struct ecc_point *p = kmalloc(sizeof(*p), GFP_KERNEL);
72
73 if (!p)
74 return NULL;
75
76 p->x = ecc_alloc_digits_space(ndigits);
77 if (!p->x)
78 goto err_alloc_x;
79
80 p->y = ecc_alloc_digits_space(ndigits);
81 if (!p->y)
82 goto err_alloc_y;
83
84 p->ndigits = ndigits;
85
86 return p;
87
88err_alloc_y:
89 ecc_free_digits_space(p->x);
90err_alloc_x:
91 kfree(p);
92 return NULL;
93}
94
95static void ecc_free_point(struct ecc_point *p)
96{
97 if (!p)
98 return;
99
100 kzfree(p->x);
101 kzfree(p->y);
102 kzfree(p);
103}
104
105static void vli_clear(u64 *vli, unsigned int ndigits)
106{
107 int i;
108
109 for (i = 0; i < ndigits; i++)
110 vli[i] = 0;
111}
112
113/* Returns true if vli == 0, false otherwise. */
114static bool vli_is_zero(const u64 *vli, unsigned int ndigits)
115{
116 int i;
117
118 for (i = 0; i < ndigits; i++) {
119 if (vli[i])
120 return false;
121 }
122
123 return true;
124}
125
126/* Returns nonzero if bit 'bit' of vli is set. */
127static u64 vli_test_bit(const u64 *vli, unsigned int bit)
128{
129 return (vli[bit / 64] & ((u64)1 << (bit % 64)));
130}
131
132/* Counts the number of 64-bit "digits" in vli. */
133static unsigned int vli_num_digits(const u64 *vli, unsigned int ndigits)
134{
135 int i;
136
137 /* Search from the end until we find a non-zero digit.
138 * We do it in reverse because we expect that most digits will
139 * be nonzero.
140 */
141 for (i = ndigits - 1; i >= 0 && vli[i] == 0; i--);
142
143 return (i + 1);
144}
145
146/* Counts the number of bits required for vli. */
147static unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits)
148{
149 unsigned int i, num_digits;
150 u64 digit;
151
152 num_digits = vli_num_digits(vli, ndigits);
153 if (num_digits == 0)
154 return 0;
155
156 digit = vli[num_digits - 1];
157 for (i = 0; digit; i++)
158 digit >>= 1;
159
160 return ((num_digits - 1) * 64 + i);
161}
162
163/* Sets dest = src. */
164static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits)
165{
166 int i;
167
168 for (i = 0; i < ndigits; i++)
169 dest[i] = src[i];
170}
171
172/* Returns sign of left - right. */
173static int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
174{
175 int i;
176
177 for (i = ndigits - 1; i >= 0; i--) {
178 if (left[i] > right[i])
179 return 1;
180 else if (left[i] < right[i])
181 return -1;
182 }
183
184 return 0;
185}
186
187/* Computes result = in << c, returning carry. Can modify in place
188 * (if result == in). 0 < shift < 64.
189 */
190static u64 vli_lshift(u64 *result, const u64 *in, unsigned int shift,
191 unsigned int ndigits)
192{
193 u64 carry = 0;
194 int i;
195
196 for (i = 0; i < ndigits; i++) {
197 u64 temp = in[i];
198
199 result[i] = (temp << shift) | carry;
200 carry = temp >> (64 - shift);
201 }
202
203 return carry;
204}
205
206/* Computes vli = vli >> 1. */
207static void vli_rshift1(u64 *vli, unsigned int ndigits)
208{
209 u64 *end = vli;
210 u64 carry = 0;
211
212 vli += ndigits;
213
214 while (vli-- > end) {
215 u64 temp = *vli;
216 *vli = (temp >> 1) | carry;
217 carry = temp << 63;
218 }
219}
220
221/* Computes result = left + right, returning carry. Can modify in place. */
222static u64 vli_add(u64 *result, const u64 *left, const u64 *right,
223 unsigned int ndigits)
224{
225 u64 carry = 0;
226 int i;
227
228 for (i = 0; i < ndigits; i++) {
229 u64 sum;
230
231 sum = left[i] + right[i] + carry;
232 if (sum != left[i])
233 carry = (sum < left[i]);
234
235 result[i] = sum;
236 }
237
238 return carry;
239}
240
241/* Computes result = left - right, returning borrow. Can modify in place. */
242static u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
243 unsigned int ndigits)
244{
245 u64 borrow = 0;
246 int i;
247
248 for (i = 0; i < ndigits; i++) {
249 u64 diff;
250
251 diff = left[i] - right[i] - borrow;
252 if (diff != left[i])
253 borrow = (diff > left[i]);
254
255 result[i] = diff;
256 }
257
258 return borrow;
259}
260
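
A detail of vli_add() and vli_sub() above that is easy to misread: the carry (or borrow) is only rewritten when the per-digit result differs from left[i]. If sum == left[i], then right[i] + carry was either 0 or exactly 2^64; in the first case no carry is produced and in the second the carry out equals the carry in, so leaving the variable untouched is correct in both cases. The analogous argument holds for the borrow in vli_sub().
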
261static uint128_t mul_64_64(u64 left, u64 right)
262{
263 u64 a0 = left & 0xffffffffull;
264 u64 a1 = left >> 32;
265 u64 b0 = right & 0xffffffffull;
266 u64 b1 = right >> 32;
267 u64 m0 = a0 * b0;
268 u64 m1 = a0 * b1;
269 u64 m2 = a1 * b0;
270 u64 m3 = a1 * b1;
271 uint128_t result;
272
273 m2 += (m0 >> 32);
274 m2 += m1;
275
276 /* Overflow */
277 if (m2 < m1)
278 m3 += 0x100000000ull;
279
280 result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
281 result.m_high = m3 + (m2 >> 32);
282
283 return result;
284}
285
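
mul_64_64() above builds the 64x64 -> 128-bit product from four 32x32 -> 64-bit partial products because the code cannot assume a native 128-bit integer type. Where the toolchain does provide unsigned __int128 (an assumption, e.g. 64-bit GCC or Clang), the decomposition can be sanity-checked against it with a sketch like this:

	/* sketch only: compare mul_64_64() with a native 128-bit multiply */
	static int mul_64_64_matches(u64 left, u64 right)
	{
		uint128_t r = mul_64_64(left, right);
		unsigned __int128 ref = (unsigned __int128)left * right;

		return r.m_low == (u64)ref && r.m_high == (u64)(ref >> 64);
	}
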
286static uint128_t add_128_128(uint128_t a, uint128_t b)
287{
288 uint128_t result;
289
290 result.m_low = a.m_low + b.m_low;
291 result.m_high = a.m_high + b.m_high + (result.m_low < a.m_low);
292
293 return result;
294}
295
296static void vli_mult(u64 *result, const u64 *left, const u64 *right,
297 unsigned int ndigits)
298{
299 uint128_t r01 = { 0, 0 };
300 u64 r2 = 0;
301 unsigned int i, k;
302
303 /* Compute each digit of result in sequence, maintaining the
304 * carries.
305 */
306 for (k = 0; k < ndigits * 2 - 1; k++) {
307 unsigned int min;
308
309 if (k < ndigits)
310 min = 0;
311 else
312 min = (k + 1) - ndigits;
313
314 for (i = min; i <= k && i < ndigits; i++) {
315 uint128_t product;
316
317 product = mul_64_64(left[i], right[k - i]);
318
319 r01 = add_128_128(r01, product);
320 r2 += (r01.m_high < product.m_high);
321 }
322
323 result[k] = r01.m_low;
324 r01.m_low = r01.m_high;
325 r01.m_high = r2;
326 r2 = 0;
327 }
328
329 result[ndigits * 2 - 1] = r01.m_low;
330}
331
332static void vli_square(u64 *result, const u64 *left, unsigned int ndigits)
333{
334 uint128_t r01 = { 0, 0 };
335 u64 r2 = 0;
336 int i, k;
337
338 for (k = 0; k < ndigits * 2 - 1; k++) {
339 unsigned int min;
340
341 if (k < ndigits)
342 min = 0;
343 else
344 min = (k + 1) - ndigits;
345
346 for (i = min; i <= k && i <= k - i; i++) {
347 uint128_t product;
348
349 product = mul_64_64(left[i], left[k - i]);
350
351 if (i < k - i) {
352 r2 += product.m_high >> 63;
353 product.m_high = (product.m_high << 1) |
354 (product.m_low >> 63);
355 product.m_low <<= 1;
356 }
357
358 r01 = add_128_128(r01, product);
359 r2 += (r01.m_high < product.m_high);
360 }
361
362 result[k] = r01.m_low;
363 r01.m_low = r01.m_high;
364 r01.m_high = r2;
365 r2 = 0;
366 }
367
368 result[ndigits * 2 - 1] = r01.m_low;
369}
370
371/* Computes result = (left + right) % mod.
372 * Assumes that left < mod and right < mod, result != mod.
373 */
374static void vli_mod_add(u64 *result, const u64 *left, const u64 *right,
375 const u64 *mod, unsigned int ndigits)
376{
377 u64 carry;
378
379 carry = vli_add(result, left, right, ndigits);
380
381 /* result > mod (result = mod + remainder), so subtract mod to
382 * get remainder.
383 */
384 if (carry || vli_cmp(result, mod, ndigits) >= 0)
385 vli_sub(result, result, mod, ndigits);
386}
387
388/* Computes result = (left - right) % mod.
389 * Assumes that left < mod and right < mod, result != mod.
390 */
391static void vli_mod_sub(u64 *result, const u64 *left, const u64 *right,
392 const u64 *mod, unsigned int ndigits)
393{
394 u64 borrow = vli_sub(result, left, right, ndigits);
395
396 /* In this case, p_result == -diff == (max int) - diff.
397 * Since -x % d == d - x, we can get the correct result from
398 * result + mod (with overflow).
399 */
400 if (borrow)
401 vli_add(result, result, mod, ndigits);
402}
403
404/* Computes p_result = p_product % curve_p.
405 * See algorithm 5 and 6 from
406 * http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf
407 */
408static void vli_mmod_fast_192(u64 *result, const u64 *product,
409 const u64 *curve_prime, u64 *tmp)
410{
411 const unsigned int ndigits = 3;
412 int carry;
413
414 vli_set(result, product, ndigits);
415
416 vli_set(tmp, &product[3], ndigits);
417 carry = vli_add(result, result, tmp, ndigits);
418
419 tmp[0] = 0;
420 tmp[1] = product[3];
421 tmp[2] = product[4];
422 carry += vli_add(result, result, tmp, ndigits);
423
424 tmp[0] = tmp[1] = product[5];
425 tmp[2] = 0;
426 carry += vli_add(result, result, tmp, ndigits);
427
428 while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
429 carry -= vli_sub(result, result, curve_prime, ndigits);
430}
431
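
vli_mmod_fast_192() above relies on the shape of the NIST P-192 prime: with p = 2^192 - 2^64 - 1 we have 2^192 ≡ 2^64 + 1 (mod p). Writing the 384-bit product as L + H*2^192 with L = product[0..2] and H = product[3..5] gives

	L + H*2^192 ≡ L + H + H*2^64   (mod p),

which accounts for the first two tmp additions; the product[5]*2^192 term produced by shifting H one digit is folded once more as product[5]*(2^64 + 1), the third tmp, and the closing loop subtracts p until the result drops below it.
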
432/* Computes result = product % curve_prime
433 * from http://www.nsa.gov/ia/_files/nist-routines.pdf
434 */
435static void vli_mmod_fast_256(u64 *result, const u64 *product,
436 const u64 *curve_prime, u64 *tmp)
437{
438 int carry;
439 const unsigned int ndigits = 4;
440
441 /* t */
442 vli_set(result, product, ndigits);
443
444 /* s1 */
445 tmp[0] = 0;
446 tmp[1] = product[5] & 0xffffffff00000000ull;
447 tmp[2] = product[6];
448 tmp[3] = product[7];
449 carry = vli_lshift(tmp, tmp, 1, ndigits);
450 carry += vli_add(result, result, tmp, ndigits);
451
452 /* s2 */
453 tmp[1] = product[6] << 32;
454 tmp[2] = (product[6] >> 32) | (product[7] << 32);
455 tmp[3] = product[7] >> 32;
456 carry += vli_lshift(tmp, tmp, 1, ndigits);
457 carry += vli_add(result, result, tmp, ndigits);
458
459 /* s3 */
460 tmp[0] = product[4];
461 tmp[1] = product[5] & 0xffffffff;
462 tmp[2] = 0;
463 tmp[3] = product[7];
464 carry += vli_add(result, result, tmp, ndigits);
465
466 /* s4 */
467 tmp[0] = (product[4] >> 32) | (product[5] << 32);
468 tmp[1] = (product[5] >> 32) | (product[6] & 0xffffffff00000000ull);
469 tmp[2] = product[7];
470 tmp[3] = (product[6] >> 32) | (product[4] << 32);
471 carry += vli_add(result, result, tmp, ndigits);
472
473 /* d1 */
474 tmp[0] = (product[5] >> 32) | (product[6] << 32);
475 tmp[1] = (product[6] >> 32);
476 tmp[2] = 0;
477 tmp[3] = (product[4] & 0xffffffff) | (product[5] << 32);
478 carry -= vli_sub(result, result, tmp, ndigits);
479
480 /* d2 */
481 tmp[0] = product[6];
482 tmp[1] = product[7];
483 tmp[2] = 0;
484 tmp[3] = (product[4] >> 32) | (product[5] & 0xffffffff00000000ull);
485 carry -= vli_sub(result, result, tmp, ndigits);
486
487 /* d3 */
488 tmp[0] = (product[6] >> 32) | (product[7] << 32);
489 tmp[1] = (product[7] >> 32) | (product[4] << 32);
490 tmp[2] = (product[4] >> 32) | (product[5] << 32);
491 tmp[3] = (product[6] << 32);
492 carry -= vli_sub(result, result, tmp, ndigits);
493
494 /* d4 */
495 tmp[0] = product[7];
496 tmp[1] = product[4] & 0xffffffff00000000ull;
497 tmp[2] = product[5];
498 tmp[3] = product[6] & 0xffffffff00000000ull;
499 carry -= vli_sub(result, result, tmp, ndigits);
500
501 if (carry < 0) {
502 do {
503 carry += vli_add(result, result, curve_prime, ndigits);
504 } while (carry < 0);
505 } else {
506 while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
507 carry -= vli_sub(result, result, curve_prime, ndigits);
508 }
509}
510
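
vli_mmod_fast_256() above is the fast reduction for p = 2^256 - 2^224 + 2^192 + 2^96 - 1 from the cited NIST routines document: with the 512-bit product grouped into the vectors t, s1..s4 and d1..d4 built above, the reduced value is

	t + 2*s1 + 2*s2 + s3 + s4 - d1 - d2 - d3 - d4   (mod p),

and the signed carry accumulated across the additions and subtractions is settled by the final add-p or subtract-p loops.
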
511/* Computes result = product % curve_prime
512 * from http://www.nsa.gov/ia/_files/nist-routines.pdf
513*/
514static bool vli_mmod_fast(u64 *result, u64 *product,
515 const u64 *curve_prime, unsigned int ndigits)
516{
517 u64 tmp[2 * ndigits];
518
519 switch (ndigits) {
520 case 3:
521 vli_mmod_fast_192(result, product, curve_prime, tmp);
522 break;
523 case 4:
524 vli_mmod_fast_256(result, product, curve_prime, tmp);
525 break;
526 default:
527		pr_err("unsupported digits size!\n");
528 return false;
529 }
530
531 return true;
532}
533
534/* Computes result = (left * right) % curve_prime. */
535static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
536 const u64 *curve_prime, unsigned int ndigits)
537{
538 u64 product[2 * ndigits];
539
540 vli_mult(product, left, right, ndigits);
541 vli_mmod_fast(result, product, curve_prime, ndigits);
542}
543
544/* Computes result = left^2 % curve_prime. */
545static void vli_mod_square_fast(u64 *result, const u64 *left,
546 const u64 *curve_prime, unsigned int ndigits)
547{
548 u64 product[2 * ndigits];
549
550 vli_square(product, left, ndigits);
551 vli_mmod_fast(result, product, curve_prime, ndigits);
552}
553
554#define EVEN(vli) (!(vli[0] & 1))
555/* Computes result = (1 / p_input) % mod. All VLIs are the same size.
556 * See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
557 * https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf
558 */
559static void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
560 unsigned int ndigits)
561{
562 u64 a[ndigits], b[ndigits];
563 u64 u[ndigits], v[ndigits];
564 u64 carry;
565 int cmp_result;
566
567 if (vli_is_zero(input, ndigits)) {
568 vli_clear(result, ndigits);
569 return;
570 }
571
572 vli_set(a, input, ndigits);
573 vli_set(b, mod, ndigits);
574 vli_clear(u, ndigits);
575 u[0] = 1;
576 vli_clear(v, ndigits);
577
578 while ((cmp_result = vli_cmp(a, b, ndigits)) != 0) {
579 carry = 0;
580
581 if (EVEN(a)) {
582 vli_rshift1(a, ndigits);
583
584 if (!EVEN(u))
585 carry = vli_add(u, u, mod, ndigits);
586
587 vli_rshift1(u, ndigits);
588 if (carry)
589 u[ndigits - 1] |= 0x8000000000000000ull;
590 } else if (EVEN(b)) {
591 vli_rshift1(b, ndigits);
592
593 if (!EVEN(v))
594 carry = vli_add(v, v, mod, ndigits);
595
596 vli_rshift1(v, ndigits);
597 if (carry)
598 v[ndigits - 1] |= 0x8000000000000000ull;
599 } else if (cmp_result > 0) {
600 vli_sub(a, a, b, ndigits);
601 vli_rshift1(a, ndigits);
602
603 if (vli_cmp(u, v, ndigits) < 0)
604 vli_add(u, u, mod, ndigits);
605
606 vli_sub(u, u, v, ndigits);
607 if (!EVEN(u))
608 carry = vli_add(u, u, mod, ndigits);
609
610 vli_rshift1(u, ndigits);
611 if (carry)
612 u[ndigits - 1] |= 0x8000000000000000ull;
613 } else {
614 vli_sub(b, b, a, ndigits);
615 vli_rshift1(b, ndigits);
616
617 if (vli_cmp(v, u, ndigits) < 0)
618 vli_add(v, v, mod, ndigits);
619
620 vli_sub(v, v, u, ndigits);
621 if (!EVEN(v))
622 carry = vli_add(v, v, mod, ndigits);
623
624 vli_rshift1(v, ndigits);
625 if (carry)
626 v[ndigits - 1] |= 0x8000000000000000ull;
627 }
628 }
629
630 vli_set(result, u, ndigits);
631}
632
633/* ------ Point operations ------ */
634
635/* Returns true if p_point is the point at infinity, false otherwise. */
636static bool ecc_point_is_zero(const struct ecc_point *point)
637{
638 return (vli_is_zero(point->x, point->ndigits) &&
639 vli_is_zero(point->y, point->ndigits));
640}
641
642/* Point multiplication algorithm using Montgomery's ladder with co-Z
643 * coordinates. From http://eprint.iacr.org/2011/338.pdf
644 */
645
646/* Double in place */
647static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
648 u64 *curve_prime, unsigned int ndigits)
649{
650 /* t1 = x, t2 = y, t3 = z */
651 u64 t4[ndigits];
652 u64 t5[ndigits];
653
654 if (vli_is_zero(z1, ndigits))
655 return;
656
657 /* t4 = y1^2 */
658 vli_mod_square_fast(t4, y1, curve_prime, ndigits);
659 /* t5 = x1*y1^2 = A */
660 vli_mod_mult_fast(t5, x1, t4, curve_prime, ndigits);
661 /* t4 = y1^4 */
662 vli_mod_square_fast(t4, t4, curve_prime, ndigits);
663 /* t2 = y1*z1 = z3 */
664 vli_mod_mult_fast(y1, y1, z1, curve_prime, ndigits);
665 /* t3 = z1^2 */
666 vli_mod_square_fast(z1, z1, curve_prime, ndigits);
667
668 /* t1 = x1 + z1^2 */
669 vli_mod_add(x1, x1, z1, curve_prime, ndigits);
670 /* t3 = 2*z1^2 */
671 vli_mod_add(z1, z1, z1, curve_prime, ndigits);
672 /* t3 = x1 - z1^2 */
673 vli_mod_sub(z1, x1, z1, curve_prime, ndigits);
674 /* t1 = x1^2 - z1^4 */
675 vli_mod_mult_fast(x1, x1, z1, curve_prime, ndigits);
676
677 /* t3 = 2*(x1^2 - z1^4) */
678 vli_mod_add(z1, x1, x1, curve_prime, ndigits);
679 /* t1 = 3*(x1^2 - z1^4) */
680 vli_mod_add(x1, x1, z1, curve_prime, ndigits);
681 if (vli_test_bit(x1, 0)) {
682 u64 carry = vli_add(x1, x1, curve_prime, ndigits);
683
684 vli_rshift1(x1, ndigits);
685 x1[ndigits - 1] |= carry << 63;
686 } else {
687 vli_rshift1(x1, ndigits);
688 }
689 /* t1 = 3/2*(x1^2 - z1^4) = B */
690
691 /* t3 = B^2 */
692 vli_mod_square_fast(z1, x1, curve_prime, ndigits);
693 /* t3 = B^2 - A */
694 vli_mod_sub(z1, z1, t5, curve_prime, ndigits);
695 /* t3 = B^2 - 2A = x3 */
696 vli_mod_sub(z1, z1, t5, curve_prime, ndigits);
697 /* t5 = A - x3 */
698 vli_mod_sub(t5, t5, z1, curve_prime, ndigits);
699 /* t1 = B * (A - x3) */
700 vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
701 /* t4 = B * (A - x3) - y1^4 = y3 */
702 vli_mod_sub(t4, x1, t4, curve_prime, ndigits);
703
704 vli_set(x1, z1, ndigits);
705 vli_set(z1, y1, ndigits);
706 vli_set(y1, t4, ndigits);
707}
708
709/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */
710static void apply_z(u64 *x1, u64 *y1, u64 *z, u64 *curve_prime,
711 unsigned int ndigits)
712{
713 u64 t1[ndigits];
714
715 vli_mod_square_fast(t1, z, curve_prime, ndigits); /* z^2 */
716 vli_mod_mult_fast(x1, x1, t1, curve_prime, ndigits); /* x1 * z^2 */
717 vli_mod_mult_fast(t1, t1, z, curve_prime, ndigits); /* z^3 */
718 vli_mod_mult_fast(y1, y1, t1, curve_prime, ndigits); /* y1 * z^3 */
719}
720
721/* P = (x1, y1) => 2P, (x2, y2) => P' */
722static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
723 u64 *p_initial_z, u64 *curve_prime,
724 unsigned int ndigits)
725{
726 u64 z[ndigits];
727
728 vli_set(x2, x1, ndigits);
729 vli_set(y2, y1, ndigits);
730
731 vli_clear(z, ndigits);
732 z[0] = 1;
733
734 if (p_initial_z)
735 vli_set(z, p_initial_z, ndigits);
736
737 apply_z(x1, y1, z, curve_prime, ndigits);
738
739 ecc_point_double_jacobian(x1, y1, z, curve_prime, ndigits);
740
741 apply_z(x2, y2, z, curve_prime, ndigits);
742}
743
744/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
745 * Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3)
746 * or P => P', Q => P + Q
747 */
748static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
749 unsigned int ndigits)
750{
751 /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
752 u64 t5[ndigits];
753
754 /* t5 = x2 - x1 */
755 vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
756 /* t5 = (x2 - x1)^2 = A */
757 vli_mod_square_fast(t5, t5, curve_prime, ndigits);
758 /* t1 = x1*A = B */
759 vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
760 /* t3 = x2*A = C */
761 vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits);
762 /* t4 = y2 - y1 */
763 vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
764 /* t5 = (y2 - y1)^2 = D */
765 vli_mod_square_fast(t5, y2, curve_prime, ndigits);
766
767 /* t5 = D - B */
768 vli_mod_sub(t5, t5, x1, curve_prime, ndigits);
769 /* t5 = D - B - C = x3 */
770 vli_mod_sub(t5, t5, x2, curve_prime, ndigits);
771 /* t3 = C - B */
772 vli_mod_sub(x2, x2, x1, curve_prime, ndigits);
773 /* t2 = y1*(C - B) */
774 vli_mod_mult_fast(y1, y1, x2, curve_prime, ndigits);
775 /* t3 = B - x3 */
776 vli_mod_sub(x2, x1, t5, curve_prime, ndigits);
777 /* t4 = (y2 - y1)*(B - x3) */
778 vli_mod_mult_fast(y2, y2, x2, curve_prime, ndigits);
779 /* t4 = y3 */
780 vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
781
782 vli_set(x2, t5, ndigits);
783}
784
785/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
786 * Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
787 * or P => P - Q, Q => P + Q
788 */
789static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
790 unsigned int ndigits)
791{
792 /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
793 u64 t5[ndigits];
794 u64 t6[ndigits];
795 u64 t7[ndigits];
796
797 /* t5 = x2 - x1 */
798 vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
799 /* t5 = (x2 - x1)^2 = A */
800 vli_mod_square_fast(t5, t5, curve_prime, ndigits);
801 /* t1 = x1*A = B */
802 vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
803 /* t3 = x2*A = C */
804 vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits);
805 /* t4 = y2 + y1 */
806 vli_mod_add(t5, y2, y1, curve_prime, ndigits);
807 /* t4 = y2 - y1 */
808 vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
809
810 /* t6 = C - B */
811 vli_mod_sub(t6, x2, x1, curve_prime, ndigits);
812 /* t2 = y1 * (C - B) */
813 vli_mod_mult_fast(y1, y1, t6, curve_prime, ndigits);
814 /* t6 = B + C */
815 vli_mod_add(t6, x1, x2, curve_prime, ndigits);
816 /* t3 = (y2 - y1)^2 */
817 vli_mod_square_fast(x2, y2, curve_prime, ndigits);
818 /* t3 = x3 */
819 vli_mod_sub(x2, x2, t6, curve_prime, ndigits);
820
821 /* t7 = B - x3 */
822 vli_mod_sub(t7, x1, x2, curve_prime, ndigits);
823 /* t4 = (y2 - y1)*(B - x3) */
824 vli_mod_mult_fast(y2, y2, t7, curve_prime, ndigits);
825 /* t4 = y3 */
826 vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
827
828 /* t7 = (y2 + y1)^2 = F */
829 vli_mod_square_fast(t7, t5, curve_prime, ndigits);
830 /* t7 = x3' */
831 vli_mod_sub(t7, t7, t6, curve_prime, ndigits);
832 /* t6 = x3' - B */
833 vli_mod_sub(t6, t7, x1, curve_prime, ndigits);
834 /* t6 = (y2 + y1)*(x3' - B) */
835 vli_mod_mult_fast(t6, t6, t5, curve_prime, ndigits);
836 /* t2 = y3' */
837 vli_mod_sub(y1, t6, y1, curve_prime, ndigits);
838
839 vli_set(x1, t7, ndigits);
840}
841
842static void ecc_point_mult(struct ecc_point *result,
843 const struct ecc_point *point, const u64 *scalar,
844 u64 *initial_z, u64 *curve_prime,
845 unsigned int ndigits)
846{
847 /* R0 and R1 */
848 u64 rx[2][ndigits];
849 u64 ry[2][ndigits];
850 u64 z[ndigits];
851 int i, nb;
852 int num_bits = vli_num_bits(scalar, ndigits);
853
854 vli_set(rx[1], point->x, ndigits);
855 vli_set(ry[1], point->y, ndigits);
856
857 xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z, curve_prime,
858 ndigits);
859
860 for (i = num_bits - 2; i > 0; i--) {
861 nb = !vli_test_bit(scalar, i);
862 xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime,
863 ndigits);
864 xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime,
865 ndigits);
866 }
867
868 nb = !vli_test_bit(scalar, 0);
869 xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime,
870 ndigits);
871
872 /* Find final 1/Z value. */
873 /* X1 - X0 */
874 vli_mod_sub(z, rx[1], rx[0], curve_prime, ndigits);
875 /* Yb * (X1 - X0) */
876 vli_mod_mult_fast(z, z, ry[1 - nb], curve_prime, ndigits);
877 /* xP * Yb * (X1 - X0) */
878 vli_mod_mult_fast(z, z, point->x, curve_prime, ndigits);
879
880 /* 1 / (xP * Yb * (X1 - X0)) */
881 vli_mod_inv(z, z, curve_prime, point->ndigits);
882
883 /* yP / (xP * Yb * (X1 - X0)) */
884 vli_mod_mult_fast(z, z, point->y, curve_prime, ndigits);
885 /* Xb * yP / (xP * Yb * (X1 - X0)) */
886 vli_mod_mult_fast(z, z, rx[1 - nb], curve_prime, ndigits);
887 /* End 1/Z calculation */
888
889 xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime, ndigits);
890
891 apply_z(rx[0], ry[0], z, curve_prime, ndigits);
892
893 vli_set(result->x, rx[0], ndigits);
894 vli_set(result->y, ry[0], ndigits);
895}
896
897static inline void ecc_swap_digits(const u64 *in, u64 *out,
898 unsigned int ndigits)
899{
900 int i;
901
902 for (i = 0; i < ndigits; i++)
903 out[i] = __swab64(in[ndigits - 1 - i]);
904}
905
906int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
907 const u8 *private_key, unsigned int private_key_len)
908{
909 int nbytes;
910 const struct ecc_curve *curve = ecc_get_curve(curve_id);
911
912 if (!private_key)
913 return -EINVAL;
914
915 nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
916
917 if (private_key_len != nbytes)
918 return -EINVAL;
919
920 if (vli_is_zero((const u64 *)&private_key[0], ndigits))
921 return -EINVAL;
922
923 /* Make sure the private key is in the range [1, n-1]. */
924 if (vli_cmp(curve->n, (const u64 *)&private_key[0], ndigits) != 1)
925 return -EINVAL;
926
927 return 0;
928}
929
930int ecdh_make_pub_key(unsigned int curve_id, unsigned int ndigits,
931 const u8 *private_key, unsigned int private_key_len,
932 u8 *public_key, unsigned int public_key_len)
933{
934 int ret = 0;
935 struct ecc_point *pk;
936 u64 priv[ndigits];
937 unsigned int nbytes;
938 const struct ecc_curve *curve = ecc_get_curve(curve_id);
939
940 if (!private_key || !curve) {
941 ret = -EINVAL;
942 goto out;
943 }
944
945 ecc_swap_digits((const u64 *)private_key, priv, ndigits);
946
947 pk = ecc_alloc_point(ndigits);
948 if (!pk) {
949 ret = -ENOMEM;
950 goto out;
951 }
952
953 ecc_point_mult(pk, &curve->g, priv, NULL, curve->p, ndigits);
954 if (ecc_point_is_zero(pk)) {
955 ret = -EAGAIN;
956 goto err_free_point;
957 }
958
959 nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
960 ecc_swap_digits(pk->x, (u64 *)public_key, ndigits);
961 ecc_swap_digits(pk->y, (u64 *)&public_key[nbytes], ndigits);
962
963err_free_point:
964 ecc_free_point(pk);
965out:
966 return ret;
967}
968
969int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
970 const u8 *private_key, unsigned int private_key_len,
971 const u8 *public_key, unsigned int public_key_len,
972 u8 *secret, unsigned int secret_len)
973{
974 int ret = 0;
975 struct ecc_point *product, *pk;
976 u64 priv[ndigits];
977 u64 rand_z[ndigits];
978 unsigned int nbytes;
979 const struct ecc_curve *curve = ecc_get_curve(curve_id);
980
981 if (!private_key || !public_key || !curve) {
982 ret = -EINVAL;
983 goto out;
984 }
985
986 nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
987
988 get_random_bytes(rand_z, nbytes);
989
990 pk = ecc_alloc_point(ndigits);
991 if (!pk) {
992 ret = -ENOMEM;
993 goto out;
994 }
995
996 product = ecc_alloc_point(ndigits);
997 if (!product) {
998 ret = -ENOMEM;
999 goto err_alloc_product;
1000 }
1001
1002 ecc_swap_digits((const u64 *)public_key, pk->x, ndigits);
1003 ecc_swap_digits((const u64 *)&public_key[nbytes], pk->y, ndigits);
1004 ecc_swap_digits((const u64 *)private_key, priv, ndigits);
1005
1006 ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits);
1007
1008 ecc_swap_digits(product->x, (u64 *)secret, ndigits);
1009
1010 if (ecc_point_is_zero(product))
1011 ret = -EFAULT;
1012
1013 ecc_free_point(product);
1014err_alloc_product:
1015 ecc_free_point(pk);
1016out:
1017 return ret;
1018}
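
crypto_ecdh_shared_secret() above is the textbook ECDH relation: if party A holds private scalar d_A with public point Q_A = d_A*G and party B likewise, then

	d_A*Q_B = d_A*d_B*G = d_B*Q_A,

so both sides derive the same point, and the secret exported here is its x coordinate. The random rand_z only randomizes the projective representation handed to ecc_point_mult() (a side-channel countermeasure) and does not change the result; as the kernel-doc in ecc.h (added below) recommends, the raw x coordinate should be hashed before being used as a symmetric key.
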
diff --git a/crypto/ecc.h b/crypto/ecc.h
new file mode 100644
index 000000000000..663d598c7406
--- /dev/null
+++ b/crypto/ecc.h
@@ -0,0 +1,83 @@
1/*
2 * Copyright (c) 2013, Kenneth MacKay
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
15 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
16 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
17 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
18 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
20 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26#ifndef _CRYPTO_ECC_H
27#define _CRYPTO_ECC_H
28
29#define ECC_MAX_DIGITS 4 /* 256 */
30
31#define ECC_DIGITS_TO_BYTES_SHIFT 3
32
33/**
34 * ecc_is_key_valid() - Validate a given ECDH private key
35 *
36 * @curve_id: id representing the curve to use
37 * @ndigits: curve number of digits
38 * @private_key: private key to be used for the given curve
39 * @private_key_len: private key len
40 *
41 * Returns 0 if the key is acceptable, a negative value otherwise
42 */
43int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
44 const u8 *private_key, unsigned int private_key_len);
45
46/**
47 * ecdh_make_pub_key() - Compute an ECC public key
48 *
49 * @curve_id: id representing the curve to use
50 * @private_key: pregenerated private key for the given curve
51 * @private_key_len: length of private_key
52 * @public_key: buffer for storing the public key generated
53 * @public_key_len: length of the public_key buffer
54 *
55 * Returns 0 if the public key was generated successfully, a negative value
56 * if an error occurred.
57 */
58int ecdh_make_pub_key(const unsigned int curve_id, unsigned int ndigits,
59 const u8 *private_key, unsigned int private_key_len,
60 u8 *public_key, unsigned int public_key_len);
61
62/**
63 * crypto_ecdh_shared_secret() - Compute a shared secret
64 *
65 * @curve_id: id representing the curve to use
66 * @private_key: private key of part A
67 * @private_key_len: length of private_key
68 * @public_key: public key of counterpart B
69 * @public_key_len: length of public_key
70 * @secret: buffer for storing the calculated shared secret
71 * @secret_len: length of the secret buffer
72 *
73 * Note: It is recommended that you hash the result of crypto_ecdh_shared_secret
74 * before using it for symmetric encryption or HMAC.
75 *
76 * Returns 0 if the shared secret was generated successfully, a negative value
77 * if an error occurred.
78 */
79int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
80 const u8 *private_key, unsigned int private_key_len,
81 const u8 *public_key, unsigned int public_key_len,
82 u8 *secret, unsigned int secret_len);
83#endif
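
A short sketch of how a kernel caller might string these helpers together for NIST P-256 (ndigits = 4, hence 32-byte scalars). This is illustrative only, not code from this patch; the DEMO_* names are invented and error handling is trimmed:

	#define DEMO_NDIGITS	4	/* NIST P-256 */
	#define DEMO_NBYTES	(DEMO_NDIGITS << ECC_DIGITS_TO_BYTES_SHIFT)

	static int ecdh_demo(const u8 *priv_a, const u8 *pub_b, u8 *secret)
	{
		int ret;

		/* reject out-of-range or malformed private keys first */
		ret = ecc_is_key_valid(ECC_CURVE_NIST_P256, DEMO_NDIGITS,
				       priv_a, DEMO_NBYTES);
		if (ret)
			return ret;

		/* derive the shared secret from our key and the peer's point */
		return crypto_ecdh_shared_secret(ECC_CURVE_NIST_P256, DEMO_NDIGITS,
						 priv_a, DEMO_NBYTES,
						 pub_b, 2 * DEMO_NBYTES,
						 secret, DEMO_NBYTES);
	}
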
diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h
new file mode 100644
index 000000000000..03ae5f714028
--- /dev/null
+++ b/crypto/ecc_curve_defs.h
@@ -0,0 +1,57 @@
1#ifndef _CRYPTO_ECC_CURVE_DEFS_H
2#define _CRYPTO_ECC_CURVE_DEFS_H
3
4struct ecc_point {
5 u64 *x;
6 u64 *y;
7 u8 ndigits;
8};
9
10struct ecc_curve {
11 char *name;
12 struct ecc_point g;
13 u64 *p;
14 u64 *n;
15};
16
17/* NIST P-192 */
18static u64 nist_p192_g_x[] = { 0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull,
19 0x188DA80EB03090F6ull };
20static u64 nist_p192_g_y[] = { 0x73F977A11E794811ull, 0x631011ED6B24CDD5ull,
21 0x07192B95FFC8DA78ull };
22static u64 nist_p192_p[] = { 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFEull,
23 0xFFFFFFFFFFFFFFFFull };
24static u64 nist_p192_n[] = { 0x146BC9B1B4D22831ull, 0xFFFFFFFF99DEF836ull,
25 0xFFFFFFFFFFFFFFFFull };
26static struct ecc_curve nist_p192 = {
27 .name = "nist_192",
28 .g = {
29 .x = nist_p192_g_x,
30 .y = nist_p192_g_y,
31 .ndigits = 3,
32 },
33 .p = nist_p192_p,
34 .n = nist_p192_n
35};
36
37/* NIST P-256 */
38static u64 nist_p256_g_x[] = { 0xF4A13945D898C296ull, 0x77037D812DEB33A0ull,
39 0xF8BCE6E563A440F2ull, 0x6B17D1F2E12C4247ull };
40static u64 nist_p256_g_y[] = { 0xCBB6406837BF51F5ull, 0x2BCE33576B315ECEull,
41 0x8EE7EB4A7C0F9E16ull, 0x4FE342E2FE1A7F9Bull };
42static u64 nist_p256_p[] = { 0xFFFFFFFFFFFFFFFFull, 0x00000000FFFFFFFFull,
43 0x0000000000000000ull, 0xFFFFFFFF00000001ull };
44static u64 nist_p256_n[] = { 0xF3B9CAC2FC632551ull, 0xBCE6FAADA7179E84ull,
45 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFF00000000ull };
46static struct ecc_curve nist_p256 = {
47 .name = "nist_256",
48 .g = {
49 .x = nist_p256_g_x,
50 .y = nist_p256_g_y,
51 .ndigits = 4,
52 },
53 .p = nist_p256_p,
54 .n = nist_p256_n
55};
56
57#endif
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
new file mode 100644
index 000000000000..3de289806d67
--- /dev/null
+++ b/crypto/ecdh.c
@@ -0,0 +1,151 @@
1/* ECDH key-agreement protocol
2 *
3 * Copyright (c) 2016, Intel Corporation
4 * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <crypto/internal/kpp.h>
14#include <crypto/kpp.h>
15#include <crypto/ecdh.h>
16#include <linux/scatterlist.h>
17#include "ecc.h"
18
19struct ecdh_ctx {
20 unsigned int curve_id;
21 unsigned int ndigits;
22 u64 private_key[ECC_MAX_DIGITS];
23 u64 public_key[2 * ECC_MAX_DIGITS];
24 u64 shared_secret[ECC_MAX_DIGITS];
25};
26
27static inline struct ecdh_ctx *ecdh_get_ctx(struct crypto_kpp *tfm)
28{
29 return kpp_tfm_ctx(tfm);
30}
31
32static unsigned int ecdh_supported_curve(unsigned int curve_id)
33{
34 switch (curve_id) {
35 case ECC_CURVE_NIST_P192: return 3;
36 case ECC_CURVE_NIST_P256: return 4;
37 default: return 0;
38 }
39}
40
41static int ecdh_set_secret(struct crypto_kpp *tfm, void *buf, unsigned int len)
42{
43 struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
44 struct ecdh params;
45 unsigned int ndigits;
46
47 if (crypto_ecdh_decode_key(buf, len, &params) < 0)
48 return -EINVAL;
49
50 ndigits = ecdh_supported_curve(params.curve_id);
51 if (!ndigits)
52 return -EINVAL;
53
54 ctx->curve_id = params.curve_id;
55 ctx->ndigits = ndigits;
56
57 if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits,
58 (const u8 *)params.key, params.key_size) < 0)
59 return -EINVAL;
60
61 memcpy(ctx->private_key, params.key, params.key_size);
62
63 return 0;
64}
65
66static int ecdh_compute_value(struct kpp_request *req)
67{
68 int ret = 0;
69 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
70 struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
71 size_t copied, nbytes;
72 void *buf;
73
74 nbytes = ctx->ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
75
76 if (req->src) {
77 copied = sg_copy_to_buffer(req->src, 1, ctx->public_key,
78 2 * nbytes);
79 if (copied != 2 * nbytes)
80 return -EINVAL;
81
82 ret = crypto_ecdh_shared_secret(ctx->curve_id, ctx->ndigits,
83 (const u8 *)ctx->private_key, nbytes,
84 (const u8 *)ctx->public_key, 2 * nbytes,
85 (u8 *)ctx->shared_secret, nbytes);
86
87 buf = ctx->shared_secret;
88 } else {
89 ret = ecdh_make_pub_key(ctx->curve_id, ctx->ndigits,
90 (const u8 *)ctx->private_key, nbytes,
91 (u8 *)ctx->public_key,
92 sizeof(ctx->public_key));
93 buf = ctx->public_key;
94 /* Public part is a point thus it has both coordinates */
95 nbytes *= 2;
96 }
97
98 if (ret < 0)
99 return ret;
100
101 copied = sg_copy_from_buffer(req->dst, 1, buf, nbytes);
102 if (copied != nbytes)
103 return -EINVAL;
104
105 return ret;
106}
107
108static int ecdh_max_size(struct crypto_kpp *tfm)
109{
110 struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
111 int nbytes = ctx->ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
112
113 /* Public key is made of two coordinates */
114 return 2 * nbytes;
115}
116
117static void no_exit_tfm(struct crypto_kpp *tfm)
118{
119 return;
120}
121
122static struct kpp_alg ecdh = {
123 .set_secret = ecdh_set_secret,
124 .generate_public_key = ecdh_compute_value,
125 .compute_shared_secret = ecdh_compute_value,
126 .max_size = ecdh_max_size,
127 .exit = no_exit_tfm,
128 .base = {
129 .cra_name = "ecdh",
130 .cra_driver_name = "ecdh-generic",
131 .cra_priority = 100,
132 .cra_module = THIS_MODULE,
133 .cra_ctxsize = sizeof(struct ecdh_ctx),
134 },
135};
136
137static int ecdh_init(void)
138{
139 return crypto_register_kpp(&ecdh);
140}
141
142static void ecdh_exit(void)
143{
144 crypto_unregister_kpp(&ecdh);
145}
146
147module_init(ecdh_init);
148module_exit(ecdh_exit);
149MODULE_ALIAS_CRYPTO("ecdh");
150MODULE_LICENSE("GPL");
151MODULE_DESCRIPTION("ECDH generic algorithm");
diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c
new file mode 100644
index 000000000000..3cd8a2414e60
--- /dev/null
+++ b/crypto/ecdh_helper.c
@@ -0,0 +1,86 @@
1/*
2 * Copyright (c) 2016, Intel Corporation
3 * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public Licence
7 * as published by the Free Software Foundation; either version
8 * 2 of the Licence, or (at your option) any later version.
9 */
10#include <linux/kernel.h>
11#include <linux/export.h>
12#include <linux/err.h>
13#include <linux/string.h>
14#include <crypto/ecdh.h>
15#include <crypto/kpp.h>
16
17#define ECDH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 2 * sizeof(short))
18
19static inline u8 *ecdh_pack_data(void *dst, const void *src, size_t sz)
20{
21 memcpy(dst, src, sz);
22 return dst + sz;
23}
24
25static inline const u8 *ecdh_unpack_data(void *dst, const void *src, size_t sz)
26{
27 memcpy(dst, src, sz);
28 return src + sz;
29}
30
31int crypto_ecdh_key_len(const struct ecdh *params)
32{
33 return ECDH_KPP_SECRET_MIN_SIZE + params->key_size;
34}
35EXPORT_SYMBOL_GPL(crypto_ecdh_key_len);
36
37int crypto_ecdh_encode_key(char *buf, unsigned int len,
38 const struct ecdh *params)
39{
40 u8 *ptr = buf;
41 struct kpp_secret secret = {
42 .type = CRYPTO_KPP_SECRET_TYPE_ECDH,
43 .len = len
44 };
45
46 if (unlikely(!buf))
47 return -EINVAL;
48
49 if (len != crypto_ecdh_key_len(params))
50 return -EINVAL;
51
52 ptr = ecdh_pack_data(ptr, &secret, sizeof(secret));
53 ptr = ecdh_pack_data(ptr, &params->curve_id, sizeof(params->curve_id));
54 ptr = ecdh_pack_data(ptr, &params->key_size, sizeof(params->key_size));
55 ecdh_pack_data(ptr, params->key, params->key_size);
56
57 return 0;
58}
59EXPORT_SYMBOL_GPL(crypto_ecdh_encode_key);
60
61int crypto_ecdh_decode_key(const char *buf, unsigned int len,
62 struct ecdh *params)
63{
64 const u8 *ptr = buf;
65 struct kpp_secret secret;
66
67 if (unlikely(!buf || len < ECDH_KPP_SECRET_MIN_SIZE))
68 return -EINVAL;
69
70 ptr = ecdh_unpack_data(&secret, ptr, sizeof(secret));
71 if (secret.type != CRYPTO_KPP_SECRET_TYPE_ECDH)
72 return -EINVAL;
73
74 ptr = ecdh_unpack_data(&params->curve_id, ptr, sizeof(params->curve_id));
75 ptr = ecdh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
76 if (secret.len != crypto_ecdh_key_len(params))
77 return -EINVAL;
78
79 /* Don't allocate memory. Set pointer to data
80 * within the given buffer
81 */
82 params->key = (void *)ptr;
83
84 return 0;
85}
86EXPORT_SYMBOL_GPL(crypto_ecdh_decode_key);
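
The helpers above define the packed format a caller hands to the kpp set_secret() path: a struct kpp_secret header, then curve_id and key_size as two shorts, then the raw key bytes, which is why ECDH_KPP_SECRET_MIN_SIZE adds 2 * sizeof(short). A hedged sketch of building such a buffer (the 32-byte raw_key and the demo_encode name are assumptions for illustration):

	static int demo_encode(char **out, unsigned int *out_len, char *raw_key)
	{
		struct ecdh p = {
			.curve_id = ECC_CURVE_NIST_P256,
			.key	  = raw_key,
			.key_size = 32,
		};
		char *buf;
		int ret;

		*out_len = crypto_ecdh_key_len(&p);
		buf = kmalloc(*out_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		ret = crypto_ecdh_encode_key(buf, *out_len, &p);
		if (ret) {
			kfree(buf);
			return ret;
		}

		/* caller passes (buf, *out_len) to crypto_kpp_set_secret() */
		*out = buf;
		return 0;
	}
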
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index b96a84560b67..1b01fe98e91f 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -20,6 +20,7 @@
20 20
21#include <crypto/internal/geniv.h> 21#include <crypto/internal/geniv.h>
22#include <crypto/scatterwalk.h> 22#include <crypto/scatterwalk.h>
23#include <crypto/skcipher.h>
23#include <linux/err.h> 24#include <linux/err.h>
24#include <linux/init.h> 25#include <linux/init.h>
25#include <linux/kernel.h> 26#include <linux/kernel.h>
@@ -112,13 +113,16 @@ static int echainiv_encrypt(struct aead_request *req)
112 info = req->iv; 113 info = req->iv;
113 114
114 if (req->src != req->dst) { 115 if (req->src != req->dst) {
115 struct blkcipher_desc desc = { 116 SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
116 .tfm = ctx->null,
117 };
118 117
119 err = crypto_blkcipher_encrypt( 118 skcipher_request_set_tfm(nreq, ctx->sknull);
120 &desc, req->dst, req->src, 119 skcipher_request_set_callback(nreq, req->base.flags,
121 req->assoclen + req->cryptlen); 120 NULL, NULL);
121 skcipher_request_set_crypt(nreq, req->src, req->dst,
122 req->assoclen + req->cryptlen,
123 NULL);
124
125 err = crypto_skcipher_encrypt(nreq);
122 if (err) 126 if (err)
123 return err; 127 return err;
124 } 128 }
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
deleted file mode 100644
index 16dda72fc4f8..000000000000
--- a/crypto/eseqiv.c
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * eseqiv: Encrypted Sequence Number IV Generator
- *
- * This generator generates an IV based on a sequence number by xoring it
- * with a salt and then encrypting it with the same key as used to encrypt
- * the plain text. This algorithm requires that the block size be equal
- * to the IV size. It is mainly useful for CBC.
- *
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <crypto/internal/skcipher.h>
-#include <crypto/rng.h>
-#include <crypto/scatterwalk.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/scatterlist.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-
-struct eseqiv_request_ctx {
-	struct scatterlist src[2];
-	struct scatterlist dst[2];
-	char tail[];
-};
-
-struct eseqiv_ctx {
-	spinlock_t lock;
-	unsigned int reqoff;
-	char salt[];
-};
-
-static void eseqiv_complete2(struct skcipher_givcrypt_request *req)
-{
-	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-	struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
-
-	memcpy(req->giv, PTR_ALIGN((u8 *)reqctx->tail,
-			 crypto_ablkcipher_alignmask(geniv) + 1),
-	       crypto_ablkcipher_ivsize(geniv));
-}
-
-static void eseqiv_complete(struct crypto_async_request *base, int err)
-{
-	struct skcipher_givcrypt_request *req = base->data;
-
-	if (err)
-		goto out;
-
-	eseqiv_complete2(req);
-
-out:
-	skcipher_givcrypt_complete(req, err);
-}
-
-static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
-{
-	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-	struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
-	struct ablkcipher_request *subreq;
-	crypto_completion_t compl;
-	void *data;
-	struct scatterlist *osrc, *odst;
-	struct scatterlist *dst;
-	struct page *srcp;
-	struct page *dstp;
-	u8 *giv;
-	u8 *vsrc;
-	u8 *vdst;
-	__be64 seq;
-	unsigned int ivsize;
-	unsigned int len;
-	int err;
-
-	subreq = (void *)(reqctx->tail + ctx->reqoff);
-	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
-
-	giv = req->giv;
-	compl = req->creq.base.complete;
-	data = req->creq.base.data;
-
-	osrc = req->creq.src;
-	odst = req->creq.dst;
-	srcp = sg_page(osrc);
-	dstp = sg_page(odst);
-	vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + osrc->offset;
-	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + odst->offset;
-
-	ivsize = crypto_ablkcipher_ivsize(geniv);
-
-	if (vsrc != giv + ivsize && vdst != giv + ivsize) {
-		giv = PTR_ALIGN((u8 *)reqctx->tail,
-				crypto_ablkcipher_alignmask(geniv) + 1);
-		compl = eseqiv_complete;
-		data = req;
-	}
-
-	ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
-					data);
-
-	sg_init_table(reqctx->src, 2);
-	sg_set_buf(reqctx->src, giv, ivsize);
-	scatterwalk_crypto_chain(reqctx->src, osrc, vsrc == giv + ivsize, 2);
-
-	dst = reqctx->src;
-	if (osrc != odst) {
-		sg_init_table(reqctx->dst, 2);
-		sg_set_buf(reqctx->dst, giv, ivsize);
-		scatterwalk_crypto_chain(reqctx->dst, odst, vdst == giv + ivsize, 2);
-
-		dst = reqctx->dst;
-	}
-
-	ablkcipher_request_set_crypt(subreq, reqctx->src, dst,
-				     req->creq.nbytes + ivsize,
-				     req->creq.info);
-
-	memcpy(req->creq.info, ctx->salt, ivsize);
-
-	len = ivsize;
-	if (ivsize > sizeof(u64)) {
-		memset(req->giv, 0, ivsize - sizeof(u64));
-		len = sizeof(u64);
-	}
-	seq = cpu_to_be64(req->seq);
-	memcpy(req->giv + ivsize - len, &seq, len);
-
-	err = crypto_ablkcipher_encrypt(subreq);
-	if (err)
-		goto out;
-
-	if (giv != req->giv)
-		eseqiv_complete2(req);
-
-out:
-	return err;
-}
-
-static int eseqiv_init(struct crypto_tfm *tfm)
-{
-	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
-	struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-	unsigned long alignmask;
-	unsigned int reqsize;
-	int err;
-
-	spin_lock_init(&ctx->lock);
-
-	alignmask = crypto_tfm_ctx_alignment() - 1;
-	reqsize = sizeof(struct eseqiv_request_ctx);
-
-	if (alignmask & reqsize) {
-		alignmask &= reqsize;
-		alignmask--;
-	}
-
-	alignmask = ~alignmask;
-	alignmask &= crypto_ablkcipher_alignmask(geniv);
-
-	reqsize += alignmask;
-	reqsize += crypto_ablkcipher_ivsize(geniv);
-	reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
-
-	ctx->reqoff = reqsize - sizeof(struct eseqiv_request_ctx);
-
-	tfm->crt_ablkcipher.reqsize = reqsize +
-				      sizeof(struct ablkcipher_request);
-
-	err = 0;
-	if (!crypto_get_default_rng()) {
-		crypto_ablkcipher_crt(geniv)->givencrypt = eseqiv_givencrypt;
-		err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
-					   crypto_ablkcipher_ivsize(geniv));
-		crypto_put_default_rng();
-	}
-
-	return err ?: skcipher_geniv_init(tfm);
-}
-
-static struct crypto_template eseqiv_tmpl;
-
-static struct crypto_instance *eseqiv_alloc(struct rtattr **tb)
-{
-	struct crypto_instance *inst;
-	int err;
-
-	inst = skcipher_geniv_alloc(&eseqiv_tmpl, tb, 0, 0);
-	if (IS_ERR(inst))
-		goto out;
-
-	err = -EINVAL;
-	if (inst->alg.cra_ablkcipher.ivsize != inst->alg.cra_blocksize)
-		goto free_inst;
-
-	inst->alg.cra_init = eseqiv_init;
-	inst->alg.cra_exit = skcipher_geniv_exit;
-
-	inst->alg.cra_ctxsize = sizeof(struct eseqiv_ctx);
-	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
-
-out:
-	return inst;
-
-free_inst:
-	skcipher_geniv_free(inst);
-	inst = ERR_PTR(err);
-	goto out;
-}
-
-static struct crypto_template eseqiv_tmpl = {
-	.name = "eseqiv",
-	.alloc = eseqiv_alloc,
-	.free = skcipher_geniv_free,
-	.module = THIS_MODULE,
-};
-
-static int __init eseqiv_module_init(void)
-{
-	return crypto_register_template(&eseqiv_tmpl);
-}
-
-static void __exit eseqiv_module_exit(void)
-{
-	crypto_unregister_template(&eseqiv_tmpl);
-}
-
-module_init(eseqiv_module_init);
-module_exit(eseqiv_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator");
-MODULE_ALIAS_CRYPTO("eseqiv");
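
For reference, the IV construction that eseqiv implemented amounts to encrypting a zero-padded big-endian sequence number under the data key, with the per-key salt supplied as the CBC IV, so the generated IV is effectively E_K(seqno XOR salt). Below is a minimal standalone C sketch of just the block-packing and XOR step that eseqiv_givencrypt() performed; the block-cipher call itself is deliberately omitted, and the names and sizes are illustrative only, not kernel API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IVSIZE 16 /* assumes a 16-byte block/IV, as for AES-CBC */

/* Pack the 64-bit sequence number into the last 8 bytes of a zeroed block,
 * mirroring the memset()/cpu_to_be64()/memcpy() sequence in
 * eseqiv_givencrypt() above. */
static void pack_seqno(uint8_t block[IVSIZE], uint64_t seq)
{
	int i;

	memset(block, 0, IVSIZE - sizeof(uint64_t));
	for (i = 0; i < 8; i++)
		block[IVSIZE - 1 - i] = (uint8_t)(seq >> (8 * i));
}

int main(void)
{
	uint8_t salt[IVSIZE] = { 0x5a }; /* stand-in for the random per-key salt */
	uint8_t block[IVSIZE];
	uint8_t xored[IVSIZE];
	int i;

	pack_seqno(block, 42);

	/* CBC with IV = salt encrypts (block XOR salt); eseqiv then ran this
	 * value through the same cipher and key as the payload to obtain the
	 * per-request IV. */
	for (i = 0; i < IVSIZE; i++)
		xored[i] = block[i] ^ salt[i];

	for (i = 0; i < IVSIZE; i++)
		printf("%02x", xored[i]);
	printf("\n");
	return 0;
}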
diff --git a/crypto/gcm.c b/crypto/gcm.c
index bec329b3de8d..70a892e87ccb 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -29,7 +29,7 @@ struct gcm_instance_ctx {
29}; 29};
30 30
31struct crypto_gcm_ctx { 31struct crypto_gcm_ctx {
32 struct crypto_ablkcipher *ctr; 32 struct crypto_skcipher *ctr;
33 struct crypto_ahash *ghash; 33 struct crypto_ahash *ghash;
34}; 34};
35 35
@@ -50,7 +50,7 @@ struct crypto_rfc4543_instance_ctx {
50 50
51struct crypto_rfc4543_ctx { 51struct crypto_rfc4543_ctx {
52 struct crypto_aead *child; 52 struct crypto_aead *child;
53 struct crypto_blkcipher *null; 53 struct crypto_skcipher *null;
54 u8 nonce[4]; 54 u8 nonce[4];
55}; 55};
56 56
@@ -74,7 +74,7 @@ struct crypto_gcm_req_priv_ctx {
74 struct crypto_gcm_ghash_ctx ghash_ctx; 74 struct crypto_gcm_ghash_ctx ghash_ctx;
75 union { 75 union {
76 struct ahash_request ahreq; 76 struct ahash_request ahreq;
77 struct ablkcipher_request abreq; 77 struct skcipher_request skreq;
78 } u; 78 } u;
79}; 79};
80 80
@@ -114,7 +114,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
114{ 114{
115 struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); 115 struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
116 struct crypto_ahash *ghash = ctx->ghash; 116 struct crypto_ahash *ghash = ctx->ghash;
117 struct crypto_ablkcipher *ctr = ctx->ctr; 117 struct crypto_skcipher *ctr = ctx->ctr;
118 struct { 118 struct {
119 be128 hash; 119 be128 hash;
120 u8 iv[8]; 120 u8 iv[8];
@@ -122,35 +122,35 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
122 struct crypto_gcm_setkey_result result; 122 struct crypto_gcm_setkey_result result;
123 123
124 struct scatterlist sg[1]; 124 struct scatterlist sg[1];
125 struct ablkcipher_request req; 125 struct skcipher_request req;
126 } *data; 126 } *data;
127 int err; 127 int err;
128 128
129 crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); 129 crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
130 crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) & 130 crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
131 CRYPTO_TFM_REQ_MASK); 131 CRYPTO_TFM_REQ_MASK);
132 err = crypto_ablkcipher_setkey(ctr, key, keylen); 132 err = crypto_skcipher_setkey(ctr, key, keylen);
133 crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) & 133 crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
134 CRYPTO_TFM_RES_MASK); 134 CRYPTO_TFM_RES_MASK);
135 if (err) 135 if (err)
136 return err; 136 return err;
137 137
138 data = kzalloc(sizeof(*data) + crypto_ablkcipher_reqsize(ctr), 138 data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
139 GFP_KERNEL); 139 GFP_KERNEL);
140 if (!data) 140 if (!data)
141 return -ENOMEM; 141 return -ENOMEM;
142 142
143 init_completion(&data->result.completion); 143 init_completion(&data->result.completion);
144 sg_init_one(data->sg, &data->hash, sizeof(data->hash)); 144 sg_init_one(data->sg, &data->hash, sizeof(data->hash));
145 ablkcipher_request_set_tfm(&data->req, ctr); 145 skcipher_request_set_tfm(&data->req, ctr);
146 ablkcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | 146 skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
147 CRYPTO_TFM_REQ_MAY_BACKLOG, 147 CRYPTO_TFM_REQ_MAY_BACKLOG,
148 crypto_gcm_setkey_done, 148 crypto_gcm_setkey_done,
149 &data->result); 149 &data->result);
150 ablkcipher_request_set_crypt(&data->req, data->sg, data->sg, 150 skcipher_request_set_crypt(&data->req, data->sg, data->sg,
151 sizeof(data->hash), data->iv); 151 sizeof(data->hash), data->iv);
152 152
153 err = crypto_ablkcipher_encrypt(&data->req); 153 err = crypto_skcipher_encrypt(&data->req);
154 if (err == -EINPROGRESS || err == -EBUSY) { 154 if (err == -EINPROGRESS || err == -EBUSY) {
155 err = wait_for_completion_interruptible( 155 err = wait_for_completion_interruptible(
156 &data->result.completion); 156 &data->result.completion);
@@ -223,13 +223,13 @@ static void crypto_gcm_init_crypt(struct aead_request *req,
223 struct crypto_aead *aead = crypto_aead_reqtfm(req); 223 struct crypto_aead *aead = crypto_aead_reqtfm(req);
224 struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); 224 struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
225 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 225 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
226 struct ablkcipher_request *ablk_req = &pctx->u.abreq; 226 struct skcipher_request *skreq = &pctx->u.skreq;
227 struct scatterlist *dst; 227 struct scatterlist *dst;
228 228
229 dst = req->src == req->dst ? pctx->src : pctx->dst; 229 dst = req->src == req->dst ? pctx->src : pctx->dst;
230 230
231 ablkcipher_request_set_tfm(ablk_req, ctx->ctr); 231 skcipher_request_set_tfm(skreq, ctx->ctr);
232 ablkcipher_request_set_crypt(ablk_req, pctx->src, dst, 232 skcipher_request_set_crypt(skreq, pctx->src, dst,
233 cryptlen + sizeof(pctx->auth_tag), 233 cryptlen + sizeof(pctx->auth_tag),
234 pctx->iv); 234 pctx->iv);
235} 235}
@@ -494,14 +494,14 @@ out:
494static int crypto_gcm_encrypt(struct aead_request *req) 494static int crypto_gcm_encrypt(struct aead_request *req)
495{ 495{
496 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 496 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
497 struct ablkcipher_request *abreq = &pctx->u.abreq; 497 struct skcipher_request *skreq = &pctx->u.skreq;
498 u32 flags = aead_request_flags(req); 498 u32 flags = aead_request_flags(req);
499 499
500 crypto_gcm_init_common(req); 500 crypto_gcm_init_common(req);
501 crypto_gcm_init_crypt(req, req->cryptlen); 501 crypto_gcm_init_crypt(req, req->cryptlen);
502 ablkcipher_request_set_callback(abreq, flags, gcm_encrypt_done, req); 502 skcipher_request_set_callback(skreq, flags, gcm_encrypt_done, req);
503 503
504 return crypto_ablkcipher_encrypt(abreq) ?: 504 return crypto_skcipher_encrypt(skreq) ?:
505 gcm_encrypt_continue(req, flags); 505 gcm_encrypt_continue(req, flags);
506} 506}
507 507
@@ -533,12 +533,12 @@ static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
533static int gcm_dec_hash_continue(struct aead_request *req, u32 flags) 533static int gcm_dec_hash_continue(struct aead_request *req, u32 flags)
534{ 534{
535 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 535 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
536 struct ablkcipher_request *abreq = &pctx->u.abreq; 536 struct skcipher_request *skreq = &pctx->u.skreq;
537 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; 537 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
538 538
539 crypto_gcm_init_crypt(req, gctx->cryptlen); 539 crypto_gcm_init_crypt(req, gctx->cryptlen);
540 ablkcipher_request_set_callback(abreq, flags, gcm_decrypt_done, req); 540 skcipher_request_set_callback(skreq, flags, gcm_decrypt_done, req);
541 return crypto_ablkcipher_decrypt(abreq) ?: crypto_gcm_verify(req); 541 return crypto_skcipher_decrypt(skreq) ?: crypto_gcm_verify(req);
542} 542}
543 543
544static int crypto_gcm_decrypt(struct aead_request *req) 544static int crypto_gcm_decrypt(struct aead_request *req)
@@ -566,7 +566,7 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm)
566 struct aead_instance *inst = aead_alg_instance(tfm); 566 struct aead_instance *inst = aead_alg_instance(tfm);
567 struct gcm_instance_ctx *ictx = aead_instance_ctx(inst); 567 struct gcm_instance_ctx *ictx = aead_instance_ctx(inst);
568 struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm); 568 struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm);
569 struct crypto_ablkcipher *ctr; 569 struct crypto_skcipher *ctr;
570 struct crypto_ahash *ghash; 570 struct crypto_ahash *ghash;
571 unsigned long align; 571 unsigned long align;
572 int err; 572 int err;
@@ -575,7 +575,7 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm)
575 if (IS_ERR(ghash)) 575 if (IS_ERR(ghash))
576 return PTR_ERR(ghash); 576 return PTR_ERR(ghash);
577 577
578 ctr = crypto_spawn_skcipher(&ictx->ctr); 578 ctr = crypto_spawn_skcipher2(&ictx->ctr);
579 err = PTR_ERR(ctr); 579 err = PTR_ERR(ctr);
580 if (IS_ERR(ctr)) 580 if (IS_ERR(ctr))
581 goto err_free_hash; 581 goto err_free_hash;
@@ -587,8 +587,8 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm)
587 align &= ~(crypto_tfm_ctx_alignment() - 1); 587 align &= ~(crypto_tfm_ctx_alignment() - 1);
588 crypto_aead_set_reqsize(tfm, 588 crypto_aead_set_reqsize(tfm,
589 align + offsetof(struct crypto_gcm_req_priv_ctx, u) + 589 align + offsetof(struct crypto_gcm_req_priv_ctx, u) +
590 max(sizeof(struct ablkcipher_request) + 590 max(sizeof(struct skcipher_request) +
591 crypto_ablkcipher_reqsize(ctr), 591 crypto_skcipher_reqsize(ctr),
592 sizeof(struct ahash_request) + 592 sizeof(struct ahash_request) +
593 crypto_ahash_reqsize(ghash))); 593 crypto_ahash_reqsize(ghash)));
594 594
@@ -604,7 +604,7 @@ static void crypto_gcm_exit_tfm(struct crypto_aead *tfm)
604 struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm); 604 struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm);
605 605
606 crypto_free_ahash(ctx->ghash); 606 crypto_free_ahash(ctx->ghash);
607 crypto_free_ablkcipher(ctx->ctr); 607 crypto_free_skcipher(ctx->ctr);
608} 608}
609 609
610static void crypto_gcm_free(struct aead_instance *inst) 610static void crypto_gcm_free(struct aead_instance *inst)
@@ -624,7 +624,7 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
624{ 624{
625 struct crypto_attr_type *algt; 625 struct crypto_attr_type *algt;
626 struct aead_instance *inst; 626 struct aead_instance *inst;
627 struct crypto_alg *ctr; 627 struct skcipher_alg *ctr;
628 struct crypto_alg *ghash_alg; 628 struct crypto_alg *ghash_alg;
629 struct hash_alg_common *ghash; 629 struct hash_alg_common *ghash;
630 struct gcm_instance_ctx *ctx; 630 struct gcm_instance_ctx *ctx;
@@ -639,7 +639,9 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
639 639
640 ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type, 640 ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
641 CRYPTO_ALG_TYPE_HASH, 641 CRYPTO_ALG_TYPE_HASH,
642 CRYPTO_ALG_TYPE_AHASH_MASK); 642 CRYPTO_ALG_TYPE_AHASH_MASK |
643 crypto_requires_sync(algt->type,
644 algt->mask));
643 if (IS_ERR(ghash_alg)) 645 if (IS_ERR(ghash_alg))
644 return PTR_ERR(ghash_alg); 646 return PTR_ERR(ghash_alg);
645 647
@@ -661,41 +663,42 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
661 goto err_drop_ghash; 663 goto err_drop_ghash;
662 664
663 crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst)); 665 crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
664 err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, 666 err = crypto_grab_skcipher2(&ctx->ctr, ctr_name, 0,
665 crypto_requires_sync(algt->type, 667 crypto_requires_sync(algt->type,
666 algt->mask)); 668 algt->mask));
667 if (err) 669 if (err)
668 goto err_drop_ghash; 670 goto err_drop_ghash;
669 671
670 ctr = crypto_skcipher_spawn_alg(&ctx->ctr); 672 ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
671 673
672 /* We only support 16-byte blocks. */ 674 /* We only support 16-byte blocks. */
673 if (ctr->cra_ablkcipher.ivsize != 16) 675 if (crypto_skcipher_alg_ivsize(ctr) != 16)
674 goto out_put_ctr; 676 goto out_put_ctr;
675 677
676 /* Not a stream cipher? */ 678 /* Not a stream cipher? */
677 err = -EINVAL; 679 err = -EINVAL;
678 if (ctr->cra_blocksize != 1) 680 if (ctr->base.cra_blocksize != 1)
679 goto out_put_ctr; 681 goto out_put_ctr;
680 682
681 err = -ENAMETOOLONG; 683 err = -ENAMETOOLONG;
682 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, 684 if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
683 "gcm_base(%s,%s)", ctr->cra_driver_name, 685 "gcm_base(%s,%s)", ctr->base.cra_driver_name,
684 ghash_alg->cra_driver_name) >= 686 ghash_alg->cra_driver_name) >=
685 CRYPTO_MAX_ALG_NAME) 687 CRYPTO_MAX_ALG_NAME)
686 goto out_put_ctr; 688 goto out_put_ctr;
687 689
688 memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME); 690 memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
689 691
690 inst->alg.base.cra_flags = (ghash->base.cra_flags | ctr->cra_flags) & 692 inst->alg.base.cra_flags = (ghash->base.cra_flags |
691 CRYPTO_ALG_ASYNC; 693 ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
692 inst->alg.base.cra_priority = (ghash->base.cra_priority + 694 inst->alg.base.cra_priority = (ghash->base.cra_priority +
693 ctr->cra_priority) / 2; 695 ctr->base.cra_priority) / 2;
694 inst->alg.base.cra_blocksize = 1; 696 inst->alg.base.cra_blocksize = 1;
695 inst->alg.base.cra_alignmask = ghash->base.cra_alignmask | 697 inst->alg.base.cra_alignmask = ghash->base.cra_alignmask |
696 ctr->cra_alignmask; 698 ctr->base.cra_alignmask;
697 inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx); 699 inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx);
698 inst->alg.ivsize = 12; 700 inst->alg.ivsize = 12;
701 inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
699 inst->alg.maxauthsize = 16; 702 inst->alg.maxauthsize = 16;
700 inst->alg.init = crypto_gcm_init_tfm; 703 inst->alg.init = crypto_gcm_init_tfm;
701 inst->alg.exit = crypto_gcm_exit_tfm; 704 inst->alg.exit = crypto_gcm_exit_tfm;
@@ -980,6 +983,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
980 inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx); 983 inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx);
981 984
982 inst->alg.ivsize = 8; 985 inst->alg.ivsize = 8;
986 inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
983 inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); 987 inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
984 988
985 inst->alg.init = crypto_rfc4106_init_tfm; 989 inst->alg.init = crypto_rfc4106_init_tfm;
@@ -1084,11 +1088,13 @@ static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc)
1084 unsigned int authsize = crypto_aead_authsize(aead); 1088 unsigned int authsize = crypto_aead_authsize(aead);
1085 unsigned int nbytes = req->assoclen + req->cryptlen - 1089 unsigned int nbytes = req->assoclen + req->cryptlen -
1086 (enc ? 0 : authsize); 1090 (enc ? 0 : authsize);
1087 struct blkcipher_desc desc = { 1091 SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null);
1088 .tfm = ctx->null,
1089 };
1090 1092
1091 return crypto_blkcipher_encrypt(&desc, req->dst, req->src, nbytes); 1093 skcipher_request_set_tfm(nreq, ctx->null);
1094 skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);
1095 skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);
1096
1097 return crypto_skcipher_encrypt(nreq);
1092} 1098}
1093 1099
1094static int crypto_rfc4543_encrypt(struct aead_request *req) 1100static int crypto_rfc4543_encrypt(struct aead_request *req)
@@ -1108,7 +1114,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
1108 struct crypto_aead_spawn *spawn = &ictx->aead; 1114 struct crypto_aead_spawn *spawn = &ictx->aead;
1109 struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); 1115 struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
1110 struct crypto_aead *aead; 1116 struct crypto_aead *aead;
1111 struct crypto_blkcipher *null; 1117 struct crypto_skcipher *null;
1112 unsigned long align; 1118 unsigned long align;
1113 int err = 0; 1119 int err = 0;
1114 1120
@@ -1116,7 +1122,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
1116 if (IS_ERR(aead)) 1122 if (IS_ERR(aead))
1117 return PTR_ERR(aead); 1123 return PTR_ERR(aead);
1118 1124
1119 null = crypto_get_default_null_skcipher(); 1125 null = crypto_get_default_null_skcipher2();
1120 err = PTR_ERR(null); 1126 err = PTR_ERR(null);
1121 if (IS_ERR(null)) 1127 if (IS_ERR(null))
1122 goto err_free_aead; 1128 goto err_free_aead;
@@ -1144,7 +1150,7 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
1144 struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); 1150 struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
1145 1151
1146 crypto_free_aead(ctx->child); 1152 crypto_free_aead(ctx->child);
1147 crypto_put_default_null_skcipher(); 1153 crypto_put_default_null_skcipher2();
1148} 1154}
1149 1155
1150static void crypto_rfc4543_free(struct aead_instance *inst) 1156static void crypto_rfc4543_free(struct aead_instance *inst)
@@ -1219,6 +1225,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
1219 inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx); 1225 inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx);
1220 1226
1221 inst->alg.ivsize = 8; 1227 inst->alg.ivsize = 8;
1228 inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
1222 inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); 1229 inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
1223 1230
1224 inst->alg.init = crypto_rfc4543_init_tfm; 1231 inst->alg.init = crypto_rfc4543_init_tfm;
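
The gcm/rfc4543 changes above replace the old ablkcipher/blkcipher calls with the skcipher request API, the same pattern visible in crypto_gcm_setkey() and crypto_rfc4543_copy_src_to_dst(). As a rough sketch of that call pattern from a consumer's point of view, for a caller that already holds a keyed crypto_skcipher handle and a linear buffer; error handling is trimmed and the asynchronous completion case is only noted in comments, not handled.

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

/* Encrypt one linear buffer in place with an already-keyed skcipher tfm.
 * Sketch only: a real caller must wait for completion when the cipher is
 * asynchronous and crypto_skcipher_encrypt() returns -EINPROGRESS. */
static int example_skcipher_encrypt(struct crypto_skcipher *tfm,
				    u8 *buf, unsigned int len, u8 *iv)
{
	SKCIPHER_REQUEST_ON_STACK(req, tfm);
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, buf, len);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	err = crypto_skcipher_encrypt(req);

	skcipher_request_zero(req);
	return err;
}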
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index 597cedd3531c..c4938497eedb 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -87,24 +87,28 @@ void jent_memcpy(void *dest, const void *src, unsigned int n)
 	memcpy(dest, src, n);
 }
 
+/*
+ * Obtain a high-resolution time stamp value. The time stamp is used to measure
+ * the execution time of a given code path and its variations. Hence, the time
+ * stamp must have a sufficiently high resolution.
+ *
+ * Note, if the function returns zero because a given architecture does not
+ * implement a high-resolution time stamp, the RNG code's runtime test
+ * will detect it and will not produce output.
+ */
 void jent_get_nstime(__u64 *out)
 {
-	struct timespec ts;
 	__u64 tmp = 0;
 
 	tmp = random_get_entropy();
 
 	/*
-	 * If random_get_entropy does not return a value (which is possible on,
-	 * for example, MIPS), invoke __getnstimeofday
+	 * If random_get_entropy does not return a value, i.e. it is not
+	 * implemented for a given architecture, use a clock source.
 	 * hoping that there are timers we can work with.
 	 */
-	if ((0 == tmp) &&
-	    (0 == __getnstimeofday(&ts))) {
-		tmp = ts.tv_sec;
-		tmp = tmp << 32;
-		tmp = tmp | ts.tv_nsec;
-	}
+	if (tmp == 0)
+		tmp = ktime_get_ns();
 
 	*out = tmp;
 }
diff --git a/crypto/kpp.c b/crypto/kpp.c
new file mode 100644
index 000000000000..d36ce05eee43
--- /dev/null
+++ b/crypto/kpp.c
@@ -0,0 +1,123 @@
+/*
+ * Key-agreement Protocol Primitives (KPP)
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <linux/cryptouser.h>
+#include <net/netlink.h>
+#include <crypto/kpp.h>
+#include <crypto/internal/kpp.h>
+#include "internal.h"
+
+#ifdef CONFIG_NET
+static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	struct crypto_report_kpp rkpp;
+
+	strncpy(rkpp.type, "kpp", sizeof(rkpp.type));
+
+	if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
+		    sizeof(struct crypto_report_kpp), &rkpp))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+#else
+static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	return -ENOSYS;
+}
+#endif
+
+static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg)
+	__attribute__ ((unused));
+
+static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg)
+{
+	seq_puts(m, "type : kpp\n");
+}
+
+static void crypto_kpp_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_kpp *kpp = __crypto_kpp_tfm(tfm);
+	struct kpp_alg *alg = crypto_kpp_alg(kpp);
+
+	alg->exit(kpp);
+}
+
+static int crypto_kpp_init_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_kpp *kpp = __crypto_kpp_tfm(tfm);
+	struct kpp_alg *alg = crypto_kpp_alg(kpp);
+
+	if (alg->exit)
+		kpp->base.exit = crypto_kpp_exit_tfm;
+
+	if (alg->init)
+		return alg->init(kpp);
+
+	return 0;
+}
+
+static const struct crypto_type crypto_kpp_type = {
+	.extsize = crypto_alg_extsize,
+	.init_tfm = crypto_kpp_init_tfm,
+#ifdef CONFIG_PROC_FS
+	.show = crypto_kpp_show,
+#endif
+	.report = crypto_kpp_report,
+	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
+	.maskset = CRYPTO_ALG_TYPE_MASK,
+	.type = CRYPTO_ALG_TYPE_KPP,
+	.tfmsize = offsetof(struct crypto_kpp, base),
+};
+
+struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask)
+{
+	return crypto_alloc_tfm(alg_name, &crypto_kpp_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_kpp);
+
+static void kpp_prepare_alg(struct kpp_alg *alg)
+{
+	struct crypto_alg *base = &alg->base;
+
+	base->cra_type = &crypto_kpp_type;
+	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+	base->cra_flags |= CRYPTO_ALG_TYPE_KPP;
+}
+
+int crypto_register_kpp(struct kpp_alg *alg)
+{
+	struct crypto_alg *base = &alg->base;
+
+	kpp_prepare_alg(alg);
+	return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_kpp);
+
+void crypto_unregister_kpp(struct kpp_alg *alg)
+{
+	crypto_unregister_alg(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_kpp);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Key-agreement Protocol Primitives");
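
The KPP type registered above is consumed through the request helpers in include/crypto/kpp.h, which are added in the same series but not shown in this hunk. The sketch below assumes those helpers keep the names used in that header (kpp_request_alloc(), crypto_kpp_set_secret(), kpp_request_set_output(), crypto_kpp_generate_public_key()), and it assumes the secret buffer is already encoded in whatever format the chosen algorithm ("dh" or "ecdh" from this same series) expects.

#include <crypto/kpp.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/err.h>

/* Generate our public value with a KPP algorithm such as "dh".
 * Sketch only: secret/secret_len must already be packed as the algorithm
 * expects, and an asynchronous provider may return -EINPROGRESS, which a
 * real caller has to wait for. */
static int example_kpp_public_key(void *secret, unsigned int secret_len,
				  void *pub, unsigned int pub_len)
{
	struct crypto_kpp *tfm;
	struct kpp_request *req;
	struct scatterlist dst;
	int err;

	tfm = crypto_alloc_kpp("dh", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_kpp_set_secret(tfm, secret, secret_len);
	if (err)
		goto out_free_tfm;

	req = kpp_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&dst, pub, pub_len);
	kpp_request_set_input(req, NULL, 0);
	kpp_request_set_output(req, &dst, pub_len);
	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_kpp_generate_public_key(req);

	kpp_request_free(req);
out_free_tfm:
	crypto_free_kpp(tfm);
	return err;
}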
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index c4eb9da49d4f..86fb59b109a9 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -41,7 +41,7 @@ struct mcryptd_flush_list {
41static struct mcryptd_flush_list __percpu *mcryptd_flist; 41static struct mcryptd_flush_list __percpu *mcryptd_flist;
42 42
43struct hashd_instance_ctx { 43struct hashd_instance_ctx {
44 struct crypto_shash_spawn spawn; 44 struct crypto_ahash_spawn spawn;
45 struct mcryptd_queue *queue; 45 struct mcryptd_queue *queue;
46}; 46};
47 47
@@ -272,18 +272,18 @@ static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
272{ 272{
273 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); 273 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
274 struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst); 274 struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
275 struct crypto_shash_spawn *spawn = &ictx->spawn; 275 struct crypto_ahash_spawn *spawn = &ictx->spawn;
276 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); 276 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
277 struct crypto_shash *hash; 277 struct crypto_ahash *hash;
278 278
279 hash = crypto_spawn_shash(spawn); 279 hash = crypto_spawn_ahash(spawn);
280 if (IS_ERR(hash)) 280 if (IS_ERR(hash))
281 return PTR_ERR(hash); 281 return PTR_ERR(hash);
282 282
283 ctx->child = hash; 283 ctx->child = hash;
284 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 284 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
285 sizeof(struct mcryptd_hash_request_ctx) + 285 sizeof(struct mcryptd_hash_request_ctx) +
286 crypto_shash_descsize(hash)); 286 crypto_ahash_reqsize(hash));
287 return 0; 287 return 0;
288} 288}
289 289
@@ -291,21 +291,21 @@ static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
291{ 291{
292 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); 292 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
293 293
294 crypto_free_shash(ctx->child); 294 crypto_free_ahash(ctx->child);
295} 295}
296 296
297static int mcryptd_hash_setkey(struct crypto_ahash *parent, 297static int mcryptd_hash_setkey(struct crypto_ahash *parent,
298 const u8 *key, unsigned int keylen) 298 const u8 *key, unsigned int keylen)
299{ 299{
300 struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); 300 struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
301 struct crypto_shash *child = ctx->child; 301 struct crypto_ahash *child = ctx->child;
302 int err; 302 int err;
303 303
304 crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK); 304 crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
305 crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) & 305 crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) &
306 CRYPTO_TFM_REQ_MASK); 306 CRYPTO_TFM_REQ_MASK);
307 err = crypto_shash_setkey(child, key, keylen); 307 err = crypto_ahash_setkey(child, key, keylen);
308 crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) & 308 crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) &
309 CRYPTO_TFM_RES_MASK); 309 CRYPTO_TFM_RES_MASK);
310 return err; 310 return err;
311} 311}
@@ -331,20 +331,20 @@ static int mcryptd_hash_enqueue(struct ahash_request *req,
331static void mcryptd_hash_init(struct crypto_async_request *req_async, int err) 331static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
332{ 332{
333 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); 333 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
334 struct crypto_shash *child = ctx->child; 334 struct crypto_ahash *child = ctx->child;
335 struct ahash_request *req = ahash_request_cast(req_async); 335 struct ahash_request *req = ahash_request_cast(req_async);
336 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 336 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
337 struct shash_desc *desc = &rctx->desc; 337 struct ahash_request *desc = &rctx->areq;
338 338
339 if (unlikely(err == -EINPROGRESS)) 339 if (unlikely(err == -EINPROGRESS))
340 goto out; 340 goto out;
341 341
342 desc->tfm = child; 342 ahash_request_set_tfm(desc, child);
343 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 343 ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
344 rctx->complete, req_async);
344 345
345 err = crypto_shash_init(desc); 346 rctx->out = req->result;
346 347 err = crypto_ahash_init(desc);
347 req->base.complete = rctx->complete;
348 348
349out: 349out:
350 local_bh_disable(); 350 local_bh_disable();
@@ -365,7 +365,8 @@ static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
365 if (unlikely(err == -EINPROGRESS)) 365 if (unlikely(err == -EINPROGRESS))
366 goto out; 366 goto out;
367 367
368 err = shash_ahash_mcryptd_update(req, &rctx->desc); 368 rctx->out = req->result;
369 err = ahash_mcryptd_update(&rctx->areq);
369 if (err) { 370 if (err) {
370 req->base.complete = rctx->complete; 371 req->base.complete = rctx->complete;
371 goto out; 372 goto out;
@@ -391,7 +392,8 @@ static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
391 if (unlikely(err == -EINPROGRESS)) 392 if (unlikely(err == -EINPROGRESS))
392 goto out; 393 goto out;
393 394
394 err = shash_ahash_mcryptd_final(req, &rctx->desc); 395 rctx->out = req->result;
396 err = ahash_mcryptd_final(&rctx->areq);
395 if (err) { 397 if (err) {
396 req->base.complete = rctx->complete; 398 req->base.complete = rctx->complete;
397 goto out; 399 goto out;
@@ -416,8 +418,8 @@ static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
416 418
417 if (unlikely(err == -EINPROGRESS)) 419 if (unlikely(err == -EINPROGRESS))
418 goto out; 420 goto out;
419 421 rctx->out = req->result;
420 err = shash_ahash_mcryptd_finup(req, &rctx->desc); 422 err = ahash_mcryptd_finup(&rctx->areq);
421 423
422 if (err) { 424 if (err) {
423 req->base.complete = rctx->complete; 425 req->base.complete = rctx->complete;
@@ -439,25 +441,21 @@ static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
439static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err) 441static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
440{ 442{
441 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); 443 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
442 struct crypto_shash *child = ctx->child; 444 struct crypto_ahash *child = ctx->child;
443 struct ahash_request *req = ahash_request_cast(req_async); 445 struct ahash_request *req = ahash_request_cast(req_async);
444 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 446 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
445 struct shash_desc *desc = &rctx->desc; 447 struct ahash_request *desc = &rctx->areq;
446 448
447 if (unlikely(err == -EINPROGRESS)) 449 if (unlikely(err == -EINPROGRESS))
448 goto out; 450 goto out;
449 451
450 desc->tfm = child; 452 ahash_request_set_tfm(desc, child);
451 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; /* check this again */ 453 ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
452 454 rctx->complete, req_async);
453 err = shash_ahash_mcryptd_digest(req, desc);
454 455
455 if (err) { 456 rctx->out = req->result;
456 req->base.complete = rctx->complete; 457 err = ahash_mcryptd_digest(desc);
457 goto out;
458 }
459 458
460 return;
461out: 459out:
462 local_bh_disable(); 460 local_bh_disable();
463 rctx->complete(&req->base, err); 461 rctx->complete(&req->base, err);
@@ -473,14 +471,14 @@ static int mcryptd_hash_export(struct ahash_request *req, void *out)
473{ 471{
474 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 472 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
475 473
476 return crypto_shash_export(&rctx->desc, out); 474 return crypto_ahash_export(&rctx->areq, out);
477} 475}
478 476
479static int mcryptd_hash_import(struct ahash_request *req, const void *in) 477static int mcryptd_hash_import(struct ahash_request *req, const void *in)
480{ 478{
481 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 479 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
482 480
483 return crypto_shash_import(&rctx->desc, in); 481 return crypto_ahash_import(&rctx->areq, in);
484} 482}
485 483
486static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, 484static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
@@ -488,7 +486,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
488{ 486{
489 struct hashd_instance_ctx *ctx; 487 struct hashd_instance_ctx *ctx;
490 struct ahash_instance *inst; 488 struct ahash_instance *inst;
491 struct shash_alg *salg; 489 struct hash_alg_common *halg;
492 struct crypto_alg *alg; 490 struct crypto_alg *alg;
493 u32 type = 0; 491 u32 type = 0;
494 u32 mask = 0; 492 u32 mask = 0;
@@ -496,11 +494,11 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
496 494
497 mcryptd_check_internal(tb, &type, &mask); 495 mcryptd_check_internal(tb, &type, &mask);
498 496
499 salg = shash_attr_alg(tb[1], type, mask); 497 halg = ahash_attr_alg(tb[1], type, mask);
500 if (IS_ERR(salg)) 498 if (IS_ERR(halg))
501 return PTR_ERR(salg); 499 return PTR_ERR(halg);
502 500
503 alg = &salg->base; 501 alg = &halg->base;
504 pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name); 502 pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
505 inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(), 503 inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
506 sizeof(*ctx)); 504 sizeof(*ctx));
@@ -511,7 +509,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
511 ctx = ahash_instance_ctx(inst); 509 ctx = ahash_instance_ctx(inst);
512 ctx->queue = queue; 510 ctx->queue = queue;
513 511
514 err = crypto_init_shash_spawn(&ctx->spawn, salg, 512 err = crypto_init_ahash_spawn(&ctx->spawn, halg,
515 ahash_crypto_instance(inst)); 513 ahash_crypto_instance(inst));
516 if (err) 514 if (err)
517 goto out_free_inst; 515 goto out_free_inst;
@@ -521,8 +519,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
521 type |= CRYPTO_ALG_INTERNAL; 519 type |= CRYPTO_ALG_INTERNAL;
522 inst->alg.halg.base.cra_flags = type; 520 inst->alg.halg.base.cra_flags = type;
523 521
524 inst->alg.halg.digestsize = salg->digestsize; 522 inst->alg.halg.digestsize = halg->digestsize;
525 inst->alg.halg.statesize = salg->statesize; 523 inst->alg.halg.statesize = halg->statesize;
526 inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx); 524 inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
527 525
528 inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm; 526 inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
@@ -539,7 +537,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
539 537
540 err = ahash_register_instance(tmpl, inst); 538 err = ahash_register_instance(tmpl, inst);
541 if (err) { 539 if (err) {
542 crypto_drop_shash(&ctx->spawn); 540 crypto_drop_ahash(&ctx->spawn);
543out_free_inst: 541out_free_inst:
544 kfree(inst); 542 kfree(inst);
545 } 543 }
@@ -575,7 +573,7 @@ static void mcryptd_free(struct crypto_instance *inst)
575 573
576 switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) { 574 switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
577 case CRYPTO_ALG_TYPE_AHASH: 575 case CRYPTO_ALG_TYPE_AHASH:
578 crypto_drop_shash(&hctx->spawn); 576 crypto_drop_ahash(&hctx->spawn);
579 kfree(ahash_instance(inst)); 577 kfree(ahash_instance(inst));
580 return; 578 return;
581 default: 579 default:
@@ -612,55 +610,38 @@ struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
612} 610}
613EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash); 611EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
614 612
615int shash_ahash_mcryptd_digest(struct ahash_request *req, 613int ahash_mcryptd_digest(struct ahash_request *desc)
616 struct shash_desc *desc)
617{ 614{
618 int err; 615 int err;
619 616
620 err = crypto_shash_init(desc) ?: 617 err = crypto_ahash_init(desc) ?:
621 shash_ahash_mcryptd_finup(req, desc); 618 ahash_mcryptd_finup(desc);
622 619
623 return err; 620 return err;
624} 621}
625EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_digest);
626 622
627int shash_ahash_mcryptd_update(struct ahash_request *req, 623int ahash_mcryptd_update(struct ahash_request *desc)
628 struct shash_desc *desc)
629{ 624{
630 struct crypto_shash *tfm = desc->tfm;
631 struct shash_alg *shash = crypto_shash_alg(tfm);
632
633 /* alignment is to be done by multi-buffer crypto algorithm if needed */ 625 /* alignment is to be done by multi-buffer crypto algorithm if needed */
634 626
635 return shash->update(desc, NULL, 0); 627 return crypto_ahash_update(desc);
636} 628}
637EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_update);
638 629
639int shash_ahash_mcryptd_finup(struct ahash_request *req, 630int ahash_mcryptd_finup(struct ahash_request *desc)
640 struct shash_desc *desc)
641{ 631{
642 struct crypto_shash *tfm = desc->tfm;
643 struct shash_alg *shash = crypto_shash_alg(tfm);
644
645 /* alignment is to be done by multi-buffer crypto algorithm if needed */ 632 /* alignment is to be done by multi-buffer crypto algorithm if needed */
646 633
647 return shash->finup(desc, NULL, 0, req->result); 634 return crypto_ahash_finup(desc);
648} 635}
649EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_finup);
650 636
651int shash_ahash_mcryptd_final(struct ahash_request *req, 637int ahash_mcryptd_final(struct ahash_request *desc)
652 struct shash_desc *desc)
653{ 638{
654 struct crypto_shash *tfm = desc->tfm;
655 struct shash_alg *shash = crypto_shash_alg(tfm);
656
657 /* alignment is to be done by multi-buffer crypto algorithm if needed */ 639 /* alignment is to be done by multi-buffer crypto algorithm if needed */
658 640
659 return shash->final(desc, req->result); 641 return crypto_ahash_final(desc);
660} 642}
661EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_final);
662 643
663struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm) 644struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
664{ 645{
665 struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); 646 struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
666 647
@@ -668,12 +649,12 @@ struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
668} 649}
669EXPORT_SYMBOL_GPL(mcryptd_ahash_child); 650EXPORT_SYMBOL_GPL(mcryptd_ahash_child);
670 651
671struct shash_desc *mcryptd_shash_desc(struct ahash_request *req) 652struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req)
672{ 653{
673 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 654 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
674 return &rctx->desc; 655 return &rctx->areq;
675} 656}
676EXPORT_SYMBOL_GPL(mcryptd_shash_desc); 657EXPORT_SYMBOL_GPL(mcryptd_ahash_desc);
677 658
678void mcryptd_free_ahash(struct mcryptd_ahash *tfm) 659void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
679{ 660{
@@ -681,7 +662,6 @@ void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
681} 662}
682EXPORT_SYMBOL_GPL(mcryptd_free_ahash); 663EXPORT_SYMBOL_GPL(mcryptd_free_ahash);
683 664
684
685static int __init mcryptd_init(void) 665static int __init mcryptd_init(void)
686{ 666{
687 int err, cpu; 667 int err, cpu;
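
The mcryptd changes above make the daemon wrap an ahash child instead of a shash, so the inner calls become the asynchronous hash request API (ahash_request_set_tfm(), ahash_request_set_callback(), crypto_ahash_init()/update()/final() as seen in the hunks). For orientation, a minimal sketch of that API from a consumer's side, doing a one-shot digest of a linear buffer; the -EINPROGRESS case an async implementation can return is only noted, not handled.

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* One-shot digest of a linear buffer with the ahash interface.
 * Sketch only: crypto_ahash_digest() may return -EINPROGRESS for an
 * asynchronous implementation, which a real caller must wait out. */
static int example_ahash_digest(const char *alg, const u8 *data,
				unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash(alg, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_ahash_digest(req);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return err;
}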
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 8ba426635b1b..877019a6d3ea 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -92,19 +92,17 @@ static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name)
92 92
93struct pkcs1pad_ctx { 93struct pkcs1pad_ctx {
94 struct crypto_akcipher *child; 94 struct crypto_akcipher *child;
95 const char *hash_name;
96 unsigned int key_size; 95 unsigned int key_size;
97}; 96};
98 97
99struct pkcs1pad_inst_ctx { 98struct pkcs1pad_inst_ctx {
100 struct crypto_akcipher_spawn spawn; 99 struct crypto_akcipher_spawn spawn;
101 const char *hash_name; 100 const struct rsa_asn1_template *digest_info;
102}; 101};
103 102
104struct pkcs1pad_request { 103struct pkcs1pad_request {
105 struct scatterlist in_sg[3], out_sg[2]; 104 struct scatterlist in_sg[2], out_sg[1];
106 uint8_t *in_buf, *out_buf; 105 uint8_t *in_buf, *out_buf;
107
108 struct akcipher_request child_req; 106 struct akcipher_request child_req;
109}; 107};
110 108
@@ -112,40 +110,48 @@ static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
112 unsigned int keylen) 110 unsigned int keylen)
113{ 111{
114 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); 112 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
115 int err, size; 113 int err;
114
115 ctx->key_size = 0;
116 116
117 err = crypto_akcipher_set_pub_key(ctx->child, key, keylen); 117 err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
118 if (err)
119 return err;
118 120
119 if (!err) { 121 /* Find out new modulus size from rsa implementation */
120 /* Find out new modulus size from rsa implementation */ 122 err = crypto_akcipher_maxsize(ctx->child);
121 size = crypto_akcipher_maxsize(ctx->child); 123 if (err < 0)
124 return err;
122 125
123 ctx->key_size = size > 0 ? size : 0; 126 if (err > PAGE_SIZE)
124 if (size <= 0) 127 return -ENOTSUPP;
125 err = size;
126 }
127 128
128 return err; 129 ctx->key_size = err;
130 return 0;
129} 131}
130 132
131static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key, 133static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
132 unsigned int keylen) 134 unsigned int keylen)
133{ 135{
134 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); 136 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
135 int err, size; 137 int err;
138
139 ctx->key_size = 0;
136 140
137 err = crypto_akcipher_set_priv_key(ctx->child, key, keylen); 141 err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
142 if (err)
143 return err;
138 144
139 if (!err) { 145 /* Find out new modulus size from rsa implementation */
140 /* Find out new modulus size from rsa implementation */ 146 err = crypto_akcipher_maxsize(ctx->child);
141 size = crypto_akcipher_maxsize(ctx->child); 147 if (err < 0)
148 return err;
142 149
143 ctx->key_size = size > 0 ? size : 0; 150 if (err > PAGE_SIZE)
144 if (size <= 0) 151 return -ENOTSUPP;
145 err = size;
146 }
147 152
148 return err; 153 ctx->key_size = err;
154 return 0;
149} 155}
150 156
151static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm) 157static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
@@ -164,19 +170,10 @@ static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
164static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len, 170static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
165 struct scatterlist *next) 171 struct scatterlist *next)
166{ 172{
167 int nsegs = next ? 1 : 0; 173 int nsegs = next ? 2 : 1;
168 174
169 if (offset_in_page(buf) + len <= PAGE_SIZE) { 175 sg_init_table(sg, nsegs);
170 nsegs += 1; 176 sg_set_buf(sg, buf, len);
171 sg_init_table(sg, nsegs);
172 sg_set_buf(sg, buf, len);
173 } else {
174 nsegs += 2;
175 sg_init_table(sg, nsegs);
176 sg_set_buf(sg + 0, buf, PAGE_SIZE - offset_in_page(buf));
177 sg_set_buf(sg + 1, buf + PAGE_SIZE - offset_in_page(buf),
178 offset_in_page(buf) + len - PAGE_SIZE);
179 }
180 177
181 if (next) 178 if (next)
182 sg_chain(sg, nsegs, next); 179 sg_chain(sg, nsegs, next);
@@ -187,37 +184,36 @@ static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
187 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 184 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
188 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); 185 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
189 struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); 186 struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
190 size_t pad_len = ctx->key_size - req_ctx->child_req.dst_len; 187 unsigned int pad_len;
191 size_t chunk_len, pad_left; 188 unsigned int len;
192 struct sg_mapping_iter miter; 189 u8 *out_buf;
193 190
194 if (!err) { 191 if (err)
195 if (pad_len) { 192 goto out;
196 sg_miter_start(&miter, req->dst, 193
197 sg_nents_for_len(req->dst, pad_len), 194 len = req_ctx->child_req.dst_len;
198 SG_MITER_ATOMIC | SG_MITER_TO_SG); 195 pad_len = ctx->key_size - len;
199 196
200 pad_left = pad_len; 197 /* Four billion to one */
201 while (pad_left) { 198 if (likely(!pad_len))
202 sg_miter_next(&miter); 199 goto out;
203 200
204 chunk_len = min(miter.length, pad_left); 201 out_buf = kzalloc(ctx->key_size, GFP_ATOMIC);
205 memset(miter.addr, 0, chunk_len); 202 err = -ENOMEM;
206 pad_left -= chunk_len; 203 if (!out_buf)
207 } 204 goto out;
208 205
209 sg_miter_stop(&miter); 206 sg_copy_to_buffer(req->dst, sg_nents_for_len(req->dst, len),
210 } 207 out_buf + pad_len, len);
211 208 sg_copy_from_buffer(req->dst,
212 sg_pcopy_from_buffer(req->dst, 209 sg_nents_for_len(req->dst, ctx->key_size),
213 sg_nents_for_len(req->dst, ctx->key_size), 210 out_buf, ctx->key_size);
214 req_ctx->out_buf, req_ctx->child_req.dst_len, 211 kzfree(out_buf);
215 pad_len); 212
216 } 213out:
217 req->dst_len = ctx->key_size; 214 req->dst_len = ctx->key_size;
218 215
219 kfree(req_ctx->in_buf); 216 kfree(req_ctx->in_buf);
220 kzfree(req_ctx->out_buf);
221 217
222 return err; 218 return err;
223} 219}
@@ -257,21 +253,8 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
257 return -EOVERFLOW; 253 return -EOVERFLOW;
258 } 254 }
259 255
260 if (ctx->key_size > PAGE_SIZE)
261 return -ENOTSUPP;
262
263 /*
264 * Replace both input and output to add the padding in the input and
265 * the potential missing leading zeros in the output.
266 */
267 req_ctx->child_req.src = req_ctx->in_sg;
268 req_ctx->child_req.src_len = ctx->key_size - 1;
269 req_ctx->child_req.dst = req_ctx->out_sg;
270 req_ctx->child_req.dst_len = ctx->key_size;
271
272 req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len, 256 req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
273 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 257 GFP_KERNEL);
274 GFP_KERNEL : GFP_ATOMIC);
275 if (!req_ctx->in_buf) 258 if (!req_ctx->in_buf)
276 return -ENOMEM; 259 return -ENOMEM;
277 260
@@ -284,9 +267,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
284 pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, 267 pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
285 ctx->key_size - 1 - req->src_len, req->src); 268 ctx->key_size - 1 - req->src_len, req->src);
286 269
287 req_ctx->out_buf = kmalloc(ctx->key_size, 270 req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
288 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
289 GFP_KERNEL : GFP_ATOMIC);
290 if (!req_ctx->out_buf) { 271 if (!req_ctx->out_buf) {
291 kfree(req_ctx->in_buf); 272 kfree(req_ctx->in_buf);
292 return -ENOMEM; 273 return -ENOMEM;
@@ -299,6 +280,10 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
299 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, 280 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
300 pkcs1pad_encrypt_sign_complete_cb, req); 281 pkcs1pad_encrypt_sign_complete_cb, req);
301 282
283 /* Reuse output buffer */
284 akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
285 req->dst, ctx->key_size - 1, req->dst_len);
286
302 err = crypto_akcipher_encrypt(&req_ctx->child_req); 287 err = crypto_akcipher_encrypt(&req_ctx->child_req);
303 if (err != -EINPROGRESS && 288 if (err != -EINPROGRESS &&
304 (err != -EBUSY || 289 (err != -EBUSY ||
@@ -380,18 +365,7 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
380 if (!ctx->key_size || req->src_len != ctx->key_size) 365 if (!ctx->key_size || req->src_len != ctx->key_size)
381 return -EINVAL; 366 return -EINVAL;
382 367
383 if (ctx->key_size > PAGE_SIZE) 368 req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
384 return -ENOTSUPP;
385
386 /* Reuse input buffer, output to a new buffer */
387 req_ctx->child_req.src = req->src;
388 req_ctx->child_req.src_len = req->src_len;
389 req_ctx->child_req.dst = req_ctx->out_sg;
390 req_ctx->child_req.dst_len = ctx->key_size ;
391
392 req_ctx->out_buf = kmalloc(ctx->key_size,
393 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
394 GFP_KERNEL : GFP_ATOMIC);
395 if (!req_ctx->out_buf) 369 if (!req_ctx->out_buf)
396 return -ENOMEM; 370 return -ENOMEM;
397 371
@@ -402,6 +376,11 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
402 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, 376 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
403 pkcs1pad_decrypt_complete_cb, req); 377 pkcs1pad_decrypt_complete_cb, req);
404 378
379 /* Reuse input buffer, output to a new buffer */
380 akcipher_request_set_crypt(&req_ctx->child_req, req->src,
381 req_ctx->out_sg, req->src_len,
382 ctx->key_size);
383
405 err = crypto_akcipher_decrypt(&req_ctx->child_req); 384 err = crypto_akcipher_decrypt(&req_ctx->child_req);
406 if (err != -EINPROGRESS && 385 if (err != -EINPROGRESS &&
407 (err != -EBUSY || 386 (err != -EBUSY ||
@@ -416,20 +395,16 @@ static int pkcs1pad_sign(struct akcipher_request *req)
416 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 395 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
417 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); 396 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
418 struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); 397 struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
419 const struct rsa_asn1_template *digest_info = NULL; 398 struct akcipher_instance *inst = akcipher_alg_instance(tfm);
399 struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
400 const struct rsa_asn1_template *digest_info = ictx->digest_info;
420 int err; 401 int err;
421 unsigned int ps_end, digest_size = 0; 402 unsigned int ps_end, digest_size = 0;
422 403
423 if (!ctx->key_size) 404 if (!ctx->key_size)
424 return -EINVAL; 405 return -EINVAL;
425 406
426 if (ctx->hash_name) { 407 digest_size = digest_info->size;
427 digest_info = rsa_lookup_asn1(ctx->hash_name);
428 if (!digest_info)
429 return -EINVAL;
430
431 digest_size = digest_info->size;
432 }
433 408
434 if (req->src_len + digest_size > ctx->key_size - 11) 409 if (req->src_len + digest_size > ctx->key_size - 11)
435 return -EOVERFLOW; 410 return -EOVERFLOW;
@@ -439,21 +414,8 @@ static int pkcs1pad_sign(struct akcipher_request *req)
439 return -EOVERFLOW; 414 return -EOVERFLOW;
440 } 415 }
441 416
442 if (ctx->key_size > PAGE_SIZE)
443 return -ENOTSUPP;
444
445 /*
446 * Replace both input and output to add the padding in the input and
447 * the potential missing leading zeros in the output.
448 */
449 req_ctx->child_req.src = req_ctx->in_sg;
450 req_ctx->child_req.src_len = ctx->key_size - 1;
451 req_ctx->child_req.dst = req_ctx->out_sg;
452 req_ctx->child_req.dst_len = ctx->key_size;
453
454 req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len, 417 req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
455 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 418 GFP_KERNEL);
456 GFP_KERNEL : GFP_ATOMIC);
457 if (!req_ctx->in_buf) 419 if (!req_ctx->in_buf)
458 return -ENOMEM; 420 return -ENOMEM;
459 421
@@ -462,29 +424,20 @@ static int pkcs1pad_sign(struct akcipher_request *req)
462 memset(req_ctx->in_buf + 1, 0xff, ps_end - 1); 424 memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
463 req_ctx->in_buf[ps_end] = 0x00; 425 req_ctx->in_buf[ps_end] = 0x00;
464 426
465 if (digest_info) { 427 memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
466 memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data, 428 digest_info->size);
467 digest_info->size);
468 }
469 429
470 pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, 430 pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
471 ctx->key_size - 1 - req->src_len, req->src); 431 ctx->key_size - 1 - req->src_len, req->src);
472 432
473 req_ctx->out_buf = kmalloc(ctx->key_size,
474 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
475 GFP_KERNEL : GFP_ATOMIC);
476 if (!req_ctx->out_buf) {
477 kfree(req_ctx->in_buf);
478 return -ENOMEM;
479 }
480
481 pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
482 ctx->key_size, NULL);
483
484 akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); 433 akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
485 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, 434 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
486 pkcs1pad_encrypt_sign_complete_cb, req); 435 pkcs1pad_encrypt_sign_complete_cb, req);
487 436
437 /* Reuse output buffer */
438 akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
439 req->dst, ctx->key_size - 1, req->dst_len);
440
488 err = crypto_akcipher_sign(&req_ctx->child_req); 441 err = crypto_akcipher_sign(&req_ctx->child_req);
489 if (err != -EINPROGRESS && 442 if (err != -EINPROGRESS &&
490 (err != -EBUSY || 443 (err != -EBUSY ||
@@ -499,56 +452,58 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
499 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 452 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
500 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); 453 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
501 struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); 454 struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
502 const struct rsa_asn1_template *digest_info; 455 struct akcipher_instance *inst = akcipher_alg_instance(tfm);
456 struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
457 const struct rsa_asn1_template *digest_info = ictx->digest_info;
458 unsigned int dst_len;
503 unsigned int pos; 459 unsigned int pos;
504 460 u8 *out_buf;
505 if (err == -EOVERFLOW)
506 /* Decrypted value had no leading 0 byte */
507 err = -EINVAL;
508 461
509 if (err) 462 if (err)
510 goto done; 463 goto done;
511 464
512 if (req_ctx->child_req.dst_len != ctx->key_size - 1) { 465 err = -EINVAL;
513 err = -EINVAL; 466 dst_len = req_ctx->child_req.dst_len;
467 if (dst_len < ctx->key_size - 1)
514 goto done; 468 goto done;
469
470 out_buf = req_ctx->out_buf;
471 if (dst_len == ctx->key_size) {
472 if (out_buf[0] != 0x00)
473 /* Decrypted value had no leading 0 byte */
474 goto done;
475
476 dst_len--;
477 out_buf++;
515 } 478 }
516 479
517 err = -EBADMSG; 480 err = -EBADMSG;
518 if (req_ctx->out_buf[0] != 0x01) 481 if (out_buf[0] != 0x01)
519 goto done; 482 goto done;
520 483
521 for (pos = 1; pos < req_ctx->child_req.dst_len; pos++) 484 for (pos = 1; pos < dst_len; pos++)
522 if (req_ctx->out_buf[pos] != 0xff) 485 if (out_buf[pos] != 0xff)
523 break; 486 break;
524 487
525 if (pos < 9 || pos == req_ctx->child_req.dst_len || 488 if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00)
526 req_ctx->out_buf[pos] != 0x00)
527 goto done; 489 goto done;
528 pos++; 490 pos++;
529 491
530 if (ctx->hash_name) { 492 if (memcmp(out_buf + pos, digest_info->data, digest_info->size))
531 digest_info = rsa_lookup_asn1(ctx->hash_name); 493 goto done;
532 if (!digest_info)
533 goto done;
534
535 if (memcmp(req_ctx->out_buf + pos, digest_info->data,
536 digest_info->size))
537 goto done;
538 494
539 pos += digest_info->size; 495 pos += digest_info->size;
540 }
541 496
542 err = 0; 497 err = 0;
543 498
544 if (req->dst_len < req_ctx->child_req.dst_len - pos) 499 if (req->dst_len < dst_len - pos)
545 err = -EOVERFLOW; 500 err = -EOVERFLOW;
546 req->dst_len = req_ctx->child_req.dst_len - pos; 501 req->dst_len = dst_len - pos;
547 502
548 if (!err) 503 if (!err)
549 sg_copy_from_buffer(req->dst, 504 sg_copy_from_buffer(req->dst,
550 sg_nents_for_len(req->dst, req->dst_len), 505 sg_nents_for_len(req->dst, req->dst_len),
551 req_ctx->out_buf + pos, req->dst_len); 506 out_buf + pos, req->dst_len);
552done: 507done:
553 kzfree(req_ctx->out_buf); 508 kzfree(req_ctx->out_buf);
554 509
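
For orientation, the buffer that pkcs1pad_sign() builds and pkcs1pad_verify_complete() parses above is the EMSA-PKCS1-v1_5 encoding: 0x01, at least eight 0xff padding bytes, 0x00, the ASN.1 DigestInfo blob for the chosen hash, then the digest itself; the leading 0x00 of the full block is implicit here because the child request works on key_size - 1 bytes. A minimal standalone sketch of that layout follows, with an invented 2-byte placeholder standing in for the real digest_info->data.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Lay out 0x01 || 0xff..0xff || 0x00 || digest_info || hash into buf, which
 * is key_size - 1 bytes long, mirroring pkcs1pad_sign() above.
 * Returns 0 on success, -1 if the message does not fit. */
static int pkcs1_v15_pad(uint8_t *buf, size_t buf_len,
			 const uint8_t *digest_info, size_t di_len,
			 const uint8_t *hash, size_t hash_len)
{
	size_t ps_end;

	/* Same bound as the kernel check: leave room for at least 8 0xff
	 * bytes plus the framing bytes. */
	if (di_len + hash_len + 11 > buf_len + 1)
		return -1;

	ps_end = buf_len - di_len - hash_len - 1;

	buf[0] = 0x01;
	memset(buf + 1, 0xff, ps_end - 1);
	buf[ps_end] = 0x00;
	memcpy(buf + ps_end + 1, digest_info, di_len);
	memcpy(buf + ps_end + 1 + di_len, hash, hash_len);
	return 0;
}

int main(void)
{
	/* Placeholder values only; a real DigestInfo is the DER prefix for
	 * the hash, such as the sha256 template in rsa-pkcs1pad.c. */
	uint8_t digest_info[2] = { 0x30, 0x31 };
	uint8_t hash[32] = { 0 };
	uint8_t buf[255]; /* key_size - 1 for a 2048-bit key */

	if (pkcs1_v15_pad(buf, sizeof(buf), digest_info, sizeof(digest_info),
			  hash, sizeof(hash)))
		return 1;

	printf("%02x %02x ... %02x\n", buf[0], buf[1], buf[sizeof(buf) - 1]);
	return 0;
}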
@@ -588,18 +543,7 @@ static int pkcs1pad_verify(struct akcipher_request *req)
588 if (!ctx->key_size || req->src_len < ctx->key_size) 543 if (!ctx->key_size || req->src_len < ctx->key_size)
589 return -EINVAL; 544 return -EINVAL;
590 545
591 if (ctx->key_size > PAGE_SIZE) 546 req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
592 return -ENOTSUPP;
593
594 /* Reuse input buffer, output to a new buffer */
595 req_ctx->child_req.src = req->src;
596 req_ctx->child_req.src_len = req->src_len;
597 req_ctx->child_req.dst = req_ctx->out_sg;
598 req_ctx->child_req.dst_len = ctx->key_size;
599
600 req_ctx->out_buf = kmalloc(ctx->key_size,
601 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
602 GFP_KERNEL : GFP_ATOMIC);
603 if (!req_ctx->out_buf) 547 if (!req_ctx->out_buf)
604 return -ENOMEM; 548 return -ENOMEM;
605 549
@@ -610,6 +554,11 @@ static int pkcs1pad_verify(struct akcipher_request *req)
610 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, 554 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
611 pkcs1pad_verify_complete_cb, req); 555 pkcs1pad_verify_complete_cb, req);
612 556
557 /* Reuse input buffer, output to a new buffer */
558 akcipher_request_set_crypt(&req_ctx->child_req, req->src,
559 req_ctx->out_sg, req->src_len,
560 ctx->key_size);
561
613 err = crypto_akcipher_verify(&req_ctx->child_req); 562 err = crypto_akcipher_verify(&req_ctx->child_req);
614 if (err != -EINPROGRESS && 563 if (err != -EINPROGRESS &&
615 (err != -EBUSY || 564 (err != -EBUSY ||
@@ -626,12 +575,11 @@ static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
626 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); 575 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
627 struct crypto_akcipher *child_tfm; 576 struct crypto_akcipher *child_tfm;
628 577
629 child_tfm = crypto_spawn_akcipher(akcipher_instance_ctx(inst)); 578 child_tfm = crypto_spawn_akcipher(&ictx->spawn);
630 if (IS_ERR(child_tfm)) 579 if (IS_ERR(child_tfm))
631 return PTR_ERR(child_tfm); 580 return PTR_ERR(child_tfm);
632 581
633 ctx->child = child_tfm; 582 ctx->child = child_tfm;
634 ctx->hash_name = ictx->hash_name;
635 return 0; 583 return 0;
636} 584}
637 585
@@ -648,12 +596,12 @@ static void pkcs1pad_free(struct akcipher_instance *inst)
648 struct crypto_akcipher_spawn *spawn = &ctx->spawn; 596 struct crypto_akcipher_spawn *spawn = &ctx->spawn;
649 597
650 crypto_drop_akcipher(spawn); 598 crypto_drop_akcipher(spawn);
651 kfree(ctx->hash_name);
652 kfree(inst); 599 kfree(inst);
653} 600}
654 601
655static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) 602static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
656{ 603{
604 const struct rsa_asn1_template *digest_info;
657 struct crypto_attr_type *algt; 605 struct crypto_attr_type *algt;
658 struct akcipher_instance *inst; 606 struct akcipher_instance *inst;
659 struct pkcs1pad_inst_ctx *ctx; 607 struct pkcs1pad_inst_ctx *ctx;
@@ -676,7 +624,11 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
676 624
677 hash_name = crypto_attr_alg_name(tb[2]); 625 hash_name = crypto_attr_alg_name(tb[2]);
678 if (IS_ERR(hash_name)) 626 if (IS_ERR(hash_name))
679 hash_name = NULL; 627 return PTR_ERR(hash_name);
628
629 digest_info = rsa_lookup_asn1(hash_name);
630 if (!digest_info)
631 return -EINVAL;
680 632
681 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); 633 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
682 if (!inst) 634 if (!inst)
@@ -684,7 +636,7 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
684 636
685 ctx = akcipher_instance_ctx(inst); 637 ctx = akcipher_instance_ctx(inst);
686 spawn = &ctx->spawn; 638 spawn = &ctx->spawn;
687 ctx->hash_name = hash_name ? kstrdup(hash_name, GFP_KERNEL) : NULL; 639 ctx->digest_info = digest_info;
688 640
689 crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst)); 641 crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst));
690 err = crypto_grab_akcipher(spawn, rsa_alg_name, 0, 642 err = crypto_grab_akcipher(spawn, rsa_alg_name, 0,
@@ -696,27 +648,14 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
696 648
697 err = -ENAMETOOLONG; 649 err = -ENAMETOOLONG;
698 650
699 if (!hash_name) { 651 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
700 if (snprintf(inst->alg.base.cra_name, 652 "pkcs1pad(%s,%s)", rsa_alg->base.cra_name, hash_name) >=
701 CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", 653 CRYPTO_MAX_ALG_NAME ||
702 rsa_alg->base.cra_name) >= 654 snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
703 CRYPTO_MAX_ALG_NAME || 655 "pkcs1pad(%s,%s)",
704 snprintf(inst->alg.base.cra_driver_name, 656 rsa_alg->base.cra_driver_name, hash_name) >=
705 CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", 657 CRYPTO_MAX_ALG_NAME)
706 rsa_alg->base.cra_driver_name) >=
707 CRYPTO_MAX_ALG_NAME)
708 goto out_drop_alg; 658 goto out_drop_alg;
709 } else {
710 if (snprintf(inst->alg.base.cra_name,
711 CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
712 rsa_alg->base.cra_name, hash_name) >=
713 CRYPTO_MAX_ALG_NAME ||
714 snprintf(inst->alg.base.cra_driver_name,
715 CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
716 rsa_alg->base.cra_driver_name, hash_name) >=
717 CRYPTO_MAX_ALG_NAME)
718 goto out_free_hash;
719 }
720 659
721 inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC; 660 inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
722 inst->alg.base.cra_priority = rsa_alg->base.cra_priority; 661 inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
@@ -738,12 +677,10 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
738 677
739 err = akcipher_register_instance(tmpl, inst); 678 err = akcipher_register_instance(tmpl, inst);
740 if (err) 679 if (err)
741 goto out_free_hash; 680 goto out_drop_alg;
742 681
743 return 0; 682 return 0;
744 683
745out_free_hash:
746 kfree(ctx->hash_name);
747out_drop_alg: 684out_drop_alg:
748 crypto_drop_akcipher(spawn); 685 crypto_drop_akcipher(spawn);
749out_free_inst: 686out_free_inst:
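
With the hash argument now mandatory (the single-argument "pkcs1pad(rsa)" form is gone from pkcs1pad_create()), every instance lives under a two-argument template name. A hedged kernel-side usage sketch; the choice of sha256 and the trivial error handling are illustrative only:

#include <crypto/akcipher.h>
#include <linux/err.h>

static int pkcs1pad_alloc_example(void)
{
	/* the digest argument is now required, so the two-argument
	 * form is the only valid template name */
	struct crypto_akcipher *tfm =
		crypto_alloc_akcipher("pkcs1pad(rsa,sha256)", 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* set_pub_key / verify as before, then release the transform */
	crypto_free_akcipher(tfm);
	return 0;
}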
diff --git a/crypto/rsa.c b/crypto/rsa.c
index 77d737f52147..4c280b6a3ea9 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -10,16 +10,23 @@
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/mpi.h>
13#include <crypto/internal/rsa.h> 14#include <crypto/internal/rsa.h>
14#include <crypto/internal/akcipher.h> 15#include <crypto/internal/akcipher.h>
15#include <crypto/akcipher.h> 16#include <crypto/akcipher.h>
16#include <crypto/algapi.h> 17#include <crypto/algapi.h>
17 18
19struct rsa_mpi_key {
20 MPI n;
21 MPI e;
22 MPI d;
23};
24
18/* 25/*
19 * RSAEP function [RFC3447 sec 5.1.1] 26 * RSAEP function [RFC3447 sec 5.1.1]
20 * c = m^e mod n; 27 * c = m^e mod n;
21 */ 28 */
22static int _rsa_enc(const struct rsa_key *key, MPI c, MPI m) 29static int _rsa_enc(const struct rsa_mpi_key *key, MPI c, MPI m)
23{ 30{
24 /* (1) Validate 0 <= m < n */ 31 /* (1) Validate 0 <= m < n */
25 if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0) 32 if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0)
@@ -33,7 +40,7 @@ static int _rsa_enc(const struct rsa_key *key, MPI c, MPI m)
33 * RSADP function [RFC3447 sec 5.1.2] 40 * RSADP function [RFC3447 sec 5.1.2]
34 * m = c^d mod n; 41 * m = c^d mod n;
35 */ 42 */
36static int _rsa_dec(const struct rsa_key *key, MPI m, MPI c) 43static int _rsa_dec(const struct rsa_mpi_key *key, MPI m, MPI c)
37{ 44{
38 /* (1) Validate 0 <= c < n */ 45 /* (1) Validate 0 <= c < n */
39 if (mpi_cmp_ui(c, 0) < 0 || mpi_cmp(c, key->n) >= 0) 46 if (mpi_cmp_ui(c, 0) < 0 || mpi_cmp(c, key->n) >= 0)
@@ -47,7 +54,7 @@ static int _rsa_dec(const struct rsa_key *key, MPI m, MPI c)
47 * RSASP1 function [RFC3447 sec 5.2.1] 54 * RSASP1 function [RFC3447 sec 5.2.1]
48 * s = m^d mod n 55 * s = m^d mod n
49 */ 56 */
50static int _rsa_sign(const struct rsa_key *key, MPI s, MPI m) 57static int _rsa_sign(const struct rsa_mpi_key *key, MPI s, MPI m)
51{ 58{
52 /* (1) Validate 0 <= m < n */ 59 /* (1) Validate 0 <= m < n */
53 if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0) 60 if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0)
@@ -61,7 +68,7 @@ static int _rsa_sign(const struct rsa_key *key, MPI s, MPI m)
61 * RSAVP1 function [RFC3447 sec 5.2.2] 68 * RSAVP1 function [RFC3447 sec 5.2.2]
62 * m = s^e mod n; 69 * m = s^e mod n;
63 */ 70 */
64static int _rsa_verify(const struct rsa_key *key, MPI m, MPI s) 71static int _rsa_verify(const struct rsa_mpi_key *key, MPI m, MPI s)
65{ 72{
66 /* (1) Validate 0 <= s < n */ 73 /* (1) Validate 0 <= s < n */
67 if (mpi_cmp_ui(s, 0) < 0 || mpi_cmp(s, key->n) >= 0) 74 if (mpi_cmp_ui(s, 0) < 0 || mpi_cmp(s, key->n) >= 0)
@@ -71,7 +78,7 @@ static int _rsa_verify(const struct rsa_key *key, MPI m, MPI s)
71 return mpi_powm(m, s, key->e, key->n); 78 return mpi_powm(m, s, key->e, key->n);
72} 79}
73 80
74static inline struct rsa_key *rsa_get_key(struct crypto_akcipher *tfm) 81static inline struct rsa_mpi_key *rsa_get_key(struct crypto_akcipher *tfm)
75{ 82{
76 return akcipher_tfm_ctx(tfm); 83 return akcipher_tfm_ctx(tfm);
77} 84}
@@ -79,7 +86,7 @@ static inline struct rsa_key *rsa_get_key(struct crypto_akcipher *tfm)
79static int rsa_enc(struct akcipher_request *req) 86static int rsa_enc(struct akcipher_request *req)
80{ 87{
81 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 88 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
82 const struct rsa_key *pkey = rsa_get_key(tfm); 89 const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
83 MPI m, c = mpi_alloc(0); 90 MPI m, c = mpi_alloc(0);
84 int ret = 0; 91 int ret = 0;
85 int sign; 92 int sign;
@@ -101,7 +108,7 @@ static int rsa_enc(struct akcipher_request *req)
101 if (ret) 108 if (ret)
102 goto err_free_m; 109 goto err_free_m;
103 110
104 ret = mpi_write_to_sgl(c, req->dst, &req->dst_len, &sign); 111 ret = mpi_write_to_sgl(c, req->dst, req->dst_len, &sign);
105 if (ret) 112 if (ret)
106 goto err_free_m; 113 goto err_free_m;
107 114
@@ -118,7 +125,7 @@ err_free_c:
118static int rsa_dec(struct akcipher_request *req) 125static int rsa_dec(struct akcipher_request *req)
119{ 126{
120 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 127 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
121 const struct rsa_key *pkey = rsa_get_key(tfm); 128 const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
122 MPI c, m = mpi_alloc(0); 129 MPI c, m = mpi_alloc(0);
123 int ret = 0; 130 int ret = 0;
124 int sign; 131 int sign;
@@ -140,7 +147,7 @@ static int rsa_dec(struct akcipher_request *req)
140 if (ret) 147 if (ret)
141 goto err_free_c; 148 goto err_free_c;
142 149
143 ret = mpi_write_to_sgl(m, req->dst, &req->dst_len, &sign); 150 ret = mpi_write_to_sgl(m, req->dst, req->dst_len, &sign);
144 if (ret) 151 if (ret)
145 goto err_free_c; 152 goto err_free_c;
146 153
@@ -156,7 +163,7 @@ err_free_m:
156static int rsa_sign(struct akcipher_request *req) 163static int rsa_sign(struct akcipher_request *req)
157{ 164{
158 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 165 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
159 const struct rsa_key *pkey = rsa_get_key(tfm); 166 const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
160 MPI m, s = mpi_alloc(0); 167 MPI m, s = mpi_alloc(0);
161 int ret = 0; 168 int ret = 0;
162 int sign; 169 int sign;
@@ -178,7 +185,7 @@ static int rsa_sign(struct akcipher_request *req)
178 if (ret) 185 if (ret)
179 goto err_free_m; 186 goto err_free_m;
180 187
181 ret = mpi_write_to_sgl(s, req->dst, &req->dst_len, &sign); 188 ret = mpi_write_to_sgl(s, req->dst, req->dst_len, &sign);
182 if (ret) 189 if (ret)
183 goto err_free_m; 190 goto err_free_m;
184 191
@@ -195,7 +202,7 @@ err_free_s:
195static int rsa_verify(struct akcipher_request *req) 202static int rsa_verify(struct akcipher_request *req)
196{ 203{
197 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 204 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
198 const struct rsa_key *pkey = rsa_get_key(tfm); 205 const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
199 MPI s, m = mpi_alloc(0); 206 MPI s, m = mpi_alloc(0);
200 int ret = 0; 207 int ret = 0;
201 int sign; 208 int sign;
@@ -219,7 +226,7 @@ static int rsa_verify(struct akcipher_request *req)
219 if (ret) 226 if (ret)
220 goto err_free_s; 227 goto err_free_s;
221 228
222 ret = mpi_write_to_sgl(m, req->dst, &req->dst_len, &sign); 229 ret = mpi_write_to_sgl(m, req->dst, req->dst_len, &sign);
223 if (ret) 230 if (ret)
224 goto err_free_s; 231 goto err_free_s;
225 232
@@ -233,6 +240,16 @@ err_free_m:
233 return ret; 240 return ret;
234} 241}
235 242
243static void rsa_free_mpi_key(struct rsa_mpi_key *key)
244{
245 mpi_free(key->d);
246 mpi_free(key->e);
247 mpi_free(key->n);
248 key->d = NULL;
249 key->e = NULL;
250 key->n = NULL;
251}
252
236static int rsa_check_key_length(unsigned int len) 253static int rsa_check_key_length(unsigned int len)
237{ 254{
238 switch (len) { 255 switch (len) {
@@ -251,49 +268,87 @@ static int rsa_check_key_length(unsigned int len)
251static int rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, 268static int rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
252 unsigned int keylen) 269 unsigned int keylen)
253{ 270{
254 struct rsa_key *pkey = akcipher_tfm_ctx(tfm); 271 struct rsa_mpi_key *mpi_key = akcipher_tfm_ctx(tfm);
272 struct rsa_key raw_key = {0};
255 int ret; 273 int ret;
256 274
257 ret = rsa_parse_pub_key(pkey, key, keylen); 275 /* Free the old MPI key if any */
276 rsa_free_mpi_key(mpi_key);
277
278 ret = rsa_parse_pub_key(&raw_key, key, keylen);
258 if (ret) 279 if (ret)
259 return ret; 280 return ret;
260 281
261 if (rsa_check_key_length(mpi_get_size(pkey->n) << 3)) { 282 mpi_key->e = mpi_read_raw_data(raw_key.e, raw_key.e_sz);
262 rsa_free_key(pkey); 283 if (!mpi_key->e)
263 ret = -EINVAL; 284 goto err;
285
286 mpi_key->n = mpi_read_raw_data(raw_key.n, raw_key.n_sz);
287 if (!mpi_key->n)
288 goto err;
289
290 if (rsa_check_key_length(mpi_get_size(mpi_key->n) << 3)) {
291 rsa_free_mpi_key(mpi_key);
292 return -EINVAL;
264 } 293 }
265 return ret; 294
295 return 0;
296
297err:
298 rsa_free_mpi_key(mpi_key);
299 return -ENOMEM;
266} 300}
267 301
268static int rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, 302static int rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
269 unsigned int keylen) 303 unsigned int keylen)
270{ 304{
271 struct rsa_key *pkey = akcipher_tfm_ctx(tfm); 305 struct rsa_mpi_key *mpi_key = akcipher_tfm_ctx(tfm);
306 struct rsa_key raw_key = {0};
272 int ret; 307 int ret;
273 308
274 ret = rsa_parse_priv_key(pkey, key, keylen); 309 /* Free the old MPI key if any */
310 rsa_free_mpi_key(mpi_key);
311
312 ret = rsa_parse_priv_key(&raw_key, key, keylen);
275 if (ret) 313 if (ret)
276 return ret; 314 return ret;
277 315
278 if (rsa_check_key_length(mpi_get_size(pkey->n) << 3)) { 316 mpi_key->d = mpi_read_raw_data(raw_key.d, raw_key.d_sz);
279 rsa_free_key(pkey); 317 if (!mpi_key->d)
280 ret = -EINVAL; 318 goto err;
319
320 mpi_key->e = mpi_read_raw_data(raw_key.e, raw_key.e_sz);
321 if (!mpi_key->e)
322 goto err;
323
324 mpi_key->n = mpi_read_raw_data(raw_key.n, raw_key.n_sz);
325 if (!mpi_key->n)
326 goto err;
327
328 if (rsa_check_key_length(mpi_get_size(mpi_key->n) << 3)) {
329 rsa_free_mpi_key(mpi_key);
330 return -EINVAL;
281 } 331 }
282 return ret; 332
333 return 0;
334
335err:
336 rsa_free_mpi_key(mpi_key);
337 return -ENOMEM;
283} 338}
284 339
285static int rsa_max_size(struct crypto_akcipher *tfm) 340static int rsa_max_size(struct crypto_akcipher *tfm)
286{ 341{
287 struct rsa_key *pkey = akcipher_tfm_ctx(tfm); 342 struct rsa_mpi_key *pkey = akcipher_tfm_ctx(tfm);
288 343
289 return pkey->n ? mpi_get_size(pkey->n) : -EINVAL; 344 return pkey->n ? mpi_get_size(pkey->n) : -EINVAL;
290} 345}
291 346
292static void rsa_exit_tfm(struct crypto_akcipher *tfm) 347static void rsa_exit_tfm(struct crypto_akcipher *tfm)
293{ 348{
294 struct rsa_key *pkey = akcipher_tfm_ctx(tfm); 349 struct rsa_mpi_key *pkey = akcipher_tfm_ctx(tfm);
295 350
296 rsa_free_key(pkey); 351 rsa_free_mpi_key(pkey);
297} 352}
298 353
299static struct akcipher_alg rsa = { 354static struct akcipher_alg rsa = {
@@ -310,7 +365,7 @@ static struct akcipher_alg rsa = {
310 .cra_driver_name = "rsa-generic", 365 .cra_driver_name = "rsa-generic",
311 .cra_priority = 100, 366 .cra_priority = 100,
312 .cra_module = THIS_MODULE, 367 .cra_module = THIS_MODULE,
313 .cra_ctxsize = sizeof(struct rsa_key), 368 .cra_ctxsize = sizeof(struct rsa_mpi_key),
314 }, 369 },
315}; 370};
316 371
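
rsa.c now builds its own MPI copies (struct rsa_mpi_key) from the raw key bytes handed back by the parser, while the primitives themselves remain the textbook modular exponentiations referenced from RFC 3447 in the comments above. A toy user-space illustration of RSAEP/RSADP with a 64-bit modulus, using the standard small example key (real keys are 2048+ bits and go through the MPI library); requires GCC/Clang for __int128:

#include <stdint.h>
#include <stdio.h>

static uint64_t powmod(uint64_t b, uint64_t e, uint64_t n)
{
	uint64_t r = 1;

	b %= n;
	while (e) {
		if (e & 1)
			r = (unsigned __int128)r * b % n;
		b = (unsigned __int128)b * b % n;
		e >>= 1;
	}
	return r;
}

int main(void)
{
	/* p = 61, q = 53, n = 3233, e = 17, d = 2753 (toy key only) */
	uint64_t n = 3233, e = 17, d = 2753, m = 65;
	uint64_t c = powmod(m, e, n);			/* RSAEP: c = m^e mod n */

	printf("c = %llu, m = %llu\n",
	       (unsigned long long)c,
	       (unsigned long long)powmod(c, d, n));	/* RSADP: m = c^d mod n */
	return 0;
}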
diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c
index d226f48d0907..4df6451e7543 100644
--- a/crypto/rsa_helper.c
+++ b/crypto/rsa_helper.c
@@ -22,20 +22,29 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
22 const void *value, size_t vlen) 22 const void *value, size_t vlen)
23{ 23{
24 struct rsa_key *key = context; 24 struct rsa_key *key = context;
25 const u8 *ptr = value;
26 size_t n_sz = vlen;
25 27
26 key->n = mpi_read_raw_data(value, vlen); 28 /* invalid key provided */
27 29 if (!value || !vlen)
28 if (!key->n)
29 return -ENOMEM;
30
31 /* In FIPS mode only allow key size 2K & 3K */
32 if (fips_enabled && (mpi_get_size(key->n) != 256 &&
33 mpi_get_size(key->n) != 384)) {
34 pr_err("RSA: key size not allowed in FIPS mode\n");
35 mpi_free(key->n);
36 key->n = NULL;
37 return -EINVAL; 30 return -EINVAL;
31
32 if (fips_enabled) {
33 while (!*ptr && n_sz) {
34 ptr++;
35 n_sz--;
36 }
37
38 /* In FIPS mode only allow key size 2K & 3K */
39 if (n_sz != 256 && n_sz != 384) {
40 pr_err("RSA: key size not allowed in FIPS mode\n");
41 return -EINVAL;
42 }
38 } 43 }
44
45 key->n = value;
46 key->n_sz = vlen;
47
39 return 0; 48 return 0;
40} 49}
41 50
@@ -44,10 +53,12 @@ int rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
44{ 53{
45 struct rsa_key *key = context; 54 struct rsa_key *key = context;
46 55
47 key->e = mpi_read_raw_data(value, vlen); 56 /* invalid key provided */
57 if (!value || !key->n_sz || !vlen || vlen > key->n_sz)
58 return -EINVAL;
48 59
49 if (!key->e) 60 key->e = value;
50 return -ENOMEM; 61 key->e_sz = vlen;
51 62
52 return 0; 63 return 0;
53} 64}
@@ -57,46 +68,95 @@ int rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
57{ 68{
58 struct rsa_key *key = context; 69 struct rsa_key *key = context;
59 70
60 key->d = mpi_read_raw_data(value, vlen); 71 /* invalid key provided */
72 if (!value || !key->n_sz || !vlen || vlen > key->n_sz)
73 return -EINVAL;
61 74
62 if (!key->d) 75 key->d = value;
63 return -ENOMEM; 76 key->d_sz = vlen;
64 77
65 /* In FIPS mode only allow key size 2K & 3K */ 78 return 0;
66 if (fips_enabled && (mpi_get_size(key->d) != 256 && 79}
67 mpi_get_size(key->d) != 384)) { 80
68 pr_err("RSA: key size not allowed in FIPS mode\n"); 81int rsa_get_p(void *context, size_t hdrlen, unsigned char tag,
69 mpi_free(key->d); 82 const void *value, size_t vlen)
70 key->d = NULL; 83{
84 struct rsa_key *key = context;
85
86 /* invalid key provided */
87 if (!value || !vlen || vlen > key->n_sz)
71 return -EINVAL; 88 return -EINVAL;
72 } 89
90 key->p = value;
91 key->p_sz = vlen;
92
73 return 0; 93 return 0;
74} 94}
75 95
76static void free_mpis(struct rsa_key *key) 96int rsa_get_q(void *context, size_t hdrlen, unsigned char tag,
97 const void *value, size_t vlen)
77{ 98{
78 mpi_free(key->n); 99 struct rsa_key *key = context;
79 mpi_free(key->e); 100
80 mpi_free(key->d); 101 /* invalid key provided */
81 key->n = NULL; 102 if (!value || !vlen || vlen > key->n_sz)
82 key->e = NULL; 103 return -EINVAL;
83 key->d = NULL; 104
105 key->q = value;
106 key->q_sz = vlen;
107
108 return 0;
84} 109}
85 110
86/** 111int rsa_get_dp(void *context, size_t hdrlen, unsigned char tag,
87 * rsa_free_key() - frees rsa key allocated by rsa_parse_key() 112 const void *value, size_t vlen)
88 * 113{
89 * @rsa_key: struct rsa_key key representation 114 struct rsa_key *key = context;
90 */ 115
91void rsa_free_key(struct rsa_key *key) 116 /* invalid key provided */
117 if (!value || !vlen || vlen > key->n_sz)
118 return -EINVAL;
119
120 key->dp = value;
121 key->dp_sz = vlen;
122
123 return 0;
124}
125
126int rsa_get_dq(void *context, size_t hdrlen, unsigned char tag,
127 const void *value, size_t vlen)
92{ 128{
93 free_mpis(key); 129 struct rsa_key *key = context;
130
131 /* invalid key provided */
132 if (!value || !vlen || vlen > key->n_sz)
133 return -EINVAL;
134
135 key->dq = value;
136 key->dq_sz = vlen;
137
138 return 0;
139}
140
141int rsa_get_qinv(void *context, size_t hdrlen, unsigned char tag,
142 const void *value, size_t vlen)
143{
144 struct rsa_key *key = context;
145
146 /* invalid key provided */
147 if (!value || !vlen || vlen > key->n_sz)
148 return -EINVAL;
149
150 key->qinv = value;
151 key->qinv_sz = vlen;
152
153 return 0;
94} 154}
95EXPORT_SYMBOL_GPL(rsa_free_key);
96 155
97/** 156/**
98 * rsa_parse_pub_key() - extracts an rsa public key from BER encoded buffer 157 * rsa_parse_pub_key() - decodes the BER encoded buffer and stores in the
99 * and stores it in the provided struct rsa_key 158 * provided struct rsa_key, pointers to the raw key as is,
159 * so that the caller can copy it or MPI parse it, etc.
100 * 160 *
101 * @rsa_key: struct rsa_key key representation 161 * @rsa_key: struct rsa_key key representation
102 * @key: key in BER format 162 * @key: key in BER format
@@ -107,23 +167,15 @@ EXPORT_SYMBOL_GPL(rsa_free_key);
107int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key, 167int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key,
108 unsigned int key_len) 168 unsigned int key_len)
109{ 169{
110 int ret; 170 return asn1_ber_decoder(&rsapubkey_decoder, rsa_key, key, key_len);
111
112 free_mpis(rsa_key);
113 ret = asn1_ber_decoder(&rsapubkey_decoder, rsa_key, key, key_len);
114 if (ret < 0)
115 goto error;
116
117 return 0;
118error:
119 free_mpis(rsa_key);
120 return ret;
121} 171}
122EXPORT_SYMBOL_GPL(rsa_parse_pub_key); 172EXPORT_SYMBOL_GPL(rsa_parse_pub_key);
123 173
124/** 174/**
125 * rsa_parse_pub_key() - extracts an rsa private key from BER encoded buffer 175 * rsa_parse_priv_key() - decodes the BER encoded buffer and stores in the
126 * and stores it in the provided struct rsa_key 176 * provided struct rsa_key, pointers to the raw key
177 * as is, so that the caller can copy it or MPI parse it,
178 * etc.
127 * 179 *
128 * @rsa_key: struct rsa_key key representation 180 * @rsa_key: struct rsa_key key representation
129 * @key: key in BER format 181 * @key: key in BER format
@@ -134,16 +186,6 @@ EXPORT_SYMBOL_GPL(rsa_parse_pub_key);
134int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key, 186int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key,
135 unsigned int key_len) 187 unsigned int key_len)
136{ 188{
137 int ret; 189 return asn1_ber_decoder(&rsaprivkey_decoder, rsa_key, key, key_len);
138
139 free_mpis(rsa_key);
140 ret = asn1_ber_decoder(&rsaprivkey_decoder, rsa_key, key, key_len);
141 if (ret < 0)
142 goto error;
143
144 return 0;
145error:
146 free_mpis(rsa_key);
147 return ret;
148} 190}
149EXPORT_SYMBOL_GPL(rsa_parse_priv_key); 191EXPORT_SYMBOL_GPL(rsa_parse_priv_key);
diff --git a/crypto/rsaprivkey.asn1 b/crypto/rsaprivkey.asn1
index 731aea5edb0c..4ce06758e8af 100644
--- a/crypto/rsaprivkey.asn1
+++ b/crypto/rsaprivkey.asn1
@@ -3,9 +3,9 @@ RsaPrivKey ::= SEQUENCE {
3 n INTEGER ({ rsa_get_n }), 3 n INTEGER ({ rsa_get_n }),
4 e INTEGER ({ rsa_get_e }), 4 e INTEGER ({ rsa_get_e }),
5 d INTEGER ({ rsa_get_d }), 5 d INTEGER ({ rsa_get_d }),
6 prime1 INTEGER, 6 prime1 INTEGER ({ rsa_get_p }),
7 prime2 INTEGER, 7 prime2 INTEGER ({ rsa_get_q }),
8 exponent1 INTEGER, 8 exponent1 INTEGER ({ rsa_get_dp }),
9 exponent2 INTEGER, 9 exponent2 INTEGER ({ rsa_get_dq }),
10 coefficient INTEGER 10 coefficient INTEGER ({ rsa_get_qinv })
11} 11}
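
rsaprivkey.asn1 now wires the CRT components (prime1/prime2, exponent1/exponent2, coefficient) to the new rsa_get_p/q/dp/dq/qinv callbacks, and rsa_helper.c stores every INTEGER as a raw pointer plus length instead of an allocated MPI. The sketch below is the shape this implies for struct rsa_key; the include/crypto/internal/rsa.h hunk is not part of this excerpt, so the exact field order and any additional members are assumptions:

/* Inferred from the callbacks above: each field points into the
 * caller's BER buffer and is only valid while that buffer is. */
struct rsa_key {
	const u8 *n;
	const u8 *e;
	const u8 *d;
	const u8 *p;
	const u8 *q;
	const u8 *dp;
	const u8 *dq;
	const u8 *qinv;
	size_t n_sz;
	size_t e_sz;
	size_t d_sz;
	size_t p_sz;
	size_t q_sz;
	size_t dp_sz;
	size_t dq_sz;
	size_t qinv_sz;
};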
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index ea5815c5e128..52ce17a3dd63 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -18,8 +18,6 @@
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/pagemap.h>
22#include <linux/highmem.h>
23#include <linux/scatterlist.h> 21#include <linux/scatterlist.h>
24 22
25static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out) 23static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
@@ -30,53 +28,6 @@ static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
30 memcpy(dst, src, nbytes); 28 memcpy(dst, src, nbytes);
31} 29}
32 30
33void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
34{
35 walk->sg = sg;
36
37 BUG_ON(!sg->length);
38
39 walk->offset = sg->offset;
40}
41EXPORT_SYMBOL_GPL(scatterwalk_start);
42
43void *scatterwalk_map(struct scatter_walk *walk)
44{
45 return kmap_atomic(scatterwalk_page(walk)) +
46 offset_in_page(walk->offset);
47}
48EXPORT_SYMBOL_GPL(scatterwalk_map);
49
50static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
51 unsigned int more)
52{
53 if (out) {
54 struct page *page;
55
56 page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
57 /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as
58 * PageSlab cannot be optimised away per se due to
59 * use of volatile pointer.
60 */
61 if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page))
62 flush_dcache_page(page);
63 }
64
65 if (more) {
66 walk->offset += PAGE_SIZE - 1;
67 walk->offset &= PAGE_MASK;
68 if (walk->offset >= walk->sg->offset + walk->sg->length)
69 scatterwalk_start(walk, sg_next(walk->sg));
70 }
71}
72
73void scatterwalk_done(struct scatter_walk *walk, int out, int more)
74{
75 if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
76 scatterwalk_pagedone(walk, out, more);
77}
78EXPORT_SYMBOL_GPL(scatterwalk_done);
79
80void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, 31void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
81 size_t nbytes, int out) 32 size_t nbytes, int out)
82{ 33{
@@ -87,9 +38,11 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
87 if (len_this_page > nbytes) 38 if (len_this_page > nbytes)
88 len_this_page = nbytes; 39 len_this_page = nbytes;
89 40
90 vaddr = scatterwalk_map(walk); 41 if (out != 2) {
91 memcpy_dir(buf, vaddr, len_this_page, out); 42 vaddr = scatterwalk_map(walk);
92 scatterwalk_unmap(vaddr); 43 memcpy_dir(buf, vaddr, len_this_page, out);
44 scatterwalk_unmap(vaddr);
45 }
93 46
94 scatterwalk_advance(walk, len_this_page); 47 scatterwalk_advance(walk, len_this_page);
95 48
@@ -99,7 +52,7 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
99 buf += len_this_page; 52 buf += len_this_page;
100 nbytes -= len_this_page; 53 nbytes -= len_this_page;
101 54
102 scatterwalk_pagedone(walk, out, 1); 55 scatterwalk_pagedone(walk, out & 1, 1);
103 } 56 }
104} 57}
105EXPORT_SYMBOL_GPL(scatterwalk_copychunks); 58EXPORT_SYMBOL_GPL(scatterwalk_copychunks);
@@ -125,28 +78,6 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
125} 78}
126EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy); 79EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy);
127 80
128int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes)
129{
130 int offset = 0, n = 0;
131
132 /* num_bytes is too small */
133 if (num_bytes < sg->length)
134 return -1;
135
136 do {
137 offset += sg->length;
138 n++;
139 sg = sg_next(sg);
140
141 /* num_bytes is too large */
142 if (unlikely(!sg && (num_bytes < offset)))
143 return -1;
144 } while (sg && (num_bytes > offset));
145
146 return n;
147}
148EXPORT_SYMBOL_GPL(scatterwalk_bytes_sglen);
149
150struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], 81struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
151 struct scatterlist *src, 82 struct scatterlist *src,
152 unsigned int len) 83 unsigned int len)
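
The copychunks change above adds a third mode: besides out == 0 (scatterlist to buffer) and out == 1 (buffer to scatterlist), out == 2 walks over nbytes without touching the data, and scatterwalk_pagedone() is fed out & 1 so the flush logic still behaves as before. A condensed user-space restatement of that dispatch; the scatterlist plumbing is elided and all names here are invented:

#include <stddef.h>
#include <string.h>

enum { WALK_SG_TO_BUF = 0, WALK_BUF_TO_SG = 1, WALK_SKIP = 2 };

static void copychunks_like(unsigned char *buf, unsigned char *sgdata,
			    size_t *offset, size_t nbytes, int out)
{
	if (out != WALK_SKIP) {
		if (out == WALK_SG_TO_BUF)
			memcpy(buf, sgdata + *offset, nbytes);
		else
			memcpy(sgdata + *offset, buf, nbytes);
	}
	*offset += nbytes;	/* out == 2 only advances the walk */
}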
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 15a749a5cab7..c7049231861f 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -14,50 +14,17 @@
14 */ 14 */
15 15
16#include <crypto/internal/geniv.h> 16#include <crypto/internal/geniv.h>
17#include <crypto/internal/skcipher.h>
18#include <crypto/rng.h>
19#include <crypto/scatterwalk.h> 17#include <crypto/scatterwalk.h>
18#include <crypto/skcipher.h>
20#include <linux/err.h> 19#include <linux/err.h>
21#include <linux/init.h> 20#include <linux/init.h>
22#include <linux/kernel.h> 21#include <linux/kernel.h>
23#include <linux/module.h> 22#include <linux/module.h>
24#include <linux/slab.h> 23#include <linux/slab.h>
25#include <linux/spinlock.h>
26#include <linux/string.h> 24#include <linux/string.h>
27 25
28struct seqiv_ctx {
29 spinlock_t lock;
30 u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
31};
32
33static void seqiv_free(struct crypto_instance *inst); 26static void seqiv_free(struct crypto_instance *inst);
34 27
35static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err)
36{
37 struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
38 struct crypto_ablkcipher *geniv;
39
40 if (err == -EINPROGRESS)
41 return;
42
43 if (err)
44 goto out;
45
46 geniv = skcipher_givcrypt_reqtfm(req);
47 memcpy(req->creq.info, subreq->info, crypto_ablkcipher_ivsize(geniv));
48
49out:
50 kfree(subreq->info);
51}
52
53static void seqiv_complete(struct crypto_async_request *base, int err)
54{
55 struct skcipher_givcrypt_request *req = base->data;
56
57 seqiv_complete2(req, err);
58 skcipher_givcrypt_complete(req, err);
59}
60
61static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err) 28static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
62{ 29{
63 struct aead_request *subreq = aead_request_ctx(req); 30 struct aead_request *subreq = aead_request_ctx(req);
@@ -85,65 +52,6 @@ static void seqiv_aead_encrypt_complete(struct crypto_async_request *base,
85 aead_request_complete(req, err); 52 aead_request_complete(req, err);
86} 53}
87 54
88static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
89 unsigned int ivsize)
90{
91 unsigned int len = ivsize;
92
93 if (ivsize > sizeof(u64)) {
94 memset(info, 0, ivsize - sizeof(u64));
95 len = sizeof(u64);
96 }
97 seq = cpu_to_be64(seq);
98 memcpy(info + ivsize - len, &seq, len);
99 crypto_xor(info, ctx->salt, ivsize);
100}
101
102static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
103{
104 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
105 struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
106 struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
107 crypto_completion_t compl;
108 void *data;
109 u8 *info;
110 unsigned int ivsize;
111 int err;
112
113 ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
114
115 compl = req->creq.base.complete;
116 data = req->creq.base.data;
117 info = req->creq.info;
118
119 ivsize = crypto_ablkcipher_ivsize(geniv);
120
121 if (unlikely(!IS_ALIGNED((unsigned long)info,
122 crypto_ablkcipher_alignmask(geniv) + 1))) {
123 info = kmalloc(ivsize, req->creq.base.flags &
124 CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
125 GFP_ATOMIC);
126 if (!info)
127 return -ENOMEM;
128
129 compl = seqiv_complete;
130 data = req;
131 }
132
133 ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
134 data);
135 ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
136 req->creq.nbytes, info);
137
138 seqiv_geniv(ctx, info, req->seq, ivsize);
139 memcpy(req->giv, info, ivsize);
140
141 err = crypto_ablkcipher_encrypt(subreq);
142 if (unlikely(info != req->creq.info))
143 seqiv_complete2(req, err);
144 return err;
145}
146
147static int seqiv_aead_encrypt(struct aead_request *req) 55static int seqiv_aead_encrypt(struct aead_request *req)
148{ 56{
149 struct crypto_aead *geniv = crypto_aead_reqtfm(req); 57 struct crypto_aead *geniv = crypto_aead_reqtfm(req);
@@ -165,12 +73,16 @@ static int seqiv_aead_encrypt(struct aead_request *req)
165 info = req->iv; 73 info = req->iv;
166 74
167 if (req->src != req->dst) { 75 if (req->src != req->dst) {
168 struct blkcipher_desc desc = { 76 SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
169 .tfm = ctx->null, 77
170 }; 78 skcipher_request_set_tfm(nreq, ctx->sknull);
79 skcipher_request_set_callback(nreq, req->base.flags,
80 NULL, NULL);
81 skcipher_request_set_crypt(nreq, req->src, req->dst,
82 req->assoclen + req->cryptlen,
83 NULL);
171 84
172 err = crypto_blkcipher_encrypt(&desc, req->dst, req->src, 85 err = crypto_skcipher_encrypt(nreq);
173 req->assoclen + req->cryptlen);
174 if (err) 86 if (err)
175 return err; 87 return err;
176 } 88 }
@@ -229,62 +141,6 @@ static int seqiv_aead_decrypt(struct aead_request *req)
229 return crypto_aead_decrypt(subreq); 141 return crypto_aead_decrypt(subreq);
230} 142}
231 143
232static int seqiv_init(struct crypto_tfm *tfm)
233{
234 struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
235 struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
236 int err;
237
238 spin_lock_init(&ctx->lock);
239
240 tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
241
242 err = 0;
243 if (!crypto_get_default_rng()) {
244 crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt;
245 err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
246 crypto_ablkcipher_ivsize(geniv));
247 crypto_put_default_rng();
248 }
249
250 return err ?: skcipher_geniv_init(tfm);
251}
252
253static int seqiv_ablkcipher_create(struct crypto_template *tmpl,
254 struct rtattr **tb)
255{
256 struct crypto_instance *inst;
257 int err;
258
259 inst = skcipher_geniv_alloc(tmpl, tb, 0, 0);
260
261 if (IS_ERR(inst))
262 return PTR_ERR(inst);
263
264 err = -EINVAL;
265 if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64))
266 goto free_inst;
267
268 inst->alg.cra_init = seqiv_init;
269 inst->alg.cra_exit = skcipher_geniv_exit;
270
271 inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
272 inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);
273
274 inst->alg.cra_alignmask |= __alignof__(u32) - 1;
275
276 err = crypto_register_instance(tmpl, inst);
277 if (err)
278 goto free_inst;
279
280out:
281 return err;
282
283free_inst:
284 skcipher_geniv_free(inst);
285 goto out;
286}
287
288static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) 144static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
289{ 145{
290 struct aead_instance *inst; 146 struct aead_instance *inst;
@@ -330,26 +186,20 @@ free_inst:
330static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb) 186static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb)
331{ 187{
332 struct crypto_attr_type *algt; 188 struct crypto_attr_type *algt;
333 int err;
334 189
335 algt = crypto_get_attr_type(tb); 190 algt = crypto_get_attr_type(tb);
336 if (IS_ERR(algt)) 191 if (IS_ERR(algt))
337 return PTR_ERR(algt); 192 return PTR_ERR(algt);
338 193
339 if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) 194 if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
340 err = seqiv_ablkcipher_create(tmpl, tb); 195 return -EINVAL;
341 else
342 err = seqiv_aead_create(tmpl, tb);
343 196
344 return err; 197 return seqiv_aead_create(tmpl, tb);
345} 198}
346 199
347static void seqiv_free(struct crypto_instance *inst) 200static void seqiv_free(struct crypto_instance *inst)
348{ 201{
349 if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) 202 aead_geniv_free(aead_instance(inst));
350 skcipher_geniv_free(inst);
351 else
352 aead_geniv_free(aead_instance(inst));
353} 203}
354 204
355static struct crypto_template seqiv_tmpl = { 205static struct crypto_template seqiv_tmpl = {
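
The removed seqiv_geniv() above shows how the old skcipher geniv path derived its IVs: the 64-bit request sequence number is written big-endian into the trailing bytes of the IV and the whole IV is XORed with a per-transform random salt. A user-space sketch of that construction, with invented names; it assumes ivsize >= 8, which seqiv enforces at instance creation:

#include <stdint.h>
#include <string.h>

static void seqiv_build_iv(uint8_t *iv, const uint8_t *salt,
			   unsigned int ivsize, uint64_t seq)
{
	unsigned int i;

	memset(iv, 0, ivsize - 8);
	/* big-endian sequence number in the trailing 8 bytes */
	for (i = 0; i < 8; i++)
		iv[ivsize - 1 - i] = (uint8_t)(seq >> (8 * i));
	for (i = 0; i < ivsize; i++)
		iv[i] ^= salt[i];
}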
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
new file mode 100644
index 000000000000..62264397a2d2
--- /dev/null
+++ b/crypto/sha3_generic.c
@@ -0,0 +1,300 @@
1/*
2 * Cryptographic API.
3 *
4 * SHA-3, as specified in
5 * http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
6 *
7 * SHA-3 code by Jeff Garzik <jeff@garzik.org>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 */
15#include <crypto/internal/hash.h>
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <crypto/sha3.h>
20#include <asm/byteorder.h>
21
22#define KECCAK_ROUNDS 24
23
24#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y))))
25
26static const u64 keccakf_rndc[24] = {
27 0x0000000000000001, 0x0000000000008082, 0x800000000000808a,
28 0x8000000080008000, 0x000000000000808b, 0x0000000080000001,
29 0x8000000080008081, 0x8000000000008009, 0x000000000000008a,
30 0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
31 0x000000008000808b, 0x800000000000008b, 0x8000000000008089,
32 0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
33 0x000000000000800a, 0x800000008000000a, 0x8000000080008081,
34 0x8000000000008080, 0x0000000080000001, 0x8000000080008008
35};
36
37static const int keccakf_rotc[24] = {
38 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
39 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
40};
41
42static const int keccakf_piln[24] = {
43 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
44 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
45};
46
47/* update the state with given number of rounds */
48
49static void keccakf(u64 st[25])
50{
51 int i, j, round;
52 u64 t, bc[5];
53
54 for (round = 0; round < KECCAK_ROUNDS; round++) {
55
56 /* Theta */
57 for (i = 0; i < 5; i++)
58 bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15]
59 ^ st[i + 20];
60
61 for (i = 0; i < 5; i++) {
62 t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1);
63 for (j = 0; j < 25; j += 5)
64 st[j + i] ^= t;
65 }
66
67 /* Rho Pi */
68 t = st[1];
69 for (i = 0; i < 24; i++) {
70 j = keccakf_piln[i];
71 bc[0] = st[j];
72 st[j] = ROTL64(t, keccakf_rotc[i]);
73 t = bc[0];
74 }
75
76 /* Chi */
77 for (j = 0; j < 25; j += 5) {
78 for (i = 0; i < 5; i++)
79 bc[i] = st[j + i];
80 for (i = 0; i < 5; i++)
81 st[j + i] ^= (~bc[(i + 1) % 5]) &
82 bc[(i + 2) % 5];
83 }
84
85 /* Iota */
86 st[0] ^= keccakf_rndc[round];
87 }
88}
89
90static void sha3_init(struct sha3_state *sctx, unsigned int digest_sz)
91{
92 memset(sctx, 0, sizeof(*sctx));
93 sctx->md_len = digest_sz;
94 sctx->rsiz = 200 - 2 * digest_sz;
95 sctx->rsizw = sctx->rsiz / 8;
96}
97
98static int sha3_224_init(struct shash_desc *desc)
99{
100 struct sha3_state *sctx = shash_desc_ctx(desc);
101
102 sha3_init(sctx, SHA3_224_DIGEST_SIZE);
103 return 0;
104}
105
106static int sha3_256_init(struct shash_desc *desc)
107{
108 struct sha3_state *sctx = shash_desc_ctx(desc);
109
110 sha3_init(sctx, SHA3_256_DIGEST_SIZE);
111 return 0;
112}
113
114static int sha3_384_init(struct shash_desc *desc)
115{
116 struct sha3_state *sctx = shash_desc_ctx(desc);
117
118 sha3_init(sctx, SHA3_384_DIGEST_SIZE);
119 return 0;
120}
121
122static int sha3_512_init(struct shash_desc *desc)
123{
124 struct sha3_state *sctx = shash_desc_ctx(desc);
125
126 sha3_init(sctx, SHA3_512_DIGEST_SIZE);
127 return 0;
128}
129
130static int sha3_update(struct shash_desc *desc, const u8 *data,
131 unsigned int len)
132{
133 struct sha3_state *sctx = shash_desc_ctx(desc);
134 unsigned int done;
135 const u8 *src;
136
137 done = 0;
138 src = data;
139
140 if ((sctx->partial + len) > (sctx->rsiz - 1)) {
141 if (sctx->partial) {
142 done = -sctx->partial;
143 memcpy(sctx->buf + sctx->partial, data,
144 done + sctx->rsiz);
145 src = sctx->buf;
146 }
147
148 do {
149 unsigned int i;
150
151 for (i = 0; i < sctx->rsizw; i++)
152 sctx->st[i] ^= ((u64 *) src)[i];
153 keccakf(sctx->st);
154
155 done += sctx->rsiz;
156 src = data + done;
157 } while (done + (sctx->rsiz - 1) < len);
158
159 sctx->partial = 0;
160 }
161 memcpy(sctx->buf + sctx->partial, src, len - done);
162 sctx->partial += (len - done);
163
164 return 0;
165}
166
167static int sha3_final(struct shash_desc *desc, u8 *out)
168{
169 struct sha3_state *sctx = shash_desc_ctx(desc);
170 unsigned int i, inlen = sctx->partial;
171
172 sctx->buf[inlen++] = 0x06;
173 memset(sctx->buf + inlen, 0, sctx->rsiz - inlen);
174 sctx->buf[sctx->rsiz - 1] |= 0x80;
175
176 for (i = 0; i < sctx->rsizw; i++)
177 sctx->st[i] ^= ((u64 *) sctx->buf)[i];
178
179 keccakf(sctx->st);
180
181 for (i = 0; i < sctx->rsizw; i++)
182 sctx->st[i] = cpu_to_le64(sctx->st[i]);
183
184 memcpy(out, sctx->st, sctx->md_len);
185
186 memset(sctx, 0, sizeof(*sctx));
187 return 0;
188}
189
190static struct shash_alg sha3_224 = {
191 .digestsize = SHA3_224_DIGEST_SIZE,
192 .init = sha3_224_init,
193 .update = sha3_update,
194 .final = sha3_final,
195 .descsize = sizeof(struct sha3_state),
196 .base = {
197 .cra_name = "sha3-224",
198 .cra_driver_name = "sha3-224-generic",
199 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
200 .cra_blocksize = SHA3_224_BLOCK_SIZE,
201 .cra_module = THIS_MODULE,
202 }
203};
204
205static struct shash_alg sha3_256 = {
206 .digestsize = SHA3_256_DIGEST_SIZE,
207 .init = sha3_256_init,
208 .update = sha3_update,
209 .final = sha3_final,
210 .descsize = sizeof(struct sha3_state),
211 .base = {
212 .cra_name = "sha3-256",
213 .cra_driver_name = "sha3-256-generic",
214 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
215 .cra_blocksize = SHA3_256_BLOCK_SIZE,
216 .cra_module = THIS_MODULE,
217 }
218};
219
220static struct shash_alg sha3_384 = {
221 .digestsize = SHA3_384_DIGEST_SIZE,
222 .init = sha3_384_init,
223 .update = sha3_update,
224 .final = sha3_final,
225 .descsize = sizeof(struct sha3_state),
226 .base = {
227 .cra_name = "sha3-384",
228 .cra_driver_name = "sha3-384-generic",
229 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
230 .cra_blocksize = SHA3_384_BLOCK_SIZE,
231 .cra_module = THIS_MODULE,
232 }
233};
234
235static struct shash_alg sha3_512 = {
236 .digestsize = SHA3_512_DIGEST_SIZE,
237 .init = sha3_512_init,
238 .update = sha3_update,
239 .final = sha3_final,
240 .descsize = sizeof(struct sha3_state),
241 .base = {
242 .cra_name = "sha3-512",
243 .cra_driver_name = "sha3-512-generic",
244 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
245 .cra_blocksize = SHA3_512_BLOCK_SIZE,
246 .cra_module = THIS_MODULE,
247 }
248};
249
250static int __init sha3_generic_mod_init(void)
251{
252 int ret;
253
254 ret = crypto_register_shash(&sha3_224);
255 if (ret < 0)
256 goto err_out;
257 ret = crypto_register_shash(&sha3_256);
258 if (ret < 0)
259 goto err_out_224;
260 ret = crypto_register_shash(&sha3_384);
261 if (ret < 0)
262 goto err_out_256;
263 ret = crypto_register_shash(&sha3_512);
264 if (ret < 0)
265 goto err_out_384;
266
267 return 0;
268
269err_out_384:
270 crypto_unregister_shash(&sha3_384);
271err_out_256:
272 crypto_unregister_shash(&sha3_256);
273err_out_224:
274 crypto_unregister_shash(&sha3_224);
275err_out:
276 return ret;
277}
278
279static void __exit sha3_generic_mod_fini(void)
280{
281 crypto_unregister_shash(&sha3_224);
282 crypto_unregister_shash(&sha3_256);
283 crypto_unregister_shash(&sha3_384);
284 crypto_unregister_shash(&sha3_512);
285}
286
287module_init(sha3_generic_mod_init);
288module_exit(sha3_generic_mod_fini);
289
290MODULE_LICENSE("GPL");
291MODULE_DESCRIPTION("SHA-3 Secure Hash Algorithm");
292
293MODULE_ALIAS_CRYPTO("sha3-224");
294MODULE_ALIAS_CRYPTO("sha3-224-generic");
295MODULE_ALIAS_CRYPTO("sha3-256");
296MODULE_ALIAS_CRYPTO("sha3-256-generic");
297MODULE_ALIAS_CRYPTO("sha3-384");
298MODULE_ALIAS_CRYPTO("sha3-384-generic");
299MODULE_ALIAS_CRYPTO("sha3-512");
300MODULE_ALIAS_CRYPTO("sha3-512-generic");
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 69230e9d4ac9..f7d0018dcaee 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -16,7 +16,11 @@
16 16
17#include <crypto/internal/skcipher.h> 17#include <crypto/internal/skcipher.h>
18#include <linux/bug.h> 18#include <linux/bug.h>
19#include <linux/cryptouser.h>
19#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/rtnetlink.h>
22#include <linux/seq_file.h>
23#include <net/netlink.h>
20 24
21#include "internal.h" 25#include "internal.h"
22 26
@@ -25,10 +29,11 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
25 if (alg->cra_type == &crypto_blkcipher_type) 29 if (alg->cra_type == &crypto_blkcipher_type)
26 return sizeof(struct crypto_blkcipher *); 30 return sizeof(struct crypto_blkcipher *);
27 31
28 BUG_ON(alg->cra_type != &crypto_ablkcipher_type && 32 if (alg->cra_type == &crypto_ablkcipher_type ||
29 alg->cra_type != &crypto_givcipher_type); 33 alg->cra_type == &crypto_givcipher_type)
34 return sizeof(struct crypto_ablkcipher *);
30 35
31 return sizeof(struct crypto_ablkcipher *); 36 return crypto_alg_extsize(alg);
32} 37}
33 38
34static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm, 39static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
@@ -216,26 +221,118 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
216 return 0; 221 return 0;
217} 222}
218 223
224static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
225{
226 struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
227 struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
228
229 alg->exit(skcipher);
230}
231
219static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) 232static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
220{ 233{
234 struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
235 struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
236
221 if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type) 237 if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
222 return crypto_init_skcipher_ops_blkcipher(tfm); 238 return crypto_init_skcipher_ops_blkcipher(tfm);
223 239
224 BUG_ON(tfm->__crt_alg->cra_type != &crypto_ablkcipher_type && 240 if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
225 tfm->__crt_alg->cra_type != &crypto_givcipher_type); 241 tfm->__crt_alg->cra_type == &crypto_givcipher_type)
242 return crypto_init_skcipher_ops_ablkcipher(tfm);
243
244 skcipher->setkey = alg->setkey;
245 skcipher->encrypt = alg->encrypt;
246 skcipher->decrypt = alg->decrypt;
247 skcipher->ivsize = alg->ivsize;
248 skcipher->keysize = alg->max_keysize;
249
250 if (alg->exit)
251 skcipher->base.exit = crypto_skcipher_exit_tfm;
226 252
227 return crypto_init_skcipher_ops_ablkcipher(tfm); 253 if (alg->init)
254 return alg->init(skcipher);
255
256 return 0;
257}
258
259static void crypto_skcipher_free_instance(struct crypto_instance *inst)
260{
261 struct skcipher_instance *skcipher =
262 container_of(inst, struct skcipher_instance, s.base);
263
264 skcipher->free(skcipher);
265}
266
267static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
268 __attribute__ ((unused));
269static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
270{
271 struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
272 base);
273
274 seq_printf(m, "type : skcipher\n");
275 seq_printf(m, "async : %s\n",
276 alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
277 seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
278 seq_printf(m, "min keysize : %u\n", skcipher->min_keysize);
279 seq_printf(m, "max keysize : %u\n", skcipher->max_keysize);
280 seq_printf(m, "ivsize : %u\n", skcipher->ivsize);
281 seq_printf(m, "chunksize : %u\n", skcipher->chunksize);
228} 282}
229 283
284#ifdef CONFIG_NET
285static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
286{
287 struct crypto_report_blkcipher rblkcipher;
288 struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
289 base);
290
291 strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
292 strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));
293
294 rblkcipher.blocksize = alg->cra_blocksize;
295 rblkcipher.min_keysize = skcipher->min_keysize;
296 rblkcipher.max_keysize = skcipher->max_keysize;
297 rblkcipher.ivsize = skcipher->ivsize;
298
299 if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
300 sizeof(struct crypto_report_blkcipher), &rblkcipher))
301 goto nla_put_failure;
302 return 0;
303
304nla_put_failure:
305 return -EMSGSIZE;
306}
307#else
308static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
309{
310 return -ENOSYS;
311}
312#endif
313
230static const struct crypto_type crypto_skcipher_type2 = { 314static const struct crypto_type crypto_skcipher_type2 = {
231 .extsize = crypto_skcipher_extsize, 315 .extsize = crypto_skcipher_extsize,
232 .init_tfm = crypto_skcipher_init_tfm, 316 .init_tfm = crypto_skcipher_init_tfm,
317 .free = crypto_skcipher_free_instance,
318#ifdef CONFIG_PROC_FS
319 .show = crypto_skcipher_show,
320#endif
321 .report = crypto_skcipher_report,
233 .maskclear = ~CRYPTO_ALG_TYPE_MASK, 322 .maskclear = ~CRYPTO_ALG_TYPE_MASK,
234 .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK, 323 .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
235 .type = CRYPTO_ALG_TYPE_BLKCIPHER, 324 .type = CRYPTO_ALG_TYPE_SKCIPHER,
236 .tfmsize = offsetof(struct crypto_skcipher, base), 325 .tfmsize = offsetof(struct crypto_skcipher, base),
237}; 326};
238 327
328int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
329 const char *name, u32 type, u32 mask)
330{
331 spawn->base.frontend = &crypto_skcipher_type2;
332 return crypto_grab_spawn(&spawn->base, name, type, mask);
333}
334EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
335
239struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, 336struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
240 u32 type, u32 mask) 337 u32 type, u32 mask)
241{ 338{
@@ -243,5 +340,90 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
243} 340}
244EXPORT_SYMBOL_GPL(crypto_alloc_skcipher); 341EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
245 342
343int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
344{
345 return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
346 type, mask);
347}
348EXPORT_SYMBOL_GPL(crypto_has_skcipher2);
349
350static int skcipher_prepare_alg(struct skcipher_alg *alg)
351{
352 struct crypto_alg *base = &alg->base;
353
354 if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8)
355 return -EINVAL;
356
357 if (!alg->chunksize)
358 alg->chunksize = base->cra_blocksize;
359
360 base->cra_type = &crypto_skcipher_type2;
361 base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
362 base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
363
364 return 0;
365}
366
367int crypto_register_skcipher(struct skcipher_alg *alg)
368{
369 struct crypto_alg *base = &alg->base;
370 int err;
371
372 err = skcipher_prepare_alg(alg);
373 if (err)
374 return err;
375
376 return crypto_register_alg(base);
377}
378EXPORT_SYMBOL_GPL(crypto_register_skcipher);
379
380void crypto_unregister_skcipher(struct skcipher_alg *alg)
381{
382 crypto_unregister_alg(&alg->base);
383}
384EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);
385
386int crypto_register_skciphers(struct skcipher_alg *algs, int count)
387{
388 int i, ret;
389
390 for (i = 0; i < count; i++) {
391 ret = crypto_register_skcipher(&algs[i]);
392 if (ret)
393 goto err;
394 }
395
396 return 0;
397
398err:
399 for (--i; i >= 0; --i)
400 crypto_unregister_skcipher(&algs[i]);
401
402 return ret;
403}
404EXPORT_SYMBOL_GPL(crypto_register_skciphers);
405
406void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
407{
408 int i;
409
410 for (i = count - 1; i >= 0; --i)
411 crypto_unregister_skcipher(&algs[i]);
412}
413EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);
414
415int skcipher_register_instance(struct crypto_template *tmpl,
416 struct skcipher_instance *inst)
417{
418 int err;
419
420 err = skcipher_prepare_alg(&inst->alg);
421 if (err)
422 return err;
423
424 return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
425}
426EXPORT_SYMBOL_GPL(skcipher_register_instance);
427
246MODULE_LICENSE("GPL"); 428MODULE_LICENSE("GPL");
247MODULE_DESCRIPTION("Symmetric key cipher type"); 429MODULE_DESCRIPTION("Symmetric key cipher type");
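
skcipher.c now accepts native skcipher_alg implementations through crypto_skcipher_type2 (alongside wrapped blkcipher/ablkcipher algorithms) and exports crypto_register_skcipher(), crypto_grab_skcipher() and friends for them. A hedged skeleton of a driver registering against the new interface; the cbc(aes) parameters and the empty handlers are placeholders, not a working cipher:

#include <crypto/internal/skcipher.h>
#include <linux/module.h>

static int example_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int keylen)
{
	return 0;	/* placeholder: expand the key here */
}

static int example_encrypt(struct skcipher_request *req)
{
	return 0;	/* placeholder: process req->src into req->dst */
}

static int example_decrypt(struct skcipher_request *req)
{
	return 0;	/* placeholder */
}

static struct skcipher_alg example_alg = {
	.base = {
		.cra_name		= "cbc(aes)",	/* placeholder name */
		.cra_driver_name	= "cbc-aes-example",
		.cra_priority		= 100,
		.cra_blocksize		= 16,
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= 16,
	.max_keysize	= 32,
	.ivsize		= 16,
	/* chunksize defaults to cra_blocksize in skcipher_prepare_alg() */
	.setkey		= example_setkey,
	.encrypt	= example_encrypt,
	.decrypt	= example_decrypt,
};

static int __init example_mod_init(void)
{
	return crypto_register_skcipher(&example_alg);
}

static void __exit example_mod_exit(void)
{
	crypto_unregister_skcipher(&example_alg);
}

module_init(example_mod_init);
module_exit(example_mod_exit);
MODULE_LICENSE("GPL");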
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 579dce071463..ae22f05d5936 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -24,6 +24,7 @@
24 24
25#include <crypto/aead.h> 25#include <crypto/aead.h>
26#include <crypto/hash.h> 26#include <crypto/hash.h>
27#include <crypto/skcipher.h>
27#include <linux/err.h> 28#include <linux/err.h>
28#include <linux/fips.h> 29#include <linux/fips.h>
29#include <linux/init.h> 30#include <linux/init.h>
@@ -72,7 +73,8 @@ static char *check[] = {
72 "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", 73 "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
73 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", 74 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
74 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320", 75 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
75 "lzo", "cts", "zlib", NULL 76 "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
77 NULL
76}; 78};
77 79
78struct tcrypt_result { 80struct tcrypt_result {
@@ -91,76 +93,6 @@ static void tcrypt_complete(struct crypto_async_request *req, int err)
91 complete(&res->completion); 93 complete(&res->completion);
92} 94}
93 95
94static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
95 struct scatterlist *sg, int blen, int secs)
96{
97 unsigned long start, end;
98 int bcount;
99 int ret;
100
101 for (start = jiffies, end = start + secs * HZ, bcount = 0;
102 time_before(jiffies, end); bcount++) {
103 if (enc)
104 ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
105 else
106 ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
107
108 if (ret)
109 return ret;
110 }
111
112 printk("%d operations in %d seconds (%ld bytes)\n",
113 bcount, secs, (long)bcount * blen);
114 return 0;
115}
116
117static int test_cipher_cycles(struct blkcipher_desc *desc, int enc,
118 struct scatterlist *sg, int blen)
119{
120 unsigned long cycles = 0;
121 int ret = 0;
122 int i;
123
124 local_irq_disable();
125
126 /* Warm-up run. */
127 for (i = 0; i < 4; i++) {
128 if (enc)
129 ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
130 else
131 ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
132
133 if (ret)
134 goto out;
135 }
136
137 /* The real thing. */
138 for (i = 0; i < 8; i++) {
139 cycles_t start, end;
140
141 start = get_cycles();
142 if (enc)
143 ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
144 else
145 ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
146 end = get_cycles();
147
148 if (ret)
149 goto out;
150
151 cycles += end - start;
152 }
153
154out:
155 local_irq_enable();
156
157 if (ret == 0)
158 printk("1 operation in %lu cycles (%d bytes)\n",
159 (cycles + 4) / 8, blen);
160
161 return ret;
162}
163
164static inline int do_one_aead_op(struct aead_request *req, int ret) 96static inline int do_one_aead_op(struct aead_request *req, int ret)
165{ 97{
166 if (ret == -EINPROGRESS || ret == -EBUSY) { 98 if (ret == -EINPROGRESS || ret == -EBUSY) {
@@ -454,127 +386,148 @@ out_noxbuf:
454 return; 386 return;
455} 387}
456 388
457static void test_cipher_speed(const char *algo, int enc, unsigned int secs, 389static void test_hash_sg_init(struct scatterlist *sg)
458 struct cipher_speed_template *template,
459 unsigned int tcount, u8 *keysize)
460{ 390{
461 unsigned int ret, i, j, iv_len; 391 int i;
462 const char *key;
463 char iv[128];
464 struct crypto_blkcipher *tfm;
465 struct blkcipher_desc desc;
466 const char *e;
467 u32 *b_size;
468 392
469 if (enc == ENCRYPT) 393 sg_init_table(sg, TVMEMSIZE);
470 e = "encryption"; 394 for (i = 0; i < TVMEMSIZE; i++) {
471 else 395 sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
472 e = "decryption"; 396 memset(tvmem[i], 0xff, PAGE_SIZE);
397 }
398}
473 399
474 tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC); 400static inline int do_one_ahash_op(struct ahash_request *req, int ret)
401{
402 if (ret == -EINPROGRESS || ret == -EBUSY) {
403 struct tcrypt_result *tr = req->base.data;
475 404
476 if (IS_ERR(tfm)) { 405 wait_for_completion(&tr->completion);
477 printk("failed to load transform for %s: %ld\n", algo, 406 reinit_completion(&tr->completion);
478 PTR_ERR(tfm)); 407 ret = tr->err;
408 }
409 return ret;
410}
411
412struct test_mb_ahash_data {
413 struct scatterlist sg[TVMEMSIZE];
414 char result[64];
415 struct ahash_request *req;
416 struct tcrypt_result tresult;
417 char *xbuf[XBUFSIZE];
418};
419
420static void test_mb_ahash_speed(const char *algo, unsigned int sec,
421 struct hash_speed *speed)
422{
423 struct test_mb_ahash_data *data;
424 struct crypto_ahash *tfm;
425 unsigned long start, end;
426 unsigned long cycles;
427 unsigned int i, j, k;
428 int ret;
429
430 data = kzalloc(sizeof(*data) * 8, GFP_KERNEL);
431 if (!data)
479 return; 432 return;
433
434 tfm = crypto_alloc_ahash(algo, 0, 0);
435 if (IS_ERR(tfm)) {
436 pr_err("failed to load transform for %s: %ld\n",
437 algo, PTR_ERR(tfm));
438 goto free_data;
480 } 439 }
481 desc.tfm = tfm;
482 desc.flags = 0;
483 440
484 printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo, 441 for (i = 0; i < 8; ++i) {
485 get_driver_name(crypto_blkcipher, tfm), e); 442 if (testmgr_alloc_buf(data[i].xbuf))
443 goto out;
486 444
487 i = 0; 445 init_completion(&data[i].tresult.completion);
488 do {
489 446
490 b_size = block_sizes; 447 data[i].req = ahash_request_alloc(tfm, GFP_KERNEL);
491 do { 448 if (!data[i].req) {
492 struct scatterlist sg[TVMEMSIZE]; 449 pr_err("alg: hash: Failed to allocate request for %s\n",
450 algo);
451 goto out;
452 }
493 453
494 if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) { 454 ahash_request_set_callback(data[i].req, 0,
495 printk("template (%u) too big for " 455 tcrypt_complete, &data[i].tresult);
496 "tvmem (%lu)\n", *keysize + *b_size, 456 test_hash_sg_init(data[i].sg);
497 TVMEMSIZE * PAGE_SIZE); 457 }
498 goto out;
499 }
500 458
501 printk("test %u (%d bit key, %d byte blocks): ", i, 459 pr_info("\ntesting speed of multibuffer %s (%s)\n", algo,
502 *keysize * 8, *b_size); 460 get_driver_name(crypto_ahash, tfm));
503 461
504 memset(tvmem[0], 0xff, PAGE_SIZE); 462 for (i = 0; speed[i].blen != 0; i++) {
463 /* For some reason this only tests digests. */
464 if (speed[i].blen != speed[i].plen)
465 continue;
505 466
506 /* set key, plain text and IV */ 467 if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
507 key = tvmem[0]; 468 pr_err("template (%u) too big for tvmem (%lu)\n",
508 for (j = 0; j < tcount; j++) { 469 speed[i].blen, TVMEMSIZE * PAGE_SIZE);
509 if (template[j].klen == *keysize) { 470 goto out;
510 key = template[j].key; 471 }
511 break;
512 }
513 }
514 472
515 ret = crypto_blkcipher_setkey(tfm, key, *keysize); 473 if (speed[i].klen)
516 if (ret) { 474 crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);
517 printk("setkey() failed flags=%x\n",
518 crypto_blkcipher_get_flags(tfm));
519 goto out;
520 }
521 475
522 sg_init_table(sg, TVMEMSIZE); 476 for (k = 0; k < 8; k++)
523 sg_set_buf(sg, tvmem[0] + *keysize, 477 ahash_request_set_crypt(data[k].req, data[k].sg,
524 PAGE_SIZE - *keysize); 478 data[k].result, speed[i].blen);
525 for (j = 1; j < TVMEMSIZE; j++) {
526 sg_set_buf(sg + j, tvmem[j], PAGE_SIZE);
527 memset (tvmem[j], 0xff, PAGE_SIZE);
528 }
529 479
530 iv_len = crypto_blkcipher_ivsize(tfm); 480 pr_info("test%3u "
531 if (iv_len) { 481 "(%5u byte blocks,%5u bytes per update,%4u updates): ",
532 memset(&iv, 0xff, iv_len); 482 i, speed[i].blen, speed[i].plen,
533 crypto_blkcipher_set_iv(tfm, iv, iv_len); 483 speed[i].blen / speed[i].plen);
534 }
535 484
536 if (secs) 485 start = get_cycles();
537 ret = test_cipher_jiffies(&desc, enc, sg,
538 *b_size, secs);
539 else
540 ret = test_cipher_cycles(&desc, enc, sg,
541 *b_size);
542 486
543 if (ret) { 487 for (k = 0; k < 8; k++) {
544 printk("%s() failed flags=%x\n", e, desc.flags); 488 ret = crypto_ahash_digest(data[k].req);
545 break; 489 if (ret == -EINPROGRESS) {
490 ret = 0;
491 continue;
546 } 492 }
547 b_size++;
548 i++;
549 } while (*b_size);
550 keysize++;
551 } while (*keysize);
552 493
553out: 494 if (ret)
554 crypto_free_blkcipher(tfm); 495 break;
555}
556 496
557static void test_hash_sg_init(struct scatterlist *sg) 497 complete(&data[k].tresult.completion);
558{ 498 data[k].tresult.err = 0;
559 int i; 499 }
560 500
561 sg_init_table(sg, TVMEMSIZE); 501 for (j = 0; j < k; j++) {
562 for (i = 0; i < TVMEMSIZE; i++) { 502 struct tcrypt_result *tr = &data[j].tresult;
563 sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
564 memset(tvmem[i], 0xff, PAGE_SIZE);
565 }
566}
567 503
568static inline int do_one_ahash_op(struct ahash_request *req, int ret) 504 wait_for_completion(&tr->completion);
569{ 505 if (tr->err)
570 if (ret == -EINPROGRESS || ret == -EBUSY) { 506 ret = tr->err;
571 struct tcrypt_result *tr = req->base.data; 507 }
572 508
573 wait_for_completion(&tr->completion); 509 end = get_cycles();
574 reinit_completion(&tr->completion); 510 cycles = end - start;
575 ret = tr->err; 511 pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
512 cycles, cycles / (8 * speed[i].blen));
513
514 if (ret) {
515 pr_err("At least one hashing failed ret=%d\n", ret);
516 break;
517 }
576 } 518 }
577 return ret; 519
520out:
521 for (k = 0; k < 8; ++k)
522 ahash_request_free(data[k].req);
523
524 for (k = 0; k < 8; ++k)
525 testmgr_free_buf(data[k].xbuf);
526
527 crypto_free_ahash(tfm);
528
529free_data:
530 kfree(data);
578} 531}
579 532
580static int test_ahash_jiffies_digest(struct ahash_request *req, int blen, 533static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
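The new test_mb_ahash_speed() above issues all eight digest requests before waiting on any of them, so a multibuffer driver can coalesce the work; requests that finish synchronously are completed by hand so the reap loop can treat every slot the same way. A rough sketch of that submit-then-reap pattern, reusing the file's tcrypt_result bookkeeping (a simplification, not the exact function):

/* Sketch of the submit-all-then-wait flow used by test_mb_ahash_speed(). */
static int mb_digest_all(struct ahash_request *reqs[8],
			 struct tcrypt_result *results[8])
{
	unsigned int k, submitted = 0;
	int ret = 0;

	for (k = 0; k < 8; k++) {
		ret = crypto_ahash_digest(reqs[k]);
		if (ret == -EINPROGRESS) {	/* queued asynchronously */
			ret = 0;
			submitted++;
			continue;
		}
		if (ret)
			break;
		/* completed synchronously; mark the slot done ourselves */
		complete(&results[k]->completion);
		results[k]->err = 0;
		submitted++;
	}

	for (k = 0; k < submitted; k++) {
		wait_for_completion(&results[k]->completion);
		if (results[k]->err)
			ret = results[k]->err;
	}
	return ret;
}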
@@ -812,7 +765,7 @@ static void test_hash_speed(const char *algo, unsigned int secs,
 	return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC);
 }
 
-static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
+static inline int do_one_acipher_op(struct skcipher_request *req, int ret)
 {
 	if (ret == -EINPROGRESS || ret == -EBUSY) {
 		struct tcrypt_result *tr = req->base.data;
@@ -825,7 +778,7 @@ static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
 	return ret;
 }
 
-static int test_acipher_jiffies(struct ablkcipher_request *req, int enc,
+static int test_acipher_jiffies(struct skcipher_request *req, int enc,
 				int blen, int secs)
 {
 	unsigned long start, end;
@@ -836,10 +789,10 @@ static int test_acipher_jiffies(struct ablkcipher_request *req, int enc,
 	     time_before(jiffies, end); bcount++) {
 		if (enc)
 			ret = do_one_acipher_op(req,
-						crypto_ablkcipher_encrypt(req));
+						crypto_skcipher_encrypt(req));
 		else
 			ret = do_one_acipher_op(req,
-						crypto_ablkcipher_decrypt(req));
+						crypto_skcipher_decrypt(req));
 
 		if (ret)
 			return ret;
@@ -850,7 +803,7 @@ static int test_acipher_jiffies(struct ablkcipher_request *req, int enc,
 	return 0;
 }
 
-static int test_acipher_cycles(struct ablkcipher_request *req, int enc,
+static int test_acipher_cycles(struct skcipher_request *req, int enc,
 			       int blen)
 {
 	unsigned long cycles = 0;
@@ -861,10 +814,10 @@ static int test_acipher_cycles(struct ablkcipher_request *req, int enc,
 	for (i = 0; i < 4; i++) {
 		if (enc)
 			ret = do_one_acipher_op(req,
-						crypto_ablkcipher_encrypt(req));
+						crypto_skcipher_encrypt(req));
 		else
 			ret = do_one_acipher_op(req,
-						crypto_ablkcipher_decrypt(req));
+						crypto_skcipher_decrypt(req));
 
 		if (ret)
 			goto out;
@@ -877,10 +830,10 @@ static int test_acipher_cycles(struct ablkcipher_request *req, int enc,
 		start = get_cycles();
 		if (enc)
 			ret = do_one_acipher_op(req,
-						crypto_ablkcipher_encrypt(req));
+						crypto_skcipher_encrypt(req));
 		else
 			ret = do_one_acipher_op(req,
-						crypto_ablkcipher_decrypt(req));
+						crypto_skcipher_decrypt(req));
 		end = get_cycles();
 
 		if (ret)
@@ -897,16 +850,16 @@ out:
 	return ret;
 }
 
-static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
+static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
 				struct cipher_speed_template *template,
-				unsigned int tcount, u8 *keysize)
+				unsigned int tcount, u8 *keysize, bool async)
 {
 	unsigned int ret, i, j, k, iv_len;
 	struct tcrypt_result tresult;
 	const char *key;
 	char iv[128];
-	struct ablkcipher_request *req;
-	struct crypto_ablkcipher *tfm;
+	struct skcipher_request *req;
+	struct crypto_skcipher *tfm;
 	const char *e;
 	u32 *b_size;
 
@@ -917,7 +870,7 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
 
 	init_completion(&tresult.completion);
 
-	tfm = crypto_alloc_ablkcipher(algo, 0, 0);
+	tfm = crypto_alloc_skcipher(algo, 0, async ? 0 : CRYPTO_ALG_ASYNC);
 
 	if (IS_ERR(tfm)) {
 		pr_err("failed to load transform for %s: %ld\n", algo,
@@ -926,17 +879,17 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
 	}
 
 	pr_info("\ntesting speed of async %s (%s) %s\n", algo,
-			get_driver_name(crypto_ablkcipher, tfm), e);
+			get_driver_name(crypto_skcipher, tfm), e);
 
-	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 	if (!req) {
 		pr_err("tcrypt: skcipher: Failed to allocate request for %s\n",
 		       algo);
 		goto out;
 	}
 
-	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 					tcrypt_complete, &tresult);
 
 	i = 0;
 	do {
@@ -966,12 +919,12 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
 				}
 			}
 
-			crypto_ablkcipher_clear_flags(tfm, ~0);
+			crypto_skcipher_clear_flags(tfm, ~0);
 
-			ret = crypto_ablkcipher_setkey(tfm, key, *keysize);
+			ret = crypto_skcipher_setkey(tfm, key, *keysize);
 			if (ret) {
 				pr_err("setkey() failed flags=%x\n",
-					crypto_ablkcipher_get_flags(tfm));
+					crypto_skcipher_get_flags(tfm));
 				goto out_free_req;
 			}
 
@@ -995,11 +948,11 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
 				sg_set_buf(sg, tvmem[0] + *keysize, *b_size);
 			}
 
-			iv_len = crypto_ablkcipher_ivsize(tfm);
+			iv_len = crypto_skcipher_ivsize(tfm);
 			if (iv_len)
 				memset(&iv, 0xff, iv_len);
 
-			ablkcipher_request_set_crypt(req, sg, sg, *b_size, iv);
+			skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
 
 			if (secs)
 				ret = test_acipher_jiffies(req, enc,
@@ -1010,7 +963,7 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
 
 			if (ret) {
 				pr_err("%s() failed flags=%x\n", e,
-					crypto_ablkcipher_get_flags(tfm));
+					crypto_skcipher_get_flags(tfm));
 				break;
 			}
 			b_size++;
@@ -1020,9 +973,25 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
 	} while (*keysize);
 
 out_free_req:
-	ablkcipher_request_free(req);
+	skcipher_request_free(req);
 out:
-	crypto_free_ablkcipher(tfm);
+	crypto_free_skcipher(tfm);
+}
+
+static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
+			       struct cipher_speed_template *template,
+			       unsigned int tcount, u8 *keysize)
+{
+	return test_skcipher_speed(algo, enc, secs, template, tcount, keysize,
+				   true);
+}
+
+static void test_cipher_speed(const char *algo, int enc, unsigned int secs,
+			      struct cipher_speed_template *template,
+			      unsigned int tcount, u8 *keysize)
+{
+	return test_skcipher_speed(algo, enc, secs, template, tcount, keysize,
+				   false);
 }
1027 996
1028static void test_available(void) 997static void test_available(void)
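After this change both speed-test entry points are thin wrappers around test_skcipher_speed(); the only difference is the mask handed to crypto_alloc_skcipher(). Roughly, paraphrasing the `async ? 0 : CRYPTO_ALG_ASYNC` expression above:

	/* test_acipher_speed(): accept any implementation, async ones included */
	tfm = crypto_alloc_skcipher(algo, 0, 0);

	/* test_cipher_speed(): CRYPTO_ALG_ASYNC masked off, i.e. sync-only lookup */
	tfm = crypto_alloc_skcipher(algo, 0, CRYPTO_ALG_ASYNC);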
@@ -1284,6 +1253,22 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
1284 ret += tcrypt_test("crct10dif"); 1253 ret += tcrypt_test("crct10dif");
1285 break; 1254 break;
1286 1255
1256 case 48:
1257 ret += tcrypt_test("sha3-224");
1258 break;
1259
1260 case 49:
1261 ret += tcrypt_test("sha3-256");
1262 break;
1263
1264 case 50:
1265 ret += tcrypt_test("sha3-384");
1266 break;
1267
1268 case 51:
1269 ret += tcrypt_test("sha3-512");
1270 break;
1271
1287 case 100: 1272 case 100:
1288 ret += tcrypt_test("hmac(md5)"); 1273 ret += tcrypt_test("hmac(md5)");
1289 break; 1274 break;
@@ -1328,6 +1313,22 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
1328 ret += tcrypt_test("hmac(crc32)"); 1313 ret += tcrypt_test("hmac(crc32)");
1329 break; 1314 break;
1330 1315
1316 case 111:
1317 ret += tcrypt_test("hmac(sha3-224)");
1318 break;
1319
1320 case 112:
1321 ret += tcrypt_test("hmac(sha3-256)");
1322 break;
1323
1324 case 113:
1325 ret += tcrypt_test("hmac(sha3-384)");
1326 break;
1327
1328 case 114:
1329 ret += tcrypt_test("hmac(sha3-512)");
1330 break;
1331
1331 case 150: 1332 case 150:
1332 ret += tcrypt_test("ansi_cprng"); 1333 ret += tcrypt_test("ansi_cprng");
1333 break; 1334 break;
@@ -1406,6 +1407,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
1406 speed_template_32_48_64); 1407 speed_template_32_48_64);
1407 test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0, 1408 test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
1408 speed_template_32_48_64); 1409 speed_template_32_48_64);
1410 test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
1411 speed_template_16_24_32);
1412 test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
1413 speed_template_16_24_32);
1409 test_cipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0, 1414 test_cipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
1410 speed_template_16_24_32); 1415 speed_template_16_24_32);
1411 test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0, 1416 test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
@@ -1691,6 +1696,22 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
1691 test_hash_speed("poly1305", sec, poly1305_speed_template); 1696 test_hash_speed("poly1305", sec, poly1305_speed_template);
1692 if (mode > 300 && mode < 400) break; 1697 if (mode > 300 && mode < 400) break;
1693 1698
1699 case 322:
1700 test_hash_speed("sha3-224", sec, generic_hash_speed_template);
1701 if (mode > 300 && mode < 400) break;
1702
1703 case 323:
1704 test_hash_speed("sha3-256", sec, generic_hash_speed_template);
1705 if (mode > 300 && mode < 400) break;
1706
1707 case 324:
1708 test_hash_speed("sha3-384", sec, generic_hash_speed_template);
1709 if (mode > 300 && mode < 400) break;
1710
1711 case 325:
1712 test_hash_speed("sha3-512", sec, generic_hash_speed_template);
1713 if (mode > 300 && mode < 400) break;
1714
1694 case 399: 1715 case 399:
1695 break; 1716 break;
1696 1717
@@ -1770,6 +1791,35 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
1770 test_ahash_speed("rmd320", sec, generic_hash_speed_template); 1791 test_ahash_speed("rmd320", sec, generic_hash_speed_template);
1771 if (mode > 400 && mode < 500) break; 1792 if (mode > 400 && mode < 500) break;
1772 1793
1794 case 418:
1795 test_ahash_speed("sha3-224", sec, generic_hash_speed_template);
1796 if (mode > 400 && mode < 500) break;
1797
1798 case 419:
1799 test_ahash_speed("sha3-256", sec, generic_hash_speed_template);
1800 if (mode > 400 && mode < 500) break;
1801
1802 case 420:
1803 test_ahash_speed("sha3-384", sec, generic_hash_speed_template);
1804 if (mode > 400 && mode < 500) break;
1805
1806
1807 case 421:
1808 test_ahash_speed("sha3-512", sec, generic_hash_speed_template);
1809 if (mode > 400 && mode < 500) break;
1810
1811 case 422:
1812 test_mb_ahash_speed("sha1", sec, generic_hash_speed_template);
1813 if (mode > 400 && mode < 500) break;
1814
1815 case 423:
1816 test_mb_ahash_speed("sha256", sec, generic_hash_speed_template);
1817 if (mode > 400 && mode < 500) break;
1818
1819 case 424:
1820 test_mb_ahash_speed("sha512", sec, generic_hash_speed_template);
1821 if (mode > 400 && mode < 500) break;
1822
1773 case 499: 1823 case 499:
1774 break; 1824 break;
1775 1825
@@ -1790,6 +1840,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
1790 speed_template_32_48_64); 1840 speed_template_32_48_64);
1791 test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0, 1841 test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
1792 speed_template_32_48_64); 1842 speed_template_32_48_64);
1843 test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
1844 speed_template_16_24_32);
1845 test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
1846 speed_template_16_24_32);
1793 test_acipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0, 1847 test_acipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
1794 speed_template_16_24_32); 1848 speed_template_16_24_32);
1795 test_acipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0, 1849 test_acipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index c727fb0cb021..5c9d5a5e7b65 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -32,6 +32,7 @@
32#include <crypto/rng.h> 32#include <crypto/rng.h>
33#include <crypto/drbg.h> 33#include <crypto/drbg.h>
34#include <crypto/akcipher.h> 34#include <crypto/akcipher.h>
35#include <crypto/kpp.h>
35 36
36#include "internal.h" 37#include "internal.h"
37 38
@@ -120,6 +121,11 @@ struct akcipher_test_suite {
120 unsigned int count; 121 unsigned int count;
121}; 122};
122 123
124struct kpp_test_suite {
125 struct kpp_testvec *vecs;
126 unsigned int count;
127};
128
123struct alg_test_desc { 129struct alg_test_desc {
124 const char *alg; 130 const char *alg;
125 int (*test)(const struct alg_test_desc *desc, const char *driver, 131 int (*test)(const struct alg_test_desc *desc, const char *driver,
@@ -134,6 +140,7 @@ struct alg_test_desc {
134 struct cprng_test_suite cprng; 140 struct cprng_test_suite cprng;
135 struct drbg_test_suite drbg; 141 struct drbg_test_suite drbg;
136 struct akcipher_test_suite akcipher; 142 struct akcipher_test_suite akcipher;
143 struct kpp_test_suite kpp;
137 } suite; 144 } suite;
138}; 145};
139 146
@@ -1777,8 +1784,135 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
1777 1784
1778} 1785}
1779 1786
1780static int do_test_rsa(struct crypto_akcipher *tfm, 1787static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec,
1781 struct akcipher_testvec *vecs) 1788 const char *alg)
1789{
1790 struct kpp_request *req;
1791 void *input_buf = NULL;
1792 void *output_buf = NULL;
1793 struct tcrypt_result result;
1794 unsigned int out_len_max;
1795 int err = -ENOMEM;
1796 struct scatterlist src, dst;
1797
1798 req = kpp_request_alloc(tfm, GFP_KERNEL);
1799 if (!req)
1800 return err;
1801
1802 init_completion(&result.completion);
1803
1804 err = crypto_kpp_set_secret(tfm, vec->secret, vec->secret_size);
1805 if (err < 0)
1806 goto free_req;
1807
1808 out_len_max = crypto_kpp_maxsize(tfm);
1809 output_buf = kzalloc(out_len_max, GFP_KERNEL);
1810 if (!output_buf) {
1811 err = -ENOMEM;
1812 goto free_req;
1813 }
1814
1815 /* Use appropriate parameter as base */
1816 kpp_request_set_input(req, NULL, 0);
1817 sg_init_one(&dst, output_buf, out_len_max);
1818 kpp_request_set_output(req, &dst, out_len_max);
1819 kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1820 tcrypt_complete, &result);
1821
1822 /* Compute public key */
1823 err = wait_async_op(&result, crypto_kpp_generate_public_key(req));
1824 if (err) {
1825 pr_err("alg: %s: generate public key test failed. err %d\n",
1826 alg, err);
1827 goto free_output;
1828 }
1829 /* Verify calculated public key */
1830 if (memcmp(vec->expected_a_public, sg_virt(req->dst),
1831 vec->expected_a_public_size)) {
1832 pr_err("alg: %s: generate public key test failed. Invalid output\n",
1833 alg);
1834 err = -EINVAL;
1835 goto free_output;
1836 }
1837
1838	/* Calculate the shared secret using the counterpart's (b) public key. */
1839 input_buf = kzalloc(vec->b_public_size, GFP_KERNEL);
1840 if (!input_buf) {
1841 err = -ENOMEM;
1842 goto free_output;
1843 }
1844
1845 memcpy(input_buf, vec->b_public, vec->b_public_size);
1846 sg_init_one(&src, input_buf, vec->b_public_size);
1847 sg_init_one(&dst, output_buf, out_len_max);
1848 kpp_request_set_input(req, &src, vec->b_public_size);
1849 kpp_request_set_output(req, &dst, out_len_max);
1850 kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1851 tcrypt_complete, &result);
1852 err = wait_async_op(&result, crypto_kpp_compute_shared_secret(req));
1853 if (err) {
1854		pr_err("alg: %s: compute shared secret test failed. err %d\n",
1855 alg, err);
1856 goto free_all;
1857 }
1858 /*
1859 * verify shared secret from which the user will derive
1860 * secret key by executing whatever hash it has chosen
1861 */
1862 if (memcmp(vec->expected_ss, sg_virt(req->dst),
1863 vec->expected_ss_size)) {
1864 pr_err("alg: %s: compute shared secret test failed. Invalid output\n",
1865 alg);
1866 err = -EINVAL;
1867 }
1868
1869free_all:
1870 kfree(input_buf);
1871free_output:
1872 kfree(output_buf);
1873free_req:
1874 kpp_request_free(req);
1875 return err;
1876}
1877
1878static int test_kpp(struct crypto_kpp *tfm, const char *alg,
1879 struct kpp_testvec *vecs, unsigned int tcount)
1880{
1881 int ret, i;
1882
1883 for (i = 0; i < tcount; i++) {
1884 ret = do_test_kpp(tfm, vecs++, alg);
1885 if (ret) {
1886 pr_err("alg: %s: test failed on vector %d, err=%d\n",
1887 alg, i + 1, ret);
1888 return ret;
1889 }
1890 }
1891 return 0;
1892}
1893
1894static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
1895 u32 type, u32 mask)
1896{
1897 struct crypto_kpp *tfm;
1898 int err = 0;
1899
1900 tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask);
1901 if (IS_ERR(tfm)) {
1902 pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
1903 driver, PTR_ERR(tfm));
1904 return PTR_ERR(tfm);
1905 }
1906 if (desc->suite.kpp.vecs)
1907 err = test_kpp(tfm, desc->alg, desc->suite.kpp.vecs,
1908 desc->suite.kpp.count);
1909
1910 crypto_free_kpp(tfm);
1911 return err;
1912}
1913
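do_test_kpp() above exercises the new KPP API in the order a normal user would: set the packed secret, generate our public key, then feed in the peer's public key to compute the shared secret. A condensed sketch of that call sequence for a "dh" tfm, assuming a synchronous implementation for brevity (testmgr itself wraps the two operations in wait_async_op() so async drivers are covered too):

/* Condensed DH flow mirroring do_test_kpp(); not a drop-in replacement. */
static int dh_example(void *secret, unsigned int secret_len,
		      void *peer_public, unsigned int peer_len)
{
	struct crypto_kpp *tfm;
	struct kpp_request *req = NULL;
	struct scatterlist src, dst;
	unsigned int out_len;
	void *out = NULL;
	int err;

	tfm = crypto_alloc_kpp("dh", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_kpp_set_secret(tfm, secret, secret_len);
	if (err)
		goto out_tfm;

	err = -ENOMEM;
	req = kpp_request_alloc(tfm, GFP_KERNEL);
	out_len = crypto_kpp_maxsize(tfm);
	out = kzalloc(out_len, GFP_KERNEL);
	if (!req || !out)
		goto out_free;

	/* 1. Our public key: no input, the output receives g^xa mod p. */
	sg_init_one(&dst, out, out_len);
	kpp_request_set_input(req, NULL, 0);
	kpp_request_set_output(req, &dst, out_len);
	err = crypto_kpp_generate_public_key(req);
	if (err)
		goto out_free;

	/* 2. Shared secret: feed in the peer's public key. */
	sg_init_one(&src, peer_public, peer_len);
	sg_init_one(&dst, out, out_len);
	kpp_request_set_input(req, &src, peer_len);
	kpp_request_set_output(req, &dst, out_len);
	err = crypto_kpp_compute_shared_secret(req);

out_free:
	kfree(out);
	kpp_request_free(req);
out_tfm:
	crypto_free_kpp(tfm);
	return err;
}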
1914static int test_akcipher_one(struct crypto_akcipher *tfm,
1915 struct akcipher_testvec *vecs)
1782{ 1916{
1783 char *xbuf[XBUFSIZE]; 1917 char *xbuf[XBUFSIZE];
1784 struct akcipher_request *req; 1918 struct akcipher_request *req;
@@ -1807,6 +1941,7 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
1807 if (err) 1941 if (err)
1808 goto free_req; 1942 goto free_req;
1809 1943
1944 err = -ENOMEM;
1810 out_len_max = crypto_akcipher_maxsize(tfm); 1945 out_len_max = crypto_akcipher_maxsize(tfm);
1811 outbuf_enc = kzalloc(out_len_max, GFP_KERNEL); 1946 outbuf_enc = kzalloc(out_len_max, GFP_KERNEL);
1812 if (!outbuf_enc) 1947 if (!outbuf_enc)
@@ -1829,17 +1964,18 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
1829 /* Run RSA encrypt - c = m^e mod n;*/ 1964 /* Run RSA encrypt - c = m^e mod n;*/
1830 err = wait_async_op(&result, crypto_akcipher_encrypt(req)); 1965 err = wait_async_op(&result, crypto_akcipher_encrypt(req));
1831 if (err) { 1966 if (err) {
1832 pr_err("alg: rsa: encrypt test failed. err %d\n", err); 1967 pr_err("alg: akcipher: encrypt test failed. err %d\n", err);
1833 goto free_all; 1968 goto free_all;
1834 } 1969 }
1835 if (req->dst_len != vecs->c_size) { 1970 if (req->dst_len != vecs->c_size) {
1836 pr_err("alg: rsa: encrypt test failed. Invalid output len\n"); 1971 pr_err("alg: akcipher: encrypt test failed. Invalid output len\n");
1837 err = -EINVAL; 1972 err = -EINVAL;
1838 goto free_all; 1973 goto free_all;
1839 } 1974 }
1840 /* verify that encrypted message is equal to expected */ 1975 /* verify that encrypted message is equal to expected */
1841 if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) { 1976 if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) {
1842 pr_err("alg: rsa: encrypt test failed. Invalid output\n"); 1977 pr_err("alg: akcipher: encrypt test failed. Invalid output\n");
1978 hexdump(outbuf_enc, vecs->c_size);
1843 err = -EINVAL; 1979 err = -EINVAL;
1844 goto free_all; 1980 goto free_all;
1845 } 1981 }
@@ -1867,18 +2003,22 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
1867 /* Run RSA decrypt - m = c^d mod n;*/ 2003 /* Run RSA decrypt - m = c^d mod n;*/
1868 err = wait_async_op(&result, crypto_akcipher_decrypt(req)); 2004 err = wait_async_op(&result, crypto_akcipher_decrypt(req));
1869 if (err) { 2005 if (err) {
1870 pr_err("alg: rsa: decrypt test failed. err %d\n", err); 2006 pr_err("alg: akcipher: decrypt test failed. err %d\n", err);
1871 goto free_all; 2007 goto free_all;
1872 } 2008 }
1873 out_len = req->dst_len; 2009 out_len = req->dst_len;
1874 if (out_len != vecs->m_size) { 2010 if (out_len < vecs->m_size) {
1875 pr_err("alg: rsa: decrypt test failed. Invalid output len\n"); 2011 pr_err("alg: akcipher: decrypt test failed. "
2012 "Invalid output len %u\n", out_len);
1876 err = -EINVAL; 2013 err = -EINVAL;
1877 goto free_all; 2014 goto free_all;
1878 } 2015 }
1879 /* verify that decrypted message is equal to the original msg */ 2016 /* verify that decrypted message is equal to the original msg */
1880 if (memcmp(vecs->m, outbuf_dec, vecs->m_size)) { 2017 if (memchr_inv(outbuf_dec, 0, out_len - vecs->m_size) ||
1881 pr_err("alg: rsa: decrypt test failed. Invalid output\n"); 2018 memcmp(vecs->m, outbuf_dec + out_len - vecs->m_size,
2019 vecs->m_size)) {
2020 pr_err("alg: akcipher: decrypt test failed. Invalid output\n");
2021 hexdump(outbuf_dec, out_len);
1882 err = -EINVAL; 2022 err = -EINVAL;
1883 } 2023 }
1884free_all: 2024free_all:
@@ -1891,28 +2031,22 @@ free_xbuf:
1891 return err; 2031 return err;
1892} 2032}
1893 2033
1894static int test_rsa(struct crypto_akcipher *tfm, struct akcipher_testvec *vecs, 2034static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
1895 unsigned int tcount) 2035 struct akcipher_testvec *vecs, unsigned int tcount)
1896{ 2036{
2037 const char *algo =
2038 crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
1897 int ret, i; 2039 int ret, i;
1898 2040
1899 for (i = 0; i < tcount; i++) { 2041 for (i = 0; i < tcount; i++) {
1900 ret = do_test_rsa(tfm, vecs++); 2042 ret = test_akcipher_one(tfm, vecs++);
1901 if (ret) { 2043 if (!ret)
1902 pr_err("alg: rsa: test failed on vector %d, err=%d\n", 2044 continue;
1903 i + 1, ret);
1904 return ret;
1905 }
1906 }
1907 return 0;
1908}
1909
1910static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
1911 struct akcipher_testvec *vecs, unsigned int tcount)
1912{
1913 if (strncmp(alg, "rsa", 3) == 0)
1914 return test_rsa(tfm, vecs, tcount);
1915 2045
2046 pr_err("alg: akcipher: test %d failed for %s, err=%d\n",
2047 i + 1, algo, ret);
2048 return ret;
2049 }
1916 return 0; 2050 return 0;
1917} 2051}
1918 2052
@@ -2729,6 +2863,16 @@ static const struct alg_test_desc alg_test_descs[] = {
2729 } 2863 }
2730 } 2864 }
2731 }, { 2865 }, {
2866 .alg = "dh",
2867 .test = alg_test_kpp,
2868 .fips_allowed = 1,
2869 .suite = {
2870 .kpp = {
2871 .vecs = dh_tv_template,
2872 .count = DH_TEST_VECTORS
2873 }
2874 }
2875 }, {
2732 .alg = "digest_null", 2876 .alg = "digest_null",
2733 .test = alg_test_null, 2877 .test = alg_test_null,
2734 }, { 2878 }, {
@@ -3157,6 +3301,16 @@ static const struct alg_test_desc alg_test_descs[] = {
3157 } 3301 }
3158 } 3302 }
3159 }, { 3303 }, {
3304 .alg = "ecdh",
3305 .test = alg_test_kpp,
3306 .fips_allowed = 1,
3307 .suite = {
3308 .kpp = {
3309 .vecs = ecdh_tv_template,
3310 .count = ECDH_TEST_VECTORS
3311 }
3312 }
3313 }, {
3160 .alg = "gcm(aes)", 3314 .alg = "gcm(aes)",
3161 .test = alg_test_aead, 3315 .test = alg_test_aead,
3162 .fips_allowed = 1, 3316 .fips_allowed = 1,
@@ -3249,6 +3403,46 @@ static const struct alg_test_desc alg_test_descs[] = {
3249 } 3403 }
3250 } 3404 }
3251 }, { 3405 }, {
3406 .alg = "hmac(sha3-224)",
3407 .test = alg_test_hash,
3408 .fips_allowed = 1,
3409 .suite = {
3410 .hash = {
3411 .vecs = hmac_sha3_224_tv_template,
3412 .count = HMAC_SHA3_224_TEST_VECTORS
3413 }
3414 }
3415 }, {
3416 .alg = "hmac(sha3-256)",
3417 .test = alg_test_hash,
3418 .fips_allowed = 1,
3419 .suite = {
3420 .hash = {
3421 .vecs = hmac_sha3_256_tv_template,
3422 .count = HMAC_SHA3_256_TEST_VECTORS
3423 }
3424 }
3425 }, {
3426 .alg = "hmac(sha3-384)",
3427 .test = alg_test_hash,
3428 .fips_allowed = 1,
3429 .suite = {
3430 .hash = {
3431 .vecs = hmac_sha3_384_tv_template,
3432 .count = HMAC_SHA3_384_TEST_VECTORS
3433 }
3434 }
3435 }, {
3436 .alg = "hmac(sha3-512)",
3437 .test = alg_test_hash,
3438 .fips_allowed = 1,
3439 .suite = {
3440 .hash = {
3441 .vecs = hmac_sha3_512_tv_template,
3442 .count = HMAC_SHA3_512_TEST_VECTORS
3443 }
3444 }
3445 }, {
3252 .alg = "hmac(sha384)", 3446 .alg = "hmac(sha384)",
3253 .test = alg_test_hash, 3447 .test = alg_test_hash,
3254 .fips_allowed = 1, 3448 .fips_allowed = 1,
@@ -3659,6 +3853,46 @@ static const struct alg_test_desc alg_test_descs[] = {
3659 } 3853 }
3660 } 3854 }
3661 }, { 3855 }, {
3856 .alg = "sha3-224",
3857 .test = alg_test_hash,
3858 .fips_allowed = 1,
3859 .suite = {
3860 .hash = {
3861 .vecs = sha3_224_tv_template,
3862 .count = SHA3_224_TEST_VECTORS
3863 }
3864 }
3865 }, {
3866 .alg = "sha3-256",
3867 .test = alg_test_hash,
3868 .fips_allowed = 1,
3869 .suite = {
3870 .hash = {
3871 .vecs = sha3_256_tv_template,
3872 .count = SHA3_256_TEST_VECTORS
3873 }
3874 }
3875 }, {
3876 .alg = "sha3-384",
3877 .test = alg_test_hash,
3878 .fips_allowed = 1,
3879 .suite = {
3880 .hash = {
3881 .vecs = sha3_384_tv_template,
3882 .count = SHA3_384_TEST_VECTORS
3883 }
3884 }
3885 }, {
3886 .alg = "sha3-512",
3887 .test = alg_test_hash,
3888 .fips_allowed = 1,
3889 .suite = {
3890 .hash = {
3891 .vecs = sha3_512_tv_template,
3892 .count = SHA3_512_TEST_VECTORS
3893 }
3894 }
3895 }, {
3662 .alg = "sha384", 3896 .alg = "sha384",
3663 .test = alg_test_hash, 3897 .test = alg_test_hash,
3664 .fips_allowed = 1, 3898 .fips_allowed = 1,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 487ec880e889..acb6bbff781a 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -133,6 +133,17 @@ struct akcipher_testvec {
133 bool public_key_vec; 133 bool public_key_vec;
134}; 134};
135 135
136struct kpp_testvec {
137 unsigned char *secret;
138 unsigned char *b_public;
139 unsigned char *expected_a_public;
140 unsigned char *expected_ss;
141 unsigned short secret_size;
142 unsigned short b_public_size;
143 unsigned short expected_a_public_size;
144 unsigned short expected_ss_size;
145};
146
136static char zeroed_string[48]; 147static char zeroed_string[48];
137 148
138/* 149/*
@@ -141,7 +152,7 @@ static char zeroed_string[48];
141#ifdef CONFIG_CRYPTO_FIPS 152#ifdef CONFIG_CRYPTO_FIPS
142#define RSA_TEST_VECTORS 2 153#define RSA_TEST_VECTORS 2
143#else 154#else
144#define RSA_TEST_VECTORS 4 155#define RSA_TEST_VECTORS 5
145#endif 156#endif
146static struct akcipher_testvec rsa_tv_template[] = { 157static struct akcipher_testvec rsa_tv_template[] = {
147 { 158 {
@@ -327,6 +338,516 @@ static struct akcipher_testvec rsa_tv_template[] = {
327 .m_size = 8, 338 .m_size = 8,
328 .c_size = 256, 339 .c_size = 256,
329 .public_key_vec = true, 340 .public_key_vec = true,
341 }, {
342 .key =
343 "\x30\x82\x09\x29" /* sequence of 2345 bytes */
344 "\x02\x01\x00" /* version integer of 1 byte */
345 "\x02\x82\x02\x01" /* modulus - integer of 513 bytes */
346 "\x00\xC3\x8B\x55\x7B\x73\x4D\xFF\xE9\x9B\xC6\xDC\x67\x3C\xB4\x8E"
347 "\xA0\x86\xED\xF2\xB9\x50\x5C\x54\x5C\xBA\xE4\xA1\xB2\xA7\xAE\x2F"
348 "\x1B\x7D\xF1\xFB\xAC\x79\xC5\xDF\x1A\x00\xC9\xB2\xC1\x61\x25\x33"
349 "\xE6\x9C\xE9\xCF\xD6\x27\xC4\x4E\x44\x30\x44\x5E\x08\xA1\x87\x52"
350 "\xCC\x6B\x97\x70\x8C\xBC\xA5\x06\x31\x0C\xD4\x2F\xD5\x7D\x26\x24"
351 "\xA2\xE2\xAC\x78\xF4\x53\x14\xCE\xF7\x19\x2E\xD7\xF7\xE6\x0C\xB9"
352 "\x56\x7F\x0B\xF1\xB1\xE2\x43\x70\xBD\x86\x1D\xA1\xCC\x2B\x19\x08"
353 "\x76\xEF\x91\xAC\xBF\x20\x24\x0D\x38\xC0\x89\xB8\x9A\x70\xB3\x64"
354 "\xD9\x8F\x80\x41\x10\x5B\x9F\xB1\xCB\x76\x43\x00\x21\x25\x36\xD4"
355 "\x19\xFC\x55\x95\x10\xE4\x26\x74\x98\x2C\xD9\xBD\x0B\x2B\x04\xC2"
356 "\xAC\x82\x38\xB4\xDD\x4C\x04\x7E\x51\x36\x40\x1E\x0B\xC4\x7C\x25"
357 "\xDD\x4B\xB2\xE7\x20\x0A\x57\xF9\xB4\x94\xC3\x08\x33\x22\x6F\x8B"
358 "\x48\xDB\x03\x68\x5A\x5B\xBA\xAE\xF3\xAD\xCF\xC3\x6D\xBA\xF1\x28"
359 "\x67\x7E\x6C\x79\x07\xDE\xFC\xED\xE7\x96\xE3\x6C\xE0\x2C\x87\xF8"
360 "\x02\x01\x28\x38\x43\x21\x53\x84\x69\x75\x78\x15\x7E\xEE\xD2\x1B"
361 "\xB9\x23\x40\xA8\x86\x1E\x38\x83\xB2\x73\x1D\x53\xFB\x9E\x2A\x8A"
362 "\xB2\x75\x35\x01\xC3\xC3\xC4\x94\xE8\x84\x86\x64\x81\xF4\x42\xAA"
363 "\x3C\x0E\xD6\x4F\xBC\x0A\x09\x2D\xE7\x1B\xD4\x10\xA8\x54\xEA\x89"
364 "\x84\x8A\xCB\xF7\x5A\x3C\xCA\x76\x08\x29\x62\xB4\x6A\x22\xDF\x14"
365 "\x95\x71\xFD\xB6\x86\x39\xB8\x8B\xF8\x91\x7F\x38\xAA\x14\xCD\xE5"
366 "\xF5\x1D\xC2\x6D\x53\x69\x52\x84\x7F\xA3\x1A\x5E\x26\x04\x83\x06"
367 "\x73\x52\x56\xCF\x76\x26\xC9\xDD\x75\xD7\xFC\xF4\x69\xD8\x7B\x55"
368 "\xB7\x68\x13\x53\xB9\xE7\x89\xC3\xE8\xD6\x6E\xA7\x6D\xEA\x81\xFD"
369 "\xC4\xB7\x05\x5A\xB7\x41\x0A\x23\x8E\x03\x8A\x1C\xAE\xD3\x1E\xCE"
370 "\xE3\x5E\xFC\x19\x4A\xEE\x61\x9B\x8E\xE5\xE5\xDD\x85\xF9\x41\xEC"
371 "\x14\x53\x92\xF7\xDD\x06\x85\x02\x91\xE3\xEB\x6C\x43\x03\xB1\x36"
372 "\x7B\x89\x5A\xA8\xEB\xFC\xD5\xA8\x35\xDC\x81\xD9\x5C\xBD\xCA\xDC"
373 "\x9B\x98\x0B\x06\x5D\x0C\x5B\xEE\xF3\xD5\xCC\x57\xC9\x71\x2F\x90"
374 "\x3B\x3C\xF0\x8E\x4E\x35\x48\xAE\x63\x74\xA9\xFC\x72\x75\x8E\x34"
375 "\xA8\xF2\x1F\xEA\xDF\x3A\x37\x2D\xE5\x39\x39\xF8\x57\x58\x3C\x04"
376 "\xFE\x87\x06\x98\xBC\x7B\xD3\x21\x36\x60\x25\x54\xA7\x3D\xFA\x91"
377 "\xCC\xA8\x0B\x92\x8E\xB4\xF7\x06\xFF\x1E\x95\xCB\x07\x76\x97\x3B"
378 "\x9D"
379 "\x02\x03\x01\x00\x01" /* public key integer of 3 bytes */
380 "\x02\x82\x02\x00" /* private key integer of 512 bytes */
381 "\x74\xA9\xE0\x6A\x32\xB4\xCA\x85\xD9\x86\x9F\x60\x88\x7B\x40\xCC"
382 "\xCD\x33\x91\xA8\xB6\x25\x1F\xBF\xE3\x51\x1C\x97\xB6\x2A\xD9\xB8"
383 "\x11\x40\x19\xE3\x21\x13\xC8\xB3\x7E\xDC\xD7\x65\x40\x4C\x2D\xD6"
384 "\xDC\xAF\x32\x6C\x96\x75\x2C\x2C\xCA\x8F\x3F\x7A\xEE\xC4\x09\xC6"
385 "\x24\x3A\xC9\xCF\x6D\x8D\x17\x50\x94\x52\xD3\xE7\x0F\x2F\x7E\x94"
386 "\x1F\xA0\xBE\xD9\x25\xE8\x38\x42\x7C\x27\xD2\x79\xF8\x2A\x87\x38"
387 "\xEF\xBB\x74\x8B\xA8\x6E\x8C\x08\xC6\xC7\x4F\x0C\xBC\x79\xC6\xEF"
388 "\x0E\xA7\x5E\xE4\xF8\x8C\x09\xC7\x5E\x37\xCC\x87\x77\xCD\xCF\xD1"
389 "\x6D\x28\x1B\xA9\x62\xC0\xB8\x16\xA7\x8B\xF9\xBB\xCC\xB4\x15\x7F"
390 "\x1B\x69\x03\xF2\x7B\xEB\xE5\x8C\x14\xD6\x23\x4F\x52\x6F\x18\xA6"
391 "\x4B\x5B\x01\xAD\x35\xF9\x48\x53\xB3\x86\x35\x66\xD7\xE7\x29\xC0"
392 "\x09\xB5\xC6\xE6\xFA\xC4\xDA\x19\xBE\xD7\x4D\x41\x14\xBE\x6F\xDF"
393 "\x1B\xAB\xC0\xCA\x88\x07\xAC\xF1\x7D\x35\x83\x67\x28\x2D\x50\xE9"
394 "\xCE\x27\x71\x5E\x1C\xCF\xD2\x30\x65\x79\x72\x2F\x9C\xE1\xD2\x39"
395 "\x7F\xEF\x3B\x01\xF2\x14\x1D\xDF\xBD\x51\xD3\xA1\x53\x62\xCF\x5F"
396 "\x79\x84\xCE\x06\x96\x69\x29\x49\x82\x1C\x71\x4A\xA1\x66\xC8\x2F"
397 "\xFD\x7B\x96\x7B\xFC\xC4\x26\x58\xC4\xFC\x7C\xAF\xB5\xE8\x95\x83"
398 "\x87\xCB\x46\xDE\x97\xA7\xB3\xA2\x54\x5B\xD7\xAF\xAB\xEB\xC8\xF3"
399 "\x55\x9D\x48\x2B\x30\x9C\xDC\x26\x4B\xC2\x89\x45\x13\xB2\x01\x9A"
400 "\xA4\x65\xC3\xEC\x24\x2D\x26\x97\xEB\x80\x8A\x9D\x03\xBC\x59\x66"
401 "\x9E\xE2\xBB\xBB\x63\x19\x64\x93\x11\x7B\x25\x65\x30\xCD\x5B\x4B"
402 "\x2C\xFF\xDC\x2D\x30\x87\x1F\x3C\x88\x07\xD0\xFC\x48\xCC\x05\x8A"
403 "\xA2\xC8\x39\x3E\xD5\x51\xBC\x0A\xBE\x6D\xA8\xA0\xF6\x88\x06\x79"
404 "\x13\xFF\x1B\x45\xDA\x54\xC9\x24\x25\x8A\x75\x0A\x26\xD1\x69\x81"
405 "\x14\x14\xD1\x79\x7D\x8E\x76\xF2\xE0\xEB\xDD\x0F\xDE\xC2\xEC\x80"
406 "\xD7\xDC\x16\x99\x92\xBE\xCB\x40\x0C\xCE\x7C\x3B\x46\xA2\x5B\x5D"
407 "\x0C\x45\xEB\xE1\x00\xDE\x72\x50\xB1\xA6\x0B\x76\xC5\x8D\xFC\x82"
408 "\x38\x6D\x99\x14\x1D\x1A\x4A\xD3\x7C\x53\xB8\x12\x46\xA2\x30\x38"
409 "\x82\xF4\x96\x6E\x8C\xCE\x47\x0D\xAF\x0A\x3B\x45\xB7\x43\x95\x43"
410 "\x9E\x02\x2C\x44\x07\x6D\x1F\x3C\x66\x89\x09\xB6\x1F\x06\x30\xCC"
411 "\xAD\xCE\x7D\x9A\xDE\x3E\xFB\x6C\xE4\x58\x43\xD2\x4F\xA5\x9E\x5E"
412 "\xA7\x7B\xAE\x3A\xF6\x7E\xD9\xDB\xD3\xF5\xC5\x41\xAF\xE6\x9C\x91"
413 "\x02\x82\x01\x01" /* prime1 - integer of 257 bytes */
414 "\x00\xE0\xA6\x6C\xF0\xA2\xF8\x81\x85\x36\x43\xD0\x13\x0B\x33\x8B"
415 "\x8F\x78\x3D\xAC\xC7\x5E\x46\x6A\x7F\x05\xAE\x3E\x26\x0A\xA6\xD0"
416 "\x51\xF3\xC8\x61\xF5\x77\x22\x48\x10\x87\x4C\xD5\xA4\xD5\xAE\x2D"
417 "\x4E\x7A\xFE\x1C\x31\xE7\x6B\xFF\xA4\x69\x20\xF9\x2A\x0B\x99\xBE"
418 "\x7C\x32\x68\xAD\xB0\xC6\x94\x81\x41\x75\xDC\x06\x78\x0A\xB4\xCF"
419 "\xCD\x1B\x2D\x31\xE4\x7B\xEA\xA8\x35\x99\x75\x57\xC6\x0E\xF6\x78"
420 "\x4F\xA0\x92\x4A\x00\x1B\xE7\x96\xF2\x5B\xFD\x2C\x0A\x0A\x13\x81"
421 "\xAF\xCB\x59\x87\x31\xD9\x83\x65\xF2\x22\x48\xD0\x03\x67\x39\xF6"
422 "\xFF\xA8\x36\x07\x3A\x68\xE3\x7B\xA9\x64\xFD\x9C\xF7\xB1\x3D\xBF"
423 "\x26\x5C\xCC\x7A\xFC\xA2\x8F\x51\xD1\xE1\xE2\x3C\xEC\x06\x75\x7C"
424 "\x34\xF9\xA9\x33\x70\x11\xAD\x5A\xDC\x5F\xCF\x50\xF6\x23\x2F\x39"
425 "\xAC\x92\x48\x53\x4D\x01\x96\x3C\xD8\xDC\x1F\x23\x23\x78\x80\x34"
426 "\x54\x14\x76\x8B\xB6\xBB\xFB\x88\x78\x31\x59\x28\xD2\xB1\x75\x17"
427 "\x88\x04\x4A\x78\x62\x18\x2E\xF5\xFB\x9B\xEF\x15\xD8\x16\x47\xC6"
428 "\x42\xB1\x02\xDA\x9E\xE3\x84\x90\xB4\x2D\xC3\xCE\x13\xC9\x12\x7D"
429 "\x3E\xCD\x39\x39\xC9\xAD\xA1\x1A\xE6\xD5\xAD\x5A\x09\x4D\x1B\x0C"
430 "\xAB"
431 "\x02\x82\x01\x01" /* prime 2 - integer of 257 bytes */
432 "\x00\xDE\xD5\x1B\xF6\xCD\x83\xB1\xC6\x47\x7E\xB9\xC0\x6B\xA9\xB8"
433 "\x02\xF3\xAE\x40\x5D\xFC\xD3\xE5\x4E\xF1\xE3\x39\x04\x52\x84\x89"
434 "\x40\x37\xBB\xC2\xCD\x7F\x71\x77\x17\xDF\x6A\x4C\x31\x24\x7F\xB9"
435 "\x7E\x7F\xC8\x43\x4A\x3C\xEB\x8D\x1B\x7F\x21\x51\x67\x45\x8F\xA0"
436 "\x36\x29\x3A\x18\x45\xA5\x32\xEC\x74\x88\x3C\x98\x5D\x67\x3B\xD7"
437 "\x51\x1F\xE9\xAE\x09\x01\xDE\xDE\x7C\xFB\x60\xD1\xA5\x6C\xE9\x6A"
438 "\x93\x04\x02\x3A\xBB\x67\x02\xB9\xFD\x23\xF0\x02\x2B\x49\x85\xC9"
439 "\x5B\xE7\x4B\xDF\xA3\xF4\xEE\x59\x4C\x45\xEF\x8B\xC1\x6B\xDE\xDE"
440 "\xBC\x1A\xFC\xD2\x76\x3F\x33\x74\xA9\x8E\xA3\x7E\x0C\xC6\xCE\x70"
441 "\xA1\x5B\xA6\x77\xEA\x76\xEB\x18\xCE\xB9\xD7\x78\x8D\xAE\x06\xBB"
442 "\xD3\x1F\x16\x0D\x05\xAB\x4F\xC6\x52\xC8\x6B\x36\x51\x7D\x1D\x27"
443 "\xAF\x88\x9A\x6F\xCC\x25\x2E\x74\x06\x72\xCE\x9E\xDB\xE0\x9D\x30"
444 "\xEF\x55\xA5\x58\x21\xA7\x42\x12\x2C\x2C\x23\x87\xC1\x0F\xE8\x51"
445 "\xDA\x53\xDA\xFC\x05\x36\xDF\x08\x0E\x08\x36\xBE\x5C\x86\x9E\xCA"
446 "\x68\x90\x33\x12\x0B\x14\x82\xAB\x90\x1A\xD4\x49\x32\x9C\xBD\xAA"
447 "\xAB\x4E\x38\xF1\xEE\xED\x3D\x3F\xE8\xBD\x48\x56\xA6\x64\xEE\xC8"
448 "\xD7"
449 "\x02\x82\x01\x01" /* exponent 1 - integer of 257 bytes */
450 "\x00\x96\x5E\x6F\x8F\x06\xD6\xE6\x03\x1F\x96\x76\x81\x38\xBF\x30"
451 "\xCC\x40\x84\xAF\xD0\xE7\x06\xA5\x24\x0E\xCE\x59\xA5\x26\xFE\x0F"
452 "\x74\xBB\x83\xC6\x26\x02\xAF\x3C\xA3\x6B\x9C\xFF\x68\x0C\xEB\x40"
453 "\x42\x46\xCB\x2E\x5E\x2C\xF4\x3A\x32\x77\x77\xED\xAF\xBA\x02\x17"
454 "\xE1\x93\xF0\x43\x4A\x8F\x31\x39\xEF\x72\x0F\x6B\x79\x10\x59\x84"
455 "\xBA\x5A\x55\x7F\x0E\xDB\xEE\xEE\xD6\xA9\xB8\x44\x9F\x3A\xC6\xB9"
456 "\x33\x3B\x5C\x90\x11\xD0\x9B\xCC\x8A\xBF\x0E\x10\x5B\x4B\xF1\x50"
457 "\x9E\x35\xB3\xE0\x6D\x7A\x95\x9C\x38\x5D\xC0\x75\x13\xC2\x15\xA7"
458 "\x81\xEA\xBA\xF7\x4D\x9E\x85\x9D\xF1\x7D\xBA\xD0\x45\x6F\x2A\xD0"
459 "\x76\xC2\x28\xD0\xAD\xA7\xB5\xDC\xE3\x6A\x99\xFF\x83\x50\xB3\x75"
460 "\x07\x14\x91\xAF\xEF\x74\xB5\x9F\x9A\xE0\xBA\xA9\x0B\x87\xF3\x85"
461 "\x5C\x40\xB2\x0E\xA7\xFD\xC6\xED\x45\x8E\xD9\x7C\xB0\xB2\x68\xC6"
462 "\x1D\xFD\x70\x78\x06\x41\x7F\x95\x12\x36\x9D\xE2\x58\x5D\x15\xEE"
463 "\x41\x49\xF5\xFA\xEC\x56\x19\xA0\xE6\xE0\xB2\x40\xE1\xD9\xD0\x03"
464 "\x22\x02\xCF\xD1\x3C\x07\x38\x65\x8F\x65\x0E\xAA\x32\xCE\x25\x05"
465 "\x16\x73\x51\xB9\x9F\x88\x0B\xCD\x30\xF3\x97\xCC\x2B\x6B\xA4\x0E"
466 "\x6F"
467 "\x02\x82\x01\x00" /* exponent 2 - integer of 256 bytes */
468 "\x2A\x5F\x3F\xB8\x08\x90\x58\x47\xA9\xE4\xB1\x11\xA3\xE7\x5B\xF4"
469 "\x43\xBE\x08\xC3\x56\x86\x3C\x7E\x6C\x84\x96\x9C\xF9\xCB\xF6\x05"
470 "\x5E\x13\xB8\x11\x37\x80\xAD\xF2\xBE\x2B\x0A\x5D\xF5\xE0\xCB\xB7"
471 "\x00\x39\x66\x82\x41\x5F\x51\x2F\xBF\x56\xE8\x91\xC8\xAA\x6C\xFE"
472 "\x9F\x8C\x4A\x7D\x43\xD2\x91\x1F\xFF\x9F\xF6\x21\x1C\xB6\x46\x55"
473 "\x48\xCA\x38\xAB\xC1\xCD\x4D\x65\x5A\xAF\xA8\x6D\xDA\x6D\xF0\x34"
474 "\x10\x79\x14\x0D\xFA\xA2\x8C\x17\x54\xB4\x18\xD5\x7E\x5F\x90\x50"
475 "\x87\x84\xE7\xFB\xD7\x61\x53\x5D\xAB\x96\xC7\x6E\x7A\x42\xA0\xFC"
476 "\x07\xED\xB7\x5F\x80\xD9\x19\xFF\xFB\xFD\x9E\xC4\x73\x31\x62\x3D"
477 "\x6C\x9E\x15\x03\x62\xA5\x85\xCC\x19\x8E\x9D\x7F\xE3\x6D\xA8\x5D"
478 "\x96\xF5\xAC\x78\x3D\x81\x27\xE7\x29\xF1\x29\x1D\x09\xBB\x77\x86"
479 "\x6B\x65\x62\x88\xE1\x31\x1A\x22\xF7\xC5\xCE\x73\x65\x1C\xBE\xE7"
480 "\x63\xD3\xD3\x14\x63\x27\xAF\x28\xF3\x23\xB6\x76\xC1\xBD\x9D\x82"
481 "\xF4\x9B\x19\x7D\x2C\x57\xF0\xC2\x2A\x51\xAE\x95\x0D\x8C\x38\x54"
482 "\xF5\xC6\xA0\x51\xB7\x0E\xB9\xEC\xE7\x0D\x22\xF6\x1A\xD3\xFE\x16"
483 "\x21\x03\xB7\x0D\x85\xD3\x35\xC9\xDD\xE4\x59\x85\xBE\x7F\xA1\x75"
484 "\x02\x82\x01\x01" /* coefficient - integer of 257 bytes */
485 "\x00\xB9\x48\xD2\x54\x2F\x19\x54\x64\xAE\x62\x80\x61\x89\x80\xB4"
486 "\x48\x0B\x8D\x7E\x1B\x0F\x50\x08\x82\x3F\xED\x75\x84\xB7\x13\xE4"
487 "\xF8\x8D\xA8\xBB\x54\x21\x4C\x5A\x54\x07\x16\x4B\xB4\xA4\x9E\x30"
488 "\xBF\x7A\x30\x1B\x39\x60\xA3\x21\x53\xFB\xB0\xDC\x0F\x7C\x2C\xFB"
489 "\xAA\x95\x7D\x51\x39\x28\x33\x1F\x25\x31\x53\xF5\xD2\x64\x2B\xF2"
490 "\x1E\xB3\xC0\x6A\x0B\xC9\xA4\x42\x64\x5C\xFB\x15\xA3\xE8\x4C\x3A"
491 "\x9C\x3C\xBE\xA3\x39\x83\x23\xE3\x6D\x18\xCC\xC2\xDC\x63\x8D\xBA"
492 "\x98\xE0\xE0\x31\x4A\x2B\x37\x9C\x4D\x6B\xF3\x9F\x51\xE4\x43\x5C"
493 "\x83\x5F\xBF\x5C\xFE\x92\x45\x01\xAF\xF5\xC2\xF4\xB7\x56\x93\xA5"
494 "\xF4\xAA\x67\x3C\x48\x37\xBD\x9A\x3C\xFE\xA5\x9A\xB0\xD1\x6B\x85"
495 "\xDD\x81\xD4\xFA\xAD\x31\x83\xA8\x22\x9B\xFD\xB4\x61\xDC\x7A\x51"
496 "\x59\x62\x10\x1B\x7E\x44\xA3\xFE\x90\x51\x5A\x3E\x02\x87\xAD\xFA"
497 "\xDD\x0B\x1F\x3D\x35\xAF\xEE\x13\x85\x51\xA7\x42\xC0\xEE\x9E\x20"
498 "\xE9\xD0\x29\xB2\xE4\x21\xE4\x6D\x62\xB9\xF4\x48\x4A\xD8\x46\x8E"
499 "\x61\xA6\x2C\x5D\xDF\x8F\x97\x2B\x3A\x75\x1D\x83\x17\x6F\xC6\xB0"
500 "\xDE\xFC\x14\x25\x06\x5A\x60\xBB\xB8\x21\x89\xD1\xEF\x57\xF1\x71"
501 "\x3D",
502 .m = "\x54\x85\x9b\x34\x2c\x49\xea\x2a",
503 .c =
504 "\x5c\xce\x9c\xd7\x9a\x9e\xa1\xfe\x7a\x82\x3c\x68\x27\x98\xe3\x5d"
505 "\xd5\xd7\x07\x29\xf5\xfb\xc3\x1a\x7f\x63\x1e\x62\x31\x3b\x19\x87"
506 "\x79\x4f\xec\x7b\xf3\xcb\xea\x9b\x95\x52\x3a\x40\xe5\x87\x7b\x72"
507 "\xd1\x72\xc9\xfb\x54\x63\xd8\xc9\xd7\x2c\xfc\x7b\xc3\x14\x1e\xbc"
508 "\x18\xb4\x34\xa1\xbf\x14\xb1\x37\x31\x6e\xf0\x1b\x35\x19\x54\x07"
509 "\xf7\x99\xec\x3e\x63\xe2\xcd\x61\x28\x65\xc3\xcd\xb1\x38\x36\xa5"
510 "\xb2\xd7\xb0\xdc\x1f\xf5\xef\x19\xc7\x53\x32\x2d\x1c\x26\xda\xe4"
511 "\x0d\xd6\x90\x7e\x28\xd8\xdc\xe4\x61\x05\xd2\x25\x90\x01\xd3\x96"
512 "\x6d\xa6\xcf\x58\x20\xbb\x03\xf4\x01\xbc\x79\xb9\x18\xd8\xb8\xba"
513 "\xbd\x93\xfc\xf2\x62\x5d\x8c\x66\x1e\x0e\x84\x59\x93\xdd\xe2\x93"
514 "\xa2\x62\x7d\x08\x82\x7a\xdd\xfc\xb8\xbc\xc5\x4f\x9c\x4e\xbf\xb4"
515 "\xfc\xf4\xc5\x01\xe8\x00\x70\x4d\x28\x26\xcc\x2e\xfe\x0e\x58\x41"
516 "\x8b\xec\xaf\x7c\x4b\x54\xd0\xa0\x64\xf9\x32\xf4\x2e\x47\x65\x0a"
517 "\x67\x88\x39\x3a\xdb\xb2\xdb\x7b\xb5\xf6\x17\xa8\xd9\xc6\x5e\x28"
518 "\x13\x82\x8a\x99\xdb\x60\x08\xa5\x23\x37\xfa\x88\x90\x31\xc8\x9d"
519 "\x8f\xec\xfb\x85\x9f\xb1\xce\xa6\x24\x50\x46\x44\x47\xcb\x65\xd1"
520 "\xdf\xc0\xb1\x6c\x90\x1f\x99\x8e\x4d\xd5\x9e\x31\x07\x66\x87\xdf"
521 "\x01\xaa\x56\x3c\x71\xe0\x2b\x6f\x67\x3b\x23\xed\xc2\xbd\x03\x30"
522 "\x79\x76\x02\x10\x10\x98\x85\x8a\xff\xfd\x0b\xda\xa5\xd9\x32\x48"
523 "\x02\xa0\x0b\xb9\x2a\x8a\x18\xca\xc6\x8f\x3f\xbb\x16\xb2\xaa\x98"
524 "\x27\xe3\x60\x43\xed\x15\x70\xd4\x57\x15\xfe\x19\xd4\x9b\x13\x78"
525 "\x8a\xf7\x21\xf1\xa2\xa2\x2d\xb3\x09\xcf\x44\x91\x6e\x08\x3a\x30"
526 "\x81\x3e\x90\x93\x8a\x67\x33\x00\x59\x54\x9a\x25\xd3\x49\x8e\x9f"
527 "\xc1\x4b\xe5\x86\xf3\x50\x4c\xbc\xc5\xd3\xf5\x3a\x54\xe1\x36\x3f"
528 "\xe2\x5a\xb4\x37\xc0\xeb\x70\x35\xec\xf6\xb7\xe8\x44\x3b\x7b\xf3"
529 "\xf1\xf2\x1e\xdb\x60\x7d\xd5\xbe\xf0\x71\x34\x90\x4c\xcb\xd4\x35"
530 "\x51\xc7\xdd\xd8\xc9\x81\xf5\x5d\x57\x46\x2c\xb1\x7b\x9b\xaa\xcb"
531 "\xd1\x22\x25\x49\x44\xa3\xd4\x6b\x29\x7b\xd8\xb2\x07\x93\xbf\x3d"
532 "\x52\x49\x84\x79\xef\xb8\xe5\xc4\xad\xca\xa8\xc6\xf6\xa6\x76\x70"
533 "\x5b\x0b\xe5\x83\xc6\x0e\xef\x55\xf2\xe7\xff\x04\xea\xe6\x13\xbe"
534 "\x40\xe1\x40\x45\x48\x66\x75\x31\xae\x35\x64\x91\x11\x6f\xda\xee"
535 "\x26\x86\x45\x6f\x0b\xd5\x9f\x03\xb1\x65\x5b\xdb\xa4\xe4\xf9\x45",
536 .key_len = 2349,
537 .m_size = 8,
538 .c_size = 512,
539 }
540};
541
542#define DH_TEST_VECTORS 2
543
544struct kpp_testvec dh_tv_template[] = {
545 {
546 .secret =
547#ifdef __LITTLE_ENDIAN
548 "\x01\x00" /* type */
549 "\x11\x02" /* len */
550 "\x00\x01\x00\x00" /* key_size */
551 "\x00\x01\x00\x00" /* p_size */
552 "\x01\x00\x00\x00" /* g_size */
553#else
554 "\x00\x01" /* type */
555 "\x02\x11" /* len */
556 "\x00\x00\x01\x00" /* key_size */
557 "\x00\x00\x01\x00" /* p_size */
558 "\x00\x00\x00\x01" /* g_size */
559#endif
560 /* xa */
561 "\x44\xc1\x48\x36\xa7\x2b\x6f\x4e\x43\x03\x68\xad\x31\x00\xda\xf3"
562 "\x2a\x01\xa8\x32\x63\x5f\x89\x32\x1f\xdf\x4c\xa1\x6a\xbc\x10\x15"
563 "\x90\x35\xc9\x26\x41\xdf\x7b\xaa\x56\x56\x3d\x85\x44\xb5\xc0\x8e"
564 "\x37\x83\x06\x50\xb3\x5f\x0e\x28\x2c\xd5\x46\x15\xe3\xda\x7d\x74"
565 "\x87\x13\x91\x4f\xd4\x2d\xf6\xc7\x5e\x14\x2c\x11\xc2\x26\xb4\x3a"
566 "\xe3\xb2\x36\x20\x11\x3b\x22\xf2\x06\x65\x66\xe2\x57\x58\xf8\x22"
567 "\x1a\x94\xbd\x2b\x0e\x8c\x55\xad\x61\x23\x45\x2b\x19\x1e\x63\x3a"
568 "\x13\x61\xe3\xa0\x79\x70\x3e\x6d\x98\x32\xbc\x7f\x82\xc3\x11\xd8"
569 "\xeb\x53\xb5\xfc\xb5\xd5\x3c\x4a\xea\x92\x3e\x01\xce\x15\x65\xd4"
570 "\xaa\x85\xc1\x11\x90\x83\x31\x6e\xfe\xe7\x7f\x7d\xed\xab\xf9\x29"
571 "\xf8\xc7\xf1\x68\xc6\xb7\xe4\x1f\x2f\x28\xa0\xc9\x1a\x50\x64\x29"
572 "\x4b\x01\x6d\x1a\xda\x46\x63\x21\x07\x40\x8c\x8e\x4c\x6f\xb5\xe5"
573 "\x12\xf3\xc2\x1b\x48\x27\x5e\x27\x01\xb1\xaa\xed\x68\x9b\x83\x18"
574 "\x8f\xb1\xeb\x1f\x04\xd1\x3c\x79\xed\x4b\xf7\x0a\x33\xdc\xe0\xc6"
575 "\xd8\x02\x51\x59\x00\x74\x30\x07\x4c\x2d\xac\xe4\x13\xf1\x80\xf0"
576 "\xce\xfa\xff\xa9\xce\x29\x46\xdd\x9d\xad\xd1\xc3\xc6\x58\x1a\x63"
577 /* p */
578 "\xb9\x36\x3a\xf1\x82\x1f\x60\xd3\x22\x47\xb8\xbc\x2d\x22\x6b\x81"
579 "\x7f\xe8\x20\x06\x09\x23\x73\x49\x9a\x59\x8b\x35\x25\xf8\x31\xbc"
580 "\x7d\xa8\x1c\x9d\x56\x0d\x1a\xf7\x4b\x4f\x96\xa4\x35\x77\x6a\x89"
581 "\xab\x42\x00\x49\x21\x71\xed\x28\x16\x1d\x87\x5a\x10\xa7\x9c\x64"
582 "\x94\xd4\x87\x3d\x28\xef\x44\xfe\x4b\xe2\xb4\x15\x8c\x82\xa6\xf3"
583 "\x50\x5f\xa8\xe8\xa2\x60\xe7\x00\x86\x78\x05\xd4\x78\x19\xa1\x98"
584 "\x62\x4e\x4a\x00\x78\x56\x96\xe6\xcf\xd7\x10\x1b\x74\x5d\xd0\x26"
585 "\x61\xdb\x6b\x32\x09\x51\xd8\xa5\xfd\x54\x16\x71\x01\xb3\x39\xe6"
586 "\x4e\x69\xb1\xd7\x06\x8f\xd6\x1e\xdc\x72\x25\x26\x74\xc8\x41\x06"
587 "\x5c\xd1\x26\x5c\xb0\x2f\xf9\x59\x13\xc1\x2a\x0f\x78\xea\x7b\xf7"
588 "\xbd\x59\xa0\x90\x1d\xfc\x33\x5b\x4c\xbf\x05\x9c\x3a\x3f\x69\xa2"
589 "\x45\x61\x4e\x10\x6a\xb3\x17\xc5\x68\x30\xfb\x07\x5f\x34\xc6\xfb"
590 "\x73\x07\x3c\x70\xf6\xae\xe7\x72\x84\xc3\x18\x81\x8f\xe8\x11\x1f"
591 "\x3d\x83\x83\x01\x2a\x14\x73\xbf\x32\x32\x2e\xc9\x4d\xdb\x2a\xca"
592 "\xee\x71\xf9\xda\xad\xe8\x82\x0b\x4d\x0c\x1f\xb6\x1d\xef\x00\x67"
593 "\x74\x3d\x95\xe0\xb7\xc4\x30\x8a\x24\x87\x12\x47\x27\x70\x0d\x73"
594 /* g */
595 "\x02",
596 .b_public =
597 "\x2a\x67\x5c\xfd\x63\x5d\xc0\x97\x0a\x8b\xa2\x1f\xf8\x8a\xcb\x54"
598 "\xca\x2f\xd3\x49\x3f\x01\x8e\x87\xfe\xcc\x94\xa0\x3e\xd4\x26\x79"
599 "\x9a\x94\x3c\x11\x81\x58\x5c\x60\x3d\xf5\x98\x90\x89\x64\x62\x1f"
600 "\xbd\x05\x6d\x2b\xcd\x84\x40\x9b\x4a\x1f\xe0\x19\xf1\xca\x20\xb3"
601 "\x4e\xa0\x4f\x15\xcc\xa5\xfe\xa5\xb4\xf5\x0b\x18\x7a\x5a\x37\xaa"
602 "\x58\x00\x19\x7f\xe2\xa3\xd9\x1c\x44\x57\xcc\xde\x2e\xc1\x38\xea"
603 "\xeb\xe3\x90\x40\xc4\x6c\xf7\xcd\xe9\x22\x50\x71\xf5\x7c\xdb\x37"
604 "\x0e\x80\xc3\xed\x7e\xb1\x2b\x2f\xbe\x71\xa6\x11\xa5\x9d\xf5\x39"
605 "\xf1\xa2\xe5\x85\xbc\x25\x91\x4e\x84\x8d\x26\x9f\x4f\xe6\x0f\xa6"
606 "\x2b\x6b\xf9\x0d\xaf\x6f\xbb\xfa\x2d\x79\x15\x31\x57\xae\x19\x60"
607 "\x22\x0a\xf5\xfd\x98\x0e\xbf\x5d\x49\x75\x58\x37\xbc\x7f\xf5\x21"
608 "\x56\x1e\xd5\xb3\x50\x0b\xca\x96\xf3\xd1\x3f\xb3\x70\xa8\x6d\x63"
609 "\x48\xfb\x3d\xd7\x29\x91\x45\xb5\x48\xcd\xb6\x78\x30\xf2\x3f\x1e"
610 "\xd6\x22\xd6\x35\x9b\xf9\x1f\x85\xae\xab\x4b\xd7\xe0\xc7\x86\x67"
611 "\x3f\x05\x7f\xa6\x0d\x2f\x0d\xbf\x53\x5f\x4d\x2c\x6d\x5e\x57\x40"
612 "\x30\x3a\x23\x98\xf9\xb4\x32\xf5\x32\x83\xdd\x0b\xae\x33\x97\x2f",
613 .expected_a_public =
614 "\x5c\x24\xdf\xeb\x5b\x4b\xf8\xc5\xef\x39\x48\x82\xe0\x1e\x62\xee"
615 "\x8a\xae\xdf\x93\x6c\x2b\x16\x95\x92\x16\x3f\x16\x7b\x75\x03\x85"
616 "\xd9\xf1\x69\xc2\x14\x87\x45\xfc\xa4\x19\xf6\xf0\xa4\xf3\xec\xd4"
617 "\x6c\x5c\x03\x3b\x94\xc2\x2f\x92\xe4\xce\xb3\xe4\x72\xe8\x17\xe6"
618 "\x23\x7e\x00\x01\x09\x59\x13\xbf\xc1\x2f\x99\xa9\x07\xaa\x02\x23"
619 "\x4a\xca\x39\x4f\xbc\xec\x0f\x27\x4f\x19\x93\x6c\xb9\x30\x52\xfd"
620 "\x2b\x9d\x86\xf1\x06\x1e\xb6\x56\x27\x4a\xc9\x8a\xa7\x8a\x48\x5e"
621 "\xb5\x60\xcb\xdf\xff\x03\x26\x10\xbf\x90\x8f\x46\x60\xeb\x9b\x9a"
622 "\xd6\x6f\x44\x91\x03\x92\x18\x2c\x96\x5e\x40\x19\xfb\xf4\x4f\x3a"
623 "\x02\x7b\xaf\xcc\x22\x20\x79\xb9\xf8\x9f\x8f\x85\x6b\xec\x44\xbb"
624 "\xe6\xa8\x8e\xb1\xe8\x2c\xee\x64\xee\xf8\xbd\x00\xf3\xe2\x2b\x93"
625 "\xcd\xe7\xc4\xdf\xc9\x19\x46\xfe\xb6\x07\x73\xc1\x8a\x64\x79\x26"
626 "\xe7\x30\xad\x2a\xdf\xe6\x8f\x59\xf5\x81\xbf\x4a\x29\x91\xe7\xb7"
627 "\xcf\x48\x13\x27\x75\x79\x40\xd9\xd6\x32\x52\x4e\x6a\x86\xae\x6f"
628 "\xc2\xbf\xec\x1f\xc2\x69\xb2\xb6\x59\xe5\xa5\x17\xa4\x77\xb7\x62"
629 "\x46\xde\xe8\xd2\x89\x78\x9a\xef\xa3\xb5\x8f\x26\xec\x80\xda\x39",
630 .expected_ss =
631 "\x8f\xf3\xac\xa2\xea\x22\x11\x5c\x45\x65\x1a\x77\x75\x2e\xcf\x46"
632 "\x23\x14\x1e\x67\x53\x4d\x35\xb0\x38\x1d\x4e\xb9\x41\x9a\x21\x24"
633 "\x6e\x9f\x40\xfe\x90\x51\xb1\x06\xa4\x7b\x87\x17\x2f\xe7\x5e\x22"
634 "\xf0\x7b\x54\x84\x0a\xac\x0a\x90\xd2\xd7\xe8\x7f\xe7\xe3\x30\x75"
635 "\x01\x1f\x24\x75\x56\xbe\xcc\x8d\x1e\x68\x0c\x41\x72\xd3\xfa\xbb"
636 "\xe5\x9c\x60\xc7\x28\x77\x0c\xbe\x89\xab\x08\xd6\x21\xe7\x2e\x1a"
637 "\x58\x7a\xca\x4f\x22\xf3\x2b\x30\xfd\xf4\x98\xc1\xa3\xf8\xf6\xcc"
638 "\xa9\xe4\xdb\x5b\xee\xd5\x5c\x6f\x62\x4c\xd1\x1a\x02\x2a\x23\xe4"
639 "\xb5\x57\xf3\xf9\xec\x04\x83\x54\xfe\x08\x5e\x35\xac\xfb\xa8\x09"
640 "\x82\x32\x60\x11\xb2\x16\x62\x6b\xdf\xda\xde\x9c\xcb\x63\x44\x6c"
641 "\x59\x26\x6a\x8f\xb0\x24\xcb\xa6\x72\x48\x1e\xeb\xe0\xe1\x09\x44"
642 "\xdd\xee\x66\x6d\x84\xcf\xa5\xc1\xb8\x36\x74\xd3\x15\x96\xc3\xe4"
643 "\xc6\x5a\x4d\x23\x97\x0c\x5c\xcb\xa9\xf5\x29\xc2\x0e\xff\x93\x82"
644 "\xd3\x34\x49\xad\x64\xa6\xb1\xc0\x59\x28\x75\x60\xa7\x8a\xb0\x11"
645 "\x56\x89\x42\x74\x11\xf5\xf6\x5e\x6f\x16\x54\x6a\xb1\x76\x4d\x50"
646 "\x8a\x68\xc1\x5b\x82\xb9\x0d\x00\x32\x50\xed\x88\x87\x48\x92\x17",
647 .secret_size = 529,
648 .b_public_size = 256,
649 .expected_a_public_size = 256,
650 .expected_ss_size = 256,
651 },
652 {
653 .secret =
654#ifdef __LITTLE_ENDIAN
655 "\x01\x00" /* type */
656 "\x11\x02" /* len */
657 "\x00\x01\x00\x00" /* key_size */
658 "\x00\x01\x00\x00" /* p_size */
659 "\x01\x00\x00\x00" /* g_size */
660#else
661 "\x00\x01" /* type */
662 "\x02\x11" /* len */
663 "\x00\x00\x01\x00" /* key_size */
664 "\x00\x00\x01\x00" /* p_size */
665 "\x00\x00\x00\x01" /* g_size */
666#endif
667 /* xa */
668 "\x4d\x75\xa8\x6e\xba\x23\x3a\x0c\x63\x56\xc8\xc9\x5a\xa7\xd6\x0e"
669 "\xed\xae\x40\x78\x87\x47\x5f\xe0\xa7\x7b\xba\x84\x88\x67\x4e\xe5"
670 "\x3c\xcc\x5c\x6a\xe7\x4a\x20\xec\xbe\xcb\xf5\x52\x62\x9f\x37\x80"
671 "\x0c\x72\x7b\x83\x66\xa4\xf6\x7f\x95\x97\x1c\x6a\x5c\x7e\xf1\x67"
672 "\x37\xb3\x93\x39\x3d\x0b\x55\x35\xd9\xe5\x22\x04\x9f\xf8\xc1\x04"
673 "\xce\x13\xa5\xac\xe1\x75\x05\xd1\x2b\x53\xa2\x84\xef\xb1\x18\xf4"
674 "\x66\xdd\xea\xe6\x24\x69\x5a\x49\xe0\x7a\xd8\xdf\x1b\xb7\xf1\x6d"
675 "\x9b\x50\x2c\xc8\x1c\x1c\xa3\xb4\x37\xfb\x66\x3f\x67\x71\x73\xa9"
676 "\xff\x5f\xd9\xa2\x25\x6e\x25\x1b\x26\x54\xbf\x0c\xc6\xdb\xea\x0a"
677 "\x52\x6c\x16\x7c\x27\x68\x15\x71\x58\x73\x9d\xe6\xc2\x80\xaa\x97"
678 "\x31\x66\xfb\xa6\xfb\xfd\xd0\x9c\x1d\xbe\x81\x48\xf5\x9a\x32\xf1"
679 "\x69\x62\x18\x78\xae\x72\x36\xe6\x94\x27\xd1\xff\x18\x4f\x28\x6a"
680 "\x16\xbd\x6a\x60\xee\xe5\xf9\x6d\x16\xe4\xb8\xa6\x41\x9b\x23\x7e"
681 "\xf7\x9d\xd1\x1d\x03\x15\x66\x3a\xcf\xb6\x2c\x13\x96\x2c\x52\x21"
682 "\xe4\x2d\x48\x7a\x8a\x5d\xb2\x88\xed\x98\x61\x79\x8b\x6a\x1e\x5f"
683 "\xd0\x8a\x2d\x99\x5a\x2b\x0f\xbc\xef\x53\x8f\x32\xc1\xa2\x99\x26"
684 /* p */
685 "\xb9\x36\x3a\xf1\x82\x1f\x60\xd3\x22\x47\xb8\xbc\x2d\x22\x6b\x81"
686 "\x7f\xe8\x20\x06\x09\x23\x73\x49\x9a\x59\x8b\x35\x25\xf8\x31\xbc"
687 "\x7d\xa8\x1c\x9d\x56\x0d\x1a\xf7\x4b\x4f\x96\xa4\x35\x77\x6a\x89"
688 "\xab\x42\x00\x49\x21\x71\xed\x28\x16\x1d\x87\x5a\x10\xa7\x9c\x64"
689 "\x94\xd4\x87\x3d\x28\xef\x44\xfe\x4b\xe2\xb4\x15\x8c\x82\xa6\xf3"
690 "\x50\x5f\xa8\xe8\xa2\x60\xe7\x00\x86\x78\x05\xd4\x78\x19\xa1\x98"
691 "\x62\x4e\x4a\x00\x78\x56\x96\xe6\xcf\xd7\x10\x1b\x74\x5d\xd0\x26"
692 "\x61\xdb\x6b\x32\x09\x51\xd8\xa5\xfd\x54\x16\x71\x01\xb3\x39\xe6"
693 "\x4e\x69\xb1\xd7\x06\x8f\xd6\x1e\xdc\x72\x25\x26\x74\xc8\x41\x06"
694 "\x5c\xd1\x26\x5c\xb0\x2f\xf9\x59\x13\xc1\x2a\x0f\x78\xea\x7b\xf7"
695 "\xbd\x59\xa0\x90\x1d\xfc\x33\x5b\x4c\xbf\x05\x9c\x3a\x3f\x69\xa2"
696 "\x45\x61\x4e\x10\x6a\xb3\x17\xc5\x68\x30\xfb\x07\x5f\x34\xc6\xfb"
697 "\x73\x07\x3c\x70\xf6\xae\xe7\x72\x84\xc3\x18\x81\x8f\xe8\x11\x1f"
698 "\x3d\x83\x83\x01\x2a\x14\x73\xbf\x32\x32\x2e\xc9\x4d\xdb\x2a\xca"
699 "\xee\x71\xf9\xda\xad\xe8\x82\x0b\x4d\x0c\x1f\xb6\x1d\xef\x00\x67"
700 "\x74\x3d\x95\xe0\xb7\xc4\x30\x8a\x24\x87\x12\x47\x27\x70\x0d\x73"
701 /* g */
702 "\x02",
703 .b_public =
704 "\x99\x4d\xd9\x01\x84\x8e\x4a\x5b\xb8\xa5\x64\x8c\x6c\x00\x5c\x0e"
705 "\x1e\x1b\xee\x5d\x9f\x53\xe3\x16\x70\x01\xed\xbf\x4f\x14\x36\x6e"
706 "\xe4\x43\x45\x43\x49\xcc\xb1\xb0\x2a\xc0\x6f\x22\x55\x42\x17\x94"
707 "\x18\x83\xd7\x2a\x5c\x51\x54\xf8\x4e\x7c\x10\xda\x76\x68\x57\x77"
708 "\x1e\x62\x03\x30\x04\x7b\x4c\x39\x9c\x54\x01\x54\xec\xef\xb3\x55"
709 "\xa4\xc0\x24\x6d\x3d\xbd\xcc\x46\x5b\x00\x96\xc7\xea\x93\xd1\x3f"
710 "\xf2\x6a\x72\xe3\xf2\xc1\x92\x24\x5b\xda\x48\x70\x2c\xa9\x59\x97"
711 "\x19\xb1\xd6\x54\xb3\x9c\x2e\xb0\x63\x07\x9b\x5e\xac\xb5\xf2\xb1"
712 "\x5b\xf8\xf3\xd7\x2d\x37\x9b\x68\x6c\xf8\x90\x07\xbc\x37\x9a\xa5"
713 "\xe2\x91\x12\x25\x47\x77\xe3\x3d\xb2\x95\x69\x44\x0b\x91\x1e\xaf"
714 "\x7c\x8c\x7c\x34\x41\x6a\xab\x60\x6e\xc6\x52\xec\x7e\x94\x0a\x37"
715 "\xec\x98\x90\xdf\x3f\x02\xbd\x23\x52\xdd\xd9\xe5\x31\x80\x74\x25"
716 "\xb6\xd2\xd3\xcc\xd5\xcc\x6d\xf9\x7e\x4d\x78\xab\x77\x51\xfa\x77"
717 "\x19\x94\x49\x8c\x05\xd4\x75\xed\xd2\xb3\x64\x57\xe0\x52\x99\xc0"
718 "\x83\xe3\xbb\x5e\x2b\xf1\xd2\xc0\xb1\x37\x36\x0b\x7c\xb5\x63\x96"
719 "\x8e\xde\x04\x23\x11\x95\x62\x11\x9a\xce\x6f\x63\xc8\xd5\xd1\x8f",
720 .expected_a_public =
721 "\x90\x89\xe4\x82\xd6\x0a\xcf\x1a\xae\xce\x1b\x66\xa7\x19\x71\x18"
722 "\x8f\x95\x4b\x5b\x80\x45\x4a\x5a\x43\x99\x4d\x37\xcf\xa3\xa7\x28"
723 "\x9c\xc7\x73\xf1\xb2\x17\xf6\x99\xe3\x6b\x56\xcb\x3e\x35\x60\x7d"
724 "\x65\xc7\x84\x6b\x3e\x60\xee\xcd\xd2\x70\xe7\xc9\x32\x1c\xf0\xb4"
725 "\xf9\x52\xd9\x88\x75\xfd\x40\x2c\xa7\xbe\x19\x1c\x0a\xae\x93\xe1"
726 "\x71\xc7\xcd\x4f\x33\x5c\x10\x7d\x39\x56\xfc\x73\x84\xb2\x67\xc3"
727 "\x77\x26\x20\x97\x2b\xf8\x13\x43\x93\x9c\x9a\xa4\x08\xc7\x34\x83"
728 "\xe6\x98\x61\xe7\x16\x30\x2c\xb1\xdb\x2a\xb2\xcc\xc3\x02\xa5\x3c"
729 "\x71\x50\x14\x83\xc7\xbb\xa4\xbe\x98\x1b\xfe\xcb\x43\xe9\x97\x62"
730 "\xd6\xf0\x8c\xcb\x1c\xba\x1e\xa8\xa6\xa6\x50\xfc\x85\x7d\x47\xbf"
731 "\xf4\x3e\x23\xd3\x5f\xb2\x71\x3e\x40\x94\xaa\x87\x83\x2c\x6c\x8e"
732 "\x60\xfd\xdd\xf7\xf4\x76\x03\xd3\x1d\xec\x18\x51\xa3\xf2\x44\x1a"
733 "\x3f\xb4\x7c\x18\x0d\x68\x65\x92\x54\x0d\x2d\x81\x16\xf1\x84\x66"
734 "\x89\x92\xd0\x1a\x5e\x1f\x42\x46\x5b\xe5\x83\x86\x80\xd9\xcd\x3a"
735 "\x5a\x2f\xb9\x59\x9b\xe4\x43\x84\x64\xf3\x09\x1a\x0a\xa2\x64\x0f"
736 "\x77\x4e\x8d\x8b\xe6\x88\xd1\xfc\xaf\x8f\xdf\x1d\xbc\x31\xb3\xbd",
737 .expected_ss =
738 "\x34\xc3\x35\x14\x88\x46\x26\x23\x97\xbb\xdd\x28\x5c\x94\xf6\x47"
739 "\xca\xb3\x19\xaf\xca\x44\x9b\xc2\x7d\x89\xfd\x96\x14\xfd\x6d\x58"
740 "\xd8\xc4\x6b\x61\x2a\x0d\xf2\x36\x45\xc8\xe4\xa4\xed\x81\x53\x81"
741 "\x66\x1e\xe0\x5a\xb1\x78\x2d\x0b\x5c\xb4\xd1\xfc\x90\xc6\x9c\xdb"
742 "\x5a\x30\x0b\x14\x7d\xbe\xb3\x7d\xb1\xb2\x76\x3c\x6c\xef\x74\x6b"
743 "\xe7\x1f\x64\x0c\xab\x65\xe1\x76\x5c\x3d\x83\xb5\x8a\xfb\xaf\x0f"
744 "\xf2\x06\x14\x8f\xa0\xf6\xc1\x89\x78\xf2\xba\x72\x73\x3c\xf7\x76"
745 "\x21\x67\xbc\x24\x31\xb8\x09\x65\x0f\x0c\x02\x32\x4a\x98\x14\xfc"
746 "\x72\x2c\x25\x60\x68\x5f\x2f\x30\x1e\x5b\xf0\x3b\xd1\xa2\x87\xa0"
747 "\x54\xdf\xdb\xc0\xee\x0a\x0f\x47\xc9\x90\x20\x2c\xf9\xe3\x52\xad"
748 "\x27\x65\x8d\x54\x8d\xa8\xa1\xf3\xed\x15\xd4\x94\x28\x90\x31\x93"
749 "\x1b\xc0\x51\xbb\x43\x5d\x76\x3b\x1d\x2a\x71\x50\xea\x5d\x48\x94"
750 "\x7f\x6f\xf1\x48\xdb\x30\xe5\xae\x64\x79\xd9\x7a\xdb\xc6\xff\xd8"
751 "\x5e\x5a\x64\xbd\xf6\x85\x04\xe8\x28\x6a\xac\xef\xce\x19\x8e\x9a"
752 "\xfe\x75\xc0\x27\x69\xe3\xb3\x7b\x21\xa7\xb1\x16\xa4\x85\x23\xee"
753 "\xb0\x1b\x04\x6e\xbd\xab\x16\xde\xfd\x86\x6b\xa9\x95\xd7\x0b\xfd",
754 .secret_size = 529,
755 .b_public_size = 256,
756 .expected_a_public_size = 256,
757 .expected_ss_size = 256,
758 }
759};
760
761#ifdef CONFIG_CRYPTO_FIPS
762#define ECDH_TEST_VECTORS 1
763#else
764#define ECDH_TEST_VECTORS 2
765#endif
766struct kpp_testvec ecdh_tv_template[] = {
767 {
768#ifndef CONFIG_CRYPTO_FIPS
769 .secret =
770#ifdef __LITTLE_ENDIAN
771 "\x02\x00" /* type */
772 "\x20\x00" /* len */
773 "\x01\x00" /* curve_id */
774 "\x18\x00" /* key_size */
775#else
776 "\x00\x02" /* type */
777 "\x00\x20" /* len */
778 "\x00\x01" /* curve_id */
779 "\x00\x18" /* key_size */
780#endif
781 "\xb5\x05\xb1\x71\x1e\xbf\x8c\xda"
782 "\x4e\x19\x1e\x62\x1f\x23\x23\x31"
783 "\x36\x1e\xd3\x84\x2f\xcc\x21\x72",
784 .b_public =
785 "\xc3\xba\x67\x4b\x71\xec\xd0\x76"
786 "\x7a\x99\x75\x64\x36\x13\x9a\x94"
787 "\x5d\x8b\xdc\x60\x90\x91\xfd\x3f"
788 "\xb0\x1f\x8a\x0a\x68\xc6\x88\x6e"
789 "\x83\x87\xdd\x67\x09\xf8\x8d\x96"
790 "\x07\xd6\xbd\x1c\xe6\x8d\x9d\x67",
791 .expected_a_public =
792 "\x1a\x04\xdb\xa5\xe1\xdd\x4e\x79"
793 "\xa3\xe6\xef\x0e\x5c\x80\x49\x85"
794 "\xfa\x78\xb4\xef\x49\xbd\x4c\x7c"
795 "\x22\x90\x21\x02\xf9\x1b\x81\x5d"
796 "\x0c\x8a\xa8\x98\xd6\x27\x69\x88"
797 "\x5e\xbc\x94\xd8\x15\x9e\x21\xce",
798 .expected_ss =
799 "\xf4\x57\xcc\x4f\x1f\x4e\x31\xcc"
800 "\xe3\x40\x60\xc8\x06\x93\xc6\x2e"
801 "\x99\x80\x81\x28\xaf\xc5\x51\x74",
802 .secret_size = 32,
803 .b_public_size = 48,
804 .expected_a_public_size = 48,
805 .expected_ss_size = 24
806 }, {
807#endif
808 .secret =
809#ifdef __LITTLE_ENDIAN
810 "\x02\x00" /* type */
811 "\x28\x00" /* len */
812 "\x02\x00" /* curve_id */
813 "\x20\x00" /* key_size */
814#else
815 "\x00\x02" /* type */
816 "\x00\x28" /* len */
817 "\x00\x02" /* curve_id */
818 "\x00\x20" /* key_size */
819#endif
820 "\x24\xd1\x21\xeb\xe5\xcf\x2d\x83"
821 "\xf6\x62\x1b\x6e\x43\x84\x3a\xa3"
822 "\x8b\xe0\x86\xc3\x20\x19\xda\x92"
823 "\x50\x53\x03\xe1\xc0\xea\xb8\x82",
824 .expected_a_public =
825 "\x1a\x7f\xeb\x52\x00\xbd\x3c\x31"
826 "\x7d\xb6\x70\xc1\x86\xa6\xc7\xc4"
827 "\x3b\xc5\x5f\x6c\x6f\x58\x3c\xf5"
828 "\xb6\x63\x82\x77\x33\x24\xa1\x5f"
829 "\x6a\xca\x43\x6f\xf7\x7e\xff\x02"
830 "\x37\x08\xcc\x40\x5e\x7a\xfd\x6a"
831 "\x6a\x02\x6e\x41\x87\x68\x38\x77"
832 "\xfa\xa9\x44\x43\x2d\xef\x09\xdf",
833 .expected_ss =
834 "\xea\x17\x6f\x7e\x6e\x57\x26\x38"
835 "\x8b\xfb\x41\xeb\xba\xc8\x6d\xa5"
836 "\xa8\x72\xd1\xff\xc9\x47\x3d\xaa"
837 "\x58\x43\x9f\x34\x0f\x8c\xf3\xc9",
838 .b_public =
839 "\xcc\xb4\xda\x74\xb1\x47\x3f\xea"
840 "\x6c\x70\x9e\x38\x2d\xc7\xaa\xb7"
841 "\x29\xb2\x47\x03\x19\xab\xdd\x34"
842 "\xbd\xa8\x2c\x93\xe1\xa4\x74\xd9"
843 "\x64\x63\xf7\x70\x20\x2f\xa4\xe6"
844 "\x9f\x4a\x38\xcc\xc0\x2c\x49\x2f"
845 "\xb1\x32\xbb\xaf\x22\x61\xda\xcb"
846 "\x6f\xdb\xa9\xaa\xfc\x77\x81\xf3",
847 .secret_size = 40,
848 .b_public_size = 64,
849 .expected_a_public_size = 64,
850 .expected_ss_size = 32
330 } 851 }
331}; 852};
332 853
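
The endian-dependent header at the top of each .secret blob above is the packed format the kpp_testvec entries feed to the key-agreement code: two native-endian 16-bit words giving the secret type and total blob length, followed by algorithm-specific fields (curve_id and key_size for ECDH) and the raw private key, which is why .secret_size is 8 bytes larger than the key itself. A minimal packing sketch under that reading; the helper name and the KPP_SECRET_TYPE_ECDH constant are illustrative only, and in-tree users would normally go through the encode/decode helpers added with the DH/ECDH support rather than packing the buffer by hand.

/*
 * Sketch: pack an ECDH secret in the layout shown by the vectors above.
 * All four header words are stored in native endianness (hence the
 * __LITTLE_ENDIAN variants of the test data).  Names are illustrative.
 */
#include <stdint.h>
#include <string.h>

#define KPP_SECRET_TYPE_ECDH	2	/* matches the 0x0002 type word above */

static size_t ecdh_pack_secret(uint8_t *buf, size_t buflen, uint16_t curve_id,
			       const uint8_t *key, uint16_t key_size)
{
	uint16_t hdr[4];
	size_t total = sizeof(hdr) + key_size;

	if (buflen < total)
		return 0;

	hdr[0] = KPP_SECRET_TYPE_ECDH;	/* type */
	hdr[1] = (uint16_t)total;	/* len: header + key, e.g. 32 for P-192 */
	hdr[2] = curve_id;		/* 1 = P-192, 2 = P-256 in the vectors */
	hdr[3] = key_size;		/* 24 or 32 bytes */

	memcpy(buf, hdr, sizeof(hdr));			/* native-endian header */
	memcpy(buf + sizeof(hdr), key, key_size);	/* raw private key */
	return total;
}
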
@@ -376,6 +897,131 @@ static struct hash_testvec md4_tv_template [] = {
376 }, 897 },
377}; 898};
378 899
900#define SHA3_224_TEST_VECTORS 3
901static struct hash_testvec sha3_224_tv_template[] = {
902 {
903 .plaintext = "",
904 .digest = "\x6b\x4e\x03\x42\x36\x67\xdb\xb7"
905 "\x3b\x6e\x15\x45\x4f\x0e\xb1\xab"
906 "\xd4\x59\x7f\x9a\x1b\x07\x8e\x3f"
907 "\x5b\x5a\x6b\xc7",
908 }, {
909 .plaintext = "a",
910 .psize = 1,
911 .digest = "\x9e\x86\xff\x69\x55\x7c\xa9\x5f"
912 "\x40\x5f\x08\x12\x69\x68\x5b\x38"
913 "\xe3\xa8\x19\xb3\x09\xee\x94\x2f"
914 "\x48\x2b\x6a\x8b",
915 }, {
916 .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkl"
917 "jklmklmnlmnomnopnopq",
918 .psize = 56,
919 .digest = "\x8a\x24\x10\x8b\x15\x4a\xda\x21"
920 "\xc9\xfd\x55\x74\x49\x44\x79\xba"
921 "\x5c\x7e\x7a\xb7\x6e\xf2\x64\xea"
922 "\xd0\xfc\xce\x33",
923 },
924};
925
926#define SHA3_256_TEST_VECTORS 3
927static struct hash_testvec sha3_256_tv_template[] = {
928 {
929 .plaintext = "",
930 .digest = "\xa7\xff\xc6\xf8\xbf\x1e\xd7\x66"
931 "\x51\xc1\x47\x56\xa0\x61\xd6\x62"
932 "\xf5\x80\xff\x4d\xe4\x3b\x49\xfa"
933 "\x82\xd8\x0a\x4b\x80\xf8\x43\x4a",
934 }, {
935 .plaintext = "a",
936 .psize = 1,
937 .digest = "\x80\x08\x4b\xf2\xfb\xa0\x24\x75"
938 "\x72\x6f\xeb\x2c\xab\x2d\x82\x15"
939 "\xea\xb1\x4b\xc6\xbd\xd8\xbf\xb2"
940 "\xc8\x15\x12\x57\x03\x2e\xcd\x8b",
941 }, {
942 .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkl"
943 "jklmklmnlmnomnopnopq",
944 .psize = 56,
945 .digest = "\x41\xc0\xdb\xa2\xa9\xd6\x24\x08"
946 "\x49\x10\x03\x76\xa8\x23\x5e\x2c"
947 "\x82\xe1\xb9\x99\x8a\x99\x9e\x21"
948 "\xdb\x32\xdd\x97\x49\x6d\x33\x76",
949 },
950};
951
952
953#define SHA3_384_TEST_VECTORS 3
954static struct hash_testvec sha3_384_tv_template[] = {
955 {
956 .plaintext = "",
957 .digest = "\x0c\x63\xa7\x5b\x84\x5e\x4f\x7d"
958 "\x01\x10\x7d\x85\x2e\x4c\x24\x85"
959 "\xc5\x1a\x50\xaa\xaa\x94\xfc\x61"
960 "\x99\x5e\x71\xbb\xee\x98\x3a\x2a"
961 "\xc3\x71\x38\x31\x26\x4a\xdb\x47"
962 "\xfb\x6b\xd1\xe0\x58\xd5\xf0\x04",
963 }, {
964 .plaintext = "a",
965 .psize = 1,
966 .digest = "\x18\x15\xf7\x74\xf3\x20\x49\x1b"
967 "\x48\x56\x9e\xfe\xc7\x94\xd2\x49"
968 "\xee\xb5\x9a\xae\x46\xd2\x2b\xf7"
969 "\x7d\xaf\xe2\x5c\x5e\xdc\x28\xd7"
970 "\xea\x44\xf9\x3e\xe1\x23\x4a\xa8"
971 "\x8f\x61\xc9\x19\x12\xa4\xcc\xd9",
972 }, {
973 .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkl"
974 "jklmklmnlmnomnopnopq",
975 .psize = 56,
976 .digest = "\x99\x1c\x66\x57\x55\xeb\x3a\x4b"
977 "\x6b\xbd\xfb\x75\xc7\x8a\x49\x2e"
978 "\x8c\x56\xa2\x2c\x5c\x4d\x7e\x42"
979 "\x9b\xfd\xbc\x32\xb9\xd4\xad\x5a"
980 "\xa0\x4a\x1f\x07\x6e\x62\xfe\xa1"
981 "\x9e\xef\x51\xac\xd0\x65\x7c\x22",
982 },
983};
984
985
986#define SHA3_512_TEST_VECTORS 3
987static struct hash_testvec sha3_512_tv_template[] = {
988 {
989 .plaintext = "",
990 .digest = "\xa6\x9f\x73\xcc\xa2\x3a\x9a\xc5"
991 "\xc8\xb5\x67\xdc\x18\x5a\x75\x6e"
992 "\x97\xc9\x82\x16\x4f\xe2\x58\x59"
993 "\xe0\xd1\xdc\xc1\x47\x5c\x80\xa6"
994 "\x15\xb2\x12\x3a\xf1\xf5\xf9\x4c"
995 "\x11\xe3\xe9\x40\x2c\x3a\xc5\x58"
996 "\xf5\x00\x19\x9d\x95\xb6\xd3\xe3"
997 "\x01\x75\x85\x86\x28\x1d\xcd\x26",
998 }, {
999 .plaintext = "a",
1000 .psize = 1,
1001 .digest = "\x69\x7f\x2d\x85\x61\x72\xcb\x83"
1002 "\x09\xd6\xb8\xb9\x7d\xac\x4d\xe3"
1003 "\x44\xb5\x49\xd4\xde\xe6\x1e\xdf"
1004 "\xb4\x96\x2d\x86\x98\xb7\xfa\x80"
1005 "\x3f\x4f\x93\xff\x24\x39\x35\x86"
1006 "\xe2\x8b\x5b\x95\x7a\xc3\xd1\xd3"
1007 "\x69\x42\x0c\xe5\x33\x32\x71\x2f"
1008 "\x99\x7b\xd3\x36\xd0\x9a\xb0\x2a",
1009 }, {
1010 .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkl"
1011 "jklmklmnlmnomnopnopq",
1012 .psize = 56,
1013 .digest = "\x04\xa3\x71\xe8\x4e\xcf\xb5\xb8"
1014 "\xb7\x7c\xb4\x86\x10\xfc\xa8\x18"
1015 "\x2d\xd4\x57\xce\x6f\x32\x6a\x0f"
1016 "\xd3\xd7\xec\x2f\x1e\x91\x63\x6d"
1017 "\xee\x69\x1f\xbe\x0c\x98\x53\x02"
1018 "\xba\x1b\x0d\x8d\xc7\x8c\x08\x63"
1019 "\x46\xb5\x33\xb4\x9c\x03\x0d\x99"
1020 "\xa2\x7d\xaf\x11\x39\xd6\xe7\x5e",
1021 },
1022};
1023
1024
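
The SHA3 digests above can be checked through the kernel's synchronous hash API once the new sha3 generic implementation is registered. A minimal sketch, assuming the algorithm is reachable under the name "sha3-256" and trimming error handling to the essentials:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/string.h>

/* Compute sha3-256("a") and compare it with the one-byte vector above. */
static int sha3_256_check(void)
{
	static const u8 expected[32] = {
		0x80, 0x08, 0x4b, 0xf2, 0xfb, 0xa0, 0x24, 0x75,
		0x72, 0x6f, 0xeb, 0x2c, 0xab, 0x2d, 0x82, 0x15,
		0xea, 0xb1, 0x4b, 0xc6, 0xbd, 0xd8, 0xbf, 0xb2,
		0xc8, 0x15, 0x12, 0x57, 0x03, 0x2e, 0xcd, 0x8b,
	};
	struct crypto_shash *tfm;
	u8 digest[32];
	int ret;

	tfm = crypto_alloc_shash("sha3-256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		ret = crypto_shash_digest(desc, "a", 1, digest);
	}

	crypto_free_shash(tfm);
	if (ret)
		return ret;
	return memcmp(digest, expected, sizeof(expected)) ? -EINVAL : 0;
}
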
379/* 1025/*
380 * MD5 test vectors from RFC1321 1026 * MD5 test vectors from RFC1321
381 */ 1027 */
@@ -3246,6 +3892,394 @@ static struct hash_testvec hmac_sha512_tv_template[] = {
3246 }, 3892 },
3247}; 3893};
3248 3894
3895#define HMAC_SHA3_224_TEST_VECTORS 4
3896
3897static struct hash_testvec hmac_sha3_224_tv_template[] = {
3898 {
3899 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
3900 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
3901 "\x0b\x0b\x0b\x0b",
3902 .ksize = 20,
3903 .plaintext = "Hi There",
3904 .psize = 8,
3905 .digest = "\x3b\x16\x54\x6b\xbc\x7b\xe2\x70"
3906 "\x6a\x03\x1d\xca\xfd\x56\x37\x3d"
3907 "\x98\x84\x36\x76\x41\xd8\xc5\x9a"
3908 "\xf3\xc8\x60\xf7",
3909 }, {
3910 .key = "Jefe",
3911 .ksize = 4,
3912 .plaintext = "what do ya want for nothing?",
3913 .psize = 28,
3914 .digest = "\x7f\xdb\x8d\xd8\x8b\xd2\xf6\x0d"
3915 "\x1b\x79\x86\x34\xad\x38\x68\x11"
3916 "\xc2\xcf\xc8\x5b\xfa\xf5\xd5\x2b"
3917 "\xba\xce\x5e\x66",
3918 .np = 4,
3919 .tap = { 7, 7, 7, 7 }
3920 }, {
3921 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3922 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3923 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3924 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3925 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3926 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3927 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3928 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3929 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3930 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3931 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3932 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3933 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3934 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3935 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3936 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3937 "\xaa\xaa\xaa",
3938 .ksize = 131,
3939 .plaintext = "Test Using Large"
3940 "r Than Block-Siz"
3941 "e Key - Hash Key"
3942 " First",
3943 .psize = 54,
3944 .digest = "\xb4\xa1\xf0\x4c\x00\x28\x7a\x9b"
3945 "\x7f\x60\x75\xb3\x13\xd2\x79\xb8"
3946 "\x33\xbc\x8f\x75\x12\x43\x52\xd0"
3947 "\x5f\xb9\x99\x5f",
3948 }, {
3949 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3950 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3951 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3952 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3953 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3954 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3955 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3956 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3957 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3958 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3959 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3960 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3961 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3962 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3963 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3964 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
3965 "\xaa\xaa\xaa",
3966 .ksize = 131,
3967 .plaintext =
3968 "This is a test u"
3969 "sing a larger th"
3970 "an block-size ke"
3971 "y and a larger t"
3972 "han block-size d"
3973 "ata. The key nee"
3974 "ds to be hashed "
3975 "before being use"
3976 "d by the HMAC al"
3977 "gorithm.",
3978 .psize = 152,
3979 .digest = "\x05\xd8\xcd\x6d\x00\xfa\xea\x8d"
3980 "\x1e\xb6\x8a\xde\x28\x73\x0b\xbd"
3981 "\x3c\xba\xb6\x92\x9f\x0a\x08\x6b"
3982 "\x29\xcd\x62\xa0",
3983 },
3984};
3985
3986#define HMAC_SHA3_256_TEST_VECTORS 4
3987
3988static struct hash_testvec hmac_sha3_256_tv_template[] = {
3989 {
3990 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
3991 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
3992 "\x0b\x0b\x0b\x0b",
3993 .ksize = 20,
3994 .plaintext = "Hi There",
3995 .psize = 8,
3996 .digest = "\xba\x85\x19\x23\x10\xdf\xfa\x96"
3997 "\xe2\xa3\xa4\x0e\x69\x77\x43\x51"
3998 "\x14\x0b\xb7\x18\x5e\x12\x02\xcd"
3999 "\xcc\x91\x75\x89\xf9\x5e\x16\xbb",
4000 }, {
4001 .key = "Jefe",
4002 .ksize = 4,
4003 .plaintext = "what do ya want for nothing?",
4004 .psize = 28,
4005 .digest = "\xc7\xd4\x07\x2e\x78\x88\x77\xae"
4006 "\x35\x96\xbb\xb0\xda\x73\xb8\x87"
4007 "\xc9\x17\x1f\x93\x09\x5b\x29\x4a"
4008 "\xe8\x57\xfb\xe2\x64\x5e\x1b\xa5",
4009 .np = 4,
4010 .tap = { 7, 7, 7, 7 }
4011 }, {
4012 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4013 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4014 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4015 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4016 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4017 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4018 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4019 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4020 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4021 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4022 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4023 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4024 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4025 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4026 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4027 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4028 "\xaa\xaa\xaa",
4029 .ksize = 131,
4030 .plaintext = "Test Using Large"
4031 "r Than Block-Siz"
4032 "e Key - Hash Key"
4033 " First",
4034 .psize = 54,
4035 .digest = "\xed\x73\xa3\x74\xb9\x6c\x00\x52"
4036 "\x35\xf9\x48\x03\x2f\x09\x67\x4a"
4037 "\x58\xc0\xce\x55\x5c\xfc\x1f\x22"
4038 "\x3b\x02\x35\x65\x60\x31\x2c\x3b",
4039 }, {
4040 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4041 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4042 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4043 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4044 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4045 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4046 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4047 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4048 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4049 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4050 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4051 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4052 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4053 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4054 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4055 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4056 "\xaa\xaa\xaa",
4057 .ksize = 131,
4058 .plaintext =
4059 "This is a test u"
4060 "sing a larger th"
4061 "an block-size ke"
4062 "y and a larger t"
4063 "han block-size d"
4064 "ata. The key nee"
4065 "ds to be hashed "
4066 "before being use"
4067 "d by the HMAC al"
4068 "gorithm.",
4069 .psize = 152,
4070 .digest = "\x65\xc5\xb0\x6d\x4c\x3d\xe3\x2a"
4071 "\x7a\xef\x87\x63\x26\x1e\x49\xad"
4072 "\xb6\xe2\x29\x3e\xc8\xe7\xc6\x1e"
4073 "\x8d\xe6\x17\x01\xfc\x63\xe1\x23",
4074 },
4075};
4076
4077#define HMAC_SHA3_384_TEST_VECTORS 4
4078
4079static struct hash_testvec hmac_sha3_384_tv_template[] = {
4080 {
4081 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4082 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4083 "\x0b\x0b\x0b\x0b",
4084 .ksize = 20,
4085 .plaintext = "Hi There",
4086 .psize = 8,
4087 .digest = "\x68\xd2\xdc\xf7\xfd\x4d\xdd\x0a"
4088 "\x22\x40\xc8\xa4\x37\x30\x5f\x61"
4089 "\xfb\x73\x34\xcf\xb5\xd0\x22\x6e"
4090 "\x1b\xc2\x7d\xc1\x0a\x2e\x72\x3a"
4091 "\x20\xd3\x70\xb4\x77\x43\x13\x0e"
4092 "\x26\xac\x7e\x3d\x53\x28\x86\xbd",
4093 }, {
4094 .key = "Jefe",
4095 .ksize = 4,
4096 .plaintext = "what do ya want for nothing?",
4097 .psize = 28,
4098 .digest = "\xf1\x10\x1f\x8c\xbf\x97\x66\xfd"
4099 "\x67\x64\xd2\xed\x61\x90\x3f\x21"
4100 "\xca\x9b\x18\xf5\x7c\xf3\xe1\xa2"
4101 "\x3c\xa1\x35\x08\xa9\x32\x43\xce"
4102 "\x48\xc0\x45\xdc\x00\x7f\x26\xa2"
4103 "\x1b\x3f\x5e\x0e\x9d\xf4\xc2\x0a",
4104 .np = 4,
4105 .tap = { 7, 7, 7, 7 }
4106 }, {
4107 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4108 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4109 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4110 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4111 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4112 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4113 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4114 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4115 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4116 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4117 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4118 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4119 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4120 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4121 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4122 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4123 "\xaa\xaa\xaa",
4124 .ksize = 131,
4125 .plaintext = "Test Using Large"
4126 "r Than Block-Siz"
4127 "e Key - Hash Key"
4128 " First",
4129 .psize = 54,
4130 .digest = "\x0f\xc1\x95\x13\xbf\x6b\xd8\x78"
4131 "\x03\x70\x16\x70\x6a\x0e\x57\xbc"
4132 "\x52\x81\x39\x83\x6b\x9a\x42\xc3"
4133 "\xd4\x19\xe4\x98\xe0\xe1\xfb\x96"
4134 "\x16\xfd\x66\x91\x38\xd3\x3a\x11"
4135 "\x05\xe0\x7c\x72\xb6\x95\x3b\xcc",
4136 }, {
4137 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4138 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4139 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4140 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4141 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4142 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4143 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4144 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4145 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4146 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4147 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4148 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4149 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4150 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4151 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4152 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4153 "\xaa\xaa\xaa",
4154 .ksize = 131,
4155 .plaintext =
4156 "This is a test u"
4157 "sing a larger th"
4158 "an block-size ke"
4159 "y and a larger t"
4160 "han block-size d"
4161 "ata. The key nee"
4162 "ds to be hashed "
4163 "before being use"
4164 "d by the HMAC al"
4165 "gorithm.",
4166 .psize = 152,
4167 .digest = "\x02\x6f\xdf\x6b\x50\x74\x1e\x37"
4168 "\x38\x99\xc9\xf7\xd5\x40\x6d\x4e"
4169 "\xb0\x9f\xc6\x66\x56\x36\xfc\x1a"
4170 "\x53\x00\x29\xdd\xf5\xcf\x3c\xa5"
4171 "\xa9\x00\xed\xce\x01\xf5\xf6\x1e"
4172 "\x2f\x40\x8c\xdf\x2f\xd3\xe7\xe8",
4173 },
4174};
4175
4176#define HMAC_SHA3_512_TEST_VECTORS 4
4177
4178static struct hash_testvec hmac_sha3_512_tv_template[] = {
4179 {
4180 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4181 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4182 "\x0b\x0b\x0b\x0b",
4183 .ksize = 20,
4184 .plaintext = "Hi There",
4185 .psize = 8,
4186 .digest = "\xeb\x3f\xbd\x4b\x2e\xaa\xb8\xf5"
4187 "\xc5\x04\xbd\x3a\x41\x46\x5a\xac"
4188 "\xec\x15\x77\x0a\x7c\xab\xac\x53"
4189 "\x1e\x48\x2f\x86\x0b\x5e\xc7\xba"
4190 "\x47\xcc\xb2\xc6\xf2\xaf\xce\x8f"
4191 "\x88\xd2\x2b\x6d\xc6\x13\x80\xf2"
4192 "\x3a\x66\x8f\xd3\x88\x8b\xb8\x05"
4193 "\x37\xc0\xa0\xb8\x64\x07\x68\x9e",
4194 }, {
4195 .key = "Jefe",
4196 .ksize = 4,
4197 .plaintext = "what do ya want for nothing?",
4198 .psize = 28,
4199 .digest = "\x5a\x4b\xfe\xab\x61\x66\x42\x7c"
4200 "\x7a\x36\x47\xb7\x47\x29\x2b\x83"
4201 "\x84\x53\x7c\xdb\x89\xaf\xb3\xbf"
4202 "\x56\x65\xe4\xc5\xe7\x09\x35\x0b"
4203 "\x28\x7b\xae\xc9\x21\xfd\x7c\xa0"
4204 "\xee\x7a\x0c\x31\xd0\x22\xa9\x5e"
4205 "\x1f\xc9\x2b\xa9\xd7\x7d\xf8\x83"
4206 "\x96\x02\x75\xbe\xb4\xe6\x20\x24",
4207 .np = 4,
4208 .tap = { 7, 7, 7, 7 }
4209 }, {
4210 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4211 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4212 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4213 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4214 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4215 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4216 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4217 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4218 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4219 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4220 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4221 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4222 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4223 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4224 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4225 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4226 "\xaa\xaa\xaa",
4227 .ksize = 131,
4228 .plaintext = "Test Using Large"
4229 "r Than Block-Siz"
4230 "e Key - Hash Key"
4231 " First",
4232 .psize = 54,
4233 .digest = "\x00\xf7\x51\xa9\xe5\x06\x95\xb0"
4234 "\x90\xed\x69\x11\xa4\xb6\x55\x24"
4235 "\x95\x1c\xdc\x15\xa7\x3a\x5d\x58"
4236 "\xbb\x55\x21\x5e\xa2\xcd\x83\x9a"
4237 "\xc7\x9d\x2b\x44\xa3\x9b\xaf\xab"
4238 "\x27\xe8\x3f\xde\x9e\x11\xf6\x34"
4239 "\x0b\x11\xd9\x91\xb1\xb9\x1b\xf2"
4240 "\xee\xe7\xfc\x87\x24\x26\xc3\xa4",
4241 }, {
4242 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4243 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4244 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4245 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4246 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4247 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4248 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4249 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4250 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4251 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4252 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4253 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4254 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4255 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4256 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4257 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
4258 "\xaa\xaa\xaa",
4259 .ksize = 131,
4260 .plaintext =
4261 "This is a test u"
4262 "sing a larger th"
4263 "an block-size ke"
4264 "y and a larger t"
4265 "han block-size d"
4266 "ata. The key nee"
4267 "ds to be hashed "
4268 "before being use"
4269 "d by the HMAC al"
4270 "gorithm.",
4271 .psize = 152,
4272 .digest = "\x38\xa4\x56\xa0\x04\xbd\x10\xd3"
4273 "\x2c\x9a\xb8\x33\x66\x84\x11\x28"
4274 "\x62\xc3\xdb\x61\xad\xcc\xa3\x18"
4275 "\x29\x35\x5e\xaf\x46\xfd\x5c\x73"
4276 "\xd0\x6a\x1f\x0d\x13\xfe\xc9\xa6"
4277 "\x52\xfb\x38\x11\xb5\x77\xb1\xb1"
4278 "\xd1\xb9\x78\x9f\x97\xae\x5b\x83"
4279 "\xc6\xf4\x4d\xfc\xf1\xd6\x7e\xba",
4280 },
4281};
4282
3249/* 4283/*
3250 * Poly1305 test vectors from RFC7539 A.3. 4284 * Poly1305 test vectors from RFC7539 A.3.
3251 */ 4285 */
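
Exercising the keyed hmac(sha3-*) vectors only differs from the plain digest case by a setkey step, since the generic hmac template wraps the new sha3 digests. A sketch along the same lines as above, assuming the instance is reachable as "hmac(sha3-224)" (error handling trimmed):

#include <crypto/hash.h>
#include <linux/err.h>

/* HMAC-SHA3-224 over "Hi There" with the 20-byte 0x0b key from the first vector. */
static int hmac_sha3_224_check(u8 *out /* 28 bytes */)
{
	static const u8 key[20] = { [0 ... 19] = 0x0b };
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash("hmac(sha3-224)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, key, sizeof(key));
	if (!ret) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		ret = crypto_shash_digest(desc, "Hi There", 8, out);
	}

	crypto_free_shash(tfm);
	return ret;
}
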
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index ac51149e9777..56ad5a5936a9 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -90,7 +90,7 @@ config HW_RANDOM_BCM63XX
90 90
91config HW_RANDOM_BCM2835 91config HW_RANDOM_BCM2835
92 tristate "Broadcom BCM2835 Random Number Generator support" 92 tristate "Broadcom BCM2835 Random Number Generator support"
93 depends on ARCH_BCM2835 93 depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X
94 default HW_RANDOM 94 default HW_RANDOM
95 ---help--- 95 ---help---
96 This driver provides kernel-side support for the Random Number 96 This driver provides kernel-side support for the Random Number
@@ -396,6 +396,20 @@ config HW_RANDOM_PIC32
396 396
397 If unsure, say Y. 397 If unsure, say Y.
398 398
399config HW_RANDOM_MESON
400 tristate "Amlogic Meson Random Number Generator support"
401 depends on HW_RANDOM
402 depends on ARCH_MESON || COMPILE_TEST
403 default y
404 ---help---
405 This driver provides kernel-side support for the Random Number
406 Generator hardware found on Amlogic Meson SoCs.
407
 408	  To compile this driver as a module, choose M here. The
409 module will be called meson-rng.
410
411 If unsure, say Y.
412
399endif # HW_RANDOM 413endif # HW_RANDOM
400 414
401config UML_RANDOM 415config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 63022b49f160..04bb0b03356f 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -34,3 +34,4 @@ obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
34obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o 34obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
35obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o 35obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
36obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o 36obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o
37obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 7192ec25f667..af2149273fe0 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -19,6 +19,7 @@
19#define RNG_CTRL 0x0 19#define RNG_CTRL 0x0
20#define RNG_STATUS 0x4 20#define RNG_STATUS 0x4
21#define RNG_DATA 0x8 21#define RNG_DATA 0x8
22#define RNG_INT_MASK 0x10
22 23
23/* enable rng */ 24/* enable rng */
24#define RNG_RBGEN 0x1 25#define RNG_RBGEN 0x1
@@ -26,10 +27,24 @@
26/* the initial numbers generated are "less random" so will be discarded */ 27/* the initial numbers generated are "less random" so will be discarded */
27#define RNG_WARMUP_COUNT 0x40000 28#define RNG_WARMUP_COUNT 0x40000
28 29
30#define RNG_INT_OFF 0x1
31
32static void __init nsp_rng_init(void __iomem *base)
33{
34 u32 val;
35
36 /* mask the interrupt */
37 val = readl(base + RNG_INT_MASK);
38 val |= RNG_INT_OFF;
39 writel(val, base + RNG_INT_MASK);
40}
41
29static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max, 42static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
30 bool wait) 43 bool wait)
31{ 44{
32 void __iomem *rng_base = (void __iomem *)rng->priv; 45 void __iomem *rng_base = (void __iomem *)rng->priv;
46 u32 max_words = max / sizeof(u32);
47 u32 num_words, count;
33 48
34 while ((__raw_readl(rng_base + RNG_STATUS) >> 24) == 0) { 49 while ((__raw_readl(rng_base + RNG_STATUS) >> 24) == 0) {
35 if (!wait) 50 if (!wait)
@@ -37,8 +52,14 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
37 cpu_relax(); 52 cpu_relax();
38 } 53 }
39 54
40 *(u32 *)buf = __raw_readl(rng_base + RNG_DATA); 55 num_words = readl(rng_base + RNG_STATUS) >> 24;
41 return sizeof(u32); 56 if (num_words > max_words)
57 num_words = max_words;
58
59 for (count = 0; count < num_words; count++)
60 ((u32 *)buf)[count] = readl(rng_base + RNG_DATA);
61
62 return num_words * sizeof(u32);
42} 63}
43 64
44static struct hwrng bcm2835_rng_ops = { 65static struct hwrng bcm2835_rng_ops = {
@@ -46,10 +67,19 @@ static struct hwrng bcm2835_rng_ops = {
46 .read = bcm2835_rng_read, 67 .read = bcm2835_rng_read,
47}; 68};
48 69
70static const struct of_device_id bcm2835_rng_of_match[] = {
71 { .compatible = "brcm,bcm2835-rng"},
72 { .compatible = "brcm,bcm-nsp-rng", .data = nsp_rng_init},
73 { .compatible = "brcm,bcm5301x-rng", .data = nsp_rng_init},
74 {},
75};
76
49static int bcm2835_rng_probe(struct platform_device *pdev) 77static int bcm2835_rng_probe(struct platform_device *pdev)
50{ 78{
51 struct device *dev = &pdev->dev; 79 struct device *dev = &pdev->dev;
52 struct device_node *np = dev->of_node; 80 struct device_node *np = dev->of_node;
81 void (*rng_setup)(void __iomem *base);
82 const struct of_device_id *rng_id;
53 void __iomem *rng_base; 83 void __iomem *rng_base;
54 int err; 84 int err;
55 85
@@ -61,6 +91,15 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
61 } 91 }
62 bcm2835_rng_ops.priv = (unsigned long)rng_base; 92 bcm2835_rng_ops.priv = (unsigned long)rng_base;
63 93
94 rng_id = of_match_node(bcm2835_rng_of_match, np);
95 if (!rng_id)
96 return -EINVAL;
97
98 /* Check for rng init function, execute it */
99 rng_setup = rng_id->data;
100 if (rng_setup)
101 rng_setup(rng_base);
102
64 /* set warm-up count & enable */ 103 /* set warm-up count & enable */
65 __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS); 104 __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
66 __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL); 105 __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);
@@ -90,10 +129,6 @@ static int bcm2835_rng_remove(struct platform_device *pdev)
90 return 0; 129 return 0;
91} 130}
92 131
93static const struct of_device_id bcm2835_rng_of_match[] = {
94 { .compatible = "brcm,bcm2835-rng", },
95 {},
96};
97MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match); 132MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
98 133
99static struct platform_driver bcm2835_rng_driver = { 134static struct platform_driver bcm2835_rng_driver = {
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index ed44561ea647..23d358553b21 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -45,12 +45,12 @@ struct exynos_rng {
45 45
46static u32 exynos_rng_readl(struct exynos_rng *rng, u32 offset) 46static u32 exynos_rng_readl(struct exynos_rng *rng, u32 offset)
47{ 47{
48 return __raw_readl(rng->mem + offset); 48 return readl_relaxed(rng->mem + offset);
49} 49}
50 50
51static void exynos_rng_writel(struct exynos_rng *rng, u32 val, u32 offset) 51static void exynos_rng_writel(struct exynos_rng *rng, u32 val, u32 offset)
52{ 52{
53 __raw_writel(val, rng->mem + offset); 53 writel_relaxed(val, rng->mem + offset);
54} 54}
55 55
56static int exynos_rng_configure(struct exynos_rng *exynos_rng) 56static int exynos_rng_configure(struct exynos_rng *exynos_rng)
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
new file mode 100644
index 000000000000..0cfd81bcaeac
--- /dev/null
+++ b/drivers/char/hw_random/meson-rng.c
@@ -0,0 +1,131 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright (c) 2016 BayLibre, SAS.
8 * Author: Neil Armstrong <narmstrong@baylibre.com>
9 * Copyright (C) 2014 Amlogic, Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, see <http://www.gnu.org/licenses/>.
22 * The full GNU General Public License is included in this distribution
23 * in the file called COPYING.
24 *
25 * BSD LICENSE
26 *
27 * Copyright (c) 2016 BayLibre, SAS.
28 * Author: Neil Armstrong <narmstrong@baylibre.com>
29 * Copyright (C) 2014 Amlogic, Inc.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 *
35 * * Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * * Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in
39 * the documentation and/or other materials provided with the
40 * distribution.
41 * * Neither the name of Intel Corporation nor the names of its
42 * contributors may be used to endorse or promote products derived
43 * from this software without specific prior written permission.
44 *
45 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
46 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
47 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
48 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
49 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
50 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
51 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
52 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
53 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
54 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
55 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
56 */
57#include <linux/err.h>
58#include <linux/module.h>
59#include <linux/io.h>
60#include <linux/platform_device.h>
61#include <linux/hw_random.h>
62#include <linux/slab.h>
63#include <linux/types.h>
64#include <linux/of.h>
65
66#define RNG_DATA 0x00
67
68struct meson_rng_data {
69 void __iomem *base;
70 struct platform_device *pdev;
71 struct hwrng rng;
72};
73
74static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
75{
76 struct meson_rng_data *data =
77 container_of(rng, struct meson_rng_data, rng);
78
79 if (max < sizeof(u32))
80 return 0;
81
82 *(u32 *)buf = readl_relaxed(data->base + RNG_DATA);
83
84 return sizeof(u32);
85}
86
87static int meson_rng_probe(struct platform_device *pdev)
88{
89 struct device *dev = &pdev->dev;
90 struct meson_rng_data *data;
91 struct resource *res;
92
93 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
94 if (!data)
95 return -ENOMEM;
96
97 data->pdev = pdev;
98
99 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
100 data->base = devm_ioremap_resource(dev, res);
101 if (IS_ERR(data->base))
102 return PTR_ERR(data->base);
103
104 data->rng.name = pdev->name;
105 data->rng.read = meson_rng_read;
106
107 platform_set_drvdata(pdev, data);
108
109 return devm_hwrng_register(dev, &data->rng);
110}
111
112static const struct of_device_id meson_rng_of_match[] = {
113 { .compatible = "amlogic,meson-rng", },
114 {},
115};
116
117static struct platform_driver meson_rng_driver = {
118 .probe = meson_rng_probe,
119 .driver = {
120 .name = "meson-rng",
121 .of_match_table = meson_rng_of_match,
122 },
123};
124
125module_platform_driver(meson_rng_driver);
126
127MODULE_ALIAS("platform:meson-rng");
128MODULE_DESCRIPTION("Meson H/W Random Number Generator driver");
129MODULE_AUTHOR("Lawrence Mok <lawrence.mok@amlogic.com>");
130MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
131MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 8a1432e8bb80..01d4be2c354b 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -384,7 +384,12 @@ static int omap_rng_probe(struct platform_device *pdev)
384 } 384 }
385 385
386 pm_runtime_enable(&pdev->dev); 386 pm_runtime_enable(&pdev->dev);
387 pm_runtime_get_sync(&pdev->dev); 387 ret = pm_runtime_get_sync(&pdev->dev);
388 if (ret) {
389 dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret);
390 pm_runtime_put_noidle(&pdev->dev);
391 goto err_ioremap;
392 }
388 393
389 ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) : 394 ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
390 get_omap_rng_device_details(priv); 395 get_omap_rng_device_details(priv);
@@ -435,8 +440,15 @@ static int __maybe_unused omap_rng_suspend(struct device *dev)
435static int __maybe_unused omap_rng_resume(struct device *dev) 440static int __maybe_unused omap_rng_resume(struct device *dev)
436{ 441{
437 struct omap_rng_dev *priv = dev_get_drvdata(dev); 442 struct omap_rng_dev *priv = dev_get_drvdata(dev);
443 int ret;
444
445 ret = pm_runtime_get_sync(dev);
446 if (ret) {
447 dev_err(dev, "Failed to runtime_get device: %d\n", ret);
448 pm_runtime_put_noidle(dev);
449 return ret;
450 }
438 451
439 pm_runtime_get_sync(dev);
440 priv->pdata->init(priv); 452 priv->pdata->init(priv);
441 453
442 return 0; 454 return 0;
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 92a810648bd0..63d84e6f1891 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -69,8 +69,12 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
69 } 69 }
70 70
71 /* If error detected or data not ready... */ 71 /* If error detected or data not ready... */
72 if (sr != RNG_SR_DRDY) 72 if (sr != RNG_SR_DRDY) {
73 if (WARN_ONCE(sr & (RNG_SR_SEIS | RNG_SR_CEIS),
74 "bad RNG status - %x\n", sr))
75 writel_relaxed(0, priv->base + RNG_SR);
73 break; 76 break;
77 }
74 78
75 *(u32 *)data = readl_relaxed(priv->base + RNG_DR); 79 *(u32 *)data = readl_relaxed(priv->base + RNG_DR);
76 80
@@ -79,10 +83,6 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
79 max -= sizeof(u32); 83 max -= sizeof(u32);
80 } 84 }
81 85
82 if (WARN_ONCE(sr & (RNG_SR_SEIS | RNG_SR_CEIS),
83 "bad RNG status - %x\n", sr))
84 writel_relaxed(0, priv->base + RNG_SR);
85
86 pm_runtime_mark_last_busy((struct device *) priv->rng.priv); 86 pm_runtime_mark_last_busy((struct device *) priv->rng.priv);
87 pm_runtime_put_sync_autosuspend((struct device *) priv->rng.priv); 87 pm_runtime_put_sync_autosuspend((struct device *) priv->rng.priv);
88 88
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index 95b73968cf72..10db7df366c8 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -588,11 +588,6 @@ static int bfin_crypto_crc_probe(struct platform_device *pdev)
588 crypto_init_queue(&crc->queue, CRC_CCRYPTO_QUEUE_LENGTH); 588 crypto_init_queue(&crc->queue, CRC_CCRYPTO_QUEUE_LENGTH);
589 589
590 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 590 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
591 if (res == NULL) {
592 dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
593 return -ENOENT;
594 }
595
596 crc->regs = devm_ioremap_resource(dev, res); 591 crc->regs = devm_ioremap_resource(dev, res);
597 if (IS_ERR((void *)crc->regs)) { 592 if (IS_ERR((void *)crc->regs)) {
598 dev_err(&pdev->dev, "Cannot map CRC IO\n"); 593 dev_err(&pdev->dev, "Cannot map CRC IO\n");
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 5652a53415dc..64bf3024b680 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -1,6 +1,6 @@
1config CRYPTO_DEV_FSL_CAAM 1config CRYPTO_DEV_FSL_CAAM
2 tristate "Freescale CAAM-Multicore driver backend" 2 tristate "Freescale CAAM-Multicore driver backend"
3 depends on FSL_SOC || ARCH_MXC 3 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
4 help 4 help
5 Enables the driver module for Freescale's Cryptographic Accelerator 5 Enables the driver module for Freescale's Cryptographic Accelerator
6 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). 6 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -99,6 +99,18 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
99 To compile this as a module, choose M here: the module 99 To compile this as a module, choose M here: the module
100 will be called caamhash. 100 will be called caamhash.
101 101
102config CRYPTO_DEV_FSL_CAAM_PKC_API
103 tristate "Register public key cryptography implementations with Crypto API"
104 depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
105 default y
106 select CRYPTO_RSA
107 help
108 Selecting this will allow SEC Public key support for RSA.
109 Supported cryptographic primitives: encryption, decryption,
110 signature and verification.
111 To compile this as a module, choose M here: the module
112 will be called caam_pkc.
113
102config CRYPTO_DEV_FSL_CAAM_RNG_API 114config CRYPTO_DEV_FSL_CAAM_RNG_API
103 tristate "Register caam device for hwrng API" 115 tristate "Register caam device for hwrng API"
104 depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR 116 depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
@@ -116,10 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_IMX
116 def_bool SOC_IMX6 || SOC_IMX7D 128 def_bool SOC_IMX6 || SOC_IMX7D
117 depends on CRYPTO_DEV_FSL_CAAM 129 depends on CRYPTO_DEV_FSL_CAAM
118 130
119config CRYPTO_DEV_FSL_CAAM_LE
120 def_bool CRYPTO_DEV_FSL_CAAM_IMX || SOC_LS1021A
121 depends on CRYPTO_DEV_FSL_CAAM
122
123config CRYPTO_DEV_FSL_CAAM_DEBUG 131config CRYPTO_DEV_FSL_CAAM_DEBUG
124 bool "Enable debug output in CAAM driver" 132 bool "Enable debug output in CAAM driver"
125 depends on CRYPTO_DEV_FSL_CAAM 133 depends on CRYPTO_DEV_FSL_CAAM
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 550758a333e7..08bf5515ae8a 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the CAAM backend and dependent components 2# Makefile for the CAAM backend and dependent components
3# 3#
4ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y) 4ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
5 EXTRA_CFLAGS := -DDEBUG 5 ccflags-y := -DDEBUG
6endif 6endif
7 7
8obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o 8obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
@@ -10,6 +10,8 @@ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
10obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o 10obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
11obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o 11obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
12obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o 12obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
13obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
13 14
14caam-objs := ctrl.o 15caam-objs := ctrl.o
15caam_jr-objs := jr.o key_gen.o error.o 16caam_jr-objs := jr.o key_gen.o error.o
17caam_pkc-y := caampkc.o pkc_desc.o
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 5845d4a08797..f1ecc8df8d41 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -847,7 +847,7 @@ static int ahash_update_ctx(struct ahash_request *req)
847 *next_buflen, 0); 847 *next_buflen, 0);
848 } else { 848 } else {
849 (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= 849 (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
850 SEC4_SG_LEN_FIN; 850 cpu_to_caam32(SEC4_SG_LEN_FIN);
851 } 851 }
852 852
853 state->current_buf = !state->current_buf; 853 state->current_buf = !state->current_buf;
@@ -949,7 +949,8 @@ static int ahash_final_ctx(struct ahash_request *req)
949 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, 949 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
950 buf, state->buf_dma, buflen, 950 buf, state->buf_dma, buflen,
951 last_buflen); 951 last_buflen);
952 (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN; 952 (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
953 cpu_to_caam32(SEC4_SG_LEN_FIN);
953 954
954 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 955 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
955 sec4_sg_bytes, DMA_TO_DEVICE); 956 sec4_sg_bytes, DMA_TO_DEVICE);
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
new file mode 100644
index 000000000000..851015e652b8
--- /dev/null
+++ b/drivers/crypto/caam/caampkc.c
@@ -0,0 +1,607 @@
1/*
2 * caam - Freescale FSL CAAM support for Public Key Cryptography
3 *
4 * Copyright 2016 Freescale Semiconductor, Inc.
5 *
 6 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
7 * all the desired key parameters, input and output pointers.
8 */
9#include "compat.h"
10#include "regs.h"
11#include "intern.h"
12#include "jr.h"
13#include "error.h"
14#include "desc_constr.h"
15#include "sg_sw_sec4.h"
16#include "caampkc.h"
17
18#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
19#define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
20 sizeof(struct rsa_priv_f1_pdb))
21
22static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
23 struct akcipher_request *req)
24{
25 dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
26 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
27
28 if (edesc->sec4_sg_bytes)
29 dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
30 DMA_TO_DEVICE);
31}
32
33static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
34 struct akcipher_request *req)
35{
36 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
37 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
38 struct caam_rsa_key *key = &ctx->key;
39 struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
40
41 dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
42 dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
43}
44
45static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
46 struct akcipher_request *req)
47{
48 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
49 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
50 struct caam_rsa_key *key = &ctx->key;
51 struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
52
53 dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
54 dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
55}
56
57/* RSA Job Completion handler */
58static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
59{
60 struct akcipher_request *req = context;
61 struct rsa_edesc *edesc;
62
63 if (err)
64 caam_jr_strstatus(dev, err);
65
66 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
67
68 rsa_pub_unmap(dev, edesc, req);
69 rsa_io_unmap(dev, edesc, req);
70 kfree(edesc);
71
72 akcipher_request_complete(req, err);
73}
74
75static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
76 void *context)
77{
78 struct akcipher_request *req = context;
79 struct rsa_edesc *edesc;
80
81 if (err)
82 caam_jr_strstatus(dev, err);
83
84 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
85
86 rsa_priv_f1_unmap(dev, edesc, req);
87 rsa_io_unmap(dev, edesc, req);
88 kfree(edesc);
89
90 akcipher_request_complete(req, err);
91}
92
93static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
94 size_t desclen)
95{
96 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
97 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
98 struct device *dev = ctx->dev;
99 struct rsa_edesc *edesc;
100 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
101 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
102 int sgc;
103 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
104 int src_nents, dst_nents;
105
106 src_nents = sg_nents_for_len(req->src, req->src_len);
107 dst_nents = sg_nents_for_len(req->dst, req->dst_len);
108
109 if (src_nents > 1)
110 sec4_sg_len = src_nents;
111 if (dst_nents > 1)
112 sec4_sg_len += dst_nents;
113
114 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
115
116 /* allocate space for base edesc, hw desc commands and link tables */
117 edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
118 GFP_DMA | flags);
119 if (!edesc)
120 return ERR_PTR(-ENOMEM);
121
122 sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
123 if (unlikely(!sgc)) {
124 dev_err(dev, "unable to map source\n");
125 goto src_fail;
126 }
127
128 sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
129 if (unlikely(!sgc)) {
130 dev_err(dev, "unable to map destination\n");
131 goto dst_fail;
132 }
133
134 edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
135
136 sec4_sg_index = 0;
137 if (src_nents > 1) {
138 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
139 sec4_sg_index += src_nents;
140 }
141 if (dst_nents > 1)
142 sg_to_sec4_sg_last(req->dst, dst_nents,
143 edesc->sec4_sg + sec4_sg_index, 0);
144
145 /* Save nents for later use in Job Descriptor */
146 edesc->src_nents = src_nents;
147 edesc->dst_nents = dst_nents;
148
149 if (!sec4_sg_bytes)
150 return edesc;
151
152 edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
153 sec4_sg_bytes, DMA_TO_DEVICE);
154 if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
155 dev_err(dev, "unable to map S/G table\n");
156 goto sec4_sg_fail;
157 }
158
159 edesc->sec4_sg_bytes = sec4_sg_bytes;
160
161 return edesc;
162
163sec4_sg_fail:
164 dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
165dst_fail:
166 dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
167src_fail:
168 kfree(edesc);
169 return ERR_PTR(-ENOMEM);
170}
171
172static int set_rsa_pub_pdb(struct akcipher_request *req,
173 struct rsa_edesc *edesc)
174{
175 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
176 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
177 struct caam_rsa_key *key = &ctx->key;
178 struct device *dev = ctx->dev;
179 struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
180 int sec4_sg_index = 0;
181
182 pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
183 if (dma_mapping_error(dev, pdb->n_dma)) {
184 dev_err(dev, "Unable to map RSA modulus memory\n");
185 return -ENOMEM;
186 }
187
188 pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
189 if (dma_mapping_error(dev, pdb->e_dma)) {
190 dev_err(dev, "Unable to map RSA public exponent memory\n");
191 dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
192 return -ENOMEM;
193 }
194
195 if (edesc->src_nents > 1) {
196 pdb->sgf |= RSA_PDB_SGF_F;
197 pdb->f_dma = edesc->sec4_sg_dma;
198 sec4_sg_index += edesc->src_nents;
199 } else {
200 pdb->f_dma = sg_dma_address(req->src);
201 }
202
203 if (edesc->dst_nents > 1) {
204 pdb->sgf |= RSA_PDB_SGF_G;
205 pdb->g_dma = edesc->sec4_sg_dma +
206 sec4_sg_index * sizeof(struct sec4_sg_entry);
207 } else {
208 pdb->g_dma = sg_dma_address(req->dst);
209 }
210
211 pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
212 pdb->f_len = req->src_len;
213
214 return 0;
215}
216
217static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
218 struct rsa_edesc *edesc)
219{
220 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
221 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
222 struct caam_rsa_key *key = &ctx->key;
223 struct device *dev = ctx->dev;
224 struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
225 int sec4_sg_index = 0;
226
227 pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
228 if (dma_mapping_error(dev, pdb->n_dma)) {
229 dev_err(dev, "Unable to map modulus memory\n");
230 return -ENOMEM;
231 }
232
233 pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
234 if (dma_mapping_error(dev, pdb->d_dma)) {
235 dev_err(dev, "Unable to map RSA private exponent memory\n");
236 dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
237 return -ENOMEM;
238 }
239
240 if (edesc->src_nents > 1) {
241 pdb->sgf |= RSA_PRIV_PDB_SGF_G;
242 pdb->g_dma = edesc->sec4_sg_dma;
243 sec4_sg_index += edesc->src_nents;
244 } else {
245 pdb->g_dma = sg_dma_address(req->src);
246 }
247
248 if (edesc->dst_nents > 1) {
249 pdb->sgf |= RSA_PRIV_PDB_SGF_F;
250 pdb->f_dma = edesc->sec4_sg_dma +
251 sec4_sg_index * sizeof(struct sec4_sg_entry);
252 } else {
253 pdb->f_dma = sg_dma_address(req->dst);
254 }
255
256 pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
257
258 return 0;
259}
260
261static int caam_rsa_enc(struct akcipher_request *req)
262{
263 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
264 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
265 struct caam_rsa_key *key = &ctx->key;
266 struct device *jrdev = ctx->dev;
267 struct rsa_edesc *edesc;
268 int ret;
269
270 if (unlikely(!key->n || !key->e))
271 return -EINVAL;
272
273 if (req->dst_len < key->n_sz) {
274 req->dst_len = key->n_sz;
275 dev_err(jrdev, "Output buffer length less than parameter n\n");
276 return -EOVERFLOW;
277 }
278
279 /* Allocate extended descriptor */
280 edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
281 if (IS_ERR(edesc))
282 return PTR_ERR(edesc);
283
284 /* Set RSA Encrypt Protocol Data Block */
285 ret = set_rsa_pub_pdb(req, edesc);
286 if (ret)
287 goto init_fail;
288
289 /* Initialize Job Descriptor */
290 init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);
291
292 ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
293 if (!ret)
294 return -EINPROGRESS;
295
296 rsa_pub_unmap(jrdev, edesc, req);
297
298init_fail:
299 rsa_io_unmap(jrdev, edesc, req);
300 kfree(edesc);
301 return ret;
302}
303
304static int caam_rsa_dec(struct akcipher_request *req)
305{
306 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
307 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
308 struct caam_rsa_key *key = &ctx->key;
309 struct device *jrdev = ctx->dev;
310 struct rsa_edesc *edesc;
311 int ret;
312
313 if (unlikely(!key->n || !key->d))
314 return -EINVAL;
315
316 if (req->dst_len < key->n_sz) {
317 req->dst_len = key->n_sz;
318 dev_err(jrdev, "Output buffer length less than parameter n\n");
319 return -EOVERFLOW;
320 }
321
322 /* Allocate extended descriptor */
323 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
324 if (IS_ERR(edesc))
325 return PTR_ERR(edesc);
326
327 /* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
328 ret = set_rsa_priv_f1_pdb(req, edesc);
329 if (ret)
330 goto init_fail;
331
332 /* Initialize Job Descriptor */
333 init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);
334
335 ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
336 if (!ret)
337 return -EINPROGRESS;
338
339 rsa_priv_f1_unmap(jrdev, edesc, req);
340
341init_fail:
342 rsa_io_unmap(jrdev, edesc, req);
343 kfree(edesc);
344 return ret;
345}
346
347static void caam_rsa_free_key(struct caam_rsa_key *key)
348{
349 kzfree(key->d);
350 kfree(key->e);
351 kfree(key->n);
352 key->d = NULL;
353 key->e = NULL;
354 key->n = NULL;
355 key->d_sz = 0;
356 key->e_sz = 0;
357 key->n_sz = 0;
358}
359
360/**
361 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 362 * The function skips the buffer's leading zeros, copies the remaining data
363 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
364 * the address of the new buffer.
365 *
366 * @buf : The data to read
367 * @nbytes: The amount of data to read
368 */
369static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
370{
371 u8 *val;
372
373 while (!*buf && *nbytes) {
374 buf++;
375 (*nbytes)--;
376 }
377
378 val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
379 if (!val)
380 return NULL;
381
382 memcpy(val, buf, *nbytes);
383
384 return val;
385}
386
387static int caam_rsa_check_key_length(unsigned int len)
388{
389 if (len > 4096)
390 return -EINVAL;
391 return 0;
392}
393
394static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
395 unsigned int keylen)
396{
397 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
398 struct rsa_key raw_key = {0};
399 struct caam_rsa_key *rsa_key = &ctx->key;
400 int ret;
401
402 /* Free the old RSA key if any */
403 caam_rsa_free_key(rsa_key);
404
405 ret = rsa_parse_pub_key(&raw_key, key, keylen);
406 if (ret)
407 return ret;
408
409 /* Copy key in DMA zone */
410 rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
411 if (!rsa_key->e)
412 goto err;
413
414 /*
415 * Skip leading zeros and copy the positive integer to a buffer
416 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
417 * expects a positive integer for the RSA modulus and uses its length as
418 * decryption output length.
419 */
420 rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
421 if (!rsa_key->n)
422 goto err;
423
424 if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
425 caam_rsa_free_key(rsa_key);
426 return -EINVAL;
427 }
428
429 rsa_key->e_sz = raw_key.e_sz;
430 rsa_key->n_sz = raw_key.n_sz;
431
432 memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
433
434 return 0;
435err:
436 caam_rsa_free_key(rsa_key);
437 return -ENOMEM;
438}
439
440static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
441 unsigned int keylen)
442{
443 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
444 struct rsa_key raw_key = {0};
445 struct caam_rsa_key *rsa_key = &ctx->key;
446 int ret;
447
448 /* Free the old RSA key if any */
449 caam_rsa_free_key(rsa_key);
450
451 ret = rsa_parse_priv_key(&raw_key, key, keylen);
452 if (ret)
453 return ret;
454
455 /* Copy key in DMA zone */
456 rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
457 if (!rsa_key->d)
458 goto err;
459
460 rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
461 if (!rsa_key->e)
462 goto err;
463
464 /*
465 * Skip leading zeros and copy the positive integer to a buffer
466 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
467 * expects a positive integer for the RSA modulus and uses its length as
468 * decryption output length.
469 */
470 rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
471 if (!rsa_key->n)
472 goto err;
473
474 if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
475 caam_rsa_free_key(rsa_key);
476 return -EINVAL;
477 }
478
479 rsa_key->d_sz = raw_key.d_sz;
480 rsa_key->e_sz = raw_key.e_sz;
481 rsa_key->n_sz = raw_key.n_sz;
482
483 memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
484 memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
485
486 return 0;
487
488err:
489 caam_rsa_free_key(rsa_key);
490 return -ENOMEM;
491}
492
493static int caam_rsa_max_size(struct crypto_akcipher *tfm)
494{
495 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
496 struct caam_rsa_key *key = &ctx->key;
497
498 return (key->n) ? key->n_sz : -EINVAL;
499}
500
501/* Per session pkc's driver context creation function */
502static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
503{
504 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
505
506 ctx->dev = caam_jr_alloc();
507
508 if (IS_ERR(ctx->dev)) {
509 dev_err(ctx->dev, "Job Ring Device allocation for transform failed\n");
510 return PTR_ERR(ctx->dev);
511 }
512
513 return 0;
514}
515
516/* Per session pkc's driver context cleanup function */
517static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
518{
519 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
520 struct caam_rsa_key *key = &ctx->key;
521
522 caam_rsa_free_key(key);
523 caam_jr_free(ctx->dev);
524}
525
526static struct akcipher_alg caam_rsa = {
527 .encrypt = caam_rsa_enc,
528 .decrypt = caam_rsa_dec,
529 .sign = caam_rsa_dec,
530 .verify = caam_rsa_enc,
531 .set_pub_key = caam_rsa_set_pub_key,
532 .set_priv_key = caam_rsa_set_priv_key,
533 .max_size = caam_rsa_max_size,
534 .init = caam_rsa_init_tfm,
535 .exit = caam_rsa_exit_tfm,
536 .base = {
537 .cra_name = "rsa",
538 .cra_driver_name = "rsa-caam",
539 .cra_priority = 3000,
540 .cra_module = THIS_MODULE,
541 .cra_ctxsize = sizeof(struct caam_rsa_ctx),
542 },
543};
544
545/* Public Key Cryptography module initialization handler */
546static int __init caam_pkc_init(void)
547{
548 struct device_node *dev_node;
549 struct platform_device *pdev;
550 struct device *ctrldev;
551 struct caam_drv_private *priv;
552 u32 cha_inst, pk_inst;
553 int err;
554
555 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
556 if (!dev_node) {
557 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
558 if (!dev_node)
559 return -ENODEV;
560 }
561
562 pdev = of_find_device_by_node(dev_node);
563 if (!pdev) {
564 of_node_put(dev_node);
565 return -ENODEV;
566 }
567
568 ctrldev = &pdev->dev;
569 priv = dev_get_drvdata(ctrldev);
570 of_node_put(dev_node);
571
572 /*
573 * If priv is NULL, it's probably because the caam driver wasn't
574 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
575 */
576 if (!priv)
577 return -ENODEV;
578
579 /* Determine public key hardware accelerator presence. */
580 cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
581 pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
582
583 /* Do not register algorithms if PKHA is not present. */
584 if (!pk_inst)
585 return -ENODEV;
586
587 err = crypto_register_akcipher(&caam_rsa);
588 if (err)
589 dev_warn(ctrldev, "%s alg registration failed\n",
590 caam_rsa.base.cra_driver_name);
591 else
592 dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
593
594 return err;
595}
596
597static void __exit caam_pkc_exit(void)
598{
599 crypto_unregister_akcipher(&caam_rsa);
600}
601
602module_init(caam_pkc_init);
603module_exit(caam_pkc_exit);
604
605MODULE_LICENSE("Dual BSD/GPL");
606MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
607MODULE_AUTHOR("Freescale Semiconductor");
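The set_priv_key path above depends on caam_read_raw_data() dropping leading zero octets from the modulus before the bit-length check (raw_key.n_sz << 3). Below is a minimal userspace model of that flow, built only from what the comments above state; the helper name, the 512/4096-bit bounds and the buffer handling are illustrative, not the driver's exact code:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Model of the leading-zero stripping described in the comment above. */
    static unsigned char *read_raw_data(const unsigned char *buf, size_t *nbytes)
    {
        unsigned char *val;

        while (*nbytes && !*buf) {      /* skip leading zero octets */
            buf++;
            (*nbytes)--;
        }
        if (!*nbytes)
            return NULL;

        val = malloc(*nbytes);
        if (val)
            memcpy(val, buf, *nbytes);
        return val;
    }

    int main(void)
    {
        unsigned char n[] = { 0x00, 0x00, 0xc3, 0x7b, 0x12 };
        size_t n_sz = sizeof(n);
        unsigned char *val = read_raw_data(n, &n_sz);

        /* Illustrative bounds only; the driver's own check is done by
         * caam_rsa_check_key_length(), defined earlier in this file. */
        int ok = val && (n_sz << 3) >= 512 && (n_sz << 3) <= 4096;

        printf("modulus: %zu bytes, %zu bits, usable=%d\n", n_sz, n_sz << 3, ok);
        free(val);
        return 0;
    }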
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
new file mode 100644
index 000000000000..f595d159b112
--- /dev/null
+++ b/drivers/crypto/caam/caampkc.h
@@ -0,0 +1,70 @@
1/*
2 * caam - Freescale FSL CAAM support for Public Key Cryptography descriptors
3 *
4 * Copyright 2016 Freescale Semiconductor, Inc.
5 *
6 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
7 * all the desired key parameters, input and output pointers.
8 */
9
10#ifndef _PKC_DESC_H_
11#define _PKC_DESC_H_
12#include "compat.h"
13#include "pdb.h"
14
15/**
16 * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
17 * @n : RSA modulus raw byte stream
18 * @e : RSA public exponent raw byte stream
19 * @d : RSA private exponent raw byte stream
20 * @n_sz : length in bytes of RSA modulus n
21 * @e_sz : length in bytes of RSA public exponent
22 * @d_sz : length in bytes of RSA private exponent
23 */
24struct caam_rsa_key {
25 u8 *n;
26 u8 *e;
27 u8 *d;
28 size_t n_sz;
29 size_t e_sz;
30 size_t d_sz;
31};
32
33/**
34 * caam_rsa_ctx - per session context.
35 * @key : RSA key in DMA zone
36 * @dev : device structure
37 */
38struct caam_rsa_ctx {
39 struct caam_rsa_key key;
40 struct device *dev;
41};
42
43/**
44 * rsa_edesc - s/w-extended rsa descriptor
45 * @src_nents : number of segments in input scatterlist
46 * @dst_nents : number of segments in output scatterlist
47 * @sec4_sg_bytes : length of h/w link table
48 * @sec4_sg_dma : dma address of h/w link table
49 * @sec4_sg : pointer to h/w link table
50 * @pdb : specific RSA Protocol Data Block (PDB)
51 * @hw_desc : descriptor followed by link tables if any
52 */
53struct rsa_edesc {
54 int src_nents;
55 int dst_nents;
56 int sec4_sg_bytes;
57 dma_addr_t sec4_sg_dma;
58 struct sec4_sg_entry *sec4_sg;
59 union {
60 struct rsa_pub_pdb pub;
61 struct rsa_priv_f1_pdb priv_f1;
62 } pdb;
63 u32 hw_desc[];
64};
65
66/* Descriptor construction primitives. */
67void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb);
68void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb);
69
70#endif
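The hw_desc[] flexible array member lets the driver size a single allocation to hold struct rsa_edesc, the hardware descriptor and any link tables that follow it. A small userspace sketch of that allocation pattern; the word count and the header value written here are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct edesc {
        int src_nents;
        int dst_nents;
        uint32_t hw_desc[];     /* descriptor words start right after the struct */
    };

    int main(void)
    {
        size_t desc_words = 12; /* hypothetical descriptor length */
        struct edesc *e = calloc(1, sizeof(*e) + desc_words * sizeof(uint32_t));

        if (!e)
            return 1;
        e->hw_desc[0] = 0x80000000u | (uint32_t)desc_words; /* fake header word */
        printf("allocated %zu bytes for %zu descriptor words\n",
               sizeof(*e) + desc_words * sizeof(uint32_t), desc_words);
        free(e);
        return 0;
    }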
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index b6955ecdfb3f..7149cd2492e0 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -35,8 +35,11 @@
35#include <crypto/md5.h> 35#include <crypto/md5.h>
36#include <crypto/internal/aead.h> 36#include <crypto/internal/aead.h>
37#include <crypto/authenc.h> 37#include <crypto/authenc.h>
38#include <crypto/akcipher.h>
38#include <crypto/scatterwalk.h> 39#include <crypto/scatterwalk.h>
39#include <crypto/internal/skcipher.h> 40#include <crypto/internal/skcipher.h>
40#include <crypto/internal/hash.h> 41#include <crypto/internal/hash.h>
42#include <crypto/internal/rsa.h>
43#include <crypto/internal/akcipher.h>
41 44
42#endif /* !defined(CAAM_COMPAT_H) */ 45#endif /* !defined(CAAM_COMPAT_H) */
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 5ad5f3009ae0..0ec112ee5204 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -15,6 +15,9 @@
15#include "desc_constr.h" 15#include "desc_constr.h"
16#include "error.h" 16#include "error.h"
17 17
18bool caam_little_end;
19EXPORT_SYMBOL(caam_little_end);
20
18/* 21/*
19 * i.MX targets tend to have clock control subsystems that can 22 * i.MX targets tend to have clock control subsystems that can
20 * enable/disable clocking to our device. 23 * enable/disable clocking to our device.
@@ -106,7 +109,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
106 109
107 110
108 if (ctrlpriv->virt_en == 1) { 111 if (ctrlpriv->virt_en == 1) {
109 setbits32(&ctrl->deco_rsr, DECORSR_JR0); 112 clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);
110 113
111 while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) && 114 while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
112 --timeout) 115 --timeout)
@@ -115,7 +118,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
115 timeout = 100000; 118 timeout = 100000;
116 } 119 }
117 120
118 setbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE); 121 clrsetbits_32(&ctrl->deco_rq, 0, DECORR_RQD0ENABLE);
119 122
120 while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) && 123 while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
121 --timeout) 124 --timeout)
@@ -123,12 +126,12 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
123 126
124 if (!timeout) { 127 if (!timeout) {
125 dev_err(ctrldev, "failed to acquire DECO 0\n"); 128 dev_err(ctrldev, "failed to acquire DECO 0\n");
126 clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE); 129 clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
127 return -ENODEV; 130 return -ENODEV;
128 } 131 }
129 132
130 for (i = 0; i < desc_len(desc); i++) 133 for (i = 0; i < desc_len(desc); i++)
131 wr_reg32(&deco->descbuf[i], *(desc + i)); 134 wr_reg32(&deco->descbuf[i], caam32_to_cpu(*(desc + i)));
132 135
133 flags = DECO_JQCR_WHL; 136 flags = DECO_JQCR_WHL;
134 /* 137 /*
@@ -139,7 +142,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
139 flags |= DECO_JQCR_FOUR; 142 flags |= DECO_JQCR_FOUR;
140 143
141 /* Instruct the DECO to execute it */ 144 /* Instruct the DECO to execute it */
142 setbits32(&deco->jr_ctl_hi, flags); 145 clrsetbits_32(&deco->jr_ctl_hi, 0, flags);
143 146
144 timeout = 10000000; 147 timeout = 10000000;
145 do { 148 do {
@@ -158,10 +161,10 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
158 DECO_OP_STATUS_HI_ERR_MASK; 161 DECO_OP_STATUS_HI_ERR_MASK;
159 162
160 if (ctrlpriv->virt_en == 1) 163 if (ctrlpriv->virt_en == 1)
161 clrbits32(&ctrl->deco_rsr, DECORSR_JR0); 164 clrsetbits_32(&ctrl->deco_rsr, DECORSR_JR0, 0);
162 165
163 /* Mark the DECO as free */ 166 /* Mark the DECO as free */
164 clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE); 167 clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
165 168
166 if (!timeout) 169 if (!timeout)
167 return -EAGAIN; 170 return -EAGAIN;
@@ -349,7 +352,7 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
349 r4tst = &ctrl->r4tst[0]; 352 r4tst = &ctrl->r4tst[0];
350 353
351 /* put RNG4 into program mode */ 354 /* put RNG4 into program mode */
352 setbits32(&r4tst->rtmctl, RTMCTL_PRGM); 355 clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM);
353 356
354 /* 357 /*
355 * Performance-wise, it does not make sense to 358 * Performance-wise, it does not make sense to
@@ -363,7 +366,7 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
363 >> RTSDCTL_ENT_DLY_SHIFT; 366 >> RTSDCTL_ENT_DLY_SHIFT;
364 if (ent_delay <= val) { 367 if (ent_delay <= val) {
365 /* put RNG4 into run mode */ 368 /* put RNG4 into run mode */
366 clrbits32(&r4tst->rtmctl, RTMCTL_PRGM); 369 clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
367 return; 370 return;
368 } 371 }
369 372
@@ -381,9 +384,9 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
381 * select raw sampling in both entropy shifter 384 * select raw sampling in both entropy shifter
382 * and statistical checker 385 * and statistical checker
383 */ 386 */
384 setbits32(&val, RTMCTL_SAMP_MODE_RAW_ES_SC); 387 clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
385 /* put RNG4 into run mode */ 388 /* put RNG4 into run mode */
386 clrbits32(&val, RTMCTL_PRGM); 389 clrsetbits_32(&val, RTMCTL_PRGM, 0);
387 /* write back the control register */ 390 /* write back the control register */
388 wr_reg32(&r4tst->rtmctl, val); 391 wr_reg32(&r4tst->rtmctl, val);
389} 392}
@@ -406,6 +409,23 @@ int caam_get_era(void)
406} 409}
407EXPORT_SYMBOL(caam_get_era); 410EXPORT_SYMBOL(caam_get_era);
408 411
412#ifdef CONFIG_DEBUG_FS
413static int caam_debugfs_u64_get(void *data, u64 *val)
414{
415 *val = caam64_to_cpu(*(u64 *)data);
416 return 0;
417}
418
419static int caam_debugfs_u32_get(void *data, u64 *val)
420{
421 *val = caam32_to_cpu(*(u32 *)data);
422 return 0;
423}
424
425DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
426DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
427#endif
428
409/* Probe routine for CAAM top (controller) level */ 429/* Probe routine for CAAM top (controller) level */
410static int caam_probe(struct platform_device *pdev) 430static int caam_probe(struct platform_device *pdev)
411{ 431{
@@ -504,6 +524,10 @@ static int caam_probe(struct platform_device *pdev)
504 ret = -ENOMEM; 524 ret = -ENOMEM;
505 goto disable_caam_emi_slow; 525 goto disable_caam_emi_slow;
506 } 526 }
527
528 caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
529 (CSTA_PLEND | CSTA_ALT_PLEND));
530
507 /* Finding the page size for using the CTPR_MS register */ 531 /* Finding the page size for using the CTPR_MS register */
508 comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms); 532 comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
509 pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT; 533 pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
@@ -559,9 +583,9 @@ static int caam_probe(struct platform_device *pdev)
559 } 583 }
560 584
561 if (ctrlpriv->virt_en == 1) 585 if (ctrlpriv->virt_en == 1)
562 setbits32(&ctrl->jrstart, JRSTART_JR0_START | 586 clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
563 JRSTART_JR1_START | JRSTART_JR2_START | 587 JRSTART_JR1_START | JRSTART_JR2_START |
564 JRSTART_JR3_START); 588 JRSTART_JR3_START);
565 589
566 if (sizeof(dma_addr_t) == sizeof(u64)) 590 if (sizeof(dma_addr_t) == sizeof(u64))
567 if (of_device_is_compatible(nprop, "fsl,sec-v5.0")) 591 if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
@@ -693,7 +717,7 @@ static int caam_probe(struct platform_device *pdev)
693 ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK; 717 ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
694 718
695 /* Enable RDB bit so that RNG works faster */ 719 /* Enable RDB bit so that RNG works faster */
696 setbits32(&ctrl->scfgr, SCFGR_RDBENABLE); 720 clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
697 } 721 }
698 722
699 /* NOTE: RTIC detection ought to go here, around Si time */ 723 /* NOTE: RTIC detection ought to go here, around Si time */
@@ -719,48 +743,59 @@ static int caam_probe(struct platform_device *pdev)
719 ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root); 743 ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
720 744
721 /* Controller-level - performance monitor counters */ 745 /* Controller-level - performance monitor counters */
746
722 ctrlpriv->ctl_rq_dequeued = 747 ctrlpriv->ctl_rq_dequeued =
723 debugfs_create_u64("rq_dequeued", 748 debugfs_create_file("rq_dequeued",
724 S_IRUSR | S_IRGRP | S_IROTH, 749 S_IRUSR | S_IRGRP | S_IROTH,
725 ctrlpriv->ctl, &perfmon->req_dequeued); 750 ctrlpriv->ctl, &perfmon->req_dequeued,
751 &caam_fops_u64_ro);
726 ctrlpriv->ctl_ob_enc_req = 752 ctrlpriv->ctl_ob_enc_req =
727 debugfs_create_u64("ob_rq_encrypted", 753 debugfs_create_file("ob_rq_encrypted",
728 S_IRUSR | S_IRGRP | S_IROTH, 754 S_IRUSR | S_IRGRP | S_IROTH,
729 ctrlpriv->ctl, &perfmon->ob_enc_req); 755 ctrlpriv->ctl, &perfmon->ob_enc_req,
756 &caam_fops_u64_ro);
730 ctrlpriv->ctl_ib_dec_req = 757 ctrlpriv->ctl_ib_dec_req =
731 debugfs_create_u64("ib_rq_decrypted", 758 debugfs_create_file("ib_rq_decrypted",
732 S_IRUSR | S_IRGRP | S_IROTH, 759 S_IRUSR | S_IRGRP | S_IROTH,
733 ctrlpriv->ctl, &perfmon->ib_dec_req); 760 ctrlpriv->ctl, &perfmon->ib_dec_req,
761 &caam_fops_u64_ro);
734 ctrlpriv->ctl_ob_enc_bytes = 762 ctrlpriv->ctl_ob_enc_bytes =
735 debugfs_create_u64("ob_bytes_encrypted", 763 debugfs_create_file("ob_bytes_encrypted",
736 S_IRUSR | S_IRGRP | S_IROTH, 764 S_IRUSR | S_IRGRP | S_IROTH,
737 ctrlpriv->ctl, &perfmon->ob_enc_bytes); 765 ctrlpriv->ctl, &perfmon->ob_enc_bytes,
766 &caam_fops_u64_ro);
738 ctrlpriv->ctl_ob_prot_bytes = 767 ctrlpriv->ctl_ob_prot_bytes =
739 debugfs_create_u64("ob_bytes_protected", 768 debugfs_create_file("ob_bytes_protected",
740 S_IRUSR | S_IRGRP | S_IROTH, 769 S_IRUSR | S_IRGRP | S_IROTH,
741 ctrlpriv->ctl, &perfmon->ob_prot_bytes); 770 ctrlpriv->ctl, &perfmon->ob_prot_bytes,
771 &caam_fops_u64_ro);
742 ctrlpriv->ctl_ib_dec_bytes = 772 ctrlpriv->ctl_ib_dec_bytes =
743 debugfs_create_u64("ib_bytes_decrypted", 773 debugfs_create_file("ib_bytes_decrypted",
744 S_IRUSR | S_IRGRP | S_IROTH, 774 S_IRUSR | S_IRGRP | S_IROTH,
745 ctrlpriv->ctl, &perfmon->ib_dec_bytes); 775 ctrlpriv->ctl, &perfmon->ib_dec_bytes,
776 &caam_fops_u64_ro);
746 ctrlpriv->ctl_ib_valid_bytes = 777 ctrlpriv->ctl_ib_valid_bytes =
747 debugfs_create_u64("ib_bytes_validated", 778 debugfs_create_file("ib_bytes_validated",
748 S_IRUSR | S_IRGRP | S_IROTH, 779 S_IRUSR | S_IRGRP | S_IROTH,
749 ctrlpriv->ctl, &perfmon->ib_valid_bytes); 780 ctrlpriv->ctl, &perfmon->ib_valid_bytes,
781 &caam_fops_u64_ro);
750 782
751 /* Controller level - global status values */ 783 /* Controller level - global status values */
752 ctrlpriv->ctl_faultaddr = 784 ctrlpriv->ctl_faultaddr =
753 debugfs_create_u64("fault_addr", 785 debugfs_create_file("fault_addr",
754 S_IRUSR | S_IRGRP | S_IROTH, 786 S_IRUSR | S_IRGRP | S_IROTH,
755 ctrlpriv->ctl, &perfmon->faultaddr); 787 ctrlpriv->ctl, &perfmon->faultaddr,
788 &caam_fops_u32_ro);
756 ctrlpriv->ctl_faultdetail = 789 ctrlpriv->ctl_faultdetail =
757 debugfs_create_u32("fault_detail", 790 debugfs_create_file("fault_detail",
758 S_IRUSR | S_IRGRP | S_IROTH, 791 S_IRUSR | S_IRGRP | S_IROTH,
759 ctrlpriv->ctl, &perfmon->faultdetail); 792 ctrlpriv->ctl, &perfmon->faultdetail,
793 &caam_fops_u32_ro);
760 ctrlpriv->ctl_faultstatus = 794 ctrlpriv->ctl_faultstatus =
761 debugfs_create_u32("fault_status", 795 debugfs_create_file("fault_status",
762 S_IRUSR | S_IRGRP | S_IROTH, 796 S_IRUSR | S_IRGRP | S_IROTH,
763 ctrlpriv->ctl, &perfmon->status); 797 ctrlpriv->ctl, &perfmon->status,
798 &caam_fops_u32_ro);
764 799
765 /* Internal covering keys (useful in non-secure mode only) */ 800 /* Internal covering keys (useful in non-secure mode only) */
766 ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0]; 801 ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
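The switch from debugfs_create_u64/u32() to debugfs_create_file() with the caam_fops_*_ro attributes exists because the performance-monitor registers hold values in the device's byte order; the getter converts on each read instead of exposing raw memory. A userspace model of that getter, assuming a big-endian device and using glibc's <endian.h> helpers in place of the kernel conversions:

    #include <endian.h>
    #include <inttypes.h>
    #include <stdio.h>

    static uint64_t counter;        /* stands in for perfmon->req_dequeued */

    static uint64_t counter_get(const void *data)
    {
        /* What caam_debugfs_u64_get() does conceptually: decode device order. */
        return be64toh(*(const uint64_t *)data);
    }

    int main(void)
    {
        counter = htobe64(12345);   /* the device writes big-endian */
        printf("raw=0x%016" PRIx64 " decoded=%" PRIu64 "\n",
               counter, counter_get(&counter));
        return 0;
    }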
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 1e93c6af2275..26427c11ad87 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -20,19 +20,18 @@
20#define SEC4_SG_BPID_MASK 0x000000ff 20#define SEC4_SG_BPID_MASK 0x000000ff
21#define SEC4_SG_BPID_SHIFT 16 21#define SEC4_SG_BPID_SHIFT 16
22#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */ 22#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
23#define SEC4_SG_OFFS_MASK 0x00001fff 23#define SEC4_SG_OFFSET_MASK 0x00001fff
24 24
25struct sec4_sg_entry { 25struct sec4_sg_entry {
26#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX 26#if !defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && \
27 defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX)
27 u32 rsvd1; 28 u32 rsvd1;
28 dma_addr_t ptr; 29 dma_addr_t ptr;
29#else 30#else
30 u64 ptr; 31 u64 ptr;
31#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_IMX */ 32#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_IMX */
32 u32 len; 33 u32 len;
33 u8 rsvd2; 34 u32 bpid_offset;
34 u8 buf_pool_id;
35 u16 offset;
36}; 35};
37 36
38/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */ 37/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
@@ -454,6 +453,8 @@ struct sec4_sg_entry {
454#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT) 453#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
455#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT) 454#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT)
456#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT) 455#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
456#define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
457#define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
457 458
458/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */ 459/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
459#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT) 460#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
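Merging buf_pool_id and offset into one bpid_offset word keeps the link-table entry layout identical on little- and big-endian hosts once cpu_to_caam32() is applied. A sketch of the packing implied by the BPID/OFFSET shifts and masks above (the driver currently writes only the offset, so the bpid part here is purely illustrative):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SEC4_SG_BPID_SHIFT  16
    #define SEC4_SG_BPID_MASK   0x000000ff
    #define SEC4_SG_OFFSET_MASK 0x00001fff

    /* Pack buffer-pool id and offset into the single bpid_offset word. */
    static uint32_t pack_bpid_offset(uint32_t bpid, uint32_t offset)
    {
        return ((bpid & SEC4_SG_BPID_MASK) << SEC4_SG_BPID_SHIFT) |
               (offset & SEC4_SG_OFFSET_MASK);
    }

    int main(void)
    {
        printf("bpid_offset = 0x%08" PRIx32 "\n", pack_bpid_offset(3, 0x40));
        return 0;
    }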
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 98d07de24fc4..d3869b95e7b1 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -5,6 +5,7 @@
5 */ 5 */
6 6
7#include "desc.h" 7#include "desc.h"
8#include "regs.h"
8 9
9#define IMMEDIATE (1 << 23) 10#define IMMEDIATE (1 << 23)
10#define CAAM_CMD_SZ sizeof(u32) 11#define CAAM_CMD_SZ sizeof(u32)
@@ -30,9 +31,11 @@
30 LDST_SRCDST_WORD_DECOCTRL | \ 31 LDST_SRCDST_WORD_DECOCTRL | \
31 (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT)) 32 (LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
32 33
34extern bool caam_little_end;
35
33static inline int desc_len(u32 *desc) 36static inline int desc_len(u32 *desc)
34{ 37{
35 return *desc & HDR_DESCLEN_MASK; 38 return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
36} 39}
37 40
38static inline int desc_bytes(void *desc) 41static inline int desc_bytes(void *desc)
@@ -52,7 +55,7 @@ static inline void *sh_desc_pdb(u32 *desc)
52 55
53static inline void init_desc(u32 *desc, u32 options) 56static inline void init_desc(u32 *desc, u32 options)
54{ 57{
55 *desc = (options | HDR_ONE) + 1; 58 *desc = cpu_to_caam32((options | HDR_ONE) + 1);
56} 59}
57 60
58static inline void init_sh_desc(u32 *desc, u32 options) 61static inline void init_sh_desc(u32 *desc, u32 options)
@@ -74,13 +77,21 @@ static inline void init_job_desc(u32 *desc, u32 options)
74 init_desc(desc, CMD_DESC_HDR | options); 77 init_desc(desc, CMD_DESC_HDR | options);
75} 78}
76 79
80static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
81{
82 u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
83
84 init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
85}
86
77static inline void append_ptr(u32 *desc, dma_addr_t ptr) 87static inline void append_ptr(u32 *desc, dma_addr_t ptr)
78{ 88{
79 dma_addr_t *offset = (dma_addr_t *)desc_end(desc); 89 dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
80 90
81 *offset = ptr; 91 *offset = cpu_to_caam_dma(ptr);
82 92
83 (*desc) += CAAM_PTR_SZ / CAAM_CMD_SZ; 93 (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) +
94 CAAM_PTR_SZ / CAAM_CMD_SZ);
84} 95}
85 96
86static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len, 97static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
@@ -99,16 +110,17 @@ static inline void append_data(u32 *desc, void *data, int len)
99 if (len) /* avoid sparse warning: memcpy with byte count of 0 */ 110 if (len) /* avoid sparse warning: memcpy with byte count of 0 */
100 memcpy(offset, data, len); 111 memcpy(offset, data, len);
101 112
102 (*desc) += (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ; 113 (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) +
114 (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
103} 115}
104 116
105static inline void append_cmd(u32 *desc, u32 command) 117static inline void append_cmd(u32 *desc, u32 command)
106{ 118{
107 u32 *cmd = desc_end(desc); 119 u32 *cmd = desc_end(desc);
108 120
109 *cmd = command; 121 *cmd = cpu_to_caam32(command);
110 122
111 (*desc)++; 123 (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + 1);
112} 124}
113 125
114#define append_u32 append_cmd 126#define append_u32 append_cmd
@@ -117,16 +129,22 @@ static inline void append_u64(u32 *desc, u64 data)
117{ 129{
118 u32 *offset = desc_end(desc); 130 u32 *offset = desc_end(desc);
119 131
120 *offset = upper_32_bits(data); 132 /* Only 32-bit alignment is guaranteed in descriptor buffer */
121 *(++offset) = lower_32_bits(data); 133 if (caam_little_end) {
134 *offset = cpu_to_caam32(lower_32_bits(data));
135 *(++offset) = cpu_to_caam32(upper_32_bits(data));
136 } else {
137 *offset = cpu_to_caam32(upper_32_bits(data));
138 *(++offset) = cpu_to_caam32(lower_32_bits(data));
139 }
122 140
123 (*desc) += 2; 141 (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + 2);
124} 142}
125 143
126/* Write command without affecting header, and return pointer to next word */ 144/* Write command without affecting header, and return pointer to next word */
127static inline u32 *write_cmd(u32 *desc, u32 command) 145static inline u32 *write_cmd(u32 *desc, u32 command)
128{ 146{
129 *desc = command; 147 *desc = cpu_to_caam32(command);
130 148
131 return desc + 1; 149 return desc + 1;
132} 150}
@@ -168,14 +186,17 @@ APPEND_CMD_RET(move, MOVE)
168 186
169static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd) 187static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
170{ 188{
171 *jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc)); 189 *jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
190 (desc_len(desc) - (jump_cmd - desc)));
172} 191}
173 192
174static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd) 193static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
175{ 194{
176 *move_cmd &= ~MOVE_OFFSET_MASK; 195 u32 val = caam32_to_cpu(*move_cmd);
177 *move_cmd = *move_cmd | ((desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) & 196
178 MOVE_OFFSET_MASK); 197 val &= ~MOVE_OFFSET_MASK;
198 val |= (desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) & MOVE_OFFSET_MASK;
199 *move_cmd = cpu_to_caam32(val);
179} 200}
180 201
181#define APPEND_CMD(cmd, op) \ 202#define APPEND_CMD(cmd, op) \
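init_job_desc_pdb() rounds the PDB up to 32-bit words and points the header's start index just past it, so the engine begins execution at the first command after the PDB. A quick userspace check of that arithmetic; HDR_START_IDX_SHIFT is taken as 16 here for illustration, which may not match the value in desc.h:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CAAM_CMD_SZ          sizeof(uint32_t)
    #define HDR_START_IDX_SHIFT  16    /* illustrative value */

    /* Model of init_job_desc_pdb(): round the PDB up to words, then set the
     * start index to the word just past it. */
    static uint32_t pdb_header(size_t pdb_bytes, uint32_t options)
    {
        uint32_t pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;

        return ((pdb_len + 1) << HDR_START_IDX_SHIFT) | options;
    }

    int main(void)
    {
        /* A 40-byte PDB (e.g. rsa_pub_pdb with 64-bit DMA addresses) needs
         * 10 words, so execution starts at word index 11. */
        printf("header options = 0x%08" PRIx32 "\n", pdb_header(40, 0));
        return 0;
    }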
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 5ef4be22eb80..a81f551ac222 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -31,7 +31,7 @@ static int caam_reset_hw_jr(struct device *dev)
31 * mask interrupts since we are going to poll 31 * mask interrupts since we are going to poll
32 * for reset completion status 32 * for reset completion status
33 */ 33 */
34 setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); 34 clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
35 35
36 /* initiate flush (required prior to reset) */ 36 /* initiate flush (required prior to reset) */
37 wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); 37 wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
@@ -57,7 +57,7 @@ static int caam_reset_hw_jr(struct device *dev)
57 } 57 }
58 58
59 /* unmask interrupts */ 59 /* unmask interrupts */
60 clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); 60 clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
61 61
62 return 0; 62 return 0;
63} 63}
@@ -147,7 +147,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
147 } 147 }
148 148
149 /* mask valid interrupts */ 149 /* mask valid interrupts */
150 setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); 150 clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
151 151
152 /* Have valid interrupt at this point, just ACK and trigger */ 152 /* Have valid interrupt at this point, just ACK and trigger */
153 wr_reg32(&jrp->rregs->jrintstatus, irqstate); 153 wr_reg32(&jrp->rregs->jrintstatus, irqstate);
@@ -182,7 +182,7 @@ static void caam_jr_dequeue(unsigned long devarg)
182 sw_idx = (tail + i) & (JOBR_DEPTH - 1); 182 sw_idx = (tail + i) & (JOBR_DEPTH - 1);
183 183
184 if (jrp->outring[hw_idx].desc == 184 if (jrp->outring[hw_idx].desc ==
185 jrp->entinfo[sw_idx].desc_addr_dma) 185 caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma))
186 break; /* found */ 186 break; /* found */
187 } 187 }
188 /* we should never fail to find a matching descriptor */ 188 /* we should never fail to find a matching descriptor */
@@ -200,7 +200,7 @@ static void caam_jr_dequeue(unsigned long devarg)
200 usercall = jrp->entinfo[sw_idx].callbk; 200 usercall = jrp->entinfo[sw_idx].callbk;
201 userarg = jrp->entinfo[sw_idx].cbkarg; 201 userarg = jrp->entinfo[sw_idx].cbkarg;
202 userdesc = jrp->entinfo[sw_idx].desc_addr_virt; 202 userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
203 userstatus = jrp->outring[hw_idx].jrstatus; 203 userstatus = caam32_to_cpu(jrp->outring[hw_idx].jrstatus);
204 204
205 /* 205 /*
206 * Make sure all information from the job has been obtained 206 * Make sure all information from the job has been obtained
@@ -236,7 +236,7 @@ static void caam_jr_dequeue(unsigned long devarg)
236 } 236 }
237 237
238 /* reenable / unmask IRQs */ 238 /* reenable / unmask IRQs */
239 clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); 239 clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
240} 240}
241 241
242/** 242/**
@@ -330,7 +330,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
330 int head, tail, desc_size; 330 int head, tail, desc_size;
331 dma_addr_t desc_dma; 331 dma_addr_t desc_dma;
332 332
333 desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32); 333 desc_size = (caam32_to_cpu(*desc) & HDR_JD_LENGTH_MASK) * sizeof(u32);
334 desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE); 334 desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
335 if (dma_mapping_error(dev, desc_dma)) { 335 if (dma_mapping_error(dev, desc_dma)) {
336 dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n"); 336 dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
@@ -356,7 +356,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
356 head_entry->cbkarg = areq; 356 head_entry->cbkarg = areq;
357 head_entry->desc_addr_dma = desc_dma; 357 head_entry->desc_addr_dma = desc_dma;
358 358
359 jrp->inpring[jrp->inp_ring_write_index] = desc_dma; 359 jrp->inpring[jrp->inp_ring_write_index] = cpu_to_caam_dma(desc_dma);
360 360
361 /* 361 /*
362 * Guarantee that the descriptor's DMA address has been written to 362 * Guarantee that the descriptor's DMA address has been written to
@@ -444,9 +444,9 @@ static int caam_jr_init(struct device *dev)
444 spin_lock_init(&jrp->outlock); 444 spin_lock_init(&jrp->outlock);
445 445
446 /* Select interrupt coalescing parameters */ 446 /* Select interrupt coalescing parameters */
447 setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC | 447 clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
448 (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) | 448 (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
449 (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); 449 (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
450 450
451 return 0; 451 return 0;
452 452
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
index 3a87c0cf879a..aaa00dd1c601 100644
--- a/drivers/crypto/caam/pdb.h
+++ b/drivers/crypto/caam/pdb.h
@@ -1,18 +1,19 @@
1/* 1/*
2 * CAAM Protocol Data Block (PDB) definition header file 2 * CAAM Protocol Data Block (PDB) definition header file
3 * 3 *
4 * Copyright 2008-2012 Freescale Semiconductor, Inc. 4 * Copyright 2008-2016 Freescale Semiconductor, Inc.
5 * 5 *
6 */ 6 */
7 7
8#ifndef CAAM_PDB_H 8#ifndef CAAM_PDB_H
9#define CAAM_PDB_H 9#define CAAM_PDB_H
10#include "compat.h"
10 11
11/* 12/*
12 * PDB- IPSec ESP Header Modification Options 13 * PDB- IPSec ESP Header Modification Options
13 */ 14 */
14#define PDBHMO_ESP_DECAP_SHIFT 12 15#define PDBHMO_ESP_DECAP_SHIFT 28
15#define PDBHMO_ESP_ENCAP_SHIFT 4 16#define PDBHMO_ESP_ENCAP_SHIFT 28
16/* 17/*
17 * Encap and Decap - Decrement TTL (Hop Limit) - Based on the value of the 18 * Encap and Decap - Decrement TTL (Hop Limit) - Based on the value of the
18 * Options Byte IP version (IPvsn) field: 19 * Options Byte IP version (IPvsn) field:
@@ -32,12 +33,23 @@
32 */ 33 */
33#define PDBHMO_ESP_DFBIT (0x04 << PDBHMO_ESP_ENCAP_SHIFT) 34#define PDBHMO_ESP_DFBIT (0x04 << PDBHMO_ESP_ENCAP_SHIFT)
34 35
36#define PDBNH_ESP_ENCAP_SHIFT 16
37#define PDBNH_ESP_ENCAP_MASK (0xff << PDBNH_ESP_ENCAP_SHIFT)
38
39#define PDBHDRLEN_ESP_DECAP_SHIFT 16
40#define PDBHDRLEN_MASK (0x0fff << PDBHDRLEN_ESP_DECAP_SHIFT)
41
42#define PDB_NH_OFFSET_SHIFT 8
43#define PDB_NH_OFFSET_MASK (0xff << PDB_NH_OFFSET_SHIFT)
44
35/* 45/*
36 * PDB - IPSec ESP Encap/Decap Options 46 * PDB - IPSec ESP Encap/Decap Options
37 */ 47 */
38#define PDBOPTS_ESP_ARSNONE 0x00 /* no antireplay window */ 48#define PDBOPTS_ESP_ARSNONE 0x00 /* no antireplay window */
39#define PDBOPTS_ESP_ARS32 0x40 /* 32-entry antireplay window */ 49#define PDBOPTS_ESP_ARS32 0x40 /* 32-entry antireplay window */
50#define PDBOPTS_ESP_ARS128 0x80 /* 128-entry antireplay window */
40#define PDBOPTS_ESP_ARS64 0xc0 /* 64-entry antireplay window */ 51#define PDBOPTS_ESP_ARS64 0xc0 /* 64-entry antireplay window */
52#define PDBOPTS_ESP_ARS_MASK 0xc0 /* antireplay window mask */
41#define PDBOPTS_ESP_IVSRC 0x20 /* IV comes from internal random gen */ 53#define PDBOPTS_ESP_IVSRC 0x20 /* IV comes from internal random gen */
42#define PDBOPTS_ESP_ESN 0x10 /* extended sequence included */ 54#define PDBOPTS_ESP_ESN 0x10 /* extended sequence included */
43#define PDBOPTS_ESP_OUTFMT 0x08 /* output only decapsulation (decap) */ 55#define PDBOPTS_ESP_OUTFMT 0x08 /* output only decapsulation (decap) */
@@ -54,35 +66,73 @@
54/* 66/*
55 * General IPSec encap/decap PDB definitions 67 * General IPSec encap/decap PDB definitions
56 */ 68 */
69
70/**
71 * ipsec_encap_cbc - PDB part for IPsec CBC encapsulation
72 * @iv: 16-byte array initialization vector
73 */
57struct ipsec_encap_cbc { 74struct ipsec_encap_cbc {
58 u32 iv[4]; 75 u8 iv[16];
59}; 76};
60 77
78/**
79 * ipsec_encap_ctr - PDB part for IPsec CTR encapsulation
80 * @ctr_nonce: 4-byte array nonce
81 * @ctr_initial: initial count constant
82 * @iv: initialization vector
83 */
61struct ipsec_encap_ctr { 84struct ipsec_encap_ctr {
62 u32 ctr_nonce; 85 u8 ctr_nonce[4];
63 u32 ctr_initial; 86 u32 ctr_initial;
64 u32 iv[2]; 87 u64 iv;
65}; 88};
66 89
90/**
91 * ipsec_encap_ccm - PDB part for IPsec CCM encapsulation
92 * @salt: 3-byte array salt (lower 24 bits)
93 * @ccm_opt: CCM algorithm options - MSB-LSB description:
94 * b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV,
95 * 0x7B for 16-byte ICV (cf. RFC4309, RFC3610)
96 * ctr_flags (8b) - counter flags; constant equal to 0x3
97 * ctr_initial (16b) - initial count constant
98 * @iv: initialization vector
99 */
67struct ipsec_encap_ccm { 100struct ipsec_encap_ccm {
68 u32 salt; /* lower 24 bits */ 101 u8 salt[4];
69 u8 b0_flags; 102 u32 ccm_opt;
70 u8 ctr_flags; 103 u64 iv;
71 u16 ctr_initial;
72 u32 iv[2];
73}; 104};
74 105
106/**
107 * ipsec_encap_gcm - PDB part for IPsec GCM encapsulation
108 * @salt: 3-byte array salt (lower 24 bits)
109 * @rsvd: reserved, do not use
110 * @iv: initialization vector
111 */
75struct ipsec_encap_gcm { 112struct ipsec_encap_gcm {
76 u32 salt; /* lower 24 bits */ 113 u8 salt[4];
77 u32 rsvd1; 114 u32 rsvd1;
78 u32 iv[2]; 115 u64 iv;
79}; 116};
80 117
118/**
119 * ipsec_encap_pdb - PDB for IPsec encapsulation
120 * @options: MSB-LSB description
121 * hmo (header manipulation options) - 4b
122 * reserved - 4b
123 * next header - 8b
124 * next header offset - 8b
125 * option flags (depend on selected algorithm) - 8b
126 * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
127 * @seq_num: IPsec sequence number
128 * @spi: IPsec SPI (Security Parameters Index)
129 * @ip_hdr_len: optional IP Header length (in bytes)
130 * reserved - 16b
131 * Opt. IP Hdr Len - 16b
132 * @ip_hdr: optional IP Header content
133 */
81struct ipsec_encap_pdb { 134struct ipsec_encap_pdb {
82 u8 hmo_rsvd; 135 u32 options;
83 u8 ip_nh;
84 u8 ip_nh_offset;
85 u8 options;
86 u32 seq_num_ext_hi; 136 u32 seq_num_ext_hi;
87 u32 seq_num; 137 u32 seq_num;
88 union { 138 union {
@@ -92,36 +142,65 @@ struct ipsec_encap_pdb {
92 struct ipsec_encap_gcm gcm; 142 struct ipsec_encap_gcm gcm;
93 }; 143 };
94 u32 spi; 144 u32 spi;
95 u16 rsvd1; 145 u32 ip_hdr_len;
96 u16 ip_hdr_len; 146 u32 ip_hdr[0];
97 u32 ip_hdr[0]; /* optional IP Header content */
98}; 147};
99 148
149/**
150 * ipsec_decap_cbc - PDB part for IPsec CBC decapsulation
151 * @rsvd: reserved, do not use
152 */
100struct ipsec_decap_cbc { 153struct ipsec_decap_cbc {
101 u32 rsvd[2]; 154 u32 rsvd[2];
102}; 155};
103 156
157/**
158 * ipsec_decap_ctr - PDB part for IPsec CTR decapsulation
159 * @ctr_nonce: 4-byte array nonce
160 * @ctr_initial: initial count constant
161 */
104struct ipsec_decap_ctr { 162struct ipsec_decap_ctr {
105 u32 salt; 163 u8 ctr_nonce[4];
106 u32 ctr_initial; 164 u32 ctr_initial;
107}; 165};
108 166
167/**
168 * ipsec_decap_ccm - PDB part for IPsec CCM decapsulation
169 * @salt: 3-byte salt (lower 24 bits)
170 * @ccm_opt: CCM algorithm options - MSB-LSB description:
171 * b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV,
172 * 0x7B for 16-byte ICV (cf. RFC4309, RFC3610)
173 * ctr_flags (8b) - counter flags; constant equal to 0x3
174 * ctr_initial (16b) - initial count constant
175 */
109struct ipsec_decap_ccm { 176struct ipsec_decap_ccm {
110 u32 salt; 177 u8 salt[4];
111 u8 iv_flags; 178 u32 ccm_opt;
112 u8 ctr_flags;
113 u16 ctr_initial;
114}; 179};
115 180
181/**
182 * ipsec_decap_gcm - PDB part for IPsec GCM decapsulation
183 * @salt: 4-byte salt
184 * @rsvd: reserved, do not use
185 */
116struct ipsec_decap_gcm { 186struct ipsec_decap_gcm {
117 u32 salt; 187 u8 salt[4];
118 u32 resvd; 188 u32 resvd;
119}; 189};
120 190
191/**
192 * ipsec_decap_pdb - PDB for IPsec decapsulation
193 * @options: MSB-LSB description
194 * hmo (header manipulation options) - 4b
195 * IP header length - 12b
196 * next header offset - 8b
197 * option flags (depend on selected algorithm) - 8b
198 * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
199 * @seq_num: IPsec sequence number
200 * @anti_replay: Anti-replay window; size depends on ARS (option flags)
201 */
121struct ipsec_decap_pdb { 202struct ipsec_decap_pdb {
122 u16 hmo_ip_hdr_len; 203 u32 options;
123 u8 ip_nh_offset;
124 u8 options;
125 union { 204 union {
126 struct ipsec_decap_cbc cbc; 205 struct ipsec_decap_cbc cbc;
127 struct ipsec_decap_ctr ctr; 206 struct ipsec_decap_ctr ctr;
@@ -130,8 +209,7 @@ struct ipsec_decap_pdb {
130 }; 209 };
131 u32 seq_num_ext_hi; 210 u32 seq_num_ext_hi;
132 u32 seq_num; 211 u32 seq_num;
133 u32 anti_replay[2]; 212 __be32 anti_replay[4];
134 u32 end_index[0];
135}; 213};
136 214
137/* 215/*
@@ -399,4 +477,52 @@ struct dsa_verify_pdb {
399 u8 *ab; /* only used if ECC processing */ 477 u8 *ab; /* only used if ECC processing */
400}; 478};
401 479
480/* RSA Protocol Data Block */
481#define RSA_PDB_SGF_SHIFT 28
482#define RSA_PDB_E_SHIFT 12
483#define RSA_PDB_E_MASK (0xFFF << RSA_PDB_E_SHIFT)
484#define RSA_PDB_D_SHIFT 12
485#define RSA_PDB_D_MASK (0xFFF << RSA_PDB_D_SHIFT)
486
487#define RSA_PDB_SGF_F (0x8 << RSA_PDB_SGF_SHIFT)
488#define RSA_PDB_SGF_G (0x4 << RSA_PDB_SGF_SHIFT)
489#define RSA_PRIV_PDB_SGF_F (0x4 << RSA_PDB_SGF_SHIFT)
490#define RSA_PRIV_PDB_SGF_G (0x8 << RSA_PDB_SGF_SHIFT)
491
492#define RSA_PRIV_KEY_FRM_1 0
493
494/**
495 * RSA Encrypt Protocol Data Block
496 * @sgf: scatter-gather field
497 * @f_dma: dma address of input data
498 * @g_dma: dma address of encrypted output data
499 * @n_dma: dma address of RSA modulus
500 * @e_dma: dma address of RSA public exponent
501 * @f_len: length in octets of the input data
502 */
503struct rsa_pub_pdb {
504 u32 sgf;
505 dma_addr_t f_dma;
506 dma_addr_t g_dma;
507 dma_addr_t n_dma;
508 dma_addr_t e_dma;
509 u32 f_len;
510} __packed;
511
512/**
513 * RSA Decrypt PDB - Private Key Form #1
514 * @sgf: scatter-gather field
515 * @g_dma: dma address of encrypted input data
516 * @f_dma: dma address of output data
517 * @n_dma: dma address of RSA modulus
518 * @d_dma: dma address of RSA private exponent
519 */
520struct rsa_priv_f1_pdb {
521 u32 sgf;
522 dma_addr_t g_dma;
523 dma_addr_t f_dma;
524 dma_addr_t n_dma;
525 dma_addr_t d_dma;
526} __packed;
527
402#endif 528#endif
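The RSA_PDB_* shifts and masks describe how the first word of the RSA PDBs carries the scatter/gather flags together with the exponent and input lengths. One plausible composition of that word for the public operation, purely as a sketch; the actual packing is done by the caampkc.c PDB setup code, which is not part of this hunk:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RSA_PDB_SGF_SHIFT 28
    #define RSA_PDB_E_SHIFT   12
    #define RSA_PDB_E_MASK    (0xFFF << RSA_PDB_E_SHIFT)

    /* Sketch: pack the public-exponent length and modulus length into the
     * first PDB word, leaving the SGF bits clear for contiguous buffers. */
    static uint32_t rsa_pub_sgf_word(uint32_t e_sz, uint32_t n_sz)
    {
        return ((e_sz << RSA_PDB_E_SHIFT) & RSA_PDB_E_MASK) | (n_sz & 0xFFF);
    }

    int main(void)
    {
        printf("sgf word = 0x%08" PRIx32 "\n", rsa_pub_sgf_word(3, 256));
        return 0;
    }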
diff --git a/drivers/crypto/caam/pkc_desc.c b/drivers/crypto/caam/pkc_desc.c
new file mode 100644
index 000000000000..4e4183e615ea
--- /dev/null
+++ b/drivers/crypto/caam/pkc_desc.c
@@ -0,0 +1,36 @@
1/*
2 * caam - Freescale FSL CAAM support for Public Key Cryptography descriptors
3 *
4 * Copyright 2016 Freescale Semiconductor, Inc.
5 *
6 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
7 * all the desired key parameters, input and output pointers.
8 */
9#include "caampkc.h"
10#include "desc_constr.h"
11
12/* Descriptor for RSA Public operation */
13void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb)
14{
15 init_job_desc_pdb(desc, 0, sizeof(*pdb));
16 append_cmd(desc, pdb->sgf);
17 append_ptr(desc, pdb->f_dma);
18 append_ptr(desc, pdb->g_dma);
19 append_ptr(desc, pdb->n_dma);
20 append_ptr(desc, pdb->e_dma);
21 append_cmd(desc, pdb->f_len);
22 append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSAENC_PUBKEY);
23}
24
25/* Descriptor for RSA Private operation - Private Key Form #1 */
26void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb)
27{
28 init_job_desc_pdb(desc, 0, sizeof(*pdb));
29 append_cmd(desc, pdb->sgf);
30 append_ptr(desc, pdb->g_dma);
31 append_ptr(desc, pdb->f_dma);
32 append_ptr(desc, pdb->n_dma);
33 append_ptr(desc, pdb->d_dma);
34 append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
35 RSA_PRIV_KEY_FRM_1);
36}
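For a feel of the descriptor sizes these two constructors produce: one header word, the PDB words appended by the append_cmd()/append_ptr() calls, then the single OPERATION command. A back-of-the-envelope computation assuming 64-bit DMA addresses (counts shrink with 32-bit dma_addr_t):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        size_t cmd_sz = sizeof(uint32_t);
        size_t ptr_sz = sizeof(uint64_t);   /* 64-bit dma_addr_t assumed */
        /* header + sgf + f/g/n/e pointers + f_len + OPERATION command */
        size_t words = 1 + 1 + 4 * (ptr_sz / cmd_sz) + 1 + 1;

        printf("RSA public descriptor: %zu words (%zu bytes)\n",
               words, words * cmd_sz);
        return 0;
    }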
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 0ba9c40597dc..b3c5016f6458 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -8,6 +8,7 @@
8#define REGS_H 8#define REGS_H
9 9
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/bitops.h>
11#include <linux/io.h> 12#include <linux/io.h>
12 13
13/* 14/*
@@ -65,46 +66,56 @@
65 * 66 *
66 */ 67 */
67 68
68#ifdef CONFIG_ARM 69extern bool caam_little_end;
69/* These are common macros for Power, put here for ARM */
70#define setbits32(_addr, _v) writel((readl(_addr) | (_v)), (_addr))
71#define clrbits32(_addr, _v) writel((readl(_addr) & ~(_v)), (_addr))
72 70
73#define out_arch(type, endian, a, v) __raw_write##type(cpu_to_##endian(v), a) 71#define caam_to_cpu(len) \
74#define in_arch(type, endian, a) endian##_to_cpu(__raw_read##type(a)) 72static inline u##len caam##len ## _to_cpu(u##len val) \
73{ \
74 if (caam_little_end) \
75 return le##len ## _to_cpu(val); \
76 else \
77 return be##len ## _to_cpu(val); \
78}
75 79
76#define out_le32(a, v) out_arch(l, le32, a, v) 80#define cpu_to_caam(len) \
77#define in_le32(a) in_arch(l, le32, a) 81static inline u##len cpu_to_caam##len(u##len val) \
82{ \
83 if (caam_little_end) \
84 return cpu_to_le##len(val); \
85 else \
86 return cpu_to_be##len(val); \
87}
78 88
79#define out_be32(a, v) out_arch(l, be32, a, v) 89caam_to_cpu(16)
80#define in_be32(a) in_arch(l, be32, a) 90caam_to_cpu(32)
91caam_to_cpu(64)
92cpu_to_caam(16)
93cpu_to_caam(32)
94cpu_to_caam(64)
81 95
82#define clrsetbits(type, addr, clear, set) \ 96static inline void wr_reg32(void __iomem *reg, u32 data)
83 out_##type((addr), (in_##type(addr) & ~(clear)) | (set)) 97{
98 if (caam_little_end)
99 iowrite32(data, reg);
100 else
101 iowrite32be(data, reg);
102}
84 103
85#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set) 104static inline u32 rd_reg32(void __iomem *reg)
86#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set) 105{
87#endif 106 if (caam_little_end)
107 return ioread32(reg);
88 108
89#ifdef __BIG_ENDIAN 109 return ioread32be(reg);
90#define wr_reg32(reg, data) out_be32(reg, data) 110}
91#define rd_reg32(reg) in_be32(reg) 111
92#define clrsetbits_32(addr, clear, set) clrsetbits_be32(addr, clear, set) 112static inline void clrsetbits_32(void __iomem *reg, u32 clear, u32 set)
93#ifdef CONFIG_64BIT 113{
94#define wr_reg64(reg, data) out_be64(reg, data) 114 if (caam_little_end)
95#define rd_reg64(reg) in_be64(reg) 115 iowrite32((ioread32(reg) & ~clear) | set, reg);
96#endif 116 else
97#else 117 iowrite32be((ioread32be(reg) & ~clear) | set, reg);
98#ifdef __LITTLE_ENDIAN 118}
99#define wr_reg32(reg, data) __raw_writel(data, reg)
100#define rd_reg32(reg) __raw_readl(reg)
101#define clrsetbits_32(addr, clear, set) clrsetbits_le32(addr, clear, set)
102#ifdef CONFIG_64BIT
103#define wr_reg64(reg, data) __raw_writeq(data, reg)
104#define rd_reg64(reg) __raw_readq(reg)
105#endif
106#endif
107#endif
108 119
109/* 120/*
110 * The only users of these wr/rd_reg64 functions is the Job Ring (JR). 121 * The only users of these wr/rd_reg64 functions is the Job Ring (JR).
@@ -123,29 +134,67 @@
123 * base + 0x0000 : least-significant 32 bits 134 * base + 0x0000 : least-significant 32 bits
124 * base + 0x0004 : most-significant 32 bits 135 * base + 0x0004 : most-significant 32 bits
125 */ 136 */
137#ifdef CONFIG_64BIT
138static inline void wr_reg64(void __iomem *reg, u64 data)
139{
140 if (caam_little_end)
141 iowrite64(data, reg);
142 else
143 iowrite64be(data, reg);
144}
126 145
127#ifndef CONFIG_64BIT 146static inline u64 rd_reg64(void __iomem *reg)
128#if !defined(CONFIG_CRYPTO_DEV_FSL_CAAM_LE) || \
129 defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX)
130#define REG64_MS32(reg) ((u32 __iomem *)(reg))
131#define REG64_LS32(reg) ((u32 __iomem *)(reg) + 1)
132#else
133#define REG64_MS32(reg) ((u32 __iomem *)(reg) + 1)
134#define REG64_LS32(reg) ((u32 __iomem *)(reg))
135#endif
136
137static inline void wr_reg64(u64 __iomem *reg, u64 data)
138{ 147{
139 wr_reg32(REG64_MS32(reg), data >> 32); 148 if (caam_little_end)
140 wr_reg32(REG64_LS32(reg), data); 149 return ioread64(reg);
150 else
151 return ioread64be(reg);
141} 152}
142 153
143static inline u64 rd_reg64(u64 __iomem *reg) 154#else /* CONFIG_64BIT */
155static inline void wr_reg64(void __iomem *reg, u64 data)
144{ 156{
145 return ((u64)rd_reg32(REG64_MS32(reg)) << 32 | 157#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
146 (u64)rd_reg32(REG64_LS32(reg))); 158 if (caam_little_end) {
159 wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
160 wr_reg32((u32 __iomem *)(reg), data);
161 } else
162#endif
163 {
164 wr_reg32((u32 __iomem *)(reg), data >> 32);
165 wr_reg32((u32 __iomem *)(reg) + 1, data);
166 }
147} 167}
168
169static inline u64 rd_reg64(void __iomem *reg)
170{
171#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
172 if (caam_little_end)
173 return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
174 (u64)rd_reg32((u32 __iomem *)(reg)));
175 else
148#endif 176#endif
177 return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
178 (u64)rd_reg32((u32 __iomem *)(reg) + 1));
179}
180#endif /* CONFIG_64BIT */
181
182#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
183#ifdef CONFIG_SOC_IMX7D
184#define cpu_to_caam_dma(value) \
185 (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
186 (u64)cpu_to_caam32(upper_32_bits(value)))
187#define caam_dma_to_cpu(value) \
188 (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \
189 (u64)caam32_to_cpu(upper_32_bits(value)))
190#else
191#define cpu_to_caam_dma(value) cpu_to_caam64(value)
192#define caam_dma_to_cpu(value) caam64_to_cpu(value)
193#endif /* CONFIG_SOC_IMX7D */
194#else
195#define cpu_to_caam_dma(value) cpu_to_caam32(value)
196#define caam_dma_to_cpu(value) caam32_to_cpu(value)
197#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
149 198
150/* 199/*
151 * jr_outentry 200 * jr_outentry
@@ -249,6 +298,8 @@ struct caam_perfmon {
249 u32 faultliodn; /* FALR - Fault Address LIODN */ 298 u32 faultliodn; /* FALR - Fault Address LIODN */
250 u32 faultdetail; /* FADR - Fault Addr Detail */ 299 u32 faultdetail; /* FADR - Fault Addr Detail */
251 u32 rsvd2; 300 u32 rsvd2;
301#define CSTA_PLEND BIT(10)
302#define CSTA_ALT_PLEND BIT(18)
252 u32 status; /* CSTA - CAAM Status */ 303 u32 status; /* CSTA - CAAM Status */
253 u64 rsvd3; 304 u64 rsvd3;
254 305
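The regs.h rework boils down to one runtime flag: caam_little_end, set at probe time from the CSTA register, selects little- or big-endian conversions for every register, descriptor and DMA-address access. A standalone userspace model of the 32-bit pair of helpers (the kernel versions are the static inlines generated by the macros above):

    #include <endian.h>
    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool caam_little_end;

    static uint32_t cpu_to_caam32(uint32_t val)
    {
        return caam_little_end ? htole32(val) : htobe32(val);
    }

    static uint32_t caam32_to_cpu(uint32_t val)
    {
        return caam_little_end ? le32toh(val) : be32toh(val);
    }

    int main(void)
    {
        uint32_t wire;

        caam_little_end = false;    /* e.g. a big-endian SEC block */
        wire = cpu_to_caam32(0x12345678);
        printf("wire=0x%08" PRIx32 " back=0x%08" PRIx32 "\n",
               wire, caam32_to_cpu(wire));
        return 0;
    }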
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 12ec6616e89d..19dc64fede0d 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -5,18 +5,19 @@
5 * 5 *
6 */ 6 */
7 7
8#include "regs.h"
9
8struct sec4_sg_entry; 10struct sec4_sg_entry;
9 11
10/* 12/*
11 * convert single dma address to h/w link table format 13 * convert single dma address to h/w link table format
12 */ 14 */
13static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr, 15static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
14 dma_addr_t dma, u32 len, u32 offset) 16 dma_addr_t dma, u32 len, u16 offset)
15{ 17{
16 sec4_sg_ptr->ptr = dma; 18 sec4_sg_ptr->ptr = cpu_to_caam_dma(dma);
17 sec4_sg_ptr->len = len; 19 sec4_sg_ptr->len = cpu_to_caam32(len);
18 sec4_sg_ptr->buf_pool_id = 0; 20 sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
19 sec4_sg_ptr->offset = offset;
20#ifdef DEBUG 21#ifdef DEBUG
21 print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ", 22 print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
22 DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr, 23 DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
@@ -30,7 +31,7 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
30 */ 31 */
31static inline struct sec4_sg_entry * 32static inline struct sec4_sg_entry *
32sg_to_sec4_sg(struct scatterlist *sg, int sg_count, 33sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
33 struct sec4_sg_entry *sec4_sg_ptr, u32 offset) 34 struct sec4_sg_entry *sec4_sg_ptr, u16 offset)
34{ 35{
35 while (sg_count) { 36 while (sg_count) {
36 dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), 37 dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
@@ -48,10 +49,10 @@ sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
48 */ 49 */
49static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count, 50static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
50 struct sec4_sg_entry *sec4_sg_ptr, 51 struct sec4_sg_entry *sec4_sg_ptr,
51 u32 offset) 52 u16 offset)
52{ 53{
53 sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset); 54 sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
54 sec4_sg_ptr->len |= SEC4_SG_LEN_FIN; 55 sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
55} 56}
56 57
57static inline struct sec4_sg_entry *sg_to_sec4_sg_len( 58static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 0d0d4529ee36..58a4244b4752 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -14,9 +14,8 @@
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/scatterlist.h> 16#include <linux/scatterlist.h>
17#include <linux/crypto.h>
18#include <crypto/algapi.h>
19#include <crypto/aes.h> 17#include <crypto/aes.h>
18#include <crypto/internal/skcipher.h>
20#include <crypto/scatterwalk.h> 19#include <crypto/scatterwalk.h>
21 20
22#include "ccp-crypto.h" 21#include "ccp-crypto.h"
@@ -110,15 +109,12 @@ static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
110 ctx->u.aes.key_len = key_len / 2; 109 ctx->u.aes.key_len = key_len / 2;
111 sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len); 110 sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
112 111
113 return crypto_ablkcipher_setkey(ctx->u.aes.tfm_ablkcipher, key, 112 return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
114 key_len);
115} 113}
116 114
117static int ccp_aes_xts_crypt(struct ablkcipher_request *req, 115static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
118 unsigned int encrypt) 116 unsigned int encrypt)
119{ 117{
120 struct crypto_tfm *tfm =
121 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
122 struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 118 struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
123 struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); 119 struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
124 unsigned int unit; 120 unsigned int unit;
@@ -146,14 +142,19 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
146 142
147 if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) || 143 if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
148 (ctx->u.aes.key_len != AES_KEYSIZE_128)) { 144 (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
145 SKCIPHER_REQUEST_ON_STACK(subreq, ctx->u.aes.tfm_skcipher);
146
149 /* Use the fallback to process the request for any 147 /* Use the fallback to process the request for any
150 * unsupported unit sizes or key sizes 148 * unsupported unit sizes or key sizes
151 */ 149 */
152 ablkcipher_request_set_tfm(req, ctx->u.aes.tfm_ablkcipher); 150 skcipher_request_set_tfm(subreq, ctx->u.aes.tfm_skcipher);
153 ret = (encrypt) ? crypto_ablkcipher_encrypt(req) : 151 skcipher_request_set_callback(subreq, req->base.flags,
154 crypto_ablkcipher_decrypt(req); 152 NULL, NULL);
155 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); 153 skcipher_request_set_crypt(subreq, req->src, req->dst,
156 154 req->nbytes, req->info);
155 ret = encrypt ? crypto_skcipher_encrypt(subreq) :
156 crypto_skcipher_decrypt(subreq);
157 skcipher_request_zero(subreq);
157 return ret; 158 return ret;
158 } 159 }
159 160
@@ -192,23 +193,21 @@ static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
192static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm) 193static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
193{ 194{
194 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); 195 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
195 struct crypto_ablkcipher *fallback_tfm; 196 struct crypto_skcipher *fallback_tfm;
196 197
197 ctx->complete = ccp_aes_xts_complete; 198 ctx->complete = ccp_aes_xts_complete;
198 ctx->u.aes.key_len = 0; 199 ctx->u.aes.key_len = 0;
199 200
200 fallback_tfm = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm), 0, 201 fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0,
201 CRYPTO_ALG_ASYNC | 202 CRYPTO_ALG_ASYNC |
202 CRYPTO_ALG_NEED_FALLBACK); 203 CRYPTO_ALG_NEED_FALLBACK);
203 if (IS_ERR(fallback_tfm)) { 204 if (IS_ERR(fallback_tfm)) {
204 pr_warn("could not load fallback driver %s\n", 205 pr_warn("could not load fallback driver xts(aes)\n");
205 crypto_tfm_alg_name(tfm));
206 return PTR_ERR(fallback_tfm); 206 return PTR_ERR(fallback_tfm);
207 } 207 }
208 ctx->u.aes.tfm_ablkcipher = fallback_tfm; 208 ctx->u.aes.tfm_skcipher = fallback_tfm;
209 209
210 tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx) + 210 tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
211 fallback_tfm->base.crt_ablkcipher.reqsize;
212 211
213 return 0; 212 return 0;
214} 213}
@@ -217,9 +216,7 @@ static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
217{ 216{
218 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); 217 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
219 218
220 if (ctx->u.aes.tfm_ablkcipher) 219 crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
221 crypto_free_ablkcipher(ctx->u.aes.tfm_ablkcipher);
222 ctx->u.aes.tfm_ablkcipher = NULL;
223} 220}
224 221
225static int ccp_register_aes_xts_alg(struct list_head *head, 222static int ccp_register_aes_xts_alg(struct list_head *head,
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index a326ec20bfa8..8335b32e815e 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -17,7 +17,6 @@
17#include <linux/wait.h> 17#include <linux/wait.h>
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/ccp.h> 19#include <linux/ccp.h>
20#include <linux/crypto.h>
21#include <crypto/algapi.h> 20#include <crypto/algapi.h>
22#include <crypto/aes.h> 21#include <crypto/aes.h>
23#include <crypto/ctr.h> 22#include <crypto/ctr.h>
@@ -69,7 +68,7 @@ static inline struct ccp_crypto_ahash_alg *
69/***** AES related defines *****/ 68/***** AES related defines *****/
70struct ccp_aes_ctx { 69struct ccp_aes_ctx {
71 /* Fallback cipher for XTS with unsupported unit sizes */ 70 /* Fallback cipher for XTS with unsupported unit sizes */
72 struct crypto_ablkcipher *tfm_ablkcipher; 71 struct crypto_skcipher *tfm_skcipher;
73 72
74 /* Cipher used to generate CMAC K1/K2 keys */ 73 /* Cipher used to generate CMAC K1/K2 keys */
75 struct crypto_cipher *tfm_cipher; 74 struct crypto_cipher *tfm_cipher;
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index e8ef9fd24a16..e373cc6557c6 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -31,22 +31,42 @@
31 31
32#include "cesa.h" 32#include "cesa.h"
33 33
34/* Limit of the crypto queue before reaching the backlog */
35#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128
36
34static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA); 37static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA);
35module_param_named(allhwsupport, allhwsupport, int, 0444); 38module_param_named(allhwsupport, allhwsupport, int, 0444);
36MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even if it overlaps with the mv_cesa driver)"); 39MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even if it overlaps with the mv_cesa driver)");
37 40
38struct mv_cesa_dev *cesa_dev; 41struct mv_cesa_dev *cesa_dev;
39 42
40static void mv_cesa_dequeue_req_unlocked(struct mv_cesa_engine *engine) 43struct crypto_async_request *
44mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
45 struct crypto_async_request **backlog)
41{ 46{
42 struct crypto_async_request *req, *backlog; 47 struct crypto_async_request *req;
48
49 *backlog = crypto_get_backlog(&engine->queue);
50 req = crypto_dequeue_request(&engine->queue);
51
52 if (!req)
53 return NULL;
54
55 return req;
56}
57
58static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
59{
60 struct crypto_async_request *req = NULL, *backlog = NULL;
43 struct mv_cesa_ctx *ctx; 61 struct mv_cesa_ctx *ctx;
44 62
45 spin_lock_bh(&cesa_dev->lock); 63
46 backlog = crypto_get_backlog(&cesa_dev->queue); 64 spin_lock_bh(&engine->lock);
47 req = crypto_dequeue_request(&cesa_dev->queue); 65 if (!engine->req) {
48 engine->req = req; 66 req = mv_cesa_dequeue_req_locked(engine, &backlog);
49 spin_unlock_bh(&cesa_dev->lock); 67 engine->req = req;
68 }
69 spin_unlock_bh(&engine->lock);
50 70
51 if (!req) 71 if (!req)
52 return; 72 return;
@@ -55,8 +75,47 @@ static void mv_cesa_dequeue_req_unlocked(struct mv_cesa_engine *engine)
55 backlog->complete(backlog, -EINPROGRESS); 75 backlog->complete(backlog, -EINPROGRESS);
56 76
57 ctx = crypto_tfm_ctx(req->tfm); 77 ctx = crypto_tfm_ctx(req->tfm);
58 ctx->ops->prepare(req, engine);
59 ctx->ops->step(req); 78 ctx->ops->step(req);
79
80 return;
81}
82
83static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
84{
85 struct crypto_async_request *req;
86 struct mv_cesa_ctx *ctx;
87 int res;
88
89 req = engine->req;
90 ctx = crypto_tfm_ctx(req->tfm);
91 res = ctx->ops->process(req, status);
92
93 if (res == 0) {
94 ctx->ops->complete(req);
95 mv_cesa_engine_enqueue_complete_request(engine, req);
96 } else if (res == -EINPROGRESS) {
97 ctx->ops->step(req);
98 }
99
100 return res;
101}
102
103static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
104{
105 if (engine->chain.first && engine->chain.last)
106 return mv_cesa_tdma_process(engine, status);
107
108 return mv_cesa_std_process(engine, status);
109}
110
111static inline void
112mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
113 int res)
114{
115 ctx->ops->cleanup(req);
116 local_bh_disable();
117 req->complete(req, res);
118 local_bh_enable();
60} 119}
61 120
62static irqreturn_t mv_cesa_int(int irq, void *priv) 121static irqreturn_t mv_cesa_int(int irq, void *priv)
@@ -83,49 +142,54 @@ static irqreturn_t mv_cesa_int(int irq, void *priv)
83 writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS); 142 writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
84 writel(~status, engine->regs + CESA_SA_INT_STATUS); 143 writel(~status, engine->regs + CESA_SA_INT_STATUS);
85 144
145 /* Process fetched requests */
146 res = mv_cesa_int_process(engine, status & mask);
86 ret = IRQ_HANDLED; 147 ret = IRQ_HANDLED;
148
87 spin_lock_bh(&engine->lock); 149 spin_lock_bh(&engine->lock);
88 req = engine->req; 150 req = engine->req;
151 if (res != -EINPROGRESS)
152 engine->req = NULL;
89 spin_unlock_bh(&engine->lock); 153 spin_unlock_bh(&engine->lock);
90 if (req) { 154
91 ctx = crypto_tfm_ctx(req->tfm); 155 ctx = crypto_tfm_ctx(req->tfm);
92 res = ctx->ops->process(req, status & mask); 156
93 if (res != -EINPROGRESS) { 157 if (res && res != -EINPROGRESS)
94 spin_lock_bh(&engine->lock); 158 mv_cesa_complete_req(ctx, req, res);
95 engine->req = NULL; 159
96 mv_cesa_dequeue_req_unlocked(engine); 160 /* Launch the next pending request */
97 spin_unlock_bh(&engine->lock); 161 mv_cesa_rearm_engine(engine);
98 ctx->ops->cleanup(req); 162
99 local_bh_disable(); 163 /* Iterate over the complete queue */
100 req->complete(req, res); 164 while (true) {
101 local_bh_enable(); 165 req = mv_cesa_engine_dequeue_complete_request(engine);
102 } else { 166 if (!req)
103 ctx->ops->step(req); 167 break;
104 } 168
169 mv_cesa_complete_req(ctx, req, 0);
105 } 170 }
106 } 171 }
107 172
108 return ret; 173 return ret;
109} 174}
110 175
111int mv_cesa_queue_req(struct crypto_async_request *req) 176int mv_cesa_queue_req(struct crypto_async_request *req,
177 struct mv_cesa_req *creq)
112{ 178{
113 int ret; 179 int ret;
114 int i; 180 struct mv_cesa_engine *engine = creq->engine;
181
182 spin_lock_bh(&engine->lock);
183 if (mv_cesa_req_get_type(creq) == CESA_DMA_REQ)
184 mv_cesa_tdma_chain(engine, creq);
115 185
116 spin_lock_bh(&cesa_dev->lock); 186 ret = crypto_enqueue_request(&engine->queue, req);
117 ret = crypto_enqueue_request(&cesa_dev->queue, req); 187 spin_unlock_bh(&engine->lock);
118 spin_unlock_bh(&cesa_dev->lock);
119 188
120 if (ret != -EINPROGRESS) 189 if (ret != -EINPROGRESS)
121 return ret; 190 return ret;
122 191
123 for (i = 0; i < cesa_dev->caps->nengines; i++) { 192 mv_cesa_rearm_engine(engine);
124 spin_lock_bh(&cesa_dev->engines[i].lock);
125 if (!cesa_dev->engines[i].req)
126 mv_cesa_dequeue_req_unlocked(&cesa_dev->engines[i]);
127 spin_unlock_bh(&cesa_dev->engines[i].lock);
128 }
129 193
130 return -EINPROGRESS; 194 return -EINPROGRESS;
131} 195}
@@ -309,6 +373,10 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
309 if (!dma->padding_pool) 373 if (!dma->padding_pool)
310 return -ENOMEM; 374 return -ENOMEM;
311 375
376 dma->iv_pool = dmam_pool_create("cesa_iv", dev, 16, 1, 0);
377 if (!dma->iv_pool)
378 return -ENOMEM;
379
312 cesa->dma = dma; 380 cesa->dma = dma;
313 381
314 return 0; 382 return 0;
@@ -416,7 +484,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
416 return -ENOMEM; 484 return -ENOMEM;
417 485
418 spin_lock_init(&cesa->lock); 486 spin_lock_init(&cesa->lock);
419 crypto_init_queue(&cesa->queue, 50); 487
420 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); 488 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
421 cesa->regs = devm_ioremap_resource(dev, res); 489 cesa->regs = devm_ioremap_resource(dev, res);
422 if (IS_ERR(cesa->regs)) 490 if (IS_ERR(cesa->regs))
@@ -489,6 +557,10 @@ static int mv_cesa_probe(struct platform_device *pdev)
489 engine); 557 engine);
490 if (ret) 558 if (ret)
491 goto err_cleanup; 559 goto err_cleanup;
560
561 crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
562 atomic_set(&engine->load, 0);
563 INIT_LIST_HEAD(&engine->complete_queue);
492 } 564 }
493 565
494 cesa_dev = cesa; 566 cesa_dev = cesa;
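
The cesa.c changes above replace the single global crypto_queue with one queue per engine: mv_cesa_queue_req() enqueues on the engine chosen by the caller, and mv_cesa_rearm_engine() only hands the next pending request to the hardware when the engine is idle. The following standalone C sketch models just that idle-check/rearm behaviour with a plain ring buffer; the names, the ring size, and the absence of locking are illustrative assumptions, not driver code.

    /* Illustrative model only -- plays the role of engine->queue / engine->req. */
    #include <stdio.h>
    #include <stddef.h>

    #define QLEN 50                       /* default per-engine queue depth, as above */

    struct request { int id; };

    struct engine {
            struct request *fifo[QLEN];
            int head, tail;
            struct request *req;          /* request currently on the engine */
    };

    static int enqueue(struct engine *e, struct request *r)
    {
            int next = (e->tail + 1) % QLEN;

            if (next == e->head)
                    return -1;            /* queue full */
            e->fifo[e->tail] = r;
            e->tail = next;
            return 0;
    }

    /* Re-arm: hand the next pending request to the engine only if it is idle. */
    static struct request *rearm(struct engine *e)
    {
            if (e->req || e->head == e->tail)
                    return NULL;
            e->req = e->fifo[e->head];
            e->head = (e->head + 1) % QLEN;
            return e->req;
    }

    int main(void)
    {
            struct engine eng = { .head = 0, .tail = 0, .req = NULL };
            struct request a = { 1 }, b = { 2 };

            enqueue(&eng, &a);
            enqueue(&eng, &b);
            printf("armed: %d\n", rearm(&eng)->id);          /* prints 1 */
            printf("busy rearm: %p\n", (void *)rearm(&eng)); /* NULL: engine busy */
            return 0;
    }
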
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index 74071e45ada0..e423d33decd4 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -271,10 +271,13 @@ struct mv_cesa_op_ctx {
271/* TDMA descriptor flags */ 271/* TDMA descriptor flags */
272#define CESA_TDMA_DST_IN_SRAM BIT(31) 272#define CESA_TDMA_DST_IN_SRAM BIT(31)
273#define CESA_TDMA_SRC_IN_SRAM BIT(30) 273#define CESA_TDMA_SRC_IN_SRAM BIT(30)
274#define CESA_TDMA_TYPE_MSK GENMASK(29, 0) 274#define CESA_TDMA_END_OF_REQ BIT(29)
275#define CESA_TDMA_BREAK_CHAIN BIT(28)
276#define CESA_TDMA_TYPE_MSK GENMASK(27, 0)
275#define CESA_TDMA_DUMMY 0 277#define CESA_TDMA_DUMMY 0
276#define CESA_TDMA_DATA 1 278#define CESA_TDMA_DATA 1
277#define CESA_TDMA_OP 2 279#define CESA_TDMA_OP 2
280#define CESA_TDMA_IV 3
278 281
279/** 282/**
280 * struct mv_cesa_tdma_desc - TDMA descriptor 283 * struct mv_cesa_tdma_desc - TDMA descriptor
@@ -390,6 +393,7 @@ struct mv_cesa_dev_dma {
390 struct dma_pool *op_pool; 393 struct dma_pool *op_pool;
391 struct dma_pool *cache_pool; 394 struct dma_pool *cache_pool;
392 struct dma_pool *padding_pool; 395 struct dma_pool *padding_pool;
396 struct dma_pool *iv_pool;
393}; 397};
394 398
395/** 399/**
@@ -398,7 +402,6 @@ struct mv_cesa_dev_dma {
398 * @regs: device registers 402 * @regs: device registers
399 * @sram_size: usable SRAM size 403 * @sram_size: usable SRAM size
400 * @lock: device lock 404 * @lock: device lock
401 * @queue: crypto request queue
402 * @engines: array of engines 405 * @engines: array of engines
403 * @dma: dma pools 406 * @dma: dma pools
404 * 407 *
@@ -410,7 +413,6 @@ struct mv_cesa_dev {
410 struct device *dev; 413 struct device *dev;
411 unsigned int sram_size; 414 unsigned int sram_size;
412 spinlock_t lock; 415 spinlock_t lock;
413 struct crypto_queue queue;
414 struct mv_cesa_engine *engines; 416 struct mv_cesa_engine *engines;
415 struct mv_cesa_dev_dma *dma; 417 struct mv_cesa_dev_dma *dma;
416}; 418};
@@ -429,6 +431,11 @@ struct mv_cesa_dev {
429 * @int_mask: interrupt mask cache 431 * @int_mask: interrupt mask cache
430 * @pool: memory pool pointing to the memory region reserved in 432 * @pool: memory pool pointing to the memory region reserved in
431 * SRAM 433 * SRAM
434 * @queue: fifo of the pending crypto requests
435 * @load: engine load counter, useful for load balancing
436 * @chain: list of the current tdma descriptors being processed
437 * by this engine.
 438 * @complete_queue: fifo of requests already processed by the engine
432 * 439 *
433 * Structure storing CESA engine information. 440 * Structure storing CESA engine information.
434 */ 441 */
@@ -444,23 +451,27 @@ struct mv_cesa_engine {
444 size_t max_req_len; 451 size_t max_req_len;
445 u32 int_mask; 452 u32 int_mask;
446 struct gen_pool *pool; 453 struct gen_pool *pool;
454 struct crypto_queue queue;
455 atomic_t load;
456 struct mv_cesa_tdma_chain chain;
457 struct list_head complete_queue;
447}; 458};
448 459
449/** 460/**
450 * struct mv_cesa_req_ops - CESA request operations 461 * struct mv_cesa_req_ops - CESA request operations
451 * @prepare: prepare a request to be executed on the specified engine
 452 * @process: process a request chunk result (should return 0 if the 462
 453 * operation is complete, -EINPROGRESS if it needs more steps, or an 463
 454 * error code) 464
455 * @step: launch the crypto operation on the next chunk 465 * @step: launch the crypto operation on the next chunk
456 * @cleanup: cleanup the crypto request (release associated data) 466 * @cleanup: cleanup the crypto request (release associated data)
 467 * @complete: complete the request, i.e. copy the result or context from SRAM when
468 * needed.
457 */ 469 */
458struct mv_cesa_req_ops { 470struct mv_cesa_req_ops {
459 void (*prepare)(struct crypto_async_request *req,
460 struct mv_cesa_engine *engine);
461 int (*process)(struct crypto_async_request *req, u32 status); 471 int (*process)(struct crypto_async_request *req, u32 status);
462 void (*step)(struct crypto_async_request *req); 472 void (*step)(struct crypto_async_request *req);
463 void (*cleanup)(struct crypto_async_request *req); 473 void (*cleanup)(struct crypto_async_request *req);
474 void (*complete)(struct crypto_async_request *req);
464}; 475};
465 476
466/** 477/**
@@ -507,21 +518,11 @@ enum mv_cesa_req_type {
507 518
508/** 519/**
509 * struct mv_cesa_req - CESA request 520 * struct mv_cesa_req - CESA request
510 * @type: request type
511 * @engine: engine associated with this request 521 * @engine: engine associated with this request
522 * @chain: list of tdma descriptors associated with this request
512 */ 523 */
513struct mv_cesa_req { 524struct mv_cesa_req {
514 enum mv_cesa_req_type type;
515 struct mv_cesa_engine *engine; 525 struct mv_cesa_engine *engine;
516};
517
518/**
519 * struct mv_cesa_tdma_req - CESA TDMA request
520 * @base: base information
521 * @chain: TDMA chain
522 */
523struct mv_cesa_tdma_req {
524 struct mv_cesa_req base;
525 struct mv_cesa_tdma_chain chain; 526 struct mv_cesa_tdma_chain chain;
526}; 527};
527 528
@@ -538,13 +539,11 @@ struct mv_cesa_sg_std_iter {
538 539
539/** 540/**
540 * struct mv_cesa_ablkcipher_std_req - cipher standard request 541 * struct mv_cesa_ablkcipher_std_req - cipher standard request
541 * @base: base information
542 * @op: operation context 542 * @op: operation context
543 * @offset: current operation offset 543 * @offset: current operation offset
544 * @size: size of the crypto operation 544 * @size: size of the crypto operation
545 */ 545 */
546struct mv_cesa_ablkcipher_std_req { 546struct mv_cesa_ablkcipher_std_req {
547 struct mv_cesa_req base;
548 struct mv_cesa_op_ctx op; 547 struct mv_cesa_op_ctx op;
549 unsigned int offset; 548 unsigned int offset;
550 unsigned int size; 549 unsigned int size;
@@ -558,34 +557,27 @@ struct mv_cesa_ablkcipher_std_req {
558 * @dst_nents: number of entries in the dest sg list 557 * @dst_nents: number of entries in the dest sg list
559 */ 558 */
560struct mv_cesa_ablkcipher_req { 559struct mv_cesa_ablkcipher_req {
561 union { 560 struct mv_cesa_req base;
562 struct mv_cesa_req base; 561 struct mv_cesa_ablkcipher_std_req std;
563 struct mv_cesa_tdma_req dma;
564 struct mv_cesa_ablkcipher_std_req std;
565 } req;
566 int src_nents; 562 int src_nents;
567 int dst_nents; 563 int dst_nents;
568}; 564};
569 565
570/** 566/**
571 * struct mv_cesa_ahash_std_req - standard hash request 567 * struct mv_cesa_ahash_std_req - standard hash request
572 * @base: base information
573 * @offset: current operation offset 568 * @offset: current operation offset
574 */ 569 */
575struct mv_cesa_ahash_std_req { 570struct mv_cesa_ahash_std_req {
576 struct mv_cesa_req base;
577 unsigned int offset; 571 unsigned int offset;
578}; 572};
579 573
580/** 574/**
581 * struct mv_cesa_ahash_dma_req - DMA hash request 575 * struct mv_cesa_ahash_dma_req - DMA hash request
582 * @base: base information
583 * @padding: padding buffer 576 * @padding: padding buffer
584 * @padding_dma: DMA address of the padding buffer 577 * @padding_dma: DMA address of the padding buffer
585 * @cache_dma: DMA address of the cache buffer 578 * @cache_dma: DMA address of the cache buffer
586 */ 579 */
587struct mv_cesa_ahash_dma_req { 580struct mv_cesa_ahash_dma_req {
588 struct mv_cesa_tdma_req base;
589 u8 *padding; 581 u8 *padding;
590 dma_addr_t padding_dma; 582 dma_addr_t padding_dma;
591 u8 *cache; 583 u8 *cache;
@@ -604,8 +596,8 @@ struct mv_cesa_ahash_dma_req {
604 * @state: hash state 596 * @state: hash state
605 */ 597 */
606struct mv_cesa_ahash_req { 598struct mv_cesa_ahash_req {
599 struct mv_cesa_req base;
607 union { 600 union {
608 struct mv_cesa_req base;
609 struct mv_cesa_ahash_dma_req dma; 601 struct mv_cesa_ahash_dma_req dma;
610 struct mv_cesa_ahash_std_req std; 602 struct mv_cesa_ahash_std_req std;
611 } req; 603 } req;
@@ -623,6 +615,35 @@ struct mv_cesa_ahash_req {
623 615
624extern struct mv_cesa_dev *cesa_dev; 616extern struct mv_cesa_dev *cesa_dev;
625 617
618
619static inline void
620mv_cesa_engine_enqueue_complete_request(struct mv_cesa_engine *engine,
621 struct crypto_async_request *req)
622{
623 list_add_tail(&req->list, &engine->complete_queue);
624}
625
626static inline struct crypto_async_request *
627mv_cesa_engine_dequeue_complete_request(struct mv_cesa_engine *engine)
628{
629 struct crypto_async_request *req;
630
631 req = list_first_entry_or_null(&engine->complete_queue,
632 struct crypto_async_request,
633 list);
634 if (req)
635 list_del(&req->list);
636
637 return req;
638}
639
640
641static inline enum mv_cesa_req_type
642mv_cesa_req_get_type(struct mv_cesa_req *req)
643{
644 return req->chain.first ? CESA_DMA_REQ : CESA_STD_REQ;
645}
646
626static inline void mv_cesa_update_op_cfg(struct mv_cesa_op_ctx *op, 647static inline void mv_cesa_update_op_cfg(struct mv_cesa_op_ctx *op,
627 u32 cfg, u32 mask) 648 u32 cfg, u32 mask)
628{ 649{
@@ -695,7 +716,32 @@ static inline bool mv_cesa_mac_op_is_first_frag(const struct mv_cesa_op_ctx *op)
695 CESA_SA_DESC_CFG_FIRST_FRAG; 716 CESA_SA_DESC_CFG_FIRST_FRAG;
696} 717}
697 718
698int mv_cesa_queue_req(struct crypto_async_request *req); 719int mv_cesa_queue_req(struct crypto_async_request *req,
720 struct mv_cesa_req *creq);
721
722struct crypto_async_request *
723mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
724 struct crypto_async_request **backlog);
725
726static inline struct mv_cesa_engine *mv_cesa_select_engine(int weight)
727{
728 int i;
729 u32 min_load = U32_MAX;
730 struct mv_cesa_engine *selected = NULL;
731
732 for (i = 0; i < cesa_dev->caps->nengines; i++) {
733 struct mv_cesa_engine *engine = cesa_dev->engines + i;
734 u32 load = atomic_read(&engine->load);
735 if (load < min_load) {
736 min_load = load;
737 selected = engine;
738 }
739 }
740
741 atomic_add(weight, &selected->load);
742
743 return selected;
744}
699 745
700/* 746/*
701 * Helper function that indicates whether a crypto request needs to be 747 * Helper function that indicates whether a crypto request needs to be
@@ -765,9 +811,9 @@ static inline bool mv_cesa_req_dma_iter_next_op(struct mv_cesa_dma_iter *iter)
765 return iter->op_len; 811 return iter->op_len;
766} 812}
767 813
768void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq); 814void mv_cesa_dma_step(struct mv_cesa_req *dreq);
769 815
770static inline int mv_cesa_dma_process(struct mv_cesa_tdma_req *dreq, 816static inline int mv_cesa_dma_process(struct mv_cesa_req *dreq,
771 u32 status) 817 u32 status)
772{ 818{
773 if (!(status & CESA_SA_INT_ACC0_IDMA_DONE)) 819 if (!(status & CESA_SA_INT_ACC0_IDMA_DONE))
@@ -779,10 +825,13 @@ static inline int mv_cesa_dma_process(struct mv_cesa_tdma_req *dreq,
779 return 0; 825 return 0;
780} 826}
781 827
782void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq, 828void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
783 struct mv_cesa_engine *engine); 829 struct mv_cesa_engine *engine);
830void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq);
831void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
832 struct mv_cesa_req *dreq);
833int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status);
784 834
785void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq);
786 835
787static inline void 836static inline void
788mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain) 837mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain)
@@ -790,6 +839,9 @@ mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain)
790 memset(chain, 0, sizeof(*chain)); 839 memset(chain, 0, sizeof(*chain));
791} 840}
792 841
842int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
843 u32 size, u32 flags, gfp_t gfp_flags);
844
793struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain, 845struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
794 const struct mv_cesa_op_ctx *op_templ, 846 const struct mv_cesa_op_ctx *op_templ,
795 bool skip_ctx, 847 bool skip_ctx,
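
mv_cesa_select_engine(), added to cesa.h above, implements a simple least-loaded policy over the new per-engine load counters: scan every engine, pick the one with the smallest load, then account the new request's weight. A minimal user-space sketch of the same policy is given below; plain unsigned ints stand in for atomic_t and the engine data is made up for illustration.

    #include <stdio.h>
    #include <limits.h>

    struct engine { unsigned int load; };

    static struct engine *select_engine(struct engine *engines, int nengines,
                                        unsigned int weight)
    {
            struct engine *selected = NULL;
            unsigned int min_load = UINT_MAX;
            int i;

            for (i = 0; i < nengines; i++) {
                    if (engines[i].load < min_load) {
                            min_load = engines[i].load;
                            selected = &engines[i];
                    }
            }

            selected->load += weight;   /* atomic_add() in the driver */
            return selected;
    }

    int main(void)
    {
            struct engine engines[2] = { { .load = 4096 }, { .load = 512 } };
            struct engine *sel = select_engine(engines, 2, 256);

            printf("picked engine %d, new load %u\n",
                   (int)(sel - engines), sel->load); /* picked engine 1, new load 768 */
            return 0;
    }
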
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index dcf1fceb9336..48df03a06066 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -70,25 +70,28 @@ mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
70 dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, 70 dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
71 DMA_BIDIRECTIONAL); 71 DMA_BIDIRECTIONAL);
72 } 72 }
73 mv_cesa_dma_cleanup(&creq->req.dma); 73 mv_cesa_dma_cleanup(&creq->base);
74} 74}
75 75
76static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req) 76static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req)
77{ 77{
78 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); 78 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
79 79
80 if (creq->req.base.type == CESA_DMA_REQ) 80 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
81 mv_cesa_ablkcipher_dma_cleanup(req); 81 mv_cesa_ablkcipher_dma_cleanup(req);
82} 82}
83 83
84static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req) 84static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
85{ 85{
86 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); 86 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
87 struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; 87 struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
88 struct mv_cesa_engine *engine = sreq->base.engine; 88 struct mv_cesa_engine *engine = creq->base.engine;
89 size_t len = min_t(size_t, req->nbytes - sreq->offset, 89 size_t len = min_t(size_t, req->nbytes - sreq->offset,
90 CESA_SA_SRAM_PAYLOAD_SIZE); 90 CESA_SA_SRAM_PAYLOAD_SIZE);
91 91
92 mv_cesa_adjust_op(engine, &sreq->op);
93 memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
94
92 len = sg_pcopy_to_buffer(req->src, creq->src_nents, 95 len = sg_pcopy_to_buffer(req->src, creq->src_nents,
93 engine->sram + CESA_SA_DATA_SRAM_OFFSET, 96 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
94 len, sreq->offset); 97 len, sreq->offset);
@@ -106,6 +109,8 @@ static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
106 109
107 mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE); 110 mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
108 writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG); 111 writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
112 BUG_ON(readl(engine->regs + CESA_SA_CMD) &
113 CESA_SA_CMD_EN_CESA_SA_ACCL0);
109 writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); 114 writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
110} 115}
111 116
@@ -113,8 +118,8 @@ static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
113 u32 status) 118 u32 status)
114{ 119{
115 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); 120 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
116 struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; 121 struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
117 struct mv_cesa_engine *engine = sreq->base.engine; 122 struct mv_cesa_engine *engine = creq->base.engine;
118 size_t len; 123 size_t len;
119 124
120 len = sg_pcopy_from_buffer(req->dst, creq->dst_nents, 125 len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
@@ -133,21 +138,19 @@ static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
133{ 138{
134 struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); 139 struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
135 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); 140 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
136 struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; 141 struct mv_cesa_req *basereq = &creq->base;
137 struct mv_cesa_engine *engine = sreq->base.engine; 142 unsigned int ivsize;
138 int ret; 143 int ret;
139 144
140 if (creq->req.base.type == CESA_DMA_REQ) 145 if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
141 ret = mv_cesa_dma_process(&creq->req.dma, status); 146 return mv_cesa_ablkcipher_std_process(ablkreq, status);
142 else
143 ret = mv_cesa_ablkcipher_std_process(ablkreq, status);
144 147
148 ret = mv_cesa_dma_process(basereq, status);
145 if (ret) 149 if (ret)
146 return ret; 150 return ret;
147 151
148 memcpy_fromio(ablkreq->info, 152 ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));
149 engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET, 153 memcpy_fromio(ablkreq->info, basereq->chain.last->data, ivsize);
150 crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq)));
151 154
152 return 0; 155 return 0;
153} 156}
@@ -157,8 +160,8 @@ static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
157 struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); 160 struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
158 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); 161 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
159 162
160 if (creq->req.base.type == CESA_DMA_REQ) 163 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
161 mv_cesa_dma_step(&creq->req.dma); 164 mv_cesa_dma_step(&creq->base);
162 else 165 else
163 mv_cesa_ablkcipher_std_step(ablkreq); 166 mv_cesa_ablkcipher_std_step(ablkreq);
164} 167}
@@ -167,22 +170,19 @@ static inline void
167mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req) 170mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req)
168{ 171{
169 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); 172 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
170 struct mv_cesa_tdma_req *dreq = &creq->req.dma; 173 struct mv_cesa_req *basereq = &creq->base;
171 174
172 mv_cesa_dma_prepare(dreq, dreq->base.engine); 175 mv_cesa_dma_prepare(basereq, basereq->engine);
173} 176}
174 177
175static inline void 178static inline void
176mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req) 179mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req)
177{ 180{
178 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); 181 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
179 struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; 182 struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
180 struct mv_cesa_engine *engine = sreq->base.engine;
181 183
182 sreq->size = 0; 184 sreq->size = 0;
183 sreq->offset = 0; 185 sreq->offset = 0;
184 mv_cesa_adjust_op(engine, &sreq->op);
185 memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
186} 186}
187 187
188static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req, 188static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
@@ -190,9 +190,9 @@ static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
190{ 190{
191 struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); 191 struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
192 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); 192 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
193 creq->req.base.engine = engine; 193 creq->base.engine = engine;
194 194
195 if (creq->req.base.type == CESA_DMA_REQ) 195 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
196 mv_cesa_ablkcipher_dma_prepare(ablkreq); 196 mv_cesa_ablkcipher_dma_prepare(ablkreq);
197 else 197 else
198 mv_cesa_ablkcipher_std_prepare(ablkreq); 198 mv_cesa_ablkcipher_std_prepare(ablkreq);
@@ -206,11 +206,34 @@ mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
206 mv_cesa_ablkcipher_cleanup(ablkreq); 206 mv_cesa_ablkcipher_cleanup(ablkreq);
207} 207}
208 208
209static void
210mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
211{
212 struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
213 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
214 struct mv_cesa_engine *engine = creq->base.engine;
215 unsigned int ivsize;
216
217 atomic_sub(ablkreq->nbytes, &engine->load);
218 ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));
219
220 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
221 struct mv_cesa_req *basereq;
222
223 basereq = &creq->base;
224 memcpy(ablkreq->info, basereq->chain.last->data, ivsize);
225 } else {
226 memcpy_fromio(ablkreq->info,
227 engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
228 ivsize);
229 }
230}
231
209static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = { 232static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
210 .step = mv_cesa_ablkcipher_step, 233 .step = mv_cesa_ablkcipher_step,
211 .process = mv_cesa_ablkcipher_process, 234 .process = mv_cesa_ablkcipher_process,
212 .prepare = mv_cesa_ablkcipher_prepare,
213 .cleanup = mv_cesa_ablkcipher_req_cleanup, 235 .cleanup = mv_cesa_ablkcipher_req_cleanup,
236 .complete = mv_cesa_ablkcipher_complete,
214}; 237};
215 238
216static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm) 239static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
@@ -295,15 +318,15 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
295 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); 318 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
296 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 319 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
297 GFP_KERNEL : GFP_ATOMIC; 320 GFP_KERNEL : GFP_ATOMIC;
298 struct mv_cesa_tdma_req *dreq = &creq->req.dma; 321 struct mv_cesa_req *basereq = &creq->base;
299 struct mv_cesa_ablkcipher_dma_iter iter; 322 struct mv_cesa_ablkcipher_dma_iter iter;
300 struct mv_cesa_tdma_chain chain; 323 struct mv_cesa_tdma_chain chain;
301 bool skip_ctx = false; 324 bool skip_ctx = false;
302 int ret; 325 int ret;
326 unsigned int ivsize;
303 327
304 dreq->base.type = CESA_DMA_REQ; 328 basereq->chain.first = NULL;
305 dreq->chain.first = NULL; 329 basereq->chain.last = NULL;
306 dreq->chain.last = NULL;
307 330
308 if (req->src != req->dst) { 331 if (req->src != req->dst) {
309 ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, 332 ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
@@ -358,12 +381,21 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
358 381
359 } while (mv_cesa_ablkcipher_req_iter_next_op(&iter)); 382 } while (mv_cesa_ablkcipher_req_iter_next_op(&iter));
360 383
361 dreq->chain = chain; 384 /* Add output data for IV */
385 ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
386 ret = mv_cesa_dma_add_iv_op(&chain, CESA_SA_CRYPT_IV_SRAM_OFFSET,
387 ivsize, CESA_TDMA_SRC_IN_SRAM, flags);
388
389 if (ret)
390 goto err_free_tdma;
391
392 basereq->chain = chain;
393 basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;
362 394
363 return 0; 395 return 0;
364 396
365err_free_tdma: 397err_free_tdma:
366 mv_cesa_dma_cleanup(dreq); 398 mv_cesa_dma_cleanup(basereq);
367 if (req->dst != req->src) 399 if (req->dst != req->src)
368 dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents, 400 dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
369 DMA_FROM_DEVICE); 401 DMA_FROM_DEVICE);
@@ -380,11 +412,13 @@ mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
380 const struct mv_cesa_op_ctx *op_templ) 412 const struct mv_cesa_op_ctx *op_templ)
381{ 413{
382 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); 414 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
383 struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; 415 struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
416 struct mv_cesa_req *basereq = &creq->base;
384 417
385 sreq->base.type = CESA_STD_REQ;
386 sreq->op = *op_templ; 418 sreq->op = *op_templ;
387 sreq->skip_ctx = false; 419 sreq->skip_ctx = false;
420 basereq->chain.first = NULL;
421 basereq->chain.last = NULL;
388 422
389 return 0; 423 return 0;
390} 424}
@@ -414,7 +448,6 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
414 mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY, 448 mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
415 CESA_SA_DESC_CFG_OP_MSK); 449 CESA_SA_DESC_CFG_OP_MSK);
416 450
417 /* TODO: add a threshold for DMA usage */
418 if (cesa_dev->caps->has_tdma) 451 if (cesa_dev->caps->has_tdma)
419 ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl); 452 ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl);
420 else 453 else
@@ -423,28 +456,41 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
423 return ret; 456 return ret;
424} 457}
425 458
426static int mv_cesa_des_op(struct ablkcipher_request *req, 459static int mv_cesa_ablkcipher_queue_req(struct ablkcipher_request *req,
427 struct mv_cesa_op_ctx *tmpl) 460 struct mv_cesa_op_ctx *tmpl)
428{ 461{
429 struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
430 int ret; 462 int ret;
431 463 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
432 mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES, 464 struct mv_cesa_engine *engine;
433 CESA_SA_DESC_CFG_CRYPTM_MSK);
434
435 memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);
436 465
437 ret = mv_cesa_ablkcipher_req_init(req, tmpl); 466 ret = mv_cesa_ablkcipher_req_init(req, tmpl);
438 if (ret) 467 if (ret)
439 return ret; 468 return ret;
440 469
441 ret = mv_cesa_queue_req(&req->base); 470 engine = mv_cesa_select_engine(req->nbytes);
471 mv_cesa_ablkcipher_prepare(&req->base, engine);
472
473 ret = mv_cesa_queue_req(&req->base, &creq->base);
474
442 if (mv_cesa_req_needs_cleanup(&req->base, ret)) 475 if (mv_cesa_req_needs_cleanup(&req->base, ret))
443 mv_cesa_ablkcipher_cleanup(req); 476 mv_cesa_ablkcipher_cleanup(req);
444 477
445 return ret; 478 return ret;
446} 479}
447 480
481static int mv_cesa_des_op(struct ablkcipher_request *req,
482 struct mv_cesa_op_ctx *tmpl)
483{
484 struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
485
486 mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
487 CESA_SA_DESC_CFG_CRYPTM_MSK);
488
489 memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);
490
491 return mv_cesa_ablkcipher_queue_req(req, tmpl);
492}
493
448static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req) 494static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
449{ 495{
450 struct mv_cesa_op_ctx tmpl; 496 struct mv_cesa_op_ctx tmpl;
@@ -547,22 +593,13 @@ static int mv_cesa_des3_op(struct ablkcipher_request *req,
547 struct mv_cesa_op_ctx *tmpl) 593 struct mv_cesa_op_ctx *tmpl)
548{ 594{
549 struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 595 struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
550 int ret;
551 596
552 mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES, 597 mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
553 CESA_SA_DESC_CFG_CRYPTM_MSK); 598 CESA_SA_DESC_CFG_CRYPTM_MSK);
554 599
555 memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE); 600 memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);
556 601
557 ret = mv_cesa_ablkcipher_req_init(req, tmpl); 602 return mv_cesa_ablkcipher_queue_req(req, tmpl);
558 if (ret)
559 return ret;
560
561 ret = mv_cesa_queue_req(&req->base);
562 if (mv_cesa_req_needs_cleanup(&req->base, ret))
563 mv_cesa_ablkcipher_cleanup(req);
564
565 return ret;
566} 603}
567 604
568static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req) 605static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
@@ -673,7 +710,7 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req,
673 struct mv_cesa_op_ctx *tmpl) 710 struct mv_cesa_op_ctx *tmpl)
674{ 711{
675 struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 712 struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
676 int ret, i; 713 int i;
677 u32 *key; 714 u32 *key;
678 u32 cfg; 715 u32 cfg;
679 716
@@ -696,15 +733,7 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req,
696 CESA_SA_DESC_CFG_CRYPTM_MSK | 733 CESA_SA_DESC_CFG_CRYPTM_MSK |
697 CESA_SA_DESC_CFG_AES_LEN_MSK); 734 CESA_SA_DESC_CFG_AES_LEN_MSK);
698 735
699 ret = mv_cesa_ablkcipher_req_init(req, tmpl); 736 return mv_cesa_ablkcipher_queue_req(req, tmpl);
700 if (ret)
701 return ret;
702
703 ret = mv_cesa_queue_req(&req->base);
704 if (mv_cesa_req_needs_cleanup(&req->base, ret))
705 mv_cesa_ablkcipher_cleanup(req);
706
707 return ret;
708} 737}
709 738
710static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req) 739static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
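
The cipher path above now tags the last TDMA descriptor of each request with CESA_TDMA_END_OF_REQ, using the flag layout introduced in cesa.h (descriptor type in the low bits, control bits at the top of the word). The self-contained sketch below only illustrates that packing, using locally defined macros with the same bit positions; it is not the driver's descriptor handling.

    #include <stdio.h>
    #include <stdint.h>

    /* Local copies of the bit layout, for illustration only. */
    #define TDMA_DST_IN_SRAM  (1u << 31)
    #define TDMA_SRC_IN_SRAM  (1u << 30)
    #define TDMA_END_OF_REQ   (1u << 29)
    #define TDMA_BREAK_CHAIN  (1u << 28)
    #define TDMA_TYPE_MSK     ((1u << 28) - 1)   /* GENMASK(27, 0) */

    #define TDMA_DUMMY 0u
    #define TDMA_DATA  1u
    #define TDMA_OP    2u
    #define TDMA_IV    3u

    int main(void)
    {
            uint32_t flags = TDMA_IV | TDMA_SRC_IN_SRAM;

            flags |= TDMA_END_OF_REQ;             /* mark the request's last descriptor */

            printf("type=%u end_of_req=%u break_chain=%u\n",
                   (unsigned)(flags & TDMA_TYPE_MSK),
                   (unsigned)!!(flags & TDMA_END_OF_REQ),
                   (unsigned)!!(flags & TDMA_BREAK_CHAIN));
            /* prints: type=3 end_of_req=1 break_chain=0 */
            return 0;
    }
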
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 7a5058da9151..c35912b4fffb 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -103,14 +103,14 @@ static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
103 103
104 dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE); 104 dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
105 mv_cesa_ahash_dma_free_cache(&creq->req.dma); 105 mv_cesa_ahash_dma_free_cache(&creq->req.dma);
106 mv_cesa_dma_cleanup(&creq->req.dma.base); 106 mv_cesa_dma_cleanup(&creq->base);
107} 107}
108 108
109static inline void mv_cesa_ahash_cleanup(struct ahash_request *req) 109static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
110{ 110{
111 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 111 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
112 112
113 if (creq->req.base.type == CESA_DMA_REQ) 113 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
114 mv_cesa_ahash_dma_cleanup(req); 114 mv_cesa_ahash_dma_cleanup(req);
115} 115}
116 116
@@ -118,7 +118,7 @@ static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
118{ 118{
119 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 119 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
120 120
121 if (creq->req.base.type == CESA_DMA_REQ) 121 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
122 mv_cesa_ahash_dma_last_cleanup(req); 122 mv_cesa_ahash_dma_last_cleanup(req);
123} 123}
124 124
@@ -157,11 +157,23 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
157{ 157{
158 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 158 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
159 struct mv_cesa_ahash_std_req *sreq = &creq->req.std; 159 struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
160 struct mv_cesa_engine *engine = sreq->base.engine; 160 struct mv_cesa_engine *engine = creq->base.engine;
161 struct mv_cesa_op_ctx *op; 161 struct mv_cesa_op_ctx *op;
162 unsigned int new_cache_ptr = 0; 162 unsigned int new_cache_ptr = 0;
163 u32 frag_mode; 163 u32 frag_mode;
164 size_t len; 164 size_t len;
165 unsigned int digsize;
166 int i;
167
168 mv_cesa_adjust_op(engine, &creq->op_tmpl);
169 memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
170
171 digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
172 for (i = 0; i < digsize / 4; i++)
173 writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
174
175 mv_cesa_adjust_op(engine, &creq->op_tmpl);
176 memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
165 177
166 if (creq->cache_ptr) 178 if (creq->cache_ptr)
167 memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET, 179 memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
@@ -237,6 +249,8 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
237 249
238 mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE); 250 mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
239 writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG); 251 writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
252 BUG_ON(readl(engine->regs + CESA_SA_CMD) &
253 CESA_SA_CMD_EN_CESA_SA_ACCL0);
240 writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); 254 writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
241} 255}
242 256
@@ -254,20 +268,17 @@ static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
254static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req) 268static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
255{ 269{
256 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 270 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
257 struct mv_cesa_tdma_req *dreq = &creq->req.dma.base; 271 struct mv_cesa_req *basereq = &creq->base;
258 272
259 mv_cesa_dma_prepare(dreq, dreq->base.engine); 273 mv_cesa_dma_prepare(basereq, basereq->engine);
260} 274}
261 275
262static void mv_cesa_ahash_std_prepare(struct ahash_request *req) 276static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
263{ 277{
264 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 278 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
265 struct mv_cesa_ahash_std_req *sreq = &creq->req.std; 279 struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
266 struct mv_cesa_engine *engine = sreq->base.engine;
267 280
268 sreq->offset = 0; 281 sreq->offset = 0;
269 mv_cesa_adjust_op(engine, &creq->op_tmpl);
270 memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
271} 282}
272 283
273static void mv_cesa_ahash_step(struct crypto_async_request *req) 284static void mv_cesa_ahash_step(struct crypto_async_request *req)
@@ -275,8 +286,8 @@ static void mv_cesa_ahash_step(struct crypto_async_request *req)
275 struct ahash_request *ahashreq = ahash_request_cast(req); 286 struct ahash_request *ahashreq = ahash_request_cast(req);
276 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); 287 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
277 288
278 if (creq->req.base.type == CESA_DMA_REQ) 289 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
279 mv_cesa_dma_step(&creq->req.dma.base); 290 mv_cesa_dma_step(&creq->base);
280 else 291 else
281 mv_cesa_ahash_std_step(ahashreq); 292 mv_cesa_ahash_std_step(ahashreq);
282} 293}
@@ -285,17 +296,20 @@ static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
285{ 296{
286 struct ahash_request *ahashreq = ahash_request_cast(req); 297 struct ahash_request *ahashreq = ahash_request_cast(req);
287 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); 298 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
288 struct mv_cesa_engine *engine = creq->req.base.engine;
289 unsigned int digsize;
290 int ret, i;
291 299
292 if (creq->req.base.type == CESA_DMA_REQ) 300 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
293 ret = mv_cesa_dma_process(&creq->req.dma.base, status); 301 return mv_cesa_dma_process(&creq->base, status);
294 else
295 ret = mv_cesa_ahash_std_process(ahashreq, status);
296 302
297 if (ret == -EINPROGRESS) 303 return mv_cesa_ahash_std_process(ahashreq, status);
298 return ret; 304}
305
306static void mv_cesa_ahash_complete(struct crypto_async_request *req)
307{
308 struct ahash_request *ahashreq = ahash_request_cast(req);
309 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
310 struct mv_cesa_engine *engine = creq->base.engine;
311 unsigned int digsize;
312 int i;
299 313
300 digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq)); 314 digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
301 for (i = 0; i < digsize / 4; i++) 315 for (i = 0; i < digsize / 4; i++)
@@ -325,7 +339,7 @@ static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
325 } 339 }
326 } 340 }
327 341
328 return ret; 342 atomic_sub(ahashreq->nbytes, &engine->load);
329} 343}
330 344
331static void mv_cesa_ahash_prepare(struct crypto_async_request *req, 345static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
@@ -333,19 +347,13 @@ static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
333{ 347{
334 struct ahash_request *ahashreq = ahash_request_cast(req); 348 struct ahash_request *ahashreq = ahash_request_cast(req);
335 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); 349 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
336 unsigned int digsize;
337 int i;
338 350
339 creq->req.base.engine = engine; 351 creq->base.engine = engine;
340 352
341 if (creq->req.base.type == CESA_DMA_REQ) 353 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
342 mv_cesa_ahash_dma_prepare(ahashreq); 354 mv_cesa_ahash_dma_prepare(ahashreq);
343 else 355 else
344 mv_cesa_ahash_std_prepare(ahashreq); 356 mv_cesa_ahash_std_prepare(ahashreq);
345
346 digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
347 for (i = 0; i < digsize / 4; i++)
348 writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
349} 357}
350 358
351static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req) 359static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
@@ -362,8 +370,8 @@ static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
362static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = { 370static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
363 .step = mv_cesa_ahash_step, 371 .step = mv_cesa_ahash_step,
364 .process = mv_cesa_ahash_process, 372 .process = mv_cesa_ahash_process,
365 .prepare = mv_cesa_ahash_prepare,
366 .cleanup = mv_cesa_ahash_req_cleanup, 373 .cleanup = mv_cesa_ahash_req_cleanup,
374 .complete = mv_cesa_ahash_complete,
367}; 375};
368 376
369static int mv_cesa_ahash_init(struct ahash_request *req, 377static int mv_cesa_ahash_init(struct ahash_request *req,
@@ -553,15 +561,14 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
553 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 561 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
554 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 562 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
555 GFP_KERNEL : GFP_ATOMIC; 563 GFP_KERNEL : GFP_ATOMIC;
556 struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma; 564 struct mv_cesa_req *basereq = &creq->base;
557 struct mv_cesa_tdma_req *dreq = &ahashdreq->base;
558 struct mv_cesa_ahash_dma_iter iter; 565 struct mv_cesa_ahash_dma_iter iter;
559 struct mv_cesa_op_ctx *op = NULL; 566 struct mv_cesa_op_ctx *op = NULL;
560 unsigned int frag_len; 567 unsigned int frag_len;
561 int ret; 568 int ret;
562 569
563 dreq->chain.first = NULL; 570 basereq->chain.first = NULL;
564 dreq->chain.last = NULL; 571 basereq->chain.last = NULL;
565 572
566 if (creq->src_nents) { 573 if (creq->src_nents) {
567 ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, 574 ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
@@ -572,14 +579,14 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
572 } 579 }
573 } 580 }
574 581
575 mv_cesa_tdma_desc_iter_init(&dreq->chain); 582 mv_cesa_tdma_desc_iter_init(&basereq->chain);
576 mv_cesa_ahash_req_iter_init(&iter, req); 583 mv_cesa_ahash_req_iter_init(&iter, req);
577 584
578 /* 585 /*
579 * Add the cache (left-over data from a previous block) first. 586 * Add the cache (left-over data from a previous block) first.
580 * This will never overflow the SRAM size. 587 * This will never overflow the SRAM size.
581 */ 588 */
582 ret = mv_cesa_ahash_dma_add_cache(&dreq->chain, &iter, creq, flags); 589 ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, &iter, creq, flags);
583 if (ret) 590 if (ret)
584 goto err_free_tdma; 591 goto err_free_tdma;
585 592
@@ -590,7 +597,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
590 * data. We intentionally do not add the final op block. 597 * data. We intentionally do not add the final op block.
591 */ 598 */
592 while (true) { 599 while (true) {
593 ret = mv_cesa_dma_add_op_transfers(&dreq->chain, 600 ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
594 &iter.base, 601 &iter.base,
595 &iter.src, flags); 602 &iter.src, flags);
596 if (ret) 603 if (ret)
@@ -601,7 +608,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
601 if (!mv_cesa_ahash_req_iter_next_op(&iter)) 608 if (!mv_cesa_ahash_req_iter_next_op(&iter))
602 break; 609 break;
603 610
604 op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl, 611 op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
605 frag_len, flags); 612 frag_len, flags);
606 if (IS_ERR(op)) { 613 if (IS_ERR(op)) {
607 ret = PTR_ERR(op); 614 ret = PTR_ERR(op);
@@ -619,10 +626,10 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
619 * operation, which depends whether this is the final request. 626 * operation, which depends whether this is the final request.
620 */ 627 */
621 if (creq->last_req) 628 if (creq->last_req)
622 op = mv_cesa_ahash_dma_last_req(&dreq->chain, &iter, creq, 629 op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
623 frag_len, flags); 630 frag_len, flags);
624 else if (frag_len) 631 else if (frag_len)
625 op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl, 632 op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
626 frag_len, flags); 633 frag_len, flags);
627 634
628 if (IS_ERR(op)) { 635 if (IS_ERR(op)) {
@@ -632,7 +639,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
632 639
633 if (op) { 640 if (op) {
634 /* Add dummy desc to wait for crypto operation end */ 641 /* Add dummy desc to wait for crypto operation end */
635 ret = mv_cesa_dma_add_dummy_end(&dreq->chain, flags); 642 ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
636 if (ret) 643 if (ret)
637 goto err_free_tdma; 644 goto err_free_tdma;
638 } 645 }
@@ -643,10 +650,13 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
643 else 650 else
644 creq->cache_ptr = 0; 651 creq->cache_ptr = 0;
645 652
653 basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
654 CESA_TDMA_BREAK_CHAIN);
655
646 return 0; 656 return 0;
647 657
648err_free_tdma: 658err_free_tdma:
649 mv_cesa_dma_cleanup(dreq); 659 mv_cesa_dma_cleanup(basereq);
650 dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE); 660 dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
651 661
652err: 662err:
@@ -660,11 +670,6 @@ static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
660 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 670 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
661 int ret; 671 int ret;
662 672
663 if (cesa_dev->caps->has_tdma)
664 creq->req.base.type = CESA_DMA_REQ;
665 else
666 creq->req.base.type = CESA_STD_REQ;
667
668 creq->src_nents = sg_nents_for_len(req->src, req->nbytes); 673 creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
669 if (creq->src_nents < 0) { 674 if (creq->src_nents < 0) {
670 dev_err(cesa_dev->dev, "Invalid number of src SG"); 675 dev_err(cesa_dev->dev, "Invalid number of src SG");
@@ -678,19 +683,19 @@ static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
678 if (*cached) 683 if (*cached)
679 return 0; 684 return 0;
680 685
681 if (creq->req.base.type == CESA_DMA_REQ) 686 if (cesa_dev->caps->has_tdma)
682 ret = mv_cesa_ahash_dma_req_init(req); 687 ret = mv_cesa_ahash_dma_req_init(req);
683 688
684 return ret; 689 return ret;
685} 690}
686 691
687static int mv_cesa_ahash_update(struct ahash_request *req) 692static int mv_cesa_ahash_queue_req(struct ahash_request *req)
688{ 693{
689 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 694 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
695 struct mv_cesa_engine *engine;
690 bool cached = false; 696 bool cached = false;
691 int ret; 697 int ret;
692 698
693 creq->len += req->nbytes;
694 ret = mv_cesa_ahash_req_init(req, &cached); 699 ret = mv_cesa_ahash_req_init(req, &cached);
695 if (ret) 700 if (ret)
696 return ret; 701 return ret;
@@ -698,61 +703,48 @@ static int mv_cesa_ahash_update(struct ahash_request *req)
698 if (cached) 703 if (cached)
699 return 0; 704 return 0;
700 705
701 ret = mv_cesa_queue_req(&req->base); 706 engine = mv_cesa_select_engine(req->nbytes);
707 mv_cesa_ahash_prepare(&req->base, engine);
708
709 ret = mv_cesa_queue_req(&req->base, &creq->base);
710
702 if (mv_cesa_req_needs_cleanup(&req->base, ret)) 711 if (mv_cesa_req_needs_cleanup(&req->base, ret))
703 mv_cesa_ahash_cleanup(req); 712 mv_cesa_ahash_cleanup(req);
704 713
705 return ret; 714 return ret;
706} 715}
707 716
717static int mv_cesa_ahash_update(struct ahash_request *req)
718{
719 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
720
721 creq->len += req->nbytes;
722
723 return mv_cesa_ahash_queue_req(req);
724}
725
708static int mv_cesa_ahash_final(struct ahash_request *req) 726static int mv_cesa_ahash_final(struct ahash_request *req)
709{ 727{
710 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 728 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
711 struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl; 729 struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
712 bool cached = false;
713 int ret;
714 730
715 mv_cesa_set_mac_op_total_len(tmpl, creq->len); 731 mv_cesa_set_mac_op_total_len(tmpl, creq->len);
716 creq->last_req = true; 732 creq->last_req = true;
717 req->nbytes = 0; 733 req->nbytes = 0;
718 734
719 ret = mv_cesa_ahash_req_init(req, &cached); 735 return mv_cesa_ahash_queue_req(req);
720 if (ret)
721 return ret;
722
723 if (cached)
724 return 0;
725
726 ret = mv_cesa_queue_req(&req->base);
727 if (mv_cesa_req_needs_cleanup(&req->base, ret))
728 mv_cesa_ahash_cleanup(req);
729
730 return ret;
731} 736}
732 737
733static int mv_cesa_ahash_finup(struct ahash_request *req) 738static int mv_cesa_ahash_finup(struct ahash_request *req)
734{ 739{
735 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 740 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
736 struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl; 741 struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
737 bool cached = false;
738 int ret;
739 742
740 creq->len += req->nbytes; 743 creq->len += req->nbytes;
741 mv_cesa_set_mac_op_total_len(tmpl, creq->len); 744 mv_cesa_set_mac_op_total_len(tmpl, creq->len);
742 creq->last_req = true; 745 creq->last_req = true;
743 746
744 ret = mv_cesa_ahash_req_init(req, &cached); 747 return mv_cesa_ahash_queue_req(req);
745 if (ret)
746 return ret;
747
748 if (cached)
749 return 0;
750
751 ret = mv_cesa_queue_req(&req->base);
752 if (mv_cesa_req_needs_cleanup(&req->base, ret))
753 mv_cesa_ahash_cleanup(req);
754
755 return ret;
756} 748}
757 749
758static int mv_cesa_ahash_export(struct ahash_request *req, void *hash, 750static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
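
In hash.c the update/final/finup entry points above no longer duplicate the submit logic; each one only adjusts the request state and delegates to mv_cesa_ahash_queue_req(). The toy sketch below mirrors that shape with invented types and a stubbed submit helper, purely to show the consolidation pattern rather than the real submission path.

    #include <stdio.h>

    struct hreq {
            unsigned long len;       /* total length hashed so far */
            unsigned int  nbytes;    /* bytes carried by this call */
            int           last_req;  /* final/finup set this */
    };

    /* Stub for the common submit path: init, pick engine, enqueue, rearm. */
    static int queue_req(struct hreq *r)
    {
            printf("submit len=%lu last=%d\n", r->len, r->last_req);
            return 0;
    }

    static int do_update(struct hreq *r)
    {
            r->len += r->nbytes;
            return queue_req(r);
    }

    static int do_final(struct hreq *r)
    {
            r->last_req = 1;
            r->nbytes = 0;
            return queue_req(r);
    }

    static int do_finup(struct hreq *r)
    {
            r->len += r->nbytes;
            r->last_req = 1;
            return queue_req(r);
    }

    int main(void)
    {
            struct hreq r = { .len = 0, .nbytes = 64, .last_req = 0 };

            do_update(&r);   /* submit len=64 last=0 */
            do_finup(&r);    /* submit len=128 last=1 */
            (void)do_final;  /* unused in this tiny demo */
            return 0;
    }
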
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 0ad8f1ecf175..86a065bcc187 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -37,9 +37,9 @@ bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter,
37 return true; 37 return true;
38} 38}
39 39
40void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq) 40void mv_cesa_dma_step(struct mv_cesa_req *dreq)
41{ 41{
42 struct mv_cesa_engine *engine = dreq->base.engine; 42 struct mv_cesa_engine *engine = dreq->engine;
43 43
44 writel_relaxed(0, engine->regs + CESA_SA_CFG); 44 writel_relaxed(0, engine->regs + CESA_SA_CFG);
45 45
@@ -53,19 +53,25 @@ void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq)
53 engine->regs + CESA_SA_CFG); 53 engine->regs + CESA_SA_CFG);
54 writel_relaxed(dreq->chain.first->cur_dma, 54 writel_relaxed(dreq->chain.first->cur_dma,
55 engine->regs + CESA_TDMA_NEXT_ADDR); 55 engine->regs + CESA_TDMA_NEXT_ADDR);
56 BUG_ON(readl(engine->regs + CESA_SA_CMD) &
57 CESA_SA_CMD_EN_CESA_SA_ACCL0);
56 writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); 58 writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
57} 59}
58 60
59void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq) 61void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)
60{ 62{
61 struct mv_cesa_tdma_desc *tdma; 63 struct mv_cesa_tdma_desc *tdma;
62 64
63 for (tdma = dreq->chain.first; tdma;) { 65 for (tdma = dreq->chain.first; tdma;) {
64 struct mv_cesa_tdma_desc *old_tdma = tdma; 66 struct mv_cesa_tdma_desc *old_tdma = tdma;
67 u32 type = tdma->flags & CESA_TDMA_TYPE_MSK;
65 68
66 if (tdma->flags & CESA_TDMA_OP) 69 if (type == CESA_TDMA_OP)
67 dma_pool_free(cesa_dev->dma->op_pool, tdma->op, 70 dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
68 le32_to_cpu(tdma->src)); 71 le32_to_cpu(tdma->src));
72 else if (type == CESA_TDMA_IV)
73 dma_pool_free(cesa_dev->dma->iv_pool, tdma->data,
74 le32_to_cpu(tdma->dst));
69 75
70 tdma = tdma->next; 76 tdma = tdma->next;
71 dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma, 77 dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
@@ -76,7 +82,7 @@ void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq)
76 dreq->chain.last = NULL; 82 dreq->chain.last = NULL;
77} 83}
78 84
79void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq, 85void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
80 struct mv_cesa_engine *engine) 86 struct mv_cesa_engine *engine)
81{ 87{
82 struct mv_cesa_tdma_desc *tdma; 88 struct mv_cesa_tdma_desc *tdma;
@@ -88,11 +94,97 @@ void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq,
88 if (tdma->flags & CESA_TDMA_SRC_IN_SRAM) 94 if (tdma->flags & CESA_TDMA_SRC_IN_SRAM)
89 tdma->src = cpu_to_le32(tdma->src + engine->sram_dma); 95 tdma->src = cpu_to_le32(tdma->src + engine->sram_dma);
90 96
91 if (tdma->flags & CESA_TDMA_OP) 97 if ((tdma->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_OP)
92 mv_cesa_adjust_op(engine, tdma->op); 98 mv_cesa_adjust_op(engine, tdma->op);
93 } 99 }
94} 100}
95 101
102void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
103 struct mv_cesa_req *dreq)
104{
105 if (engine->chain.first == NULL && engine->chain.last == NULL) {
106 engine->chain.first = dreq->chain.first;
107 engine->chain.last = dreq->chain.last;
108 } else {
109 struct mv_cesa_tdma_desc *last;
110
111 last = engine->chain.last;
112 last->next = dreq->chain.first;
113 engine->chain.last = dreq->chain.last;
114
115 if (!(last->flags & CESA_TDMA_BREAK_CHAIN))
116 last->next_dma = dreq->chain.first->cur_dma;
117 }
118}
119
120int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
121{
122 struct crypto_async_request *req = NULL;
123 struct mv_cesa_tdma_desc *tdma = NULL, *next = NULL;
124 dma_addr_t tdma_cur;
125 int res = 0;
126
127 tdma_cur = readl(engine->regs + CESA_TDMA_CUR);
128
129 for (tdma = engine->chain.first; tdma; tdma = next) {
130 spin_lock_bh(&engine->lock);
131 next = tdma->next;
132 spin_unlock_bh(&engine->lock);
133
134 if (tdma->flags & CESA_TDMA_END_OF_REQ) {
135 struct crypto_async_request *backlog = NULL;
136 struct mv_cesa_ctx *ctx;
137 u32 current_status;
138
139 spin_lock_bh(&engine->lock);
140 /*
141 * if req is NULL, this means we're processing the
142 * request in engine->req.
143 */
144 if (!req)
145 req = engine->req;
146 else
147 req = mv_cesa_dequeue_req_locked(engine,
148 &backlog);
149
150 /* Re-chaining to the next request */
151 engine->chain.first = tdma->next;
152 tdma->next = NULL;
153
154 /* If this is the last request, clear the chain */
155 if (engine->chain.first == NULL)
156 engine->chain.last = NULL;
157 spin_unlock_bh(&engine->lock);
158
159 ctx = crypto_tfm_ctx(req->tfm);
160 current_status = (tdma->cur_dma == tdma_cur) ?
161 status : CESA_SA_INT_ACC0_IDMA_DONE;
162 res = ctx->ops->process(req, current_status);
163 ctx->ops->complete(req);
164
165 if (res == 0)
166 mv_cesa_engine_enqueue_complete_request(engine,
167 req);
168
169 if (backlog)
170 backlog->complete(backlog, -EINPROGRESS);
171 }
172
173 if (res || tdma->cur_dma == tdma_cur)
174 break;
175 }
176
 177 /* Save the last failing request in engine->req, so that the core
 178 * knows which request was faulty */
179 if (res) {
180 spin_lock_bh(&engine->lock);
181 engine->req = req;
182 spin_unlock_bh(&engine->lock);
183 }
184
185 return res;
186}
187
96static struct mv_cesa_tdma_desc * 188static struct mv_cesa_tdma_desc *
97mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags) 189mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
98{ 190{
@@ -117,6 +209,32 @@ mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
117 return new_tdma; 209 return new_tdma;
118} 210}
119 211
212int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
213 u32 size, u32 flags, gfp_t gfp_flags)
214{
215
216 struct mv_cesa_tdma_desc *tdma;
217 u8 *iv;
218 dma_addr_t dma_handle;
219
220 tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
221 if (IS_ERR(tdma))
222 return PTR_ERR(tdma);
223
224 iv = dma_pool_alloc(cesa_dev->dma->iv_pool, gfp_flags, &dma_handle);
225 if (!iv)
226 return -ENOMEM;
227
228 tdma->byte_cnt = cpu_to_le32(size | BIT(31));
229 tdma->src = src;
230 tdma->dst = cpu_to_le32(dma_handle);
231 tdma->data = iv;
232
233 flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
234 tdma->flags = flags | CESA_TDMA_IV;
235 return 0;
236}
237
120struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain, 238struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
121 const struct mv_cesa_op_ctx *op_templ, 239 const struct mv_cesa_op_ctx *op_templ,
122 bool skip_ctx, 240 bool skip_ctx,
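
mv_cesa_tdma_chain() above appends a request's descriptor list to the engine-wide chain and only links the hardware next_dma pointer when the previous tail is not marked CESA_TDMA_BREAK_CHAIN. The standalone sketch below models that append/link decision on a simplified descriptor; the field names, the fake DMA addresses, and the missing locking are assumptions made for brevity.

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define BREAK_CHAIN (1u << 28)

    struct desc {
            struct desc *next;
            uint32_t     next_dma;   /* would hold the DMA address of 'next' */
            uint32_t     cur_dma;
            uint32_t     flags;
    };

    struct chain { struct desc *first, *last; };

    static void chain_append(struct chain *engine, struct chain *req)
    {
            if (!engine->first && !engine->last) {
                    engine->first = req->first;
                    engine->last = req->last;
                    return;
            }

            engine->last->next = req->first;
            /* Only let the hardware follow the link if the chain isn't broken. */
            if (!(engine->last->flags & BREAK_CHAIN))
                    engine->last->next_dma = req->first->cur_dma;
            engine->last = req->last;
    }

    int main(void)
    {
            struct desc a = { .cur_dma = 0x1000, .flags = 0 };
            struct desc b = { .cur_dma = 0x2000, .flags = BREAK_CHAIN };
            struct chain engine = { NULL, NULL };
            struct chain req1 = { &a, &a }, req2 = { &b, &b };

            chain_append(&engine, &req1);
            chain_append(&engine, &req2);
            printf("a.next_dma=0x%x (linked)\n", (unsigned)a.next_dma); /* 0x2000 */
            return 0;
    }
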
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 59ed54e464a9..625ee50fd78b 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -11,7 +11,6 @@
11 * http://www.gnu.org/copyleft/gpl.html 11 * http://www.gnu.org/copyleft/gpl.html
12 */ 12 */
13 13
14#include <linux/crypto.h>
15#include <linux/dma-mapping.h> 14#include <linux/dma-mapping.h>
16#include <linux/interrupt.h> 15#include <linux/interrupt.h>
17#include <linux/io.h> 16#include <linux/io.h>
@@ -25,6 +24,7 @@
25#include <crypto/aes.h> 24#include <crypto/aes.h>
26#include <crypto/sha.h> 25#include <crypto/sha.h>
27#include <crypto/internal/hash.h> 26#include <crypto/internal/hash.h>
27#include <crypto/internal/skcipher.h>
28 28
29#define DCP_MAX_CHANS 4 29#define DCP_MAX_CHANS 4
30#define DCP_BUF_SZ PAGE_SIZE 30#define DCP_BUF_SZ PAGE_SIZE
@@ -84,7 +84,7 @@ struct dcp_async_ctx {
84 unsigned int hot:1; 84 unsigned int hot:1;
85 85
86 /* Crypto-specific context */ 86 /* Crypto-specific context */
87 struct crypto_ablkcipher *fallback; 87 struct crypto_skcipher *fallback;
88 unsigned int key_len; 88 unsigned int key_len;
89 uint8_t key[AES_KEYSIZE_128]; 89 uint8_t key[AES_KEYSIZE_128];
90}; 90};
@@ -374,20 +374,22 @@ static int dcp_chan_thread_aes(void *data)
374 374
375static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc) 375static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
376{ 376{
377 struct crypto_tfm *tfm = 377 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
378 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); 378 struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
379 struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx( 379 SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
380 crypto_ablkcipher_reqtfm(req));
381 int ret; 380 int ret;
382 381
383 ablkcipher_request_set_tfm(req, ctx->fallback); 382 skcipher_request_set_tfm(subreq, ctx->fallback);
383 skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
384 skcipher_request_set_crypt(subreq, req->src, req->dst,
385 req->nbytes, req->info);
384 386
385 if (enc) 387 if (enc)
386 ret = crypto_ablkcipher_encrypt(req); 388 ret = crypto_skcipher_encrypt(subreq);
387 else 389 else
388 ret = crypto_ablkcipher_decrypt(req); 390 ret = crypto_skcipher_decrypt(subreq);
389 391
390 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); 392 skcipher_request_zero(subreq);
391 393
392 return ret; 394 return ret;
393} 395}
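
The fallback path above no longer re-targets the caller's ablkcipher_request at the software tfm and back; it builds a separate skcipher request on the stack, runs it, and wipes it. Reduced to a generic sketch (driver_ctx and driver_do_fallback are placeholder names; the fallback is assumed to be a crypto_skcipher allocated with CRYPTO_ALG_NEED_FALLBACK, with <crypto/internal/skcipher.h> included as in the hunk above):

struct driver_ctx {
	struct crypto_skcipher *fallback;	/* allocated at init time */
};

static int driver_do_fallback(struct ablkcipher_request *req, bool enc)
{
	struct driver_ctx *ctx =
		crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
	int ret;

	/* Mirror the original request onto the on-stack fallback request. */
	skcipher_request_set_tfm(subreq, ctx->fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->nbytes, req->info);

	ret = enc ? crypto_skcipher_encrypt(subreq) :
		    crypto_skcipher_decrypt(subreq);

	/* Wipe the on-stack request state before returning. */
	skcipher_request_zero(subreq);
	return ret;
}
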
@@ -453,28 +455,22 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
453 return 0; 455 return 0;
454 } 456 }
455 457
456 /* Check if the key size is supported by kernel at all. */
457 if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
458 tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
459 return -EINVAL;
460 }
461
462 /* 458 /*
463 * If the requested AES key size is not supported by the hardware, 459 * If the requested AES key size is not supported by the hardware,
464 * but is supported by in-kernel software implementation, we use 460 * but is supported by in-kernel software implementation, we use
465 * software fallback. 461 * software fallback.
466 */ 462 */
467 actx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; 463 crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
468 actx->fallback->base.crt_flags |= 464 crypto_skcipher_set_flags(actx->fallback,
469 tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK; 465 tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
470 466
471 ret = crypto_ablkcipher_setkey(actx->fallback, key, len); 467 ret = crypto_skcipher_setkey(actx->fallback, key, len);
472 if (!ret) 468 if (!ret)
473 return 0; 469 return 0;
474 470
475 tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK; 471 tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
476 tfm->base.crt_flags |= 472 tfm->base.crt_flags |= crypto_skcipher_get_flags(actx->fallback) &
477 actx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK; 473 CRYPTO_TFM_RES_MASK;
478 474
479 return ret; 475 return ret;
480} 476}
@@ -484,9 +480,9 @@ static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
484 const char *name = crypto_tfm_alg_name(tfm); 480 const char *name = crypto_tfm_alg_name(tfm);
485 const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK; 481 const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
486 struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm); 482 struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
487 struct crypto_ablkcipher *blk; 483 struct crypto_skcipher *blk;
488 484
489 blk = crypto_alloc_ablkcipher(name, 0, flags); 485 blk = crypto_alloc_skcipher(name, 0, flags);
490 if (IS_ERR(blk)) 486 if (IS_ERR(blk))
491 return PTR_ERR(blk); 487 return PTR_ERR(blk);
492 488
@@ -499,8 +495,7 @@ static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
499{ 495{
500 struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm); 496 struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
501 497
502 crypto_free_ablkcipher(actx->fallback); 498 crypto_free_skcipher(actx->fallback);
503 actx->fallback = NULL;
504} 499}
505 500
506/* 501/*
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 0794f1cc0018..42f0f229f7f7 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -392,7 +392,7 @@ static void nx_of_update_msc(struct device *dev,
392 ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) && 392 ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
393 i < msc->triplets; 393 i < msc->triplets;
394 i++) { 394 i++) {
395 if (msc->fc > NX_MAX_FC || msc->mode > NX_MAX_MODE) { 395 if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) {
396 dev_err(dev, "unknown function code/mode " 396 dev_err(dev, "unknown function code/mode "
397 "combo: %d/%d (ignored)\n", msc->fc, 397 "combo: %d/%d (ignored)\n", msc->fc,
398 msc->mode); 398 msc->mode);
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index ce174d3b842c..4ab53a604312 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -528,8 +528,6 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
528 528
529 omap_aes_dma_stop(dd); 529 omap_aes_dma_stop(dd);
530 530
531 dmaengine_terminate_all(dd->dma_lch_in);
532 dmaengine_terminate_all(dd->dma_lch_out);
533 531
534 return 0; 532 return 0;
535} 533}
@@ -580,10 +578,12 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
580 sg_init_table(&dd->in_sgl, 1); 578 sg_init_table(&dd->in_sgl, 1);
581 sg_set_buf(&dd->in_sgl, buf_in, total); 579 sg_set_buf(&dd->in_sgl, buf_in, total);
582 dd->in_sg = &dd->in_sgl; 580 dd->in_sg = &dd->in_sgl;
581 dd->in_sg_len = 1;
583 582
584 sg_init_table(&dd->out_sgl, 1); 583 sg_init_table(&dd->out_sgl, 1);
585 sg_set_buf(&dd->out_sgl, buf_out, total); 584 sg_set_buf(&dd->out_sgl, buf_out, total);
586 dd->out_sg = &dd->out_sgl; 585 dd->out_sg = &dd->out_sgl;
586 dd->out_sg_len = 1;
587 587
588 return 0; 588 return 0;
589} 589}
@@ -604,7 +604,6 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
604 crypto_ablkcipher_reqtfm(req)); 604 crypto_ablkcipher_reqtfm(req));
605 struct omap_aes_dev *dd = omap_aes_find_dev(ctx); 605 struct omap_aes_dev *dd = omap_aes_find_dev(ctx);
606 struct omap_aes_reqctx *rctx; 606 struct omap_aes_reqctx *rctx;
607 int len;
608 607
609 if (!dd) 608 if (!dd)
610 return -ENODEV; 609 return -ENODEV;
@@ -616,6 +615,14 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
616 dd->in_sg = req->src; 615 dd->in_sg = req->src;
617 dd->out_sg = req->dst; 616 dd->out_sg = req->dst;
618 617
618 dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total);
619 if (dd->in_sg_len < 0)
620 return dd->in_sg_len;
621
622 dd->out_sg_len = sg_nents_for_len(dd->out_sg, dd->total);
623 if (dd->out_sg_len < 0)
624 return dd->out_sg_len;
625
619 if (omap_aes_check_aligned(dd->in_sg, dd->total) || 626 if (omap_aes_check_aligned(dd->in_sg, dd->total) ||
620 omap_aes_check_aligned(dd->out_sg, dd->total)) { 627 omap_aes_check_aligned(dd->out_sg, dd->total)) {
621 if (omap_aes_copy_sgs(dd)) 628 if (omap_aes_copy_sgs(dd))
@@ -625,11 +632,6 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
625 dd->sgs_copied = 0; 632 dd->sgs_copied = 0;
626 } 633 }
627 634
628 len = ALIGN(dd->total, AES_BLOCK_SIZE);
629 dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, len);
630 dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, len);
631 BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);
632
633 rctx = ablkcipher_request_ctx(req); 635 rctx = ablkcipher_request_ctx(req);
634 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); 636 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
635 rctx->mode &= FLAGS_MODE_MASK; 637 rctx->mode &= FLAGS_MODE_MASK;
@@ -1185,17 +1187,19 @@ static int omap_aes_probe(struct platform_device *pdev)
1185 spin_unlock(&list_lock); 1187 spin_unlock(&list_lock);
1186 1188
1187 for (i = 0; i < dd->pdata->algs_info_size; i++) { 1189 for (i = 0; i < dd->pdata->algs_info_size; i++) {
1188 for (j = 0; j < dd->pdata->algs_info[i].size; j++) { 1190 if (!dd->pdata->algs_info[i].registered) {
1189 algp = &dd->pdata->algs_info[i].algs_list[j]; 1191 for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
1192 algp = &dd->pdata->algs_info[i].algs_list[j];
1190 1193
1191 pr_debug("reg alg: %s\n", algp->cra_name); 1194 pr_debug("reg alg: %s\n", algp->cra_name);
1192 INIT_LIST_HEAD(&algp->cra_list); 1195 INIT_LIST_HEAD(&algp->cra_list);
1193 1196
1194 err = crypto_register_alg(algp); 1197 err = crypto_register_alg(algp);
1195 if (err) 1198 if (err)
1196 goto err_algs; 1199 goto err_algs;
1197 1200
1198 dd->pdata->algs_info[i].registered++; 1201 dd->pdata->algs_info[i].registered++;
1202 }
1199 } 1203 }
1200 } 1204 }
1201 1205
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 3eedb03111ba..5691434ffb2d 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -560,10 +560,12 @@ static int omap_des_copy_sgs(struct omap_des_dev *dd)
560 sg_init_table(&dd->in_sgl, 1); 560 sg_init_table(&dd->in_sgl, 1);
561 sg_set_buf(&dd->in_sgl, buf_in, dd->total); 561 sg_set_buf(&dd->in_sgl, buf_in, dd->total);
562 dd->in_sg = &dd->in_sgl; 562 dd->in_sg = &dd->in_sgl;
563 dd->in_sg_len = 1;
563 564
564 sg_init_table(&dd->out_sgl, 1); 565 sg_init_table(&dd->out_sgl, 1);
565 sg_set_buf(&dd->out_sgl, buf_out, dd->total); 566 sg_set_buf(&dd->out_sgl, buf_out, dd->total);
566 dd->out_sg = &dd->out_sgl; 567 dd->out_sg = &dd->out_sgl;
568 dd->out_sg_len = 1;
567 569
568 return 0; 570 return 0;
569} 571}
@@ -595,6 +597,14 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
595 dd->in_sg = req->src; 597 dd->in_sg = req->src;
596 dd->out_sg = req->dst; 598 dd->out_sg = req->dst;
597 599
600 dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total);
601 if (dd->in_sg_len < 0)
602 return dd->in_sg_len;
603
604 dd->out_sg_len = sg_nents_for_len(dd->out_sg, dd->total);
605 if (dd->out_sg_len < 0)
606 return dd->out_sg_len;
607
598 if (omap_des_copy_needed(dd->in_sg) || 608 if (omap_des_copy_needed(dd->in_sg) ||
599 omap_des_copy_needed(dd->out_sg)) { 609 omap_des_copy_needed(dd->out_sg)) {
600 if (omap_des_copy_sgs(dd)) 610 if (omap_des_copy_sgs(dd))
@@ -604,10 +614,6 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
604 dd->sgs_copied = 0; 614 dd->sgs_copied = 0;
605 } 615 }
606 616
607 dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total);
608 dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total);
609 BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);
610
611 rctx = ablkcipher_request_ctx(req); 617 rctx = ablkcipher_request_ctx(req);
612 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); 618 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
613 rctx->mode &= FLAGS_MODE_MASK; 619 rctx->mode &= FLAGS_MODE_MASK;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 63464e86f2b1..7fe4eef12fe2 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -100,6 +100,8 @@
100 100
101#define DEFAULT_TIMEOUT_INTERVAL HZ 101#define DEFAULT_TIMEOUT_INTERVAL HZ
102 102
103#define DEFAULT_AUTOSUSPEND_DELAY 1000
104
103/* mostly device flags */ 105/* mostly device flags */
104#define FLAGS_BUSY 0 106#define FLAGS_BUSY 0
105#define FLAGS_FINAL 1 107#define FLAGS_FINAL 1
@@ -173,7 +175,7 @@ struct omap_sham_ctx {
173 struct omap_sham_hmac_ctx base[0]; 175 struct omap_sham_hmac_ctx base[0];
174}; 176};
175 177
176#define OMAP_SHAM_QUEUE_LENGTH 1 178#define OMAP_SHAM_QUEUE_LENGTH 10
177 179
178struct omap_sham_algs_info { 180struct omap_sham_algs_info {
179 struct ahash_alg *algs_list; 181 struct ahash_alg *algs_list;
@@ -813,7 +815,6 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
813{ 815{
814 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 816 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
815 817
816 dmaengine_terminate_all(dd->dma_lch);
817 818
818 if (ctx->flags & BIT(FLAGS_SG)) { 819 if (ctx->flags & BIT(FLAGS_SG)) {
819 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); 820 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
@@ -999,7 +1000,8 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
999 dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) | 1000 dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
1000 BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY)); 1001 BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
1001 1002
1002 pm_runtime_put(dd->dev); 1003 pm_runtime_mark_last_busy(dd->dev);
1004 pm_runtime_put_autosuspend(dd->dev);
1003 1005
1004 if (req->base.complete) 1006 if (req->base.complete)
1005 req->base.complete(&req->base, err); 1007 req->base.complete(&req->base, err);
@@ -1093,7 +1095,7 @@ static int omap_sham_update(struct ahash_request *req)
1093 ctx->offset = 0; 1095 ctx->offset = 0;
1094 1096
1095 if (ctx->flags & BIT(FLAGS_FINUP)) { 1097 if (ctx->flags & BIT(FLAGS_FINUP)) {
1096 if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) { 1098 if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 240) {
1097 /* 1099 /*
1098 * OMAP HW accel works only with buffers >= 9 1100 * OMAP HW accel works only with buffers >= 9
1099 * will switch to bypass in final() 1101 * will switch to bypass in final()
@@ -1149,9 +1151,13 @@ static int omap_sham_final(struct ahash_request *req)
1149 if (ctx->flags & BIT(FLAGS_ERROR)) 1151 if (ctx->flags & BIT(FLAGS_ERROR))
1150 return 0; /* uncompleted hash is not needed */ 1152 return 0; /* uncompleted hash is not needed */
1151 1153
1152 /* OMAP HW accel works only with buffers >= 9 */ 1154 /*
1153 /* HMAC is always >= 9 because ipad == block size */ 1155 * OMAP HW accel works only with buffers >= 9.
1154 if ((ctx->digcnt + ctx->bufcnt) < 9) 1156 * HMAC is always >= 9 because ipad == block size.
1157	 * If the buffer size is less than 240 bytes, we use the SW fallback,
1158 * as using DMA + HW in this case doesn't provide any benefit.
1159 */
1160 if ((ctx->digcnt + ctx->bufcnt) < 240)
1155 return omap_sham_final_shash(req); 1161 return omap_sham_final_shash(req);
1156 else if (ctx->bufcnt) 1162 else if (ctx->bufcnt)
1157 return omap_sham_enqueue(req, OP_FINAL); 1163 return omap_sham_enqueue(req, OP_FINAL);
@@ -1328,7 +1334,7 @@ static struct ahash_alg algs_sha1_md5[] = {
1328 .halg.base = { 1334 .halg.base = {
1329 .cra_name = "sha1", 1335 .cra_name = "sha1",
1330 .cra_driver_name = "omap-sha1", 1336 .cra_driver_name = "omap-sha1",
1331 .cra_priority = 100, 1337 .cra_priority = 400,
1332 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1338 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1333 CRYPTO_ALG_KERN_DRIVER_ONLY | 1339 CRYPTO_ALG_KERN_DRIVER_ONLY |
1334 CRYPTO_ALG_ASYNC | 1340 CRYPTO_ALG_ASYNC |
@@ -1351,7 +1357,7 @@ static struct ahash_alg algs_sha1_md5[] = {
1351 .halg.base = { 1357 .halg.base = {
1352 .cra_name = "md5", 1358 .cra_name = "md5",
1353 .cra_driver_name = "omap-md5", 1359 .cra_driver_name = "omap-md5",
1354 .cra_priority = 100, 1360 .cra_priority = 400,
1355 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1361 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1356 CRYPTO_ALG_KERN_DRIVER_ONLY | 1362 CRYPTO_ALG_KERN_DRIVER_ONLY |
1357 CRYPTO_ALG_ASYNC | 1363 CRYPTO_ALG_ASYNC |
@@ -1375,7 +1381,7 @@ static struct ahash_alg algs_sha1_md5[] = {
1375 .halg.base = { 1381 .halg.base = {
1376 .cra_name = "hmac(sha1)", 1382 .cra_name = "hmac(sha1)",
1377 .cra_driver_name = "omap-hmac-sha1", 1383 .cra_driver_name = "omap-hmac-sha1",
1378 .cra_priority = 100, 1384 .cra_priority = 400,
1379 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1385 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1380 CRYPTO_ALG_KERN_DRIVER_ONLY | 1386 CRYPTO_ALG_KERN_DRIVER_ONLY |
1381 CRYPTO_ALG_ASYNC | 1387 CRYPTO_ALG_ASYNC |
@@ -1400,7 +1406,7 @@ static struct ahash_alg algs_sha1_md5[] = {
1400 .halg.base = { 1406 .halg.base = {
1401 .cra_name = "hmac(md5)", 1407 .cra_name = "hmac(md5)",
1402 .cra_driver_name = "omap-hmac-md5", 1408 .cra_driver_name = "omap-hmac-md5",
1403 .cra_priority = 100, 1409 .cra_priority = 400,
1404 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1410 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1405 CRYPTO_ALG_KERN_DRIVER_ONLY | 1411 CRYPTO_ALG_KERN_DRIVER_ONLY |
1406 CRYPTO_ALG_ASYNC | 1412 CRYPTO_ALG_ASYNC |
@@ -1428,7 +1434,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
1428 .halg.base = { 1434 .halg.base = {
1429 .cra_name = "sha224", 1435 .cra_name = "sha224",
1430 .cra_driver_name = "omap-sha224", 1436 .cra_driver_name = "omap-sha224",
1431 .cra_priority = 100, 1437 .cra_priority = 400,
1432 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1438 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1433 CRYPTO_ALG_ASYNC | 1439 CRYPTO_ALG_ASYNC |
1434 CRYPTO_ALG_NEED_FALLBACK, 1440 CRYPTO_ALG_NEED_FALLBACK,
@@ -1450,7 +1456,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
1450 .halg.base = { 1456 .halg.base = {
1451 .cra_name = "sha256", 1457 .cra_name = "sha256",
1452 .cra_driver_name = "omap-sha256", 1458 .cra_driver_name = "omap-sha256",
1453 .cra_priority = 100, 1459 .cra_priority = 400,
1454 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1460 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1455 CRYPTO_ALG_ASYNC | 1461 CRYPTO_ALG_ASYNC |
1456 CRYPTO_ALG_NEED_FALLBACK, 1462 CRYPTO_ALG_NEED_FALLBACK,
@@ -1473,7 +1479,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
1473 .halg.base = { 1479 .halg.base = {
1474 .cra_name = "hmac(sha224)", 1480 .cra_name = "hmac(sha224)",
1475 .cra_driver_name = "omap-hmac-sha224", 1481 .cra_driver_name = "omap-hmac-sha224",
1476 .cra_priority = 100, 1482 .cra_priority = 400,
1477 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1483 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1478 CRYPTO_ALG_ASYNC | 1484 CRYPTO_ALG_ASYNC |
1479 CRYPTO_ALG_NEED_FALLBACK, 1485 CRYPTO_ALG_NEED_FALLBACK,
@@ -1497,7 +1503,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
1497 .halg.base = { 1503 .halg.base = {
1498 .cra_name = "hmac(sha256)", 1504 .cra_name = "hmac(sha256)",
1499 .cra_driver_name = "omap-hmac-sha256", 1505 .cra_driver_name = "omap-hmac-sha256",
1500 .cra_priority = 100, 1506 .cra_priority = 400,
1501 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1507 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1502 CRYPTO_ALG_ASYNC | 1508 CRYPTO_ALG_ASYNC |
1503 CRYPTO_ALG_NEED_FALLBACK, 1509 CRYPTO_ALG_NEED_FALLBACK,
@@ -1523,7 +1529,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
1523 .halg.base = { 1529 .halg.base = {
1524 .cra_name = "sha384", 1530 .cra_name = "sha384",
1525 .cra_driver_name = "omap-sha384", 1531 .cra_driver_name = "omap-sha384",
1526 .cra_priority = 100, 1532 .cra_priority = 400,
1527 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1533 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1528 CRYPTO_ALG_ASYNC | 1534 CRYPTO_ALG_ASYNC |
1529 CRYPTO_ALG_NEED_FALLBACK, 1535 CRYPTO_ALG_NEED_FALLBACK,
@@ -1545,7 +1551,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
1545 .halg.base = { 1551 .halg.base = {
1546 .cra_name = "sha512", 1552 .cra_name = "sha512",
1547 .cra_driver_name = "omap-sha512", 1553 .cra_driver_name = "omap-sha512",
1548 .cra_priority = 100, 1554 .cra_priority = 400,
1549 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1555 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1550 CRYPTO_ALG_ASYNC | 1556 CRYPTO_ALG_ASYNC |
1551 CRYPTO_ALG_NEED_FALLBACK, 1557 CRYPTO_ALG_NEED_FALLBACK,
@@ -1568,7 +1574,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
1568 .halg.base = { 1574 .halg.base = {
1569 .cra_name = "hmac(sha384)", 1575 .cra_name = "hmac(sha384)",
1570 .cra_driver_name = "omap-hmac-sha384", 1576 .cra_driver_name = "omap-hmac-sha384",
1571 .cra_priority = 100, 1577 .cra_priority = 400,
1572 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1578 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1573 CRYPTO_ALG_ASYNC | 1579 CRYPTO_ALG_ASYNC |
1574 CRYPTO_ALG_NEED_FALLBACK, 1580 CRYPTO_ALG_NEED_FALLBACK,
@@ -1592,7 +1598,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
1592 .halg.base = { 1598 .halg.base = {
1593 .cra_name = "hmac(sha512)", 1599 .cra_name = "hmac(sha512)",
1594 .cra_driver_name = "omap-hmac-sha512", 1600 .cra_driver_name = "omap-hmac-sha512",
1595 .cra_priority = 100, 1601 .cra_priority = 400,
1596 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1602 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1597 CRYPTO_ALG_ASYNC | 1603 CRYPTO_ALG_ASYNC |
1598 CRYPTO_ALG_NEED_FALLBACK, 1604 CRYPTO_ALG_NEED_FALLBACK,
@@ -1946,6 +1952,9 @@ static int omap_sham_probe(struct platform_device *pdev)
1946 1952
1947 dd->flags |= dd->pdata->flags; 1953 dd->flags |= dd->pdata->flags;
1948 1954
1955 pm_runtime_use_autosuspend(dev);
1956 pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
1957
1949 pm_runtime_enable(dev); 1958 pm_runtime_enable(dev);
1950 pm_runtime_irq_safe(dev); 1959 pm_runtime_irq_safe(dev);
1951 1960
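
With the queue length raised to 10, back-to-back requests would otherwise bounce the device in and out of runtime suspend between completions; the autosuspend conversion keeps it powered for DEFAULT_AUTOSUSPEND_DELAY milliseconds after the last completion. The pattern reduces to two halves, probe-time setup and per-request release (a sketch, not the full probe sequence):

	/* probe: opt in to autosuspend before enabling runtime PM */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
	pm_runtime_enable(dev);

	/* request completion: defer the suspend instead of dropping
	 * the usage count immediately with pm_runtime_put()
	 */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
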
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 3b1c7ecf078f..47576098831f 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -171,7 +171,7 @@ struct spacc_ablk_ctx {
171 * The fallback cipher. If the operation can't be done in hardware, 171 * The fallback cipher. If the operation can't be done in hardware,
172 * fallback to a software version. 172 * fallback to a software version.
173 */ 173 */
174 struct crypto_ablkcipher *sw_cipher; 174 struct crypto_skcipher *sw_cipher;
175}; 175};
176 176
177/* AEAD cipher context. */ 177/* AEAD cipher context. */
@@ -789,33 +789,35 @@ static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
789 * request for any other size (192 bits) then we need to do a software 789 * request for any other size (192 bits) then we need to do a software
790 * fallback. 790 * fallback.
791 */ 791 */
792 if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 && 792 if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
793 ctx->sw_cipher) { 793 if (!ctx->sw_cipher)
794 return -EINVAL;
795
794 /* 796 /*
795 * Set the fallback transform to use the same request flags as 797 * Set the fallback transform to use the same request flags as
796 * the hardware transform. 798 * the hardware transform.
797 */ 799 */
798 ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; 800 crypto_skcipher_clear_flags(ctx->sw_cipher,
799 ctx->sw_cipher->base.crt_flags |= 801 CRYPTO_TFM_REQ_MASK);
800 cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK; 802 crypto_skcipher_set_flags(ctx->sw_cipher,
803 cipher->base.crt_flags &
804 CRYPTO_TFM_REQ_MASK);
805
806 err = crypto_skcipher_setkey(ctx->sw_cipher, key, len);
807
808 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
809 tfm->crt_flags |=
810 crypto_skcipher_get_flags(ctx->sw_cipher) &
811 CRYPTO_TFM_RES_MASK;
801 812
802 err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len);
803 if (err) 813 if (err)
804 goto sw_setkey_failed; 814 goto sw_setkey_failed;
805 } else if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 && 815 }
806 !ctx->sw_cipher)
807 err = -EINVAL;
808 816
809 memcpy(ctx->key, key, len); 817 memcpy(ctx->key, key, len);
810 ctx->key_len = len; 818 ctx->key_len = len;
811 819
812sw_setkey_failed: 820sw_setkey_failed:
813 if (err && ctx->sw_cipher) {
814 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
815 tfm->crt_flags |=
816 ctx->sw_cipher->base.crt_flags & CRYPTO_TFM_RES_MASK;
817 }
818
819 return err; 821 return err;
820} 822}
821 823
@@ -910,20 +912,21 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
910 struct crypto_tfm *old_tfm = 912 struct crypto_tfm *old_tfm =
911 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); 913 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
912 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm); 914 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
915 SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
913 int err; 916 int err;
914 917
915 if (!ctx->sw_cipher)
916 return -EINVAL;
917
918 /* 918 /*
919 * Change the request to use the software fallback transform, and once 919 * Change the request to use the software fallback transform, and once
920 * the ciphering has completed, put the old transform back into the 920 * the ciphering has completed, put the old transform back into the
921 * request. 921 * request.
922 */ 922 */
923 ablkcipher_request_set_tfm(req, ctx->sw_cipher); 923 skcipher_request_set_tfm(subreq, ctx->sw_cipher);
924 err = is_encrypt ? crypto_ablkcipher_encrypt(req) : 924 skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
925 crypto_ablkcipher_decrypt(req); 925 skcipher_request_set_crypt(subreq, req->src, req->dst,
926 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(old_tfm)); 926 req->nbytes, req->info);
927 err = is_encrypt ? crypto_skcipher_encrypt(subreq) :
928 crypto_skcipher_decrypt(subreq);
929 skcipher_request_zero(subreq);
927 930
928 return err; 931 return err;
929} 932}
@@ -1015,12 +1018,13 @@ static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
1015 ctx->generic.flags = spacc_alg->type; 1018 ctx->generic.flags = spacc_alg->type;
1016 ctx->generic.engine = engine; 1019 ctx->generic.engine = engine;
1017 if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) { 1020 if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
1018 ctx->sw_cipher = crypto_alloc_ablkcipher(alg->cra_name, 0, 1021 ctx->sw_cipher = crypto_alloc_skcipher(
1019 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); 1022 alg->cra_name, 0, CRYPTO_ALG_ASYNC |
1023 CRYPTO_ALG_NEED_FALLBACK);
1020 if (IS_ERR(ctx->sw_cipher)) { 1024 if (IS_ERR(ctx->sw_cipher)) {
1021 dev_warn(engine->dev, "failed to allocate fallback for %s\n", 1025 dev_warn(engine->dev, "failed to allocate fallback for %s\n",
1022 alg->cra_name); 1026 alg->cra_name);
1023 ctx->sw_cipher = NULL; 1027 return PTR_ERR(ctx->sw_cipher);
1024 } 1028 }
1025 } 1029 }
1026 ctx->generic.key_offs = spacc_alg->key_offs; 1030 ctx->generic.key_offs = spacc_alg->key_offs;
@@ -1035,9 +1039,7 @@ static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
1035{ 1039{
1036 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm); 1040 struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
1037 1041
1038 if (ctx->sw_cipher) 1042 crypto_free_skcipher(ctx->sw_cipher);
1039 crypto_free_ablkcipher(ctx->sw_cipher);
1040 ctx->sw_cipher = NULL;
1041} 1043}
1042 1044
1043static int spacc_ablk_encrypt(struct ablkcipher_request *req) 1045static int spacc_ablk_encrypt(struct ablkcipher_request *req)
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
index 85b44e577684..ce3cae40f949 100644
--- a/drivers/crypto/qat/Kconfig
+++ b/drivers/crypto/qat/Kconfig
@@ -4,12 +4,13 @@ config CRYPTO_DEV_QAT
4 select CRYPTO_AUTHENC 4 select CRYPTO_AUTHENC
5 select CRYPTO_BLKCIPHER 5 select CRYPTO_BLKCIPHER
6 select CRYPTO_AKCIPHER 6 select CRYPTO_AKCIPHER
7 select CRYPTO_DH
7 select CRYPTO_HMAC 8 select CRYPTO_HMAC
9 select CRYPTO_RSA
8 select CRYPTO_SHA1 10 select CRYPTO_SHA1
9 select CRYPTO_SHA256 11 select CRYPTO_SHA256
10 select CRYPTO_SHA512 12 select CRYPTO_SHA512
11 select FW_LOADER 13 select FW_LOADER
12 select ASN1
13 14
14config CRYPTO_DEV_QAT_DH895xCC 15config CRYPTO_DEV_QAT_DH895xCC
15 tristate "Support for Intel(R) DH895xCC" 16 tristate "Support for Intel(R) DH895xCC"
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index c5bd5a9abc4d..6bc68bc00d76 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -229,6 +229,7 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
229 hw_data->get_arb_mapping = adf_get_arbiter_mapping; 229 hw_data->get_arb_mapping = adf_get_arbiter_mapping;
230 hw_data->enable_ints = adf_enable_ints; 230 hw_data->enable_ints = adf_enable_ints;
231 hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; 231 hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
232 hw_data->reset_device = adf_reset_flr;
232 hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; 233 hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
233} 234}
234 235
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
index 879e04cae714..618cec360b39 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
@@ -239,6 +239,7 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
239 hw_data->get_arb_mapping = adf_get_arbiter_mapping; 239 hw_data->get_arb_mapping = adf_get_arbiter_mapping;
240 hw_data->enable_ints = adf_enable_ints; 240 hw_data->enable_ints = adf_enable_ints;
241 hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; 241 hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
242 hw_data->reset_device = adf_reset_flr;
242 hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; 243 hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
243} 244}
244 245
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index 5fc3dbb9ada0..92fb6ffdc062 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -1,12 +1,3 @@
1$(obj)/qat_rsapubkey-asn1.o: $(obj)/qat_rsapubkey-asn1.c \
2 $(obj)/qat_rsapubkey-asn1.h
3$(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \
4 $(obj)/qat_rsaprivkey-asn1.h
5$(obj)/qat_asym_algs.o: $(obj)/qat_rsapubkey-asn1.h $(obj)/qat_rsaprivkey-asn1.h
6
7clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h
8clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h
9
10obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o 1obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
11intel_qat-objs := adf_cfg.o \ 2intel_qat-objs := adf_cfg.o \
12 adf_isr.o \ 3 adf_isr.o \
@@ -20,8 +11,6 @@ intel_qat-objs := adf_cfg.o \
20 adf_hw_arbiter.o \ 11 adf_hw_arbiter.o \
21 qat_crypto.o \ 12 qat_crypto.o \
22 qat_algs.o \ 13 qat_algs.o \
23 qat_rsapubkey-asn1.o \
24 qat_rsaprivkey-asn1.o \
25 qat_asym_algs.o \ 14 qat_asym_algs.o \
26 qat_uclo.o \ 15 qat_uclo.o \
27 qat_hal.o 16 qat_hal.o
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 5a07208ce778..e8822536530b 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -176,6 +176,7 @@ struct adf_hw_device_data {
176 void (*disable_iov)(struct adf_accel_dev *accel_dev); 176 void (*disable_iov)(struct adf_accel_dev *accel_dev);
177 void (*enable_ints)(struct adf_accel_dev *accel_dev); 177 void (*enable_ints)(struct adf_accel_dev *accel_dev);
178 int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev); 178 int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
179 void (*reset_device)(struct adf_accel_dev *accel_dev);
179 const char *fw_name; 180 const char *fw_name;
180 const char *fw_mmp_name; 181 const char *fw_mmp_name;
181 uint32_t fuses; 182 uint32_t fuses;
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index b40d9c8dad96..2839fccdd84b 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -82,18 +82,12 @@ struct adf_reset_dev_data {
82 struct work_struct reset_work; 82 struct work_struct reset_work;
83}; 83};
84 84
85void adf_dev_restore(struct adf_accel_dev *accel_dev) 85void adf_reset_sbr(struct adf_accel_dev *accel_dev)
86{ 86{
87 struct pci_dev *pdev = accel_to_pci_dev(accel_dev); 87 struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
88 struct pci_dev *parent = pdev->bus->self; 88 struct pci_dev *parent = pdev->bus->self;
89 uint16_t bridge_ctl = 0; 89 uint16_t bridge_ctl = 0;
90 90
91 if (accel_dev->is_vf)
92 return;
93
94 dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
95 accel_dev->accel_id);
96
97 if (!parent) 91 if (!parent)
98 parent = pdev; 92 parent = pdev;
99 93
@@ -101,6 +95,8 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev)
101 dev_info(&GET_DEV(accel_dev), 95 dev_info(&GET_DEV(accel_dev),
102 "Transaction still in progress. Proceeding\n"); 96 "Transaction still in progress. Proceeding\n");
103 97
98 dev_info(&GET_DEV(accel_dev), "Secondary bus reset\n");
99
104 pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl); 100 pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
105 bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET; 101 bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET;
106 pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl); 102 pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
@@ -108,8 +104,40 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev)
108 bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET; 104 bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
109 pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl); 105 pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
110 msleep(100); 106 msleep(100);
111 pci_restore_state(pdev); 107}
112 pci_save_state(pdev); 108EXPORT_SYMBOL_GPL(adf_reset_sbr);
109
110void adf_reset_flr(struct adf_accel_dev *accel_dev)
111{
112 struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
113 u16 control = 0;
114 int pos = 0;
115
116 dev_info(&GET_DEV(accel_dev), "Function level reset\n");
117 pos = pci_pcie_cap(pdev);
118 if (!pos) {
119 dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
120 return;
121 }
122 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &control);
123 control |= PCI_EXP_DEVCTL_BCR_FLR;
124 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, control);
125 msleep(100);
126}
127EXPORT_SYMBOL_GPL(adf_reset_flr);
128
129void adf_dev_restore(struct adf_accel_dev *accel_dev)
130{
131 struct adf_hw_device_data *hw_device = accel_dev->hw_device;
132 struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
133
134 if (hw_device->reset_device) {
135 dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
136 accel_dev->accel_id);
137 hw_device->reset_device(accel_dev);
138 pci_restore_state(pdev);
139 pci_save_state(pdev);
140 }
113} 141}
114 142
115static void adf_device_reset_worker(struct work_struct *work) 143static void adf_device_reset_worker(struct work_struct *work)
@@ -243,7 +271,8 @@ EXPORT_SYMBOL_GPL(adf_disable_aer);
243 271
244int adf_init_aer(void) 272int adf_init_aer(void)
245{ 273{
246 device_reset_wq = create_workqueue("qat_device_reset_wq"); 274 device_reset_wq = alloc_workqueue("qat_device_reset_wq",
275 WQ_MEM_RECLAIM, 0);
247 return !device_reset_wq ? -EFAULT : 0; 276 return !device_reset_wq ? -EFAULT : 0;
248} 277}
249 278
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 75faa39bc8d0..980e07475012 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -141,6 +141,8 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev);
141 141
142int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf); 142int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
143void adf_disable_aer(struct adf_accel_dev *accel_dev); 143void adf_disable_aer(struct adf_accel_dev *accel_dev);
144void adf_reset_sbr(struct adf_accel_dev *accel_dev);
145void adf_reset_flr(struct adf_accel_dev *accel_dev);
144void adf_dev_restore(struct adf_accel_dev *accel_dev); 146void adf_dev_restore(struct adf_accel_dev *accel_dev);
145int adf_init_aer(void); 147int adf_init_aer(void);
146void adf_exit_aer(void); 148void adf_exit_aer(void);
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index 4a526e2f1d7f..9320ae1d005b 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -292,7 +292,7 @@ EXPORT_SYMBOL_GPL(adf_sriov_configure);
292int __init adf_init_pf_wq(void) 292int __init adf_init_pf_wq(void)
293{ 293{
294 /* Workqueue for PF2VF responses */ 294 /* Workqueue for PF2VF responses */
295 pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq"); 295 pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);
296 296
297 return !pf2vf_resp_wq ? -ENOMEM : 0; 297 return !pf2vf_resp_wq ? -ENOMEM : 0;
298} 298}
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
index aa689cabedb4..bf99e11a3403 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -321,7 +321,7 @@ EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
321 321
322int __init adf_init_vf_wq(void) 322int __init adf_init_vf_wq(void)
323{ 323{
324 adf_vf_stop_wq = create_workqueue("adf_vf_stop_wq"); 324 adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);
325 325
326 return !adf_vf_stop_wq ? -EFAULT : 0; 326 return !adf_vf_stop_wq ? -EFAULT : 0;
327} 327}
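
All three qat workqueues touched here (device reset, PF2VF responses, VF stop) move from create_workqueue() to alloc_workqueue() with an explicit WQ_MEM_RECLAIM, making the forward-progress guarantee (a dedicated rescuer thread) visible at the call site instead of implied by the legacy helper. The shape of the conversion, sketched once:

	struct workqueue_struct *wq;

	/* was: wq = create_workqueue("qat_device_reset_wq"); */
	wq = alloc_workqueue("qat_device_reset_wq", WQ_MEM_RECLAIM, 0);
	if (!wq)
		return -ENOMEM;
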
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 1e8852a8a057..769148dbaeb3 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -947,13 +947,13 @@ static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
947 return 0; 947 return 0;
948 948
949out_free_all: 949out_free_all:
950 memset(ctx->dec_cd, 0, sizeof(*ctx->enc_cd)); 950 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
951 dma_free_coherent(dev, sizeof(*ctx->enc_cd), 951 dma_free_coherent(dev, sizeof(*ctx->dec_cd),
952 ctx->dec_cd, ctx->dec_cd_paddr); 952 ctx->dec_cd, ctx->dec_cd_paddr);
953 ctx->dec_cd = NULL; 953 ctx->dec_cd = NULL;
954out_free_enc: 954out_free_enc:
955 memset(ctx->enc_cd, 0, sizeof(*ctx->dec_cd)); 955 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
956 dma_free_coherent(dev, sizeof(*ctx->dec_cd), 956 dma_free_coherent(dev, sizeof(*ctx->enc_cd),
957 ctx->enc_cd, ctx->enc_cd_paddr); 957 ctx->enc_cd, ctx->enc_cd_paddr);
958 ctx->enc_cd = NULL; 958 ctx->enc_cd = NULL;
959 return -ENOMEM; 959 return -ENOMEM;
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 05f49d4f94b2..0d35dca2e925 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -49,11 +49,12 @@
49#include <crypto/internal/rsa.h> 49#include <crypto/internal/rsa.h>
50#include <crypto/internal/akcipher.h> 50#include <crypto/internal/akcipher.h>
51#include <crypto/akcipher.h> 51#include <crypto/akcipher.h>
52#include <crypto/kpp.h>
53#include <crypto/internal/kpp.h>
54#include <crypto/dh.h>
52#include <linux/dma-mapping.h> 55#include <linux/dma-mapping.h>
53#include <linux/fips.h> 56#include <linux/fips.h>
54#include <crypto/scatterwalk.h> 57#include <crypto/scatterwalk.h>
55#include "qat_rsapubkey-asn1.h"
56#include "qat_rsaprivkey-asn1.h"
57#include "icp_qat_fw_pke.h" 58#include "icp_qat_fw_pke.h"
58#include "adf_accel_devices.h" 59#include "adf_accel_devices.h"
59#include "adf_transport.h" 60#include "adf_transport.h"
@@ -75,6 +76,14 @@ struct qat_rsa_input_params {
75 dma_addr_t d; 76 dma_addr_t d;
76 dma_addr_t n; 77 dma_addr_t n;
77 } dec; 78 } dec;
79 struct {
80 dma_addr_t c;
81 dma_addr_t p;
82 dma_addr_t q;
83 dma_addr_t dp;
84 dma_addr_t dq;
85 dma_addr_t qinv;
86 } dec_crt;
78 u64 in_tab[8]; 87 u64 in_tab[8];
79 }; 88 };
80} __packed __aligned(64); 89} __packed __aligned(64);
@@ -95,71 +104,480 @@ struct qat_rsa_ctx {
95 char *n; 104 char *n;
96 char *e; 105 char *e;
97 char *d; 106 char *d;
107 char *p;
108 char *q;
109 char *dp;
110 char *dq;
111 char *qinv;
98 dma_addr_t dma_n; 112 dma_addr_t dma_n;
99 dma_addr_t dma_e; 113 dma_addr_t dma_e;
100 dma_addr_t dma_d; 114 dma_addr_t dma_d;
115 dma_addr_t dma_p;
116 dma_addr_t dma_q;
117 dma_addr_t dma_dp;
118 dma_addr_t dma_dq;
119 dma_addr_t dma_qinv;
101 unsigned int key_sz; 120 unsigned int key_sz;
121 bool crt_mode;
122 struct qat_crypto_instance *inst;
123} __packed __aligned(64);
124
125struct qat_dh_input_params {
126 union {
127 struct {
128 dma_addr_t b;
129 dma_addr_t xa;
130 dma_addr_t p;
131 } in;
132 struct {
133 dma_addr_t xa;
134 dma_addr_t p;
135 } in_g2;
136 u64 in_tab[8];
137 };
138} __packed __aligned(64);
139
140struct qat_dh_output_params {
141 union {
142 dma_addr_t r;
143 u64 out_tab[8];
144 };
145} __packed __aligned(64);
146
147struct qat_dh_ctx {
148 char *g;
149 char *xa;
150 char *p;
151 dma_addr_t dma_g;
152 dma_addr_t dma_xa;
153 dma_addr_t dma_p;
154 unsigned int p_size;
155 bool g2;
102 struct qat_crypto_instance *inst; 156 struct qat_crypto_instance *inst;
103} __packed __aligned(64); 157} __packed __aligned(64);
104 158
105struct qat_rsa_request { 159struct qat_asym_request {
106 struct qat_rsa_input_params in; 160 union {
107 struct qat_rsa_output_params out; 161 struct qat_rsa_input_params rsa;
162 struct qat_dh_input_params dh;
163 } in;
164 union {
165 struct qat_rsa_output_params rsa;
166 struct qat_dh_output_params dh;
167 } out;
108 dma_addr_t phy_in; 168 dma_addr_t phy_in;
109 dma_addr_t phy_out; 169 dma_addr_t phy_out;
110 char *src_align; 170 char *src_align;
111 char *dst_align; 171 char *dst_align;
112 struct icp_qat_fw_pke_request req; 172 struct icp_qat_fw_pke_request req;
113 struct qat_rsa_ctx *ctx; 173 union {
174 struct qat_rsa_ctx *rsa;
175 struct qat_dh_ctx *dh;
176 } ctx;
177 union {
178 struct akcipher_request *rsa;
179 struct kpp_request *dh;
180 } areq;
114 int err; 181 int err;
182 void (*cb)(struct icp_qat_fw_pke_resp *resp);
115} __aligned(64); 183} __aligned(64);
116 184
117static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp) 185static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
118{ 186{
119 struct akcipher_request *areq = (void *)(__force long)resp->opaque; 187 struct qat_asym_request *req = (void *)(__force long)resp->opaque;
120 struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64); 188 struct kpp_request *areq = req->areq.dh;
121 struct device *dev = &GET_DEV(req->ctx->inst->accel_dev); 189 struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
122 int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET( 190 int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
123 resp->pke_resp_hdr.comn_resp_flags); 191 resp->pke_resp_hdr.comn_resp_flags);
124 192
125 err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL; 193 err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
126 194
127 if (req->src_align) 195 if (areq->src) {
128 dma_free_coherent(dev, req->ctx->key_sz, req->src_align, 196 if (req->src_align)
129 req->in.enc.m); 197 dma_free_coherent(dev, req->ctx.dh->p_size,
130 else 198 req->src_align, req->in.dh.in.b);
131 dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz, 199 else
132 DMA_TO_DEVICE); 200 dma_unmap_single(dev, req->in.dh.in.b,
201 req->ctx.dh->p_size, DMA_TO_DEVICE);
202 }
133 203
134 areq->dst_len = req->ctx->key_sz; 204 areq->dst_len = req->ctx.dh->p_size;
135 if (req->dst_align) { 205 if (req->dst_align) {
136 char *ptr = req->dst_align; 206 scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
207 areq->dst_len, 1);
137 208
138 while (!(*ptr) && areq->dst_len) { 209 dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
139 areq->dst_len--; 210 req->out.dh.r);
140 ptr++; 211 } else {
141 } 212 dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
213 DMA_FROM_DEVICE);
214 }
142 215
143 if (areq->dst_len != req->ctx->key_sz) 216 dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
144 memmove(req->dst_align, ptr, areq->dst_len); 217 DMA_TO_DEVICE);
218 dma_unmap_single(dev, req->phy_out,
219 sizeof(struct qat_dh_output_params),
220 DMA_TO_DEVICE);
145 221
146 scatterwalk_map_and_copy(req->dst_align, areq->dst, 0, 222 kpp_request_complete(areq, err);
147 areq->dst_len, 1); 223}
224
225#define PKE_DH_1536 0x390c1a49
226#define PKE_DH_G2_1536 0x2e0b1a3e
227#define PKE_DH_2048 0x4d0c1a60
228#define PKE_DH_G2_2048 0x3e0b1a55
229#define PKE_DH_3072 0x510c1a77
230#define PKE_DH_G2_3072 0x3a0b1a6c
231#define PKE_DH_4096 0x690c1a8e
232#define PKE_DH_G2_4096 0x4a0b1a83
233
234static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
235{
236 unsigned int bitslen = len << 3;
237
238 switch (bitslen) {
239 case 1536:
240 return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
241 case 2048:
242 return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
243 case 3072:
244 return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
245 case 4096:
246 return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
247 default:
248 return 0;
249 };
250}
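
The lookup keys off the modulus length in bytes: a 2048-bit group arrives with p_size = 256, so bitslen = 256 << 3 = 2048 and the switch selects PKE_DH_2048, or PKE_DH_G2_2048 when the generator-equals-2 fast path applies; any other size returns 0 and qat_dh_compute_value() rejects the request with -EINVAL.
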
251
252static inline struct qat_dh_ctx *qat_dh_get_params(struct crypto_kpp *tfm)
253{
254 return kpp_tfm_ctx(tfm);
255}
256
257static int qat_dh_compute_value(struct kpp_request *req)
258{
259 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
260 struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
261 struct qat_crypto_instance *inst = ctx->inst;
262 struct device *dev = &GET_DEV(inst->accel_dev);
263 struct qat_asym_request *qat_req =
264 PTR_ALIGN(kpp_request_ctx(req), 64);
265 struct icp_qat_fw_pke_request *msg = &qat_req->req;
266 int ret, ctr = 0;
267 int n_input_params = 0;
268
269 if (unlikely(!ctx->xa))
270 return -EINVAL;
271
272 if (req->dst_len < ctx->p_size) {
273 req->dst_len = ctx->p_size;
274 return -EOVERFLOW;
275 }
276 memset(msg, '\0', sizeof(*msg));
277 ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
278 ICP_QAT_FW_COMN_REQ_FLAG_SET);
279
280 msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
281 !req->src && ctx->g2);
282 if (unlikely(!msg->pke_hdr.cd_pars.func_id))
283 return -EINVAL;
284
285 qat_req->cb = qat_dh_cb;
286 qat_req->ctx.dh = ctx;
287 qat_req->areq.dh = req;
288 msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
289 msg->pke_hdr.comn_req_flags =
290 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
291 QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
148 292
149 dma_free_coherent(dev, req->ctx->key_sz, req->dst_align, 293 /*
150 req->out.enc.c); 294 * If no source is provided use g as base
295 */
296 if (req->src) {
297 qat_req->in.dh.in.xa = ctx->dma_xa;
298 qat_req->in.dh.in.p = ctx->dma_p;
299 n_input_params = 3;
151 } else { 300 } else {
152 char *ptr = sg_virt(areq->dst); 301 if (ctx->g2) {
302 qat_req->in.dh.in_g2.xa = ctx->dma_xa;
303 qat_req->in.dh.in_g2.p = ctx->dma_p;
304 n_input_params = 2;
305 } else {
306 qat_req->in.dh.in.b = ctx->dma_g;
307 qat_req->in.dh.in.xa = ctx->dma_xa;
308 qat_req->in.dh.in.p = ctx->dma_p;
309 n_input_params = 3;
310 }
311 }
153 312
154 while (!(*ptr) && areq->dst_len) { 313 ret = -ENOMEM;
155 areq->dst_len--; 314 if (req->src) {
156 ptr++; 315 /*
316		 * src can be of any size in the valid range, but the HW expects
317		 * it to be the same size as the modulus p, so if it differs we
318		 * need to allocate a new buffer and copy the src data into it.
319		 * Otherwise we just map the user-provided buffer.
320		 * We also need to make sure it is a contiguous buffer.
321 */
322 if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
323 qat_req->src_align = NULL;
324 qat_req->in.dh.in.b = dma_map_single(dev,
325 sg_virt(req->src),
326 req->src_len,
327 DMA_TO_DEVICE);
328 if (unlikely(dma_mapping_error(dev,
329 qat_req->in.dh.in.b)))
330 return ret;
331
332 } else {
333 int shift = ctx->p_size - req->src_len;
334
335 qat_req->src_align = dma_zalloc_coherent(dev,
336 ctx->p_size,
337 &qat_req->in.dh.in.b,
338 GFP_KERNEL);
339 if (unlikely(!qat_req->src_align))
340 return ret;
341
342 scatterwalk_map_and_copy(qat_req->src_align + shift,
343 req->src, 0, req->src_len, 0);
157 } 344 }
345 }
346 /*
347	 * dst can be of any size in the valid range, but the HW expects it to
348	 * be the same size as the modulus p, so if it differs we need to
349	 * allocate a new buffer and copy the result back on completion.
350	 * Otherwise we just map the user-provided buffer.
351	 * We also need to make sure it is a contiguous buffer.
352 */
353 if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
354 qat_req->dst_align = NULL;
355 qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
356 req->dst_len,
357 DMA_FROM_DEVICE);
158 358
159 if (sg_virt(areq->dst) != ptr && areq->dst_len) 359 if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
160 memmove(sg_virt(areq->dst), ptr, areq->dst_len); 360 goto unmap_src;
361
362 } else {
363 qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
364 &qat_req->out.dh.r,
365 GFP_KERNEL);
366 if (unlikely(!qat_req->dst_align))
367 goto unmap_src;
368 }
161 369
162 dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz, 370 qat_req->in.dh.in_tab[n_input_params] = 0;
371 qat_req->out.dh.out_tab[1] = 0;
372 /* Mapping in.in.b or in.in_g2.xa is the same */
373 qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
374 sizeof(struct qat_dh_input_params),
375 DMA_TO_DEVICE);
376 if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
377 goto unmap_dst;
378
379 qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
380 sizeof(struct qat_dh_output_params),
381 DMA_TO_DEVICE);
382 if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
383 goto unmap_in_params;
384
385 msg->pke_mid.src_data_addr = qat_req->phy_in;
386 msg->pke_mid.dest_data_addr = qat_req->phy_out;
387 msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
388 msg->input_param_count = n_input_params;
389 msg->output_param_count = 1;
390
391 do {
392 ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
393 } while (ret == -EBUSY && ctr++ < 100);
394
395 if (!ret)
396 return -EINPROGRESS;
397
398 if (!dma_mapping_error(dev, qat_req->phy_out))
399 dma_unmap_single(dev, qat_req->phy_out,
400 sizeof(struct qat_dh_output_params),
401 DMA_TO_DEVICE);
402unmap_in_params:
403 if (!dma_mapping_error(dev, qat_req->phy_in))
404 dma_unmap_single(dev, qat_req->phy_in,
405 sizeof(struct qat_dh_input_params),
406 DMA_TO_DEVICE);
407unmap_dst:
408 if (qat_req->dst_align)
409 dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
410 qat_req->out.dh.r);
411 else
412 if (!dma_mapping_error(dev, qat_req->out.dh.r))
413 dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
414 DMA_FROM_DEVICE);
415unmap_src:
416 if (req->src) {
417 if (qat_req->src_align)
418 dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
419 qat_req->in.dh.in.b);
420 else
421 if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
422 dma_unmap_single(dev, qat_req->in.dh.in.b,
423 ctx->p_size,
424 DMA_TO_DEVICE);
425 }
426 return ret;
427}
428
429static int qat_dh_check_params_length(unsigned int p_len)
430{
431 switch (p_len) {
432 case 1536:
433 case 2048:
434 case 3072:
435 case 4096:
436 return 0;
437 }
438 return -EINVAL;
439}
440
441static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
442{
443 struct qat_crypto_instance *inst = ctx->inst;
444 struct device *dev = &GET_DEV(inst->accel_dev);
445
446 if (unlikely(!params->p || !params->g))
447 return -EINVAL;
448
449 if (qat_dh_check_params_length(params->p_size << 3))
450 return -EINVAL;
451
452 ctx->p_size = params->p_size;
453 ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
454 if (!ctx->p)
455 return -ENOMEM;
456 memcpy(ctx->p, params->p, ctx->p_size);
457
458 /* If g equals 2 don't copy it */
459 if (params->g_size == 1 && *(char *)params->g == 0x02) {
460 ctx->g2 = true;
461 return 0;
462 }
463
464 ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
465 if (!ctx->g) {
466 dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
467 ctx->p = NULL;
468 return -ENOMEM;
469 }
470 memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
471 params->g_size);
472
473 return 0;
474}
475
476static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
477{
478 if (ctx->g) {
479 dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
480 ctx->g = NULL;
481 }
482 if (ctx->xa) {
483 dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
484 ctx->xa = NULL;
485 }
486 if (ctx->p) {
487 dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
488 ctx->p = NULL;
489 }
490 ctx->p_size = 0;
491 ctx->g2 = false;
492}
493
494static int qat_dh_set_secret(struct crypto_kpp *tfm, void *buf,
495 unsigned int len)
496{
497 struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
498 struct device *dev = &GET_DEV(ctx->inst->accel_dev);
499 struct dh params;
500 int ret;
501
502 if (crypto_dh_decode_key(buf, len, &params) < 0)
503 return -EINVAL;
504
505 /* Free old secret if any */
506 qat_dh_clear_ctx(dev, ctx);
507
508 ret = qat_dh_set_params(ctx, &params);
509 if (ret < 0)
510 return ret;
511
512 ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
513 GFP_KERNEL);
514 if (!ctx->xa) {
515 qat_dh_clear_ctx(dev, ctx);
516 return -ENOMEM;
517 }
518 memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
519 params.key_size);
520
521 return 0;
522}
523
524static int qat_dh_max_size(struct crypto_kpp *tfm)
525{
526 struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
527
528 return ctx->p ? ctx->p_size : -EINVAL;
529}
530
531static int qat_dh_init_tfm(struct crypto_kpp *tfm)
532{
533 struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
534 struct qat_crypto_instance *inst =
535 qat_crypto_get_instance_node(get_current_node());
536
537 if (!inst)
538 return -EINVAL;
539
540 ctx->p_size = 0;
541 ctx->g2 = false;
542 ctx->inst = inst;
543 return 0;
544}
545
546static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
547{
548 struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
549 struct device *dev = &GET_DEV(ctx->inst->accel_dev);
550
551 qat_dh_clear_ctx(dev, ctx);
552 qat_crypto_put_instance(ctx->inst);
553}
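
For context, a minimal in-kernel consumer of the kpp API could exercise this DH implementation roughly as follows. This is an illustrative sketch only: error handling is trimmed, asynchronous completion (-EINPROGRESS) is not waited for, and the packed-key helpers from <crypto/dh.h> are assumed to be available as in this series.

static int qat_dh_example(const struct dh *params,
			  struct scatterlist *dst, unsigned int dst_len)
{
	struct crypto_kpp *tfm;
	struct kpp_request *kreq;
	char *packed;
	unsigned int packed_len;
	int err;

	tfm = crypto_alloc_kpp("dh", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Pack p/g/xa into the format crypto_kpp_set_secret() expects. */
	packed_len = crypto_dh_key_len(params);
	packed = kmalloc(packed_len, GFP_KERNEL);
	if (!packed) {
		err = -ENOMEM;
		goto free_tfm;
	}

	err = crypto_dh_encode_key(packed, packed_len, params);
	if (err)
		goto free_packed;

	err = crypto_kpp_set_secret(tfm, packed, packed_len);
	if (err)
		goto free_packed;

	kreq = kpp_request_alloc(tfm, GFP_KERNEL);
	if (!kreq) {
		err = -ENOMEM;
		goto free_packed;
	}

	/* No input scatterlist: the driver uses g (or the g == 2 path). */
	kpp_request_set_input(kreq, NULL, 0);
	kpp_request_set_output(kreq, dst, dst_len);
	err = crypto_kpp_generate_public_key(kreq);

	kpp_request_free(kreq);
free_packed:
	kzfree(packed);
free_tfm:
	crypto_free_kpp(tfm);
	return err;
}
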
554
555static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
556{
557 struct qat_asym_request *req = (void *)(__force long)resp->opaque;
558 struct akcipher_request *areq = req->areq.rsa;
559 struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
560 int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
561 resp->pke_resp_hdr.comn_resp_flags);
562
563 err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
564
565 if (req->src_align)
566 dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
567 req->in.rsa.enc.m);
568 else
569 dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
570 DMA_TO_DEVICE);
571
572 areq->dst_len = req->ctx.rsa->key_sz;
573 if (req->dst_align) {
574 scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
575 areq->dst_len, 1);
576
577 dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
578 req->out.rsa.enc.c);
579 } else {
580 dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
163 DMA_FROM_DEVICE); 581 DMA_FROM_DEVICE);
164 } 582 }
165 583
@@ -175,8 +593,9 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
175void qat_alg_asym_callback(void *_resp) 593void qat_alg_asym_callback(void *_resp)
176{ 594{
177 struct icp_qat_fw_pke_resp *resp = _resp; 595 struct icp_qat_fw_pke_resp *resp = _resp;
596 struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
178 597
179 qat_rsa_cb(resp); 598 areq->cb(resp);
180} 599}
181 600
182#define PKE_RSA_EP_512 0x1c161b21 601#define PKE_RSA_EP_512 0x1c161b21
@@ -237,13 +656,42 @@ static unsigned long qat_rsa_dec_fn_id(unsigned int len)
237 }; 656 };
238} 657}
239 658
659#define PKE_RSA_DP2_512 0x1c131b57
660#define PKE_RSA_DP2_1024 0x26131c2d
661#define PKE_RSA_DP2_1536 0x45111d12
662#define PKE_RSA_DP2_2048 0x59121dfa
663#define PKE_RSA_DP2_3072 0x81121ed9
664#define PKE_RSA_DP2_4096 0xb1111fb2
665
666static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
667{
668 unsigned int bitslen = len << 3;
669
670 switch (bitslen) {
671 case 512:
672 return PKE_RSA_DP2_512;
673 case 1024:
674 return PKE_RSA_DP2_1024;
675 case 1536:
676 return PKE_RSA_DP2_1536;
677 case 2048:
678 return PKE_RSA_DP2_2048;
679 case 3072:
680 return PKE_RSA_DP2_3072;
681 case 4096:
682 return PKE_RSA_DP2_4096;
683 default:
684 return 0;
685 };
686}
687
240static int qat_rsa_enc(struct akcipher_request *req) 688static int qat_rsa_enc(struct akcipher_request *req)
241{ 689{
242 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 690 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
243 struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); 691 struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
244 struct qat_crypto_instance *inst = ctx->inst; 692 struct qat_crypto_instance *inst = ctx->inst;
245 struct device *dev = &GET_DEV(inst->accel_dev); 693 struct device *dev = &GET_DEV(inst->accel_dev);
246 struct qat_rsa_request *qat_req = 694 struct qat_asym_request *qat_req =
247 PTR_ALIGN(akcipher_request_ctx(req), 64); 695 PTR_ALIGN(akcipher_request_ctx(req), 64);
248 struct icp_qat_fw_pke_request *msg = &qat_req->req; 696 struct icp_qat_fw_pke_request *msg = &qat_req->req;
249 int ret, ctr = 0; 697 int ret, ctr = 0;
@@ -262,14 +710,16 @@ static int qat_rsa_enc(struct akcipher_request *req)
262 if (unlikely(!msg->pke_hdr.cd_pars.func_id)) 710 if (unlikely(!msg->pke_hdr.cd_pars.func_id))
263 return -EINVAL; 711 return -EINVAL;
264 712
265 qat_req->ctx = ctx; 713 qat_req->cb = qat_rsa_cb;
714 qat_req->ctx.rsa = ctx;
715 qat_req->areq.rsa = req;
266 msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; 716 msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
267 msg->pke_hdr.comn_req_flags = 717 msg->pke_hdr.comn_req_flags =
268 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT, 718 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
269 QAT_COMN_CD_FLD_TYPE_64BIT_ADR); 719 QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
270 720
271 qat_req->in.enc.e = ctx->dma_e; 721 qat_req->in.rsa.enc.e = ctx->dma_e;
272 qat_req->in.enc.n = ctx->dma_n; 722 qat_req->in.rsa.enc.n = ctx->dma_n;
273 ret = -ENOMEM; 723 ret = -ENOMEM;
274 724
275 /* 725 /*
@@ -281,16 +731,16 @@ static int qat_rsa_enc(struct akcipher_request *req)
281 */ 731 */
282 if (sg_is_last(req->src) && req->src_len == ctx->key_sz) { 732 if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
283 qat_req->src_align = NULL; 733 qat_req->src_align = NULL;
284 qat_req->in.enc.m = dma_map_single(dev, sg_virt(req->src), 734 qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
285 req->src_len, DMA_TO_DEVICE); 735 req->src_len, DMA_TO_DEVICE);
286 if (unlikely(dma_mapping_error(dev, qat_req->in.enc.m))) 736 if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
287 return ret; 737 return ret;
288 738
289 } else { 739 } else {
290 int shift = ctx->key_sz - req->src_len; 740 int shift = ctx->key_sz - req->src_len;
291 741
292 qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, 742 qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
293 &qat_req->in.enc.m, 743 &qat_req->in.rsa.enc.m,
294 GFP_KERNEL); 744 GFP_KERNEL);
295 if (unlikely(!qat_req->src_align)) 745 if (unlikely(!qat_req->src_align))
296 return ret; 746 return ret;
@@ -300,30 +750,30 @@ static int qat_rsa_enc(struct akcipher_request *req)
300 } 750 }
301 if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) { 751 if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
302 qat_req->dst_align = NULL; 752 qat_req->dst_align = NULL;
303 qat_req->out.enc.c = dma_map_single(dev, sg_virt(req->dst), 753 qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
304 req->dst_len, 754 req->dst_len,
305 DMA_FROM_DEVICE); 755 DMA_FROM_DEVICE);
306 756
307 if (unlikely(dma_mapping_error(dev, qat_req->out.enc.c))) 757 if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
308 goto unmap_src; 758 goto unmap_src;
309 759
310 } else { 760 } else {
311 qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, 761 qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
312 &qat_req->out.enc.c, 762 &qat_req->out.rsa.enc.c,
313 GFP_KERNEL); 763 GFP_KERNEL);
314 if (unlikely(!qat_req->dst_align)) 764 if (unlikely(!qat_req->dst_align))
315 goto unmap_src; 765 goto unmap_src;
316 766
317 } 767 }
318 qat_req->in.in_tab[3] = 0; 768 qat_req->in.rsa.in_tab[3] = 0;
319 qat_req->out.out_tab[1] = 0; 769 qat_req->out.rsa.out_tab[1] = 0;
320 qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m, 770 qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
321 sizeof(struct qat_rsa_input_params), 771 sizeof(struct qat_rsa_input_params),
322 DMA_TO_DEVICE); 772 DMA_TO_DEVICE);
323 if (unlikely(dma_mapping_error(dev, qat_req->phy_in))) 773 if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
324 goto unmap_dst; 774 goto unmap_dst;
325 775
326 qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c, 776 qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
327 sizeof(struct qat_rsa_output_params), 777 sizeof(struct qat_rsa_output_params),
328 DMA_TO_DEVICE); 778 DMA_TO_DEVICE);
329 if (unlikely(dma_mapping_error(dev, qat_req->phy_out))) 779 if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
@@ -331,7 +781,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
331 781
332 msg->pke_mid.src_data_addr = qat_req->phy_in; 782 msg->pke_mid.src_data_addr = qat_req->phy_in;
333 msg->pke_mid.dest_data_addr = qat_req->phy_out; 783 msg->pke_mid.dest_data_addr = qat_req->phy_out;
334 msg->pke_mid.opaque = (uint64_t)(__force long)req; 784 msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
335 msg->input_param_count = 3; 785 msg->input_param_count = 3;
336 msg->output_param_count = 1; 786 msg->output_param_count = 1;
337 do { 787 do {
@@ -353,19 +803,19 @@ unmap_in_params:
353unmap_dst: 803unmap_dst:
354 if (qat_req->dst_align) 804 if (qat_req->dst_align)
355 dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align, 805 dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
356 qat_req->out.enc.c); 806 qat_req->out.rsa.enc.c);
357 else 807 else
358 if (!dma_mapping_error(dev, qat_req->out.enc.c)) 808 if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
359 dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz, 809 dma_unmap_single(dev, qat_req->out.rsa.enc.c,
360 DMA_FROM_DEVICE); 810 ctx->key_sz, DMA_FROM_DEVICE);
361unmap_src: 811unmap_src:
362 if (qat_req->src_align) 812 if (qat_req->src_align)
363 dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, 813 dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
364 qat_req->in.enc.m); 814 qat_req->in.rsa.enc.m);
365 else 815 else
366 if (!dma_mapping_error(dev, qat_req->in.enc.m)) 816 if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
367 dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz, 817 dma_unmap_single(dev, qat_req->in.rsa.enc.m,
368 DMA_TO_DEVICE); 818 ctx->key_sz, DMA_TO_DEVICE);
369 return ret; 819 return ret;
370} 820}
371 821
@@ -375,7 +825,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
375 struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); 825 struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
376 struct qat_crypto_instance *inst = ctx->inst; 826 struct qat_crypto_instance *inst = ctx->inst;
377 struct device *dev = &GET_DEV(inst->accel_dev); 827 struct device *dev = &GET_DEV(inst->accel_dev);
378 struct qat_rsa_request *qat_req = 828 struct qat_asym_request *qat_req =
379 PTR_ALIGN(akcipher_request_ctx(req), 64); 829 PTR_ALIGN(akcipher_request_ctx(req), 64);
380 struct icp_qat_fw_pke_request *msg = &qat_req->req; 830 struct icp_qat_fw_pke_request *msg = &qat_req->req;
381 int ret, ctr = 0; 831 int ret, ctr = 0;
@@ -390,18 +840,30 @@ static int qat_rsa_dec(struct akcipher_request *req)
390 memset(msg, '\0', sizeof(*msg)); 840 memset(msg, '\0', sizeof(*msg));
391 ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, 841 ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
392 ICP_QAT_FW_COMN_REQ_FLAG_SET); 842 ICP_QAT_FW_COMN_REQ_FLAG_SET);
393 msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz); 843 msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
844 qat_rsa_dec_fn_id_crt(ctx->key_sz) :
845 qat_rsa_dec_fn_id(ctx->key_sz);
394 if (unlikely(!msg->pke_hdr.cd_pars.func_id)) 846 if (unlikely(!msg->pke_hdr.cd_pars.func_id))
395 return -EINVAL; 847 return -EINVAL;
396 848
397 qat_req->ctx = ctx; 849 qat_req->cb = qat_rsa_cb;
850 qat_req->ctx.rsa = ctx;
851 qat_req->areq.rsa = req;
398 msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; 852 msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
399 msg->pke_hdr.comn_req_flags = 853 msg->pke_hdr.comn_req_flags =
400 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT, 854 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
401 QAT_COMN_CD_FLD_TYPE_64BIT_ADR); 855 QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
402 856
403 qat_req->in.dec.d = ctx->dma_d; 857 if (ctx->crt_mode) {
404 qat_req->in.dec.n = ctx->dma_n; 858 qat_req->in.rsa.dec_crt.p = ctx->dma_p;
859 qat_req->in.rsa.dec_crt.q = ctx->dma_q;
860 qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
861 qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
862 qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
863 } else {
864 qat_req->in.rsa.dec.d = ctx->dma_d;
865 qat_req->in.rsa.dec.n = ctx->dma_n;
866 }
405 ret = -ENOMEM; 867 ret = -ENOMEM;
406 868
407 /* 869 /*
@@ -413,16 +875,16 @@ static int qat_rsa_dec(struct akcipher_request *req)
413 */ 875 */
414 if (sg_is_last(req->src) && req->src_len == ctx->key_sz) { 876 if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
415 qat_req->src_align = NULL; 877 qat_req->src_align = NULL;
416 qat_req->in.dec.c = dma_map_single(dev, sg_virt(req->src), 878 qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
417 req->dst_len, DMA_TO_DEVICE); 879 req->dst_len, DMA_TO_DEVICE);
418 if (unlikely(dma_mapping_error(dev, qat_req->in.dec.c))) 880 if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
419 return ret; 881 return ret;
420 882
421 } else { 883 } else {
422 int shift = ctx->key_sz - req->src_len; 884 int shift = ctx->key_sz - req->src_len;
423 885
424 qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, 886 qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
425 &qat_req->in.dec.c, 887 &qat_req->in.rsa.dec.c,
426 GFP_KERNEL); 888 GFP_KERNEL);
427 if (unlikely(!qat_req->src_align)) 889 if (unlikely(!qat_req->src_align))
428 return ret; 890 return ret;
@@ -432,31 +894,34 @@ static int qat_rsa_dec(struct akcipher_request *req)
432 } 894 }
433 if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) { 895 if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
434 qat_req->dst_align = NULL; 896 qat_req->dst_align = NULL;
435 qat_req->out.dec.m = dma_map_single(dev, sg_virt(req->dst), 897 qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
436 req->dst_len, 898 req->dst_len,
437 DMA_FROM_DEVICE); 899 DMA_FROM_DEVICE);
438 900
439 if (unlikely(dma_mapping_error(dev, qat_req->out.dec.m))) 901 if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
440 goto unmap_src; 902 goto unmap_src;
441 903
442 } else { 904 } else {
443 qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, 905 qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
444 &qat_req->out.dec.m, 906 &qat_req->out.rsa.dec.m,
445 GFP_KERNEL); 907 GFP_KERNEL);
446 if (unlikely(!qat_req->dst_align)) 908 if (unlikely(!qat_req->dst_align))
447 goto unmap_src; 909 goto unmap_src;
448 910
449 } 911 }
450 912
451 qat_req->in.in_tab[3] = 0; 913 if (ctx->crt_mode)
452 qat_req->out.out_tab[1] = 0; 914 qat_req->in.rsa.in_tab[6] = 0;
453 qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c, 915 else
916 qat_req->in.rsa.in_tab[3] = 0;
917 qat_req->out.rsa.out_tab[1] = 0;
918 qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
454 sizeof(struct qat_rsa_input_params), 919 sizeof(struct qat_rsa_input_params),
455 DMA_TO_DEVICE); 920 DMA_TO_DEVICE);
456 if (unlikely(dma_mapping_error(dev, qat_req->phy_in))) 921 if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
457 goto unmap_dst; 922 goto unmap_dst;
458 923
459 qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m, 924 qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
460 sizeof(struct qat_rsa_output_params), 925 sizeof(struct qat_rsa_output_params),
461 DMA_TO_DEVICE); 926 DMA_TO_DEVICE);
462 if (unlikely(dma_mapping_error(dev, qat_req->phy_out))) 927 if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
@@ -464,8 +929,12 @@ static int qat_rsa_dec(struct akcipher_request *req)
464 929
465 msg->pke_mid.src_data_addr = qat_req->phy_in; 930 msg->pke_mid.src_data_addr = qat_req->phy_in;
466 msg->pke_mid.dest_data_addr = qat_req->phy_out; 931 msg->pke_mid.dest_data_addr = qat_req->phy_out;
467 msg->pke_mid.opaque = (uint64_t)(__force long)req; 932 msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
468 msg->input_param_count = 3; 933 if (ctx->crt_mode)
934 msg->input_param_count = 6;
935 else
936 msg->input_param_count = 3;
937
469 msg->output_param_count = 1; 938 msg->output_param_count = 1;
470 do { 939 do {
471 ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); 940 ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
@@ -486,26 +955,24 @@ unmap_in_params:
486unmap_dst: 955unmap_dst:
487 if (qat_req->dst_align) 956 if (qat_req->dst_align)
488 dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align, 957 dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
489 qat_req->out.dec.m); 958 qat_req->out.rsa.dec.m);
490 else 959 else
491 if (!dma_mapping_error(dev, qat_req->out.dec.m)) 960 if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
492 dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz, 961 dma_unmap_single(dev, qat_req->out.rsa.dec.m,
493 DMA_FROM_DEVICE); 962 ctx->key_sz, DMA_FROM_DEVICE);
494unmap_src: 963unmap_src:
495 if (qat_req->src_align) 964 if (qat_req->src_align)
496 dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, 965 dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
497 qat_req->in.dec.c); 966 qat_req->in.rsa.dec.c);
498 else 967 else
499 if (!dma_mapping_error(dev, qat_req->in.dec.c)) 968 if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
500 dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz, 969 dma_unmap_single(dev, qat_req->in.rsa.dec.c,
501 DMA_TO_DEVICE); 970 ctx->key_sz, DMA_TO_DEVICE);
502 return ret; 971 return ret;
503} 972}
504 973
505int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag, 974int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
506 const void *value, size_t vlen)
507{ 975{
508 struct qat_rsa_ctx *ctx = context;
509 struct qat_crypto_instance *inst = ctx->inst; 976 struct qat_crypto_instance *inst = ctx->inst;
510 struct device *dev = &GET_DEV(inst->accel_dev); 977 struct device *dev = &GET_DEV(inst->accel_dev);
511 const char *ptr = value; 978 const char *ptr = value;
@@ -518,11 +985,6 @@ int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
518 985
519 ctx->key_sz = vlen; 986 ctx->key_sz = vlen;
520 ret = -EINVAL; 987 ret = -EINVAL;
521 /* In FIPS mode only allow key size 2K & 3K */
522 if (fips_enabled && (ctx->key_sz != 256 && ctx->key_sz != 384)) {
523 pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
524 goto err;
525 }
526 /* invalid key size provided */ 988 /* invalid key size provided */
527 if (!qat_rsa_enc_fn_id(ctx->key_sz)) 989 if (!qat_rsa_enc_fn_id(ctx->key_sz))
528 goto err; 990 goto err;
@@ -540,10 +1002,8 @@ err:
540 return ret; 1002 return ret;
541} 1003}
542 1004
543int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag, 1005int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
544 const void *value, size_t vlen)
545{ 1006{
546 struct qat_rsa_ctx *ctx = context;
547 struct qat_crypto_instance *inst = ctx->inst; 1007 struct qat_crypto_instance *inst = ctx->inst;
548 struct device *dev = &GET_DEV(inst->accel_dev); 1008 struct device *dev = &GET_DEV(inst->accel_dev);
549 const char *ptr = value; 1009 const char *ptr = value;
@@ -559,18 +1019,15 @@ int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
559 } 1019 }
560 1020
561 ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); 1021 ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
562 if (!ctx->e) { 1022 if (!ctx->e)
563 ctx->e = NULL;
564 return -ENOMEM; 1023 return -ENOMEM;
565 } 1024
566 memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen); 1025 memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
567 return 0; 1026 return 0;
568} 1027}
569 1028
570int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag, 1029int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
571 const void *value, size_t vlen)
572{ 1030{
573 struct qat_rsa_ctx *ctx = context;
574 struct qat_crypto_instance *inst = ctx->inst; 1031 struct qat_crypto_instance *inst = ctx->inst;
575 struct device *dev = &GET_DEV(inst->accel_dev); 1032 struct device *dev = &GET_DEV(inst->accel_dev);
576 const char *ptr = value; 1033 const char *ptr = value;
@@ -585,12 +1042,6 @@ int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
585 if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) 1042 if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
586 goto err; 1043 goto err;
587 1044
588 /* In FIPS mode only allow key size 2K & 3K */
589 if (fips_enabled && (vlen != 256 && vlen != 384)) {
590 pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
591 goto err;
592 }
593
594 ret = -ENOMEM; 1045 ret = -ENOMEM;
595 ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); 1046 ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
596 if (!ctx->d) 1047 if (!ctx->d)
@@ -603,12 +1054,106 @@ err:
603 return ret; 1054 return ret;
604} 1055}
605 1056
606static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key, 1057static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
607 unsigned int keylen, bool private)
608{ 1058{
609 struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); 1059 while (!**ptr && *len) {
610 struct device *dev = &GET_DEV(ctx->inst->accel_dev); 1060 (*ptr)++;
611 int ret; 1061 (*len)--;
1062 }
1063}
1064
1065static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
1066{
1067 struct qat_crypto_instance *inst = ctx->inst;
1068 struct device *dev = &GET_DEV(inst->accel_dev);
1069 const char *ptr;
1070 unsigned int len;
1071 unsigned int half_key_sz = ctx->key_sz / 2;
1072
1073 /* p */
1074 ptr = rsa_key->p;
1075 len = rsa_key->p_sz;
1076 qat_rsa_drop_leading_zeros(&ptr, &len);
1077 if (!len)
1078 goto err;
1079 ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
1080 if (!ctx->p)
1081 goto err;
1082 memcpy(ctx->p + (half_key_sz - len), ptr, len);
1083
1084 /* q */
1085 ptr = rsa_key->q;
1086 len = rsa_key->q_sz;
1087 qat_rsa_drop_leading_zeros(&ptr, &len);
1088 if (!len)
1089 goto free_p;
1090 ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
1091 if (!ctx->q)
1092 goto free_p;
1093 memcpy(ctx->q + (half_key_sz - len), ptr, len);
1094
1095 /* dp */
1096 ptr = rsa_key->dp;
1097 len = rsa_key->dp_sz;
1098 qat_rsa_drop_leading_zeros(&ptr, &len);
1099 if (!len)
1100 goto free_q;
1101 ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp,
1102 GFP_KERNEL);
1103 if (!ctx->dp)
1104 goto free_q;
1105 memcpy(ctx->dp + (half_key_sz - len), ptr, len);
1106
1107 /* dq */
1108 ptr = rsa_key->dq;
1109 len = rsa_key->dq_sz;
1110 qat_rsa_drop_leading_zeros(&ptr, &len);
1111 if (!len)
1112 goto free_dp;
1113 ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq,
1114 GFP_KERNEL);
1115 if (!ctx->dq)
1116 goto free_dp;
1117 memcpy(ctx->dq + (half_key_sz - len), ptr, len);
1118
1119 /* qinv */
1120 ptr = rsa_key->qinv;
1121 len = rsa_key->qinv_sz;
1122 qat_rsa_drop_leading_zeros(&ptr, &len);
1123 if (!len)
1124 goto free_dq;
1125 ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
1126 GFP_KERNEL);
1127 if (!ctx->qinv)
1128 goto free_dq;
1129 memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
1130
1131 ctx->crt_mode = true;
1132 return;
1133
1134free_dq:
1135 memset(ctx->dq, '\0', half_key_sz);
1136 dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
1137 ctx->dq = NULL;
1138free_dp:
1139 memset(ctx->dp, '\0', half_key_sz);
1140 dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
1141 ctx->dp = NULL;
1142free_q:
1143 memset(ctx->q, '\0', half_key_sz);
1144 dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
1145 ctx->q = NULL;
1146free_p:
1147 memset(ctx->p, '\0', half_key_sz);
1148 dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
1149 ctx->p = NULL;
1150err:
1151 ctx->crt_mode = false;
1152}
1153
1154static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
1155{
1156 unsigned int half_key_sz = ctx->key_sz / 2;
612 1157
613 /* Free the old key if any */ 1158 /* Free the old key if any */
614 if (ctx->n) 1159 if (ctx->n)
@@ -619,19 +1164,68 @@ static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
619 memset(ctx->d, '\0', ctx->key_sz); 1164 memset(ctx->d, '\0', ctx->key_sz);
620 dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); 1165 dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
621 } 1166 }
1167 if (ctx->p) {
1168 memset(ctx->p, '\0', half_key_sz);
1169 dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
1170 }
1171 if (ctx->q) {
1172 memset(ctx->q, '\0', half_key_sz);
1173 dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
1174 }
1175 if (ctx->dp) {
1176 memset(ctx->dp, '\0', half_key_sz);
1177 dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
1178 }
1179 if (ctx->dq) {
1180 memset(ctx->dq, '\0', half_key_sz);
1181 dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
1182 }
1183 if (ctx->qinv) {
1184 memset(ctx->qinv, '\0', half_key_sz);
1185 dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
1186 }
622 1187
623 ctx->n = NULL; 1188 ctx->n = NULL;
624 ctx->e = NULL; 1189 ctx->e = NULL;
625 ctx->d = NULL; 1190 ctx->d = NULL;
1191 ctx->p = NULL;
1192 ctx->q = NULL;
1193 ctx->dp = NULL;
1194 ctx->dq = NULL;
1195 ctx->qinv = NULL;
1196 ctx->crt_mode = false;
1197 ctx->key_sz = 0;
1198}
1199
1200static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
1201 unsigned int keylen, bool private)
1202{
1203 struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
1204 struct device *dev = &GET_DEV(ctx->inst->accel_dev);
1205 struct rsa_key rsa_key;
1206 int ret;
1207
1208 qat_rsa_clear_ctx(dev, ctx);
626 1209
627 if (private) 1210 if (private)
628 ret = asn1_ber_decoder(&qat_rsaprivkey_decoder, ctx, key, 1211 ret = rsa_parse_priv_key(&rsa_key, key, keylen);
629 keylen);
630 else 1212 else
631 ret = asn1_ber_decoder(&qat_rsapubkey_decoder, ctx, key, 1213 ret = rsa_parse_pub_key(&rsa_key, key, keylen);
632 keylen); 1214 if (ret < 0)
1215 goto free;
1216
1217 ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
633 if (ret < 0) 1218 if (ret < 0)
634 goto free; 1219 goto free;
1220 ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
1221 if (ret < 0)
1222 goto free;
1223 if (private) {
1224 ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
1225 if (ret < 0)
1226 goto free;
1227 qat_rsa_setkey_crt(ctx, &rsa_key);
1228 }
635 1229
636 if (!ctx->n || !ctx->e) { 1230 if (!ctx->n || !ctx->e) {
637 /* invalid key provided */ 1231 /* invalid key provided */
@@ -646,20 +1240,7 @@ static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
646 1240
647 return 0; 1241 return 0;
648free: 1242free:
649 if (ctx->d) { 1243 qat_rsa_clear_ctx(dev, ctx);
650 memset(ctx->d, '\0', ctx->key_sz);
651 dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
652 ctx->d = NULL;
653 }
654 if (ctx->e) {
655 dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
656 ctx->e = NULL;
657 }
658 if (ctx->n) {
659 dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
660 ctx->n = NULL;
661 ctx->key_sz = 0;
662 }
663 return ret; 1244 return ret;
664} 1245}
665 1246
@@ -725,7 +1306,7 @@ static struct akcipher_alg rsa = {
725 .max_size = qat_rsa_max_size, 1306 .max_size = qat_rsa_max_size,
726 .init = qat_rsa_init_tfm, 1307 .init = qat_rsa_init_tfm,
727 .exit = qat_rsa_exit_tfm, 1308 .exit = qat_rsa_exit_tfm,
728 .reqsize = sizeof(struct qat_rsa_request) + 64, 1309 .reqsize = sizeof(struct qat_asym_request) + 64,
729 .base = { 1310 .base = {
730 .cra_name = "rsa", 1311 .cra_name = "rsa",
731 .cra_driver_name = "qat-rsa", 1312 .cra_driver_name = "qat-rsa",
@@ -735,6 +1316,23 @@ static struct akcipher_alg rsa = {
735 }, 1316 },
736}; 1317};
737 1318
1319static struct kpp_alg dh = {
1320 .set_secret = qat_dh_set_secret,
1321 .generate_public_key = qat_dh_compute_value,
1322 .compute_shared_secret = qat_dh_compute_value,
1323 .max_size = qat_dh_max_size,
1324 .init = qat_dh_init_tfm,
1325 .exit = qat_dh_exit_tfm,
1326 .reqsize = sizeof(struct qat_asym_request) + 64,
1327 .base = {
1328 .cra_name = "dh",
1329 .cra_driver_name = "qat-dh",
1330 .cra_priority = 1000,
1331 .cra_module = THIS_MODULE,
1332 .cra_ctxsize = sizeof(struct qat_dh_ctx),
1333 },
1334};
1335
738int qat_asym_algs_register(void) 1336int qat_asym_algs_register(void)
739{ 1337{
740 int ret = 0; 1338 int ret = 0;
@@ -743,7 +1341,11 @@ int qat_asym_algs_register(void)
743 if (++active_devs == 1) { 1341 if (++active_devs == 1) {
744 rsa.base.cra_flags = 0; 1342 rsa.base.cra_flags = 0;
745 ret = crypto_register_akcipher(&rsa); 1343 ret = crypto_register_akcipher(&rsa);
1344 if (ret)
1345 goto unlock;
1346 ret = crypto_register_kpp(&dh);
746 } 1347 }
1348unlock:
747 mutex_unlock(&algs_lock); 1349 mutex_unlock(&algs_lock);
748 return ret; 1350 return ret;
749} 1351}
@@ -751,7 +1353,9 @@ int qat_asym_algs_register(void)
751void qat_asym_algs_unregister(void) 1353void qat_asym_algs_unregister(void)
752{ 1354{
753 mutex_lock(&algs_lock); 1355 mutex_lock(&algs_lock);
754 if (--active_devs == 0) 1356 if (--active_devs == 0) {
755 crypto_unregister_akcipher(&rsa); 1357 crypto_unregister_akcipher(&rsa);
1358 crypto_unregister_kpp(&dh);
1359 }
756 mutex_unlock(&algs_lock); 1360 mutex_unlock(&algs_lock);
757} 1361}
diff --git a/drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1 b/drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1
deleted file mode 100644
index f0066adb79b8..000000000000
--- a/drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1
+++ /dev/null
@@ -1,11 +0,0 @@
1RsaPrivKey ::= SEQUENCE {
2 version INTEGER,
3 n INTEGER ({ qat_rsa_get_n }),
4 e INTEGER ({ qat_rsa_get_e }),
5 d INTEGER ({ qat_rsa_get_d }),
6 prime1 INTEGER,
7 prime2 INTEGER,
8 exponent1 INTEGER,
9 exponent2 INTEGER,
10 coefficient INTEGER
11}
diff --git a/drivers/crypto/qat/qat_common/qat_rsapubkey.asn1 b/drivers/crypto/qat/qat_common/qat_rsapubkey.asn1
deleted file mode 100644
index bd667b31a21a..000000000000
--- a/drivers/crypto/qat/qat_common/qat_rsapubkey.asn1
+++ /dev/null
@@ -1,4 +0,0 @@
1RsaPubKey ::= SEQUENCE {
2 n INTEGER ({ qat_rsa_get_n }),
3 e INTEGER ({ qat_rsa_get_e })
4}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 6e1d5e185526..1dfcab317bed 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -252,6 +252,7 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
252 hw_data->get_arb_mapping = adf_get_arbiter_mapping; 252 hw_data->get_arb_mapping = adf_get_arbiter_mapping;
253 hw_data->enable_ints = adf_enable_ints; 253 hw_data->enable_ints = adf_enable_ints;
254 hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; 254 hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
255 hw_data->reset_device = adf_reset_sbr;
255 hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; 256 hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
256} 257}
257 258
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index dbcbbe242bd6..b04b42f48366 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -15,8 +15,8 @@
15#include <linux/interrupt.h> 15#include <linux/interrupt.h>
16#include <linux/types.h> 16#include <linux/types.h>
17#include <crypto/aes.h> 17#include <crypto/aes.h>
18#include <crypto/algapi.h>
19#include <crypto/des.h> 18#include <crypto/des.h>
19#include <crypto/internal/skcipher.h>
20 20
21#include "cipher.h" 21#include "cipher.h"
22 22
@@ -189,7 +189,7 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
189 memcpy(ctx->enc_key, key, keylen); 189 memcpy(ctx->enc_key, key, keylen);
190 return 0; 190 return 0;
191fallback: 191fallback:
192 ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen); 192 ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
193 if (!ret) 193 if (!ret)
194 ctx->enc_keylen = keylen; 194 ctx->enc_keylen = keylen;
195 return ret; 195 return ret;
@@ -212,10 +212,16 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
212 212
213 if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 && 213 if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
214 ctx->enc_keylen != AES_KEYSIZE_256) { 214 ctx->enc_keylen != AES_KEYSIZE_256) {
215 ablkcipher_request_set_tfm(req, ctx->fallback); 215 SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
216 ret = encrypt ? crypto_ablkcipher_encrypt(req) : 216
217 crypto_ablkcipher_decrypt(req); 217 skcipher_request_set_tfm(subreq, ctx->fallback);
218 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); 218 skcipher_request_set_callback(subreq, req->base.flags,
219 NULL, NULL);
220 skcipher_request_set_crypt(subreq, req->src, req->dst,
221 req->nbytes, req->info);
222 ret = encrypt ? crypto_skcipher_encrypt(subreq) :
223 crypto_skcipher_decrypt(subreq);
224 skcipher_request_zero(subreq);
219 return ret; 225 return ret;
220 } 226 }
221 227
@@ -239,10 +245,9 @@ static int qce_ablkcipher_init(struct crypto_tfm *tfm)
239 memset(ctx, 0, sizeof(*ctx)); 245 memset(ctx, 0, sizeof(*ctx));
240 tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx); 246 tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
241 247
242 ctx->fallback = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm), 248 ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0,
243 CRYPTO_ALG_TYPE_ABLKCIPHER, 249 CRYPTO_ALG_ASYNC |
244 CRYPTO_ALG_ASYNC | 250 CRYPTO_ALG_NEED_FALLBACK);
245 CRYPTO_ALG_NEED_FALLBACK);
246 if (IS_ERR(ctx->fallback)) 251 if (IS_ERR(ctx->fallback))
247 return PTR_ERR(ctx->fallback); 252 return PTR_ERR(ctx->fallback);
248 253
@@ -253,7 +258,7 @@ static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
253{ 258{
254 struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); 259 struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
255 260
256 crypto_free_ablkcipher(ctx->fallback); 261 crypto_free_skcipher(ctx->fallback);
257} 262}
258 263
259struct qce_ablkcipher_def { 264struct qce_ablkcipher_def {
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index 5c6a5f8633e5..2b0278bb6e92 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -22,7 +22,7 @@
22struct qce_cipher_ctx { 22struct qce_cipher_ctx {
23 u8 enc_key[QCE_MAX_KEY_SIZE]; 23 u8 enc_key[QCE_MAX_KEY_SIZE];
24 unsigned int enc_keylen; 24 unsigned int enc_keylen;
25 struct crypto_ablkcipher *fallback; 25 struct crypto_skcipher *fallback;
26}; 26};
27 27
28/** 28/**
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 2b3a0cfe3331..dce1af0ce85c 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -155,43 +155,43 @@
155 * expansion of its usage. 155 * expansion of its usage.
156 */ 156 */
157struct samsung_aes_variant { 157struct samsung_aes_variant {
158 unsigned int aes_offset; 158 unsigned int aes_offset;
159}; 159};
160 160
161struct s5p_aes_reqctx { 161struct s5p_aes_reqctx {
162 unsigned long mode; 162 unsigned long mode;
163}; 163};
164 164
165struct s5p_aes_ctx { 165struct s5p_aes_ctx {
166 struct s5p_aes_dev *dev; 166 struct s5p_aes_dev *dev;
167 167
168 uint8_t aes_key[AES_MAX_KEY_SIZE]; 168 uint8_t aes_key[AES_MAX_KEY_SIZE];
169 uint8_t nonce[CTR_RFC3686_NONCE_SIZE]; 169 uint8_t nonce[CTR_RFC3686_NONCE_SIZE];
170 int keylen; 170 int keylen;
171}; 171};
172 172
173struct s5p_aes_dev { 173struct s5p_aes_dev {
174 struct device *dev; 174 struct device *dev;
175 struct clk *clk; 175 struct clk *clk;
176 void __iomem *ioaddr; 176 void __iomem *ioaddr;
177 void __iomem *aes_ioaddr; 177 void __iomem *aes_ioaddr;
178 int irq_fc; 178 int irq_fc;
179 179
180 struct ablkcipher_request *req; 180 struct ablkcipher_request *req;
181 struct s5p_aes_ctx *ctx; 181 struct s5p_aes_ctx *ctx;
182 struct scatterlist *sg_src; 182 struct scatterlist *sg_src;
183 struct scatterlist *sg_dst; 183 struct scatterlist *sg_dst;
184 184
185 /* In case of unaligned access: */ 185 /* In case of unaligned access: */
186 struct scatterlist *sg_src_cpy; 186 struct scatterlist *sg_src_cpy;
187 struct scatterlist *sg_dst_cpy; 187 struct scatterlist *sg_dst_cpy;
188 188
189 struct tasklet_struct tasklet; 189 struct tasklet_struct tasklet;
190 struct crypto_queue queue; 190 struct crypto_queue queue;
191 bool busy; 191 bool busy;
192 spinlock_t lock; 192 spinlock_t lock;
193 193
194 struct samsung_aes_variant *variant; 194 struct samsung_aes_variant *variant;
195}; 195};
196 196
197static struct s5p_aes_dev *s5p_dev; 197static struct s5p_aes_dev *s5p_dev;
@@ -421,11 +421,11 @@ static bool s5p_aes_rx(struct s5p_aes_dev *dev)
421static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id) 421static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
422{ 422{
423 struct platform_device *pdev = dev_id; 423 struct platform_device *pdev = dev_id;
424 struct s5p_aes_dev *dev = platform_get_drvdata(pdev); 424 struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
425 uint32_t status; 425 bool set_dma_tx = false;
426 unsigned long flags; 426 bool set_dma_rx = false;
427 bool set_dma_tx = false; 427 unsigned long flags;
428 bool set_dma_rx = false; 428 uint32_t status;
429 429
430 spin_lock_irqsave(&dev->lock, flags); 430 spin_lock_irqsave(&dev->lock, flags);
431 431
@@ -538,10 +538,10 @@ static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
538 538
539static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode) 539static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
540{ 540{
541 struct ablkcipher_request *req = dev->req; 541 struct ablkcipher_request *req = dev->req;
542 uint32_t aes_control; 542 uint32_t aes_control;
543 int err; 543 unsigned long flags;
544 unsigned long flags; 544 int err;
545 545
546 aes_control = SSS_AES_KEY_CHANGE_MODE; 546 aes_control = SSS_AES_KEY_CHANGE_MODE;
547 if (mode & FLAGS_AES_DECRYPT) 547 if (mode & FLAGS_AES_DECRYPT)
@@ -653,10 +653,10 @@ exit:
653 653
654static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode) 654static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
655{ 655{
656 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 656 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
657 struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); 657 struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
658 struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req); 658 struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
659 struct s5p_aes_dev *dev = ctx->dev; 659 struct s5p_aes_dev *dev = ctx->dev;
660 660
661 if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { 661 if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
662 dev_err(dev->dev, "request size is not exact amount of AES blocks\n"); 662 dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
@@ -671,7 +671,7 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
671static int s5p_aes_setkey(struct crypto_ablkcipher *cipher, 671static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
672 const uint8_t *key, unsigned int keylen) 672 const uint8_t *key, unsigned int keylen)
673{ 673{
674 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 674 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
675 struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm); 675 struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
676 676
677 if (keylen != AES_KEYSIZE_128 && 677 if (keylen != AES_KEYSIZE_128 &&
@@ -763,11 +763,11 @@ static struct crypto_alg algs[] = {
763 763
764static int s5p_aes_probe(struct platform_device *pdev) 764static int s5p_aes_probe(struct platform_device *pdev)
765{ 765{
766 int i, j, err = -ENODEV; 766 struct device *dev = &pdev->dev;
767 struct s5p_aes_dev *pdata; 767 int i, j, err = -ENODEV;
768 struct device *dev = &pdev->dev;
769 struct resource *res;
770 struct samsung_aes_variant *variant; 768 struct samsung_aes_variant *variant;
769 struct s5p_aes_dev *pdata;
770 struct resource *res;
771 771
772 if (s5p_dev) 772 if (s5p_dev)
773 return -EEXIST; 773 return -EEXIST;
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index c3f3d89e4831..0c49956ee0ce 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -14,10 +14,9 @@
14 * Based on omap-aes.c and tegra-aes.c 14 * Based on omap-aes.c and tegra-aes.c
15 */ 15 */
16 16
17#include <crypto/algapi.h>
18#include <crypto/aes.h> 17#include <crypto/aes.h>
19#include <crypto/hash.h>
20#include <crypto/internal/hash.h> 18#include <crypto/internal/hash.h>
19#include <crypto/internal/skcipher.h>
21#include <crypto/scatterwalk.h> 20#include <crypto/scatterwalk.h>
22#include <crypto/sha.h> 21#include <crypto/sha.h>
23 22
@@ -150,10 +149,7 @@ struct sahara_ctx {
150 /* AES-specific context */ 149 /* AES-specific context */
151 int keylen; 150 int keylen;
152 u8 key[AES_KEYSIZE_128]; 151 u8 key[AES_KEYSIZE_128];
153 struct crypto_ablkcipher *fallback; 152 struct crypto_skcipher *fallback;
154
155 /* SHA-specific context */
156 struct crypto_shash *shash_fallback;
157}; 153};
158 154
159struct sahara_aes_reqctx { 155struct sahara_aes_reqctx {
@@ -620,25 +616,21 @@ static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
620 return 0; 616 return 0;
621 } 617 }
622 618
623 if (keylen != AES_KEYSIZE_128 && 619 if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
624 keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
625 return -EINVAL; 620 return -EINVAL;
626 621
627 /* 622 /*
628 * The requested key size is not supported by HW, do a fallback. 623 * The requested key size is not supported by HW, do a fallback.
629 */ 624 */
630 ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; 625 crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
631 ctx->fallback->base.crt_flags |= 626 crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
632 (tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); 627 CRYPTO_TFM_REQ_MASK);
633 628
634 ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen); 629 ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
635 if (ret) {
636 struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);
637 630
638 tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK; 631 tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
639 tfm_aux->crt_flags |= 632 tfm->base.crt_flags |= crypto_skcipher_get_flags(ctx->fallback) &
640 (ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK); 633 CRYPTO_TFM_RES_MASK;
641 }
642 return ret; 634 return ret;
643} 635}
644 636
@@ -670,16 +662,20 @@ static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
670 662
671static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req) 663static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
672{ 664{
673 struct crypto_tfm *tfm =
674 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
675 struct sahara_ctx *ctx = crypto_ablkcipher_ctx( 665 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
676 crypto_ablkcipher_reqtfm(req)); 666 crypto_ablkcipher_reqtfm(req));
677 int err; 667 int err;
678 668
679 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { 669 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
680 ablkcipher_request_set_tfm(req, ctx->fallback); 670 SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
681 err = crypto_ablkcipher_encrypt(req); 671
682 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); 672 skcipher_request_set_tfm(subreq, ctx->fallback);
673 skcipher_request_set_callback(subreq, req->base.flags,
674 NULL, NULL);
675 skcipher_request_set_crypt(subreq, req->src, req->dst,
676 req->nbytes, req->info);
677 err = crypto_skcipher_encrypt(subreq);
678 skcipher_request_zero(subreq);
683 return err; 679 return err;
684 } 680 }
685 681
@@ -688,16 +684,20 @@ static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
688 684
689static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req) 685static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
690{ 686{
691 struct crypto_tfm *tfm =
692 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
693 struct sahara_ctx *ctx = crypto_ablkcipher_ctx( 687 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
694 crypto_ablkcipher_reqtfm(req)); 688 crypto_ablkcipher_reqtfm(req));
695 int err; 689 int err;
696 690
697 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { 691 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
698 ablkcipher_request_set_tfm(req, ctx->fallback); 692 SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
699 err = crypto_ablkcipher_decrypt(req); 693
700 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); 694 skcipher_request_set_tfm(subreq, ctx->fallback);
695 skcipher_request_set_callback(subreq, req->base.flags,
696 NULL, NULL);
697 skcipher_request_set_crypt(subreq, req->src, req->dst,
698 req->nbytes, req->info);
699 err = crypto_skcipher_decrypt(subreq);
700 skcipher_request_zero(subreq);
701 return err; 701 return err;
702 } 702 }
703 703
@@ -706,16 +706,20 @@ static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
706 706
707static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req) 707static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
708{ 708{
709 struct crypto_tfm *tfm =
710 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
711 struct sahara_ctx *ctx = crypto_ablkcipher_ctx( 709 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
712 crypto_ablkcipher_reqtfm(req)); 710 crypto_ablkcipher_reqtfm(req));
713 int err; 711 int err;
714 712
715 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { 713 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
716 ablkcipher_request_set_tfm(req, ctx->fallback); 714 SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
717 err = crypto_ablkcipher_encrypt(req); 715
718 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); 716 skcipher_request_set_tfm(subreq, ctx->fallback);
717 skcipher_request_set_callback(subreq, req->base.flags,
718 NULL, NULL);
719 skcipher_request_set_crypt(subreq, req->src, req->dst,
720 req->nbytes, req->info);
721 err = crypto_skcipher_encrypt(subreq);
722 skcipher_request_zero(subreq);
719 return err; 723 return err;
720 } 724 }
721 725
@@ -724,16 +728,20 @@ static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
724 728
725static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req) 729static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
726{ 730{
727 struct crypto_tfm *tfm =
728 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
729 struct sahara_ctx *ctx = crypto_ablkcipher_ctx( 731 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
730 crypto_ablkcipher_reqtfm(req)); 732 crypto_ablkcipher_reqtfm(req));
731 int err; 733 int err;
732 734
733 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { 735 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
734 ablkcipher_request_set_tfm(req, ctx->fallback); 736 SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
735 err = crypto_ablkcipher_decrypt(req); 737
736 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); 738 skcipher_request_set_tfm(subreq, ctx->fallback);
739 skcipher_request_set_callback(subreq, req->base.flags,
740 NULL, NULL);
741 skcipher_request_set_crypt(subreq, req->src, req->dst,
742 req->nbytes, req->info);
743 err = crypto_skcipher_decrypt(subreq);
744 skcipher_request_zero(subreq);
737 return err; 745 return err;
738 } 746 }
739 747
@@ -745,8 +753,9 @@ static int sahara_aes_cra_init(struct crypto_tfm *tfm)
745 const char *name = crypto_tfm_alg_name(tfm); 753 const char *name = crypto_tfm_alg_name(tfm);
746 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm); 754 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
747 755
748 ctx->fallback = crypto_alloc_ablkcipher(name, 0, 756 ctx->fallback = crypto_alloc_skcipher(name, 0,
749 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); 757 CRYPTO_ALG_ASYNC |
758 CRYPTO_ALG_NEED_FALLBACK);
750 if (IS_ERR(ctx->fallback)) { 759 if (IS_ERR(ctx->fallback)) {
751 pr_err("Error allocating fallback algo %s\n", name); 760 pr_err("Error allocating fallback algo %s\n", name);
752 return PTR_ERR(ctx->fallback); 761 return PTR_ERR(ctx->fallback);
@@ -761,9 +770,7 @@ static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
761{ 770{
762 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm); 771 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
763 772
764 if (ctx->fallback) 773 crypto_free_skcipher(ctx->fallback);
765 crypto_free_ablkcipher(ctx->fallback);
766 ctx->fallback = NULL;
767} 774}
768 775
769static u32 sahara_sha_init_hdr(struct sahara_dev *dev, 776static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
@@ -1180,15 +1187,6 @@ static int sahara_sha_import(struct ahash_request *req, const void *in)
1180 1187
1181static int sahara_sha_cra_init(struct crypto_tfm *tfm) 1188static int sahara_sha_cra_init(struct crypto_tfm *tfm)
1182{ 1189{
1183 const char *name = crypto_tfm_alg_name(tfm);
1184 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
1185
1186 ctx->shash_fallback = crypto_alloc_shash(name, 0,
1187 CRYPTO_ALG_NEED_FALLBACK);
1188 if (IS_ERR(ctx->shash_fallback)) {
1189 pr_err("Error allocating fallback algo %s\n", name);
1190 return PTR_ERR(ctx->shash_fallback);
1191 }
1192 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 1190 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1193 sizeof(struct sahara_sha_reqctx) + 1191 sizeof(struct sahara_sha_reqctx) +
1194 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE); 1192 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
@@ -1196,14 +1194,6 @@ static int sahara_sha_cra_init(struct crypto_tfm *tfm)
1196 return 0; 1194 return 0;
1197} 1195}
1198 1196
1199static void sahara_sha_cra_exit(struct crypto_tfm *tfm)
1200{
1201 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
1202
1203 crypto_free_shash(ctx->shash_fallback);
1204 ctx->shash_fallback = NULL;
1205}
1206
1207static struct crypto_alg aes_algs[] = { 1197static struct crypto_alg aes_algs[] = {
1208{ 1198{
1209 .cra_name = "ecb(aes)", 1199 .cra_name = "ecb(aes)",
@@ -1272,7 +1262,6 @@ static struct ahash_alg sha_v3_algs[] = {
1272 .cra_alignmask = 0, 1262 .cra_alignmask = 0,
1273 .cra_module = THIS_MODULE, 1263 .cra_module = THIS_MODULE,
1274 .cra_init = sahara_sha_cra_init, 1264 .cra_init = sahara_sha_cra_init,
1275 .cra_exit = sahara_sha_cra_exit,
1276 } 1265 }
1277}, 1266},
1278}; 1267};
@@ -1300,7 +1289,6 @@ static struct ahash_alg sha_v4_algs[] = {
1300 .cra_alignmask = 0, 1289 .cra_alignmask = 0,
1301 .cra_module = THIS_MODULE, 1290 .cra_module = THIS_MODULE,
1302 .cra_init = sahara_sha_cra_init, 1291 .cra_init = sahara_sha_cra_init,
1303 .cra_exit = sahara_sha_cra_exit,
1304 } 1292 }
1305}, 1293},
1306}; 1294};
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index b7ee8d30147d..0418a2f41dc0 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -91,10 +91,17 @@ static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
91 return be16_to_cpu(ptr->len); 91 return be16_to_cpu(ptr->len);
92} 92}
93 93
94static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1) 94static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
95 bool is_sec1)
95{ 96{
96 if (!is_sec1) 97 if (!is_sec1)
97 ptr->j_extent = 0; 98 ptr->j_extent = val;
99}
100
101static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
102{
103 if (!is_sec1)
104 ptr->j_extent |= val;
98} 105}
99 106
100/* 107/*
@@ -111,7 +118,7 @@ static void map_single_talitos_ptr(struct device *dev,
111 118
112 to_talitos_ptr_len(ptr, len, is_sec1); 119 to_talitos_ptr_len(ptr, len, is_sec1);
113 to_talitos_ptr(ptr, dma_addr, is_sec1); 120 to_talitos_ptr(ptr, dma_addr, is_sec1);
114 to_talitos_ptr_extent_clear(ptr, is_sec1); 121 to_talitos_ptr_ext_set(ptr, 0, is_sec1);
115} 122}
116 123
117/* 124/*
@@ -804,6 +811,11 @@ static void talitos_unregister_rng(struct device *dev)
804 * crypto alg 811 * crypto alg
805 */ 812 */
806#define TALITOS_CRA_PRIORITY 3000 813#define TALITOS_CRA_PRIORITY 3000
814/*
815 * Defines a priority for doing AEAD with descriptors type
816 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
817 */
818#define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
807#define TALITOS_MAX_KEY_SIZE 96 819#define TALITOS_MAX_KEY_SIZE 96
808#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ 820#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
809 821
@@ -904,35 +916,59 @@ struct talitos_edesc {
904static void talitos_sg_unmap(struct device *dev, 916static void talitos_sg_unmap(struct device *dev,
905 struct talitos_edesc *edesc, 917 struct talitos_edesc *edesc,
906 struct scatterlist *src, 918 struct scatterlist *src,
907 struct scatterlist *dst) 919 struct scatterlist *dst,
920 unsigned int len, unsigned int offset)
908{ 921{
922 struct talitos_private *priv = dev_get_drvdata(dev);
923 bool is_sec1 = has_ftr_sec1(priv);
909 unsigned int src_nents = edesc->src_nents ? : 1; 924 unsigned int src_nents = edesc->src_nents ? : 1;
910 unsigned int dst_nents = edesc->dst_nents ? : 1; 925 unsigned int dst_nents = edesc->dst_nents ? : 1;
911 926
927 if (is_sec1 && dst && dst_nents > 1) {
928 dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
929 len, DMA_FROM_DEVICE);
930 sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
931 offset);
932 }
912 if (src != dst) { 933 if (src != dst) {
913 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); 934 if (src_nents == 1 || !is_sec1)
935 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
914 936
915 if (dst) { 937 if (dst && (dst_nents == 1 || !is_sec1))
916 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); 938 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
917 } 939 } else if (src_nents == 1 || !is_sec1) {
918 } else
919 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); 940 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
941 }
920} 942}
921 943
922static void ipsec_esp_unmap(struct device *dev, 944static void ipsec_esp_unmap(struct device *dev,
923 struct talitos_edesc *edesc, 945 struct talitos_edesc *edesc,
924 struct aead_request *areq) 946 struct aead_request *areq)
925{ 947{
926 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE); 948 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
949 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
950 unsigned int ivsize = crypto_aead_ivsize(aead);
951
952 if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)
953 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
954 DMA_FROM_DEVICE);
927 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE); 955 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
928 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); 956 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
929 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE); 957 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
930 958
931 talitos_sg_unmap(dev, edesc, areq->src, areq->dst); 959 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
960 areq->assoclen);
932 961
933 if (edesc->dma_len) 962 if (edesc->dma_len)
934 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, 963 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
935 DMA_BIDIRECTIONAL); 964 DMA_BIDIRECTIONAL);
965
966 if (!(edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
967 unsigned int dst_nents = edesc->dst_nents ? : 1;
968
969 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
970 areq->assoclen + areq->cryptlen - ivsize);
971 }
936} 972}
937 973
938/* 974/*
@@ -942,6 +978,8 @@ static void ipsec_esp_encrypt_done(struct device *dev,
942 struct talitos_desc *desc, void *context, 978 struct talitos_desc *desc, void *context,
943 int err) 979 int err)
944{ 980{
981 struct talitos_private *priv = dev_get_drvdata(dev);
982 bool is_sec1 = has_ftr_sec1(priv);
945 struct aead_request *areq = context; 983 struct aead_request *areq = context;
946 struct crypto_aead *authenc = crypto_aead_reqtfm(areq); 984 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
947 unsigned int authsize = crypto_aead_authsize(authenc); 985 unsigned int authsize = crypto_aead_authsize(authenc);
@@ -955,8 +993,11 @@ static void ipsec_esp_encrypt_done(struct device *dev,
955 993
956 /* copy the generated ICV to dst */ 994 /* copy the generated ICV to dst */
957 if (edesc->icv_ool) { 995 if (edesc->icv_ool) {
958 icvdata = &edesc->link_tbl[edesc->src_nents + 996 if (is_sec1)
959 edesc->dst_nents + 2]; 997 icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
998 else
999 icvdata = &edesc->link_tbl[edesc->src_nents +
1000 edesc->dst_nents + 2];
960 sg = sg_last(areq->dst, edesc->dst_nents); 1001 sg = sg_last(areq->dst, edesc->dst_nents);
961 memcpy((char *)sg_virt(sg) + sg->length - authsize, 1002 memcpy((char *)sg_virt(sg) + sg->length - authsize,
962 icvdata, authsize); 1003 icvdata, authsize);
@@ -977,6 +1018,8 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
977 struct talitos_edesc *edesc; 1018 struct talitos_edesc *edesc;
978 struct scatterlist *sg; 1019 struct scatterlist *sg;
979 char *oicv, *icv; 1020 char *oicv, *icv;
1021 struct talitos_private *priv = dev_get_drvdata(dev);
1022 bool is_sec1 = has_ftr_sec1(priv);
980 1023
981 edesc = container_of(desc, struct talitos_edesc, desc); 1024 edesc = container_of(desc, struct talitos_edesc, desc);
982 1025
@@ -988,7 +1031,12 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
988 icv = (char *)sg_virt(sg) + sg->length - authsize; 1031 icv = (char *)sg_virt(sg) + sg->length - authsize;
989 1032
990 if (edesc->dma_len) { 1033 if (edesc->dma_len) {
991 oicv = (char *)&edesc->link_tbl[edesc->src_nents + 1034 if (is_sec1)
1035 oicv = (char *)&edesc->dma_link_tbl +
1036 req->assoclen + req->cryptlen;
1037 else
1038 oicv = (char *)
1039 &edesc->link_tbl[edesc->src_nents +
992 edesc->dst_nents + 2]; 1040 edesc->dst_nents + 2];
993 if (edesc->icv_ool) 1041 if (edesc->icv_ool)
994 icv = oicv + authsize; 1042 icv = oicv + authsize;
@@ -1050,8 +1098,8 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1050 1098
1051 to_talitos_ptr(link_tbl_ptr + count, 1099 to_talitos_ptr(link_tbl_ptr + count,
1052 sg_dma_address(sg) + offset, 0); 1100 sg_dma_address(sg) + offset, 0);
1053 link_tbl_ptr[count].len = cpu_to_be16(len); 1101 to_talitos_ptr_len(link_tbl_ptr + count, len, 0);
1054 link_tbl_ptr[count].j_extent = 0; 1102 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1055 count++; 1103 count++;
1056 cryptlen -= len; 1104 cryptlen -= len;
1057 offset = 0; 1105 offset = 0;
@@ -1062,17 +1110,43 @@ next:
1062 1110
1063 /* tag end of link table */ 1111 /* tag end of link table */
1064 if (count > 0) 1112 if (count > 0)
1065 link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN; 1113 to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1114 DESC_PTR_LNKTBL_RETURN, 0);
1066 1115
1067 return count; 1116 return count;
1068} 1117}
1069 1118
1070static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count, 1119int talitos_sg_map(struct device *dev, struct scatterlist *src,
1071 int cryptlen, 1120 unsigned int len, struct talitos_edesc *edesc,
1072 struct talitos_ptr *link_tbl_ptr) 1121 struct talitos_ptr *ptr,
1122 int sg_count, unsigned int offset, int tbl_off)
1073{ 1123{
1074 return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen, 1124 struct talitos_private *priv = dev_get_drvdata(dev);
1075 link_tbl_ptr); 1125 bool is_sec1 = has_ftr_sec1(priv);
1126
1127 to_talitos_ptr_len(ptr, len, is_sec1);
1128 to_talitos_ptr_ext_set(ptr, 0, is_sec1);
1129
1130 if (sg_count == 1) {
1131 to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1);
1132 return sg_count;
1133 }
1134 if (is_sec1) {
1135 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1);
1136 return sg_count;
1137 }
1138 sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len,
1139 &edesc->link_tbl[tbl_off]);
1140 if (sg_count == 1) {
1141 /* Only one segment now, so no link tbl needed*/
1142 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1143 return sg_count;
1144 }
1145 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1146 tbl_off * sizeof(struct talitos_ptr), is_sec1);
1147 to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1148
1149 return sg_count;
1076} 1150}
1077 1151
1078/* 1152/*
@@ -1093,42 +1167,52 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1093 int tbl_off = 0; 1167 int tbl_off = 0;
1094 int sg_count, ret; 1168 int sg_count, ret;
1095 int sg_link_tbl_len; 1169 int sg_link_tbl_len;
1170 bool sync_needed = false;
1171 struct talitos_private *priv = dev_get_drvdata(dev);
1172 bool is_sec1 = has_ftr_sec1(priv);
1096 1173
1097 /* hmac key */ 1174 /* hmac key */
1098 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, 1175 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
1099 DMA_TO_DEVICE); 1176 DMA_TO_DEVICE);
1100 1177
1101 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1, 1178 sg_count = edesc->src_nents ?: 1;
1102 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL 1179 if (is_sec1 && sg_count > 1)
1103 : DMA_TO_DEVICE); 1180 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1104 /* hmac data */ 1181 areq->assoclen + cryptlen);
1105 desc->ptr[1].len = cpu_to_be16(areq->assoclen); 1182 else
1106 if (sg_count > 1 && 1183 sg_count = dma_map_sg(dev, areq->src, sg_count,
1107 (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0, 1184 (areq->src == areq->dst) ?
1108 areq->assoclen, 1185 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1109 &edesc->link_tbl[tbl_off])) > 1) {
1110 to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
1111 sizeof(struct talitos_ptr), 0);
1112 desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
1113 1186
1114 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1187 /* hmac data */
1115 edesc->dma_len, DMA_BIDIRECTIONAL); 1188 ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1189 &desc->ptr[1], sg_count, 0, tbl_off);
1116 1190
1191 if (ret > 1) {
1117 tbl_off += ret; 1192 tbl_off += ret;
1118 } else { 1193 sync_needed = true;
1119 to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
1120 desc->ptr[1].j_extent = 0;
1121 } 1194 }
1122 1195
1123 /* cipher iv */ 1196 /* cipher iv */
1124 to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0); 1197 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
1125 desc->ptr[2].len = cpu_to_be16(ivsize); 1198 to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, is_sec1);
1126 desc->ptr[2].j_extent = 0; 1199 to_talitos_ptr_len(&desc->ptr[2], ivsize, is_sec1);
1200 to_talitos_ptr_ext_set(&desc->ptr[2], 0, is_sec1);
1201 } else {
1202 to_talitos_ptr(&desc->ptr[3], edesc->iv_dma, is_sec1);
1203 to_talitos_ptr_len(&desc->ptr[3], ivsize, is_sec1);
1204 to_talitos_ptr_ext_set(&desc->ptr[3], 0, is_sec1);
1205 }
1127 1206
1128 /* cipher key */ 1207 /* cipher key */
1129 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen, 1208 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
1130 (char *)&ctx->key + ctx->authkeylen, 1209 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
1131 DMA_TO_DEVICE); 1210 (char *)&ctx->key + ctx->authkeylen,
1211 DMA_TO_DEVICE);
1212 else
1213 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->enckeylen,
1214 (char *)&ctx->key + ctx->authkeylen,
1215 DMA_TO_DEVICE);
1132 1216
1133 /* 1217 /*
1134 * cipher in 1218 * cipher in
@@ -1136,78 +1220,82 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1136 * extent is bytes of HMAC postpended to ciphertext, 1220 * extent is bytes of HMAC postpended to ciphertext,
1137 * typically 12 for ipsec 1221 * typically 12 for ipsec
1138 */ 1222 */
1139 desc->ptr[4].len = cpu_to_be16(cryptlen); 1223 to_talitos_ptr_len(&desc->ptr[4], cryptlen, is_sec1);
1140 desc->ptr[4].j_extent = authsize; 1224 to_talitos_ptr_ext_set(&desc->ptr[4], 0, is_sec1);
1141 1225
1142 sg_link_tbl_len = cryptlen; 1226 sg_link_tbl_len = cryptlen;
1143 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1144 sg_link_tbl_len += authsize;
1145 1227
1146 if (sg_count == 1) { 1228 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
1147 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) + 1229 to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1);
1148 areq->assoclen, 0); 1230
1149 } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count, 1231 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1150 areq->assoclen, sg_link_tbl_len, 1232 sg_link_tbl_len += authsize;
1151 &edesc->link_tbl[tbl_off])) >
1152 1) {
1153 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1154 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1155 tbl_off *
1156 sizeof(struct talitos_ptr), 0);
1157 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1158 edesc->dma_len,
1159 DMA_BIDIRECTIONAL);
1160 tbl_off += ret;
1161 } else {
1162 copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
1163 } 1233 }
1164 1234
1165 /* cipher out */ 1235 sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1166 desc->ptr[5].len = cpu_to_be16(cryptlen); 1236 &desc->ptr[4], sg_count, areq->assoclen,
1167 desc->ptr[5].j_extent = authsize; 1237 tbl_off);
1168 1238
1169 if (areq->src != areq->dst) 1239 if (sg_count > 1) {
1170 sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1, 1240 tbl_off += sg_count;
1171 DMA_FROM_DEVICE); 1241 sync_needed = true;
1242 }
1172 1243
1173 edesc->icv_ool = false; 1244 /* cipher out */
1245 if (areq->src != areq->dst) {
1246 sg_count = edesc->dst_nents ? : 1;
1247 if (!is_sec1 || sg_count == 1)
1248 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1249 }
1174 1250
1175 if (sg_count == 1) { 1251 sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc,
1176 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) + 1252 &desc->ptr[5], sg_count, areq->assoclen,
1177 areq->assoclen, 0); 1253 tbl_off);
1178 } else if ((sg_count =
1179 sg_to_link_tbl_offset(areq->dst, sg_count,
1180 areq->assoclen, cryptlen,
1181 &edesc->link_tbl[tbl_off])) > 1) {
1182 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1183
1184 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
1185 tbl_off * sizeof(struct talitos_ptr), 0);
1186
1187 /* Add an entry to the link table for ICV data */
1188 tbl_ptr += sg_count - 1;
1189 tbl_ptr->j_extent = 0;
1190 tbl_ptr++;
1191 tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1192 tbl_ptr->len = cpu_to_be16(authsize);
1193
1194 /* icv data follows link tables */
1195 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
1196 (edesc->src_nents + edesc->dst_nents +
1197 2) * sizeof(struct talitos_ptr) +
1198 authsize, 0);
1199 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1200 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1201 edesc->dma_len, DMA_BIDIRECTIONAL);
1202 1254
1255 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
1256 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1257
1258 if (sg_count > 1) {
1203 edesc->icv_ool = true; 1259 edesc->icv_ool = true;
1260 sync_needed = true;
1261
1262 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
1263 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1264 int offset = (edesc->src_nents + edesc->dst_nents + 2) *
1265 sizeof(struct talitos_ptr) + authsize;
1266
1267 /* Add an entry to the link table for ICV data */
1268 tbl_ptr += sg_count - 1;
1269 to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1);
1270 tbl_ptr++;
1271 to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
1272 is_sec1);
1273 to_talitos_ptr_len(tbl_ptr, authsize, is_sec1);
1274
1275 /* icv data follows link tables */
1276 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
1277 is_sec1);
1278 }
1204 } else { 1279 } else {
1205 copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0); 1280 edesc->icv_ool = false;
1281 }
1282
1283 /* ICV data */
1284 if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
1285 to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
1286 to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl +
1287 areq->assoclen + cryptlen, is_sec1);
1206 } 1288 }
1207 1289
1208 /* iv out */ 1290 /* iv out */
1209 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 1291 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
1210 DMA_FROM_DEVICE); 1292 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1293 DMA_FROM_DEVICE);
1294
1295 if (sync_needed)
1296 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1297 edesc->dma_len,
1298 DMA_BIDIRECTIONAL);
1211 1299
1212 ret = talitos_submit(dev, ctx->ch, desc, callback, areq); 1300 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1213 if (ret != -EINPROGRESS) { 1301 if (ret != -EINPROGRESS) {
@@ -1233,7 +1321,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1233 bool encrypt) 1321 bool encrypt)
1234{ 1322{
1235 struct talitos_edesc *edesc; 1323 struct talitos_edesc *edesc;
1236 int src_nents, dst_nents, alloc_len, dma_len; 1324 int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1237 dma_addr_t iv_dma = 0; 1325 dma_addr_t iv_dma = 0;
1238 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : 1326 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1239 GFP_ATOMIC; 1327 GFP_ATOMIC;
@@ -1251,8 +1339,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1251 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); 1339 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1252 1340
1253 if (!dst || dst == src) { 1341 if (!dst || dst == src) {
1254 src_nents = sg_nents_for_len(src, 1342 src_len = assoclen + cryptlen + authsize;
1255 assoclen + cryptlen + authsize); 1343 src_nents = sg_nents_for_len(src, src_len);
1256 if (src_nents < 0) { 1344 if (src_nents < 0) {
1257 dev_err(dev, "Invalid number of src SG.\n"); 1345 dev_err(dev, "Invalid number of src SG.\n");
1258 err = ERR_PTR(-EINVAL); 1346 err = ERR_PTR(-EINVAL);
@@ -1260,17 +1348,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1260 } 1348 }
1261 src_nents = (src_nents == 1) ? 0 : src_nents; 1349 src_nents = (src_nents == 1) ? 0 : src_nents;
1262 dst_nents = dst ? src_nents : 0; 1350 dst_nents = dst ? src_nents : 0;
1351 dst_len = 0;
1263 } else { /* dst && dst != src*/ 1352 } else { /* dst && dst != src*/
1264 src_nents = sg_nents_for_len(src, assoclen + cryptlen + 1353 src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1265 (encrypt ? 0 : authsize)); 1354 src_nents = sg_nents_for_len(src, src_len);
1266 if (src_nents < 0) { 1355 if (src_nents < 0) {
1267 dev_err(dev, "Invalid number of src SG.\n"); 1356 dev_err(dev, "Invalid number of src SG.\n");
1268 err = ERR_PTR(-EINVAL); 1357 err = ERR_PTR(-EINVAL);
1269 goto error_sg; 1358 goto error_sg;
1270 } 1359 }
1271 src_nents = (src_nents == 1) ? 0 : src_nents; 1360 src_nents = (src_nents == 1) ? 0 : src_nents;
1272 dst_nents = sg_nents_for_len(dst, assoclen + cryptlen + 1361 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1273 (encrypt ? authsize : 0)); 1362 dst_nents = sg_nents_for_len(dst, dst_len);
1274 if (dst_nents < 0) { 1363 if (dst_nents < 0) {
1275 dev_err(dev, "Invalid number of dst SG.\n"); 1364 dev_err(dev, "Invalid number of dst SG.\n");
1276 err = ERR_PTR(-EINVAL); 1365 err = ERR_PTR(-EINVAL);
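To make the new src_len/dst_len bookkeeping concrete (illustrative numbers only): for an authenc() encrypt with assoclen = 8, cryptlen = 64, authsize = 12 and dst != src, src_len = 8 + 64 + 0 = 72 and dst_len = 8 + 64 + 12 = 84; on decrypt the authsize term moves to the source side, giving src_len = 84 and dst_len = 72.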
@@ -1287,8 +1376,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1287 alloc_len = sizeof(struct talitos_edesc); 1376 alloc_len = sizeof(struct talitos_edesc);
1288 if (src_nents || dst_nents) { 1377 if (src_nents || dst_nents) {
1289 if (is_sec1) 1378 if (is_sec1)
1290 dma_len = (src_nents ? cryptlen : 0) + 1379 dma_len = (src_nents ? src_len : 0) +
1291 (dst_nents ? cryptlen : 0); 1380 (dst_nents ? dst_len : 0);
1292 else 1381 else
1293 dma_len = (src_nents + dst_nents + 2) * 1382 dma_len = (src_nents + dst_nents + 2) *
1294 sizeof(struct talitos_ptr) + authsize * 2; 1383 sizeof(struct talitos_ptr) + authsize * 2;
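Continuing those illustrative numbers: on SEC1 the DMA area only has to hold the linearised data, so dma_len = src_len + dst_len (each term counted only when that side has more than one S/G entry), while on SEC2+ it holds the link tables plus ICV scratch space, e.g. with src_nents = 3, dst_nents = 2 and authsize = 12, dma_len = (3 + 2 + 2) * sizeof(struct talitos_ptr) + 2 * 12 = 80 bytes, assuming the 8-byte SEC2+ pointer layout.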
@@ -1412,40 +1501,13 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1412 return 0; 1501 return 0;
1413} 1502}
1414 1503
1415static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
1416 struct scatterlist *dst, unsigned int len,
1417 struct talitos_edesc *edesc)
1418{
1419 struct talitos_private *priv = dev_get_drvdata(dev);
1420 bool is_sec1 = has_ftr_sec1(priv);
1421
1422 if (is_sec1) {
1423 if (!edesc->src_nents) {
1424 dma_unmap_sg(dev, src, 1,
1425 dst != src ? DMA_TO_DEVICE
1426 : DMA_BIDIRECTIONAL);
1427 }
1428 if (dst && edesc->dst_nents) {
1429 dma_sync_single_for_device(dev,
1430 edesc->dma_link_tbl + len,
1431 len, DMA_FROM_DEVICE);
1432 sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
1433 edesc->buf + len, len);
1434 } else if (dst && dst != src) {
1435 dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
1436 }
1437 } else {
1438 talitos_sg_unmap(dev, edesc, src, dst);
1439 }
1440}
1441
1442static void common_nonsnoop_unmap(struct device *dev, 1504static void common_nonsnoop_unmap(struct device *dev,
1443 struct talitos_edesc *edesc, 1505 struct talitos_edesc *edesc,
1444 struct ablkcipher_request *areq) 1506 struct ablkcipher_request *areq)
1445{ 1507{
1446 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); 1508 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1447 1509
1448 unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc); 1510 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
1449 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); 1511 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1450 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); 1512 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1451 1513
@@ -1470,100 +1532,6 @@ static void ablkcipher_done(struct device *dev,
1470 areq->base.complete(&areq->base, err); 1532 areq->base.complete(&areq->base, err);
1471} 1533}
1472 1534
1473int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
1474 unsigned int len, struct talitos_edesc *edesc,
1475 enum dma_data_direction dir, struct talitos_ptr *ptr)
1476{
1477 int sg_count;
1478 struct talitos_private *priv = dev_get_drvdata(dev);
1479 bool is_sec1 = has_ftr_sec1(priv);
1480
1481 to_talitos_ptr_len(ptr, len, is_sec1);
1482
1483 if (is_sec1) {
1484 sg_count = edesc->src_nents ? : 1;
1485
1486 if (sg_count == 1) {
1487 dma_map_sg(dev, src, 1, dir);
1488 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
1489 } else {
1490 sg_copy_to_buffer(src, sg_count, edesc->buf, len);
1491 to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
1492 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1493 len, DMA_TO_DEVICE);
1494 }
1495 } else {
1496 to_talitos_ptr_extent_clear(ptr, is_sec1);
1497
1498 sg_count = dma_map_sg(dev, src, edesc->src_nents ? : 1, dir);
1499
1500 if (sg_count == 1) {
1501 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
1502 } else {
1503 sg_count = sg_to_link_tbl(src, sg_count, len,
1504 &edesc->link_tbl[0]);
1505 if (sg_count > 1) {
1506 to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
1507 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1508 dma_sync_single_for_device(dev,
1509 edesc->dma_link_tbl,
1510 edesc->dma_len,
1511 DMA_BIDIRECTIONAL);
1512 } else {
1513 /* Only one segment now, so no link tbl needed*/
1514 to_talitos_ptr(ptr, sg_dma_address(src),
1515 is_sec1);
1516 }
1517 }
1518 }
1519 return sg_count;
1520}
1521
1522void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
1523 unsigned int len, struct talitos_edesc *edesc,
1524 enum dma_data_direction dir,
1525 struct talitos_ptr *ptr, int sg_count)
1526{
1527 struct talitos_private *priv = dev_get_drvdata(dev);
1528 bool is_sec1 = has_ftr_sec1(priv);
1529
1530 if (dir != DMA_NONE)
1531 sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir);
1532
1533 to_talitos_ptr_len(ptr, len, is_sec1);
1534
1535 if (is_sec1) {
1536 if (sg_count == 1) {
1537 if (dir != DMA_NONE)
1538 dma_map_sg(dev, dst, 1, dir);
1539 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1540 } else {
1541 to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
1542 dma_sync_single_for_device(dev,
1543 edesc->dma_link_tbl + len,
1544 len, DMA_FROM_DEVICE);
1545 }
1546 } else {
1547 to_talitos_ptr_extent_clear(ptr, is_sec1);
1548
1549 if (sg_count == 1) {
1550 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
1551 } else {
1552 struct talitos_ptr *link_tbl_ptr =
1553 &edesc->link_tbl[edesc->src_nents + 1];
1554
1555 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1556 (edesc->src_nents + 1) *
1557 sizeof(struct talitos_ptr), 0);
1558 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1559 sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
1560 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1561 edesc->dma_len,
1562 DMA_BIDIRECTIONAL);
1563 }
1564 }
1565}
1566
1567static int common_nonsnoop(struct talitos_edesc *edesc, 1535static int common_nonsnoop(struct talitos_edesc *edesc,
1568 struct ablkcipher_request *areq, 1536 struct ablkcipher_request *areq,
1569 void (*callback) (struct device *dev, 1537 void (*callback) (struct device *dev,
@@ -1577,6 +1545,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
1577 unsigned int cryptlen = areq->nbytes; 1545 unsigned int cryptlen = areq->nbytes;
1578 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher); 1546 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1579 int sg_count, ret; 1547 int sg_count, ret;
1548 bool sync_needed = false;
1580 struct talitos_private *priv = dev_get_drvdata(dev); 1549 struct talitos_private *priv = dev_get_drvdata(dev);
1581 bool is_sec1 = has_ftr_sec1(priv); 1550 bool is_sec1 = has_ftr_sec1(priv);
1582 1551
@@ -1586,25 +1555,39 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
1586 /* cipher iv */ 1555 /* cipher iv */
1587 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1); 1556 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
1588 to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1); 1557 to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
1589 to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1); 1558 to_talitos_ptr_ext_set(&desc->ptr[1], 0, is_sec1);
1590 1559
1591 /* cipher key */ 1560 /* cipher key */
1592 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, 1561 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1593 (char *)&ctx->key, DMA_TO_DEVICE); 1562 (char *)&ctx->key, DMA_TO_DEVICE);
1594 1563
1564 sg_count = edesc->src_nents ?: 1;
1565 if (is_sec1 && sg_count > 1)
1566 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1567 cryptlen);
1568 else
1569 sg_count = dma_map_sg(dev, areq->src, sg_count,
1570 (areq->src == areq->dst) ?
1571 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1595 /* 1572 /*
1596 * cipher in 1573 * cipher in
1597 */ 1574 */
1598 sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc, 1575 sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1599 (areq->src == areq->dst) ? 1576 &desc->ptr[3], sg_count, 0, 0);
1600 DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 1577 if (sg_count > 1)
1601 &desc->ptr[3]); 1578 sync_needed = true;
1602 1579
1603 /* cipher out */ 1580 /* cipher out */
1604 map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc, 1581 if (areq->src != areq->dst) {
1605 (areq->src == areq->dst) ? DMA_NONE 1582 sg_count = edesc->dst_nents ? : 1;
1606 : DMA_FROM_DEVICE, 1583 if (!is_sec1 || sg_count == 1)
1607 &desc->ptr[4], sg_count); 1584 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1585 }
1586
1587 ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1588 sg_count, 0, (edesc->src_nents + 1));
1589 if (ret > 1)
1590 sync_needed = true;
1608 1591
1609 /* iv out */ 1592 /* iv out */
1610 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 1593 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
@@ -1613,6 +1596,10 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
1613 /* last DWORD empty */ 1596 /* last DWORD empty */
1614 desc->ptr[6] = zero_entry; 1597 desc->ptr[6] = zero_entry;
1615 1598
1599 if (sync_needed)
1600 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1601 edesc->dma_len, DMA_BIDIRECTIONAL);
1602
1616 ret = talitos_submit(dev, ctx->ch, desc, callback, areq); 1603 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1617 if (ret != -EINPROGRESS) { 1604 if (ret != -EINPROGRESS) {
1618 common_nonsnoop_unmap(dev, edesc, areq); 1605 common_nonsnoop_unmap(dev, edesc, areq);
@@ -1676,7 +1663,7 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
1676 1663
1677 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); 1664 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1678 1665
1679 unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc); 1666 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1680 1667
1681 /* When using hashctx-in, must unmap it. */ 1668 /* When using hashctx-in, must unmap it. */
1682 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1)) 1669 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
@@ -1747,8 +1734,10 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1747 struct device *dev = ctx->dev; 1734 struct device *dev = ctx->dev;
1748 struct talitos_desc *desc = &edesc->desc; 1735 struct talitos_desc *desc = &edesc->desc;
1749 int ret; 1736 int ret;
1737 bool sync_needed = false;
1750 struct talitos_private *priv = dev_get_drvdata(dev); 1738 struct talitos_private *priv = dev_get_drvdata(dev);
1751 bool is_sec1 = has_ftr_sec1(priv); 1739 bool is_sec1 = has_ftr_sec1(priv);
1740 int sg_count;
1752 1741
1753 /* first DWORD empty */ 1742 /* first DWORD empty */
1754 desc->ptr[0] = zero_entry; 1743 desc->ptr[0] = zero_entry;
@@ -1773,11 +1762,19 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1773 else 1762 else
1774 desc->ptr[2] = zero_entry; 1763 desc->ptr[2] = zero_entry;
1775 1764
1765 sg_count = edesc->src_nents ?: 1;
1766 if (is_sec1 && sg_count > 1)
1767 sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length);
1768 else
1769 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1770 DMA_TO_DEVICE);
1776 /* 1771 /*
1777 * data in 1772 * data in
1778 */ 1773 */
1779 map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc, 1774 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1780 DMA_TO_DEVICE, &desc->ptr[3]); 1775 &desc->ptr[3], sg_count, 0, 0);
1776 if (sg_count > 1)
1777 sync_needed = true;
1781 1778
1782 /* fifth DWORD empty */ 1779 /* fifth DWORD empty */
1783 desc->ptr[4] = zero_entry; 1780 desc->ptr[4] = zero_entry;
@@ -1798,6 +1795,10 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1798 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0) 1795 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1799 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]); 1796 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1800 1797
1798 if (sync_needed)
1799 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1800 edesc->dma_len, DMA_BIDIRECTIONAL);
1801
1801 ret = talitos_submit(dev, ctx->ch, desc, callback, areq); 1802 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1802 if (ret != -EINPROGRESS) { 1803 if (ret != -EINPROGRESS) {
1803 common_nonsnoop_hash_unmap(dev, edesc, areq); 1804 common_nonsnoop_hash_unmap(dev, edesc, areq);
@@ -2124,6 +2125,7 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2124 2125
2125struct talitos_alg_template { 2126struct talitos_alg_template {
2126 u32 type; 2127 u32 type;
2128 u32 priority;
2127 union { 2129 union {
2128 struct crypto_alg crypto; 2130 struct crypto_alg crypto;
2129 struct ahash_alg hash; 2131 struct ahash_alg hash;
@@ -2155,6 +2157,27 @@ static struct talitos_alg_template driver_algs[] = {
2155 DESC_HDR_MODE1_MDEU_SHA1_HMAC, 2157 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2156 }, 2158 },
2157 { .type = CRYPTO_ALG_TYPE_AEAD, 2159 { .type = CRYPTO_ALG_TYPE_AEAD,
2160 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2161 .alg.aead = {
2162 .base = {
2163 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2164 .cra_driver_name = "authenc-hmac-sha1-"
2165 "cbc-aes-talitos",
2166 .cra_blocksize = AES_BLOCK_SIZE,
2167 .cra_flags = CRYPTO_ALG_ASYNC,
2168 },
2169 .ivsize = AES_BLOCK_SIZE,
2170 .maxauthsize = SHA1_DIGEST_SIZE,
2171 },
2172 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2173 DESC_HDR_SEL0_AESU |
2174 DESC_HDR_MODE0_AESU_CBC |
2175 DESC_HDR_SEL1_MDEUA |
2176 DESC_HDR_MODE1_MDEU_INIT |
2177 DESC_HDR_MODE1_MDEU_PAD |
2178 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2179 },
2180 { .type = CRYPTO_ALG_TYPE_AEAD,
2158 .alg.aead = { 2181 .alg.aead = {
2159 .base = { 2182 .base = {
2160 .cra_name = "authenc(hmac(sha1)," 2183 .cra_name = "authenc(hmac(sha1),"
@@ -2176,6 +2199,29 @@ static struct talitos_alg_template driver_algs[] = {
2176 DESC_HDR_MODE1_MDEU_PAD | 2199 DESC_HDR_MODE1_MDEU_PAD |
2177 DESC_HDR_MODE1_MDEU_SHA1_HMAC, 2200 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2178 }, 2201 },
2202 { .type = CRYPTO_ALG_TYPE_AEAD,
2203 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2204 .alg.aead = {
2205 .base = {
2206 .cra_name = "authenc(hmac(sha1),"
2207 "cbc(des3_ede))",
2208 .cra_driver_name = "authenc-hmac-sha1-"
2209 "cbc-3des-talitos",
2210 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2211 .cra_flags = CRYPTO_ALG_ASYNC,
2212 },
2213 .ivsize = DES3_EDE_BLOCK_SIZE,
2214 .maxauthsize = SHA1_DIGEST_SIZE,
2215 },
2216 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2217 DESC_HDR_SEL0_DEU |
2218 DESC_HDR_MODE0_DEU_CBC |
2219 DESC_HDR_MODE0_DEU_3DES |
2220 DESC_HDR_SEL1_MDEUA |
2221 DESC_HDR_MODE1_MDEU_INIT |
2222 DESC_HDR_MODE1_MDEU_PAD |
2223 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2224 },
2179 { .type = CRYPTO_ALG_TYPE_AEAD, 2225 { .type = CRYPTO_ALG_TYPE_AEAD,
2180 .alg.aead = { 2226 .alg.aead = {
2181 .base = { 2227 .base = {
@@ -2196,6 +2242,27 @@ static struct talitos_alg_template driver_algs[] = {
2196 DESC_HDR_MODE1_MDEU_PAD | 2242 DESC_HDR_MODE1_MDEU_PAD |
2197 DESC_HDR_MODE1_MDEU_SHA224_HMAC, 2243 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2198 }, 2244 },
2245 { .type = CRYPTO_ALG_TYPE_AEAD,
2246 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2247 .alg.aead = {
2248 .base = {
2249 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2250 .cra_driver_name = "authenc-hmac-sha224-"
2251 "cbc-aes-talitos",
2252 .cra_blocksize = AES_BLOCK_SIZE,
2253 .cra_flags = CRYPTO_ALG_ASYNC,
2254 },
2255 .ivsize = AES_BLOCK_SIZE,
2256 .maxauthsize = SHA224_DIGEST_SIZE,
2257 },
2258 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2259 DESC_HDR_SEL0_AESU |
2260 DESC_HDR_MODE0_AESU_CBC |
2261 DESC_HDR_SEL1_MDEUA |
2262 DESC_HDR_MODE1_MDEU_INIT |
2263 DESC_HDR_MODE1_MDEU_PAD |
2264 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2265 },
2199 { .type = CRYPTO_ALG_TYPE_AEAD, 2266 { .type = CRYPTO_ALG_TYPE_AEAD,
2200 .alg.aead = { 2267 .alg.aead = {
2201 .base = { 2268 .base = {
@@ -2219,6 +2286,29 @@ static struct talitos_alg_template driver_algs[] = {
2219 DESC_HDR_MODE1_MDEU_SHA224_HMAC, 2286 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2220 }, 2287 },
2221 { .type = CRYPTO_ALG_TYPE_AEAD, 2288 { .type = CRYPTO_ALG_TYPE_AEAD,
2289 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2290 .alg.aead = {
2291 .base = {
2292 .cra_name = "authenc(hmac(sha224),"
2293 "cbc(des3_ede))",
2294 .cra_driver_name = "authenc-hmac-sha224-"
2295 "cbc-3des-talitos",
2296 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2297 .cra_flags = CRYPTO_ALG_ASYNC,
2298 },
2299 .ivsize = DES3_EDE_BLOCK_SIZE,
2300 .maxauthsize = SHA224_DIGEST_SIZE,
2301 },
2302 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2303 DESC_HDR_SEL0_DEU |
2304 DESC_HDR_MODE0_DEU_CBC |
2305 DESC_HDR_MODE0_DEU_3DES |
2306 DESC_HDR_SEL1_MDEUA |
2307 DESC_HDR_MODE1_MDEU_INIT |
2308 DESC_HDR_MODE1_MDEU_PAD |
2309 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2310 },
2311 { .type = CRYPTO_ALG_TYPE_AEAD,
2222 .alg.aead = { 2312 .alg.aead = {
2223 .base = { 2313 .base = {
2224 .cra_name = "authenc(hmac(sha256),cbc(aes))", 2314 .cra_name = "authenc(hmac(sha256),cbc(aes))",
@@ -2239,6 +2329,27 @@ static struct talitos_alg_template driver_algs[] = {
2239 DESC_HDR_MODE1_MDEU_SHA256_HMAC, 2329 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2240 }, 2330 },
2241 { .type = CRYPTO_ALG_TYPE_AEAD, 2331 { .type = CRYPTO_ALG_TYPE_AEAD,
2332 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2333 .alg.aead = {
2334 .base = {
2335 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2336 .cra_driver_name = "authenc-hmac-sha256-"
2337 "cbc-aes-talitos",
2338 .cra_blocksize = AES_BLOCK_SIZE,
2339 .cra_flags = CRYPTO_ALG_ASYNC,
2340 },
2341 .ivsize = AES_BLOCK_SIZE,
2342 .maxauthsize = SHA256_DIGEST_SIZE,
2343 },
2344 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2345 DESC_HDR_SEL0_AESU |
2346 DESC_HDR_MODE0_AESU_CBC |
2347 DESC_HDR_SEL1_MDEUA |
2348 DESC_HDR_MODE1_MDEU_INIT |
2349 DESC_HDR_MODE1_MDEU_PAD |
2350 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2351 },
2352 { .type = CRYPTO_ALG_TYPE_AEAD,
2242 .alg.aead = { 2353 .alg.aead = {
2243 .base = { 2354 .base = {
2244 .cra_name = "authenc(hmac(sha256)," 2355 .cra_name = "authenc(hmac(sha256),"
@@ -2261,6 +2372,29 @@ static struct talitos_alg_template driver_algs[] = {
2261 DESC_HDR_MODE1_MDEU_SHA256_HMAC, 2372 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2262 }, 2373 },
2263 { .type = CRYPTO_ALG_TYPE_AEAD, 2374 { .type = CRYPTO_ALG_TYPE_AEAD,
2375 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2376 .alg.aead = {
2377 .base = {
2378 .cra_name = "authenc(hmac(sha256),"
2379 "cbc(des3_ede))",
2380 .cra_driver_name = "authenc-hmac-sha256-"
2381 "cbc-3des-talitos",
2382 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2383 .cra_flags = CRYPTO_ALG_ASYNC,
2384 },
2385 .ivsize = DES3_EDE_BLOCK_SIZE,
2386 .maxauthsize = SHA256_DIGEST_SIZE,
2387 },
2388 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2389 DESC_HDR_SEL0_DEU |
2390 DESC_HDR_MODE0_DEU_CBC |
2391 DESC_HDR_MODE0_DEU_3DES |
2392 DESC_HDR_SEL1_MDEUA |
2393 DESC_HDR_MODE1_MDEU_INIT |
2394 DESC_HDR_MODE1_MDEU_PAD |
2395 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2396 },
2397 { .type = CRYPTO_ALG_TYPE_AEAD,
2264 .alg.aead = { 2398 .alg.aead = {
2265 .base = { 2399 .base = {
2266 .cra_name = "authenc(hmac(sha384),cbc(aes))", 2400 .cra_name = "authenc(hmac(sha384),cbc(aes))",
@@ -2365,6 +2499,27 @@ static struct talitos_alg_template driver_algs[] = {
2365 DESC_HDR_MODE1_MDEU_MD5_HMAC, 2499 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2366 }, 2500 },
2367 { .type = CRYPTO_ALG_TYPE_AEAD, 2501 { .type = CRYPTO_ALG_TYPE_AEAD,
2502 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2503 .alg.aead = {
2504 .base = {
2505 .cra_name = "authenc(hmac(md5),cbc(aes))",
2506 .cra_driver_name = "authenc-hmac-md5-"
2507 "cbc-aes-talitos",
2508 .cra_blocksize = AES_BLOCK_SIZE,
2509 .cra_flags = CRYPTO_ALG_ASYNC,
2510 },
2511 .ivsize = AES_BLOCK_SIZE,
2512 .maxauthsize = MD5_DIGEST_SIZE,
2513 },
2514 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2515 DESC_HDR_SEL0_AESU |
2516 DESC_HDR_MODE0_AESU_CBC |
2517 DESC_HDR_SEL1_MDEUA |
2518 DESC_HDR_MODE1_MDEU_INIT |
2519 DESC_HDR_MODE1_MDEU_PAD |
2520 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2521 },
2522 { .type = CRYPTO_ALG_TYPE_AEAD,
2368 .alg.aead = { 2523 .alg.aead = {
2369 .base = { 2524 .base = {
2370 .cra_name = "authenc(hmac(md5),cbc(des3_ede))", 2525 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
@@ -2385,6 +2540,28 @@ static struct talitos_alg_template driver_algs[] = {
2385 DESC_HDR_MODE1_MDEU_PAD | 2540 DESC_HDR_MODE1_MDEU_PAD |
2386 DESC_HDR_MODE1_MDEU_MD5_HMAC, 2541 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2387 }, 2542 },
2543 { .type = CRYPTO_ALG_TYPE_AEAD,
2544 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2545 .alg.aead = {
2546 .base = {
2547 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2548 .cra_driver_name = "authenc-hmac-md5-"
2549 "cbc-3des-talitos",
2550 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2551 .cra_flags = CRYPTO_ALG_ASYNC,
2552 },
2553 .ivsize = DES3_EDE_BLOCK_SIZE,
2554 .maxauthsize = MD5_DIGEST_SIZE,
2555 },
2556 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2557 DESC_HDR_SEL0_DEU |
2558 DESC_HDR_MODE0_DEU_CBC |
2559 DESC_HDR_MODE0_DEU_3DES |
2560 DESC_HDR_SEL1_MDEUA |
2561 DESC_HDR_MODE1_MDEU_INIT |
2562 DESC_HDR_MODE1_MDEU_PAD |
2563 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2564 },
2388 /* ABLKCIPHER algorithms. */ 2565 /* ABLKCIPHER algorithms. */
2389 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 2566 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2390 .alg.crypto = { 2567 .alg.crypto = {
@@ -2901,7 +3078,10 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2901 } 3078 }
2902 3079
2903 alg->cra_module = THIS_MODULE; 3080 alg->cra_module = THIS_MODULE;
2904 alg->cra_priority = TALITOS_CRA_PRIORITY; 3081 if (t_alg->algt.priority)
3082 alg->cra_priority = t_alg->algt.priority;
3083 else
3084 alg->cra_priority = TALITOS_CRA_PRIORITY;
2905 alg->cra_alignmask = 0; 3085 alg->cra_alignmask = 0;
2906 alg->cra_ctxsize = sizeof(struct talitos_ctx); 3086 alg->cra_ctxsize = sizeof(struct talitos_ctx);
2907 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY; 3087 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
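With the per-template priority in place, the duplicated HMAC_SNOOP_NO_AFEU entries added above register at the lower TALITOS_CRA_PRIORITY_AEAD_HSNA, so the crypto core keeps preferring the original IPSEC_ESP descriptors whenever both back the same algorithm name. A hedged consumer-side sketch; the allocation itself is hypothetical, the API calls are the standard crypto ones:

	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (!IS_ERR(tfm))
		/* the core resolves to the registration with the highest
		 * cra_priority among all drivers offering this name */
		pr_info("selected %s\n",
			crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)));
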
diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile
index e5d362a6f680..b497ae3dde07 100644
--- a/drivers/crypto/ux500/cryp/Makefile
+++ b/drivers/crypto/ux500/cryp/Makefile
@@ -4,9 +4,9 @@
4# * License terms: GNU General Public License (GPL) version 2 */ 4# * License terms: GNU General Public License (GPL) version 2 */
5 5
6ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG 6ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
7CFLAGS_cryp_core.o := -DDEBUG -O0 7CFLAGS_cryp_core.o := -DDEBUG
8CFLAGS_cryp.o := -DDEBUG -O0 8CFLAGS_cryp.o := -DDEBUG
9CFLAGS_cryp_irq.o := -DDEBUG -O0 9CFLAGS_cryp_irq.o := -DDEBUG
10endif 10endif
11 11
12obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o 12obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o
diff --git a/drivers/crypto/ux500/hash/Makefile b/drivers/crypto/ux500/hash/Makefile
index b2f90d9bac72..784d9c0a8853 100644
--- a/drivers/crypto/ux500/hash/Makefile
+++ b/drivers/crypto/ux500/hash/Makefile
@@ -4,7 +4,7 @@
4# License terms: GNU General Public License (GPL) version 2 4# License terms: GNU General Public License (GPL) version 2
5# 5#
6ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG 6ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
7CFLAGS_hash_core.o := -DDEBUG -O0 7CFLAGS_hash_core.o := -DDEBUG
8endif 8endif
9 9
10obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += ux500_hash.o 10obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += ux500_hash.o
diff --git a/drivers/crypto/vmx/.gitignore b/drivers/crypto/vmx/.gitignore
new file mode 100644
index 000000000000..af4a7ce4738d
--- /dev/null
+++ b/drivers/crypto/vmx/.gitignore
@@ -0,0 +1,2 @@
1aesp8-ppc.S
2ghashp8-ppc.S
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
index d28ab96a2475..de6e241b0866 100644
--- a/drivers/crypto/vmx/Makefile
+++ b/drivers/crypto/vmx/Makefile
@@ -1,5 +1,5 @@
1obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o 1obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
2vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o ghash.o 2vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
3 3
4ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) 4ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
5TARGET := linux-ppc64le 5TARGET := linux-ppc64le
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
new file mode 100644
index 000000000000..cfb25413917c
--- /dev/null
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -0,0 +1,190 @@
1/**
2 * AES XTS routines supporting VMX In-core instructions on Power 8
3 *
4 * Copyright (C) 2015 International Business Machines Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
 8 * the Free Software Foundation; version 2 only.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Author: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
20 */
21
22#include <linux/types.h>
23#include <linux/err.h>
24#include <linux/crypto.h>
25#include <linux/delay.h>
26#include <linux/hardirq.h>
27#include <asm/switch_to.h>
28#include <crypto/aes.h>
29#include <crypto/scatterwalk.h>
30#include <crypto/xts.h>
31
32#include "aesp8-ppc.h"
33
34struct p8_aes_xts_ctx {
35 struct crypto_blkcipher *fallback;
36 struct aes_key enc_key;
37 struct aes_key dec_key;
38 struct aes_key tweak_key;
39};
40
41static int p8_aes_xts_init(struct crypto_tfm *tfm)
42{
43 const char *alg;
44 struct crypto_blkcipher *fallback;
45 struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
46
47 if (!(alg = crypto_tfm_alg_name(tfm))) {
48 printk(KERN_ERR "Failed to get algorithm name.\n");
49 return -ENOENT;
50 }
51
52 fallback =
53 crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
54 if (IS_ERR(fallback)) {
55 printk(KERN_ERR
56 "Failed to allocate transformation for '%s': %ld\n",
57 alg, PTR_ERR(fallback));
58 return PTR_ERR(fallback);
59 }
60 printk(KERN_INFO "Using '%s' as fallback implementation.\n",
61 crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
62
63 crypto_blkcipher_set_flags(
64 fallback,
65 crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm));
66 ctx->fallback = fallback;
67
68 return 0;
69}
70
71static void p8_aes_xts_exit(struct crypto_tfm *tfm)
72{
73 struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
74
75 if (ctx->fallback) {
76 crypto_free_blkcipher(ctx->fallback);
77 ctx->fallback = NULL;
78 }
79}
80
81static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
82 unsigned int keylen)
83{
84 int ret;
85 struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
86
87 ret = xts_check_key(tfm, key, keylen);
88 if (ret)
89 return ret;
90
91 preempt_disable();
92 pagefault_disable();
93 enable_kernel_vsx();
94 ret = aes_p8_set_encrypt_key(key + keylen/2, (keylen/2) * 8, &ctx->tweak_key);
95 ret += aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
96 ret += aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
97 disable_kernel_vsx();
98 pagefault_enable();
99 preempt_enable();
100
101 ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
102 return ret;
103}
104
105static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
106 struct scatterlist *dst,
107 struct scatterlist *src,
108 unsigned int nbytes, int enc)
109{
110 int ret;
111 u8 tweak[AES_BLOCK_SIZE];
112 u8 *iv;
113 struct blkcipher_walk walk;
114 struct p8_aes_xts_ctx *ctx =
115 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
116 struct blkcipher_desc fallback_desc = {
117 .tfm = ctx->fallback,
118 .info = desc->info,
119 .flags = desc->flags
120 };
121
122 if (in_interrupt()) {
123 ret = enc ? crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes) :
124 crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes);
125 } else {
126 preempt_disable();
127 pagefault_disable();
128 enable_kernel_vsx();
129
130 blkcipher_walk_init(&walk, dst, src, nbytes);
131
132 iv = (u8 *)walk.iv;
133 ret = blkcipher_walk_virt(desc, &walk);
134 memset(tweak, 0, AES_BLOCK_SIZE);
135 aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
136
137 while ((nbytes = walk.nbytes)) {
138 if (enc)
139 aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
140 nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
141 else
142 aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
143 nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
144
145 nbytes &= AES_BLOCK_SIZE - 1;
146 ret = blkcipher_walk_done(desc, &walk, nbytes);
147 }
148
149 disable_kernel_vsx();
150 pagefault_enable();
151 preempt_enable();
152 }
153 return ret;
154}
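A worked example of the block masking in the walk loop above (illustrative numbers only): with walk.nbytes == 40, 40 & AES_BLOCK_MASK == 32 bytes are passed to aes_p8_xts_encrypt()/aes_p8_xts_decrypt() in that pass, and 40 & (AES_BLOCK_SIZE - 1) == 8 bytes are handed back to blkcipher_walk_done() to be picked up on the next iteration.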
155
156static int p8_aes_xts_encrypt(struct blkcipher_desc *desc,
157 struct scatterlist *dst,
158 struct scatterlist *src, unsigned int nbytes)
159{
160 return p8_aes_xts_crypt(desc, dst, src, nbytes, 1);
161}
162
163static int p8_aes_xts_decrypt(struct blkcipher_desc *desc,
164 struct scatterlist *dst,
165 struct scatterlist *src, unsigned int nbytes)
166{
167 return p8_aes_xts_crypt(desc, dst, src, nbytes, 0);
168}
169
170struct crypto_alg p8_aes_xts_alg = {
171 .cra_name = "xts(aes)",
172 .cra_driver_name = "p8_aes_xts",
173 .cra_module = THIS_MODULE,
174 .cra_priority = 2000,
175 .cra_type = &crypto_blkcipher_type,
176 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
177 .cra_alignmask = 0,
178 .cra_blocksize = AES_BLOCK_SIZE,
179 .cra_ctxsize = sizeof(struct p8_aes_xts_ctx),
180 .cra_init = p8_aes_xts_init,
181 .cra_exit = p8_aes_xts_exit,
182 .cra_blkcipher = {
183 .ivsize = AES_BLOCK_SIZE,
184 .min_keysize = 2 * AES_MIN_KEY_SIZE,
185 .max_keysize = 2 * AES_MAX_KEY_SIZE,
186 .setkey = p8_aes_xts_setkey,
187 .encrypt = p8_aes_xts_encrypt,
188 .decrypt = p8_aes_xts_decrypt,
189 }
190};
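Note that min_keysize and max_keysize above are doubled (2 * AES_MIN_KEY_SIZE = 32 bytes up to 2 * AES_MAX_KEY_SIZE = 64 bytes) because XTS carries two AES keys in one blob; for instance, AES-128-XTS is requested with a 32-byte key, each half feeding one of the schedules set up in p8_aes_xts_setkey().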
diff --git a/drivers/crypto/vmx/aesp8-ppc.h b/drivers/crypto/vmx/aesp8-ppc.h
index 4cd34ee54a94..01972e16a6c0 100644
--- a/drivers/crypto/vmx/aesp8-ppc.h
+++ b/drivers/crypto/vmx/aesp8-ppc.h
@@ -19,3 +19,7 @@ void aes_p8_cbc_encrypt(const u8 *in, u8 *out, size_t len,
19void aes_p8_ctr32_encrypt_blocks(const u8 *in, u8 *out, 19void aes_p8_ctr32_encrypt_blocks(const u8 *in, u8 *out,
20 size_t len, const struct aes_key *key, 20 size_t len, const struct aes_key *key,
21 const u8 *iv); 21 const u8 *iv);
22void aes_p8_xts_encrypt(const u8 *in, u8 *out, size_t len,
23 const struct aes_key *key1, const struct aes_key *key2, u8 *iv);
24void aes_p8_xts_decrypt(const u8 *in, u8 *out, size_t len,
25 const struct aes_key *key1, const struct aes_key *key2, u8 *iv);
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
index 228053921b3f..0b4a293b8a1e 100644
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -1,4 +1,11 @@
1#!/usr/bin/env perl 1#! /usr/bin/env perl
2# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
3#
4# Licensed under the OpenSSL license (the "License"). You may not use
5# this file except in compliance with the License. You can obtain a copy
6# in the file LICENSE in the source distribution or at
7# https://www.openssl.org/source/license.html
8
2# 9#
3# ==================================================================== 10# ====================================================================
4# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL 11# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@@ -20,6 +27,19 @@
20# instructions are interleaved. It's reckoned that eventual 27# instructions are interleaved. It's reckoned that eventual
21# misalignment penalties at page boundaries are in average lower 28# misalignment penalties at page boundaries are in average lower
22# than additional overhead in pure AltiVec approach. 29# than additional overhead in pure AltiVec approach.
30#
31# May 2016
32#
 33# Add XTS subroutine: a 9x improvement on little-endian and a 12x
 34# improvement on big-endian systems was measured.
35#
36######################################################################
37# Current large-block performance in cycles per byte processed with
38# 128-bit key (less is better).
39#
40# CBC en-/decrypt CTR XTS
41# POWER8[le] 3.96/0.72 0.74 1.1
42# POWER8[be] 3.75/0.65 0.66 1.0
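# (Rough translation of the table, assuming a 3.5 GHz core clock purely for
#  illustration: ~4.7-5.3 GB/s for CTR and ~3.2-3.5 GB/s for XTS on a single
#  thread; the cycles-per-byte figures above are the authoritative numbers.)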
23 43
24$flavour = shift; 44$flavour = shift;
25 45
@@ -1875,6 +1895,1845 @@ Lctr32_enc8x_done:
1875___ 1895___
1876}} }}} 1896}} }}}
1877 1897
1898#########################################################################
1899{{{ # XTS procedures #
1900# int aes_p8_xts_[en|de]crypt(const char *inp, char *out, size_t len, #
1901# const AES_KEY *key1, const AES_KEY *key2, #
1902# [const] unsigned char iv[16]); #
1903# If $key2 is NULL, then a "tweak chaining" mode is engaged, in which #
1904# input tweak value is assumed to be encrypted already, and last tweak #
1905# value, one suitable for consecutive call on same chunk of data, is #
1906# written back to original buffer. In addition, in "tweak chaining" #
1907# mode only complete input blocks are processed. #
1908
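The '# next tweak value' sequences throughout the code below (vsrab/vaddubm/vsldoi/vand against the 0x870101..01 constant, then vxor) implement the standard XTS tweak update, multiplication by x in GF(2^128) with the 0x87 reduction byte, on a whole vector at once. A scalar C sketch of the same update, for orientation only and not part of this file:

	/* Advance a 16-byte XTS tweak: multiply by x in GF(2^128),
	 * little-endian byte order as in IEEE P1619. */
	static void xts_next_tweak(unsigned char t[16])
	{
		unsigned char carry = 0;
		int i;

		for (i = 0; i < 16; i++) {
			unsigned char msb = t[i] >> 7;

			t[i] = (unsigned char)((t[i] << 1) | carry);
			carry = msb;
		}
		if (carry)
			t[0] ^= 0x87;	/* x^128 + x^7 + x^2 + x + 1 */
	}
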
1909my ($inp,$out,$len,$key1,$key2,$ivp,$rounds,$idx) = map("r$_",(3..10));
1910my ($rndkey0,$rndkey1,$inout) = map("v$_",(0..2));
1911my ($output,$inptail,$inpperm,$leperm,$keyperm) = map("v$_",(3..7));
1912my ($tweak,$seven,$eighty7,$tmp,$tweak1) = map("v$_",(8..12));
1913my $taillen = $key2;
1914
1915 ($inp,$idx) = ($idx,$inp); # reassign
1916
1917$code.=<<___;
1918.globl .${prefix}_xts_encrypt
1919 mr $inp,r3 # reassign
1920 li r3,-1
1921 ${UCMP}i $len,16
1922 bltlr-
1923
1924 lis r0,0xfff0
1925 mfspr r12,256 # save vrsave
1926 li r11,0
1927 mtspr 256,r0
1928
1929 vspltisb $seven,0x07 # 0x070707..07
1930 le?lvsl $leperm,r11,r11
1931 le?vspltisb $tmp,0x0f
1932 le?vxor $leperm,$leperm,$seven
1933
1934 li $idx,15
1935 lvx $tweak,0,$ivp # load [unaligned] iv
1936 lvsl $inpperm,0,$ivp
1937 lvx $inptail,$idx,$ivp
1938 le?vxor $inpperm,$inpperm,$tmp
1939 vperm $tweak,$tweak,$inptail,$inpperm
1940
1941 neg r11,$inp
1942 lvsr $inpperm,0,r11 # prepare for unaligned load
1943 lvx $inout,0,$inp
1944 addi $inp,$inp,15 # 15 is not typo
1945 le?vxor $inpperm,$inpperm,$tmp
1946
1947 ${UCMP}i $key2,0 # key2==NULL?
1948 beq Lxts_enc_no_key2
1949
1950 ?lvsl $keyperm,0,$key2 # prepare for unaligned key
1951 lwz $rounds,240($key2)
1952 srwi $rounds,$rounds,1
1953 subi $rounds,$rounds,1
1954 li $idx,16
1955
1956 lvx $rndkey0,0,$key2
1957 lvx $rndkey1,$idx,$key2
1958 addi $idx,$idx,16
1959 ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
1960 vxor $tweak,$tweak,$rndkey0
1961 lvx $rndkey0,$idx,$key2
1962 addi $idx,$idx,16
1963 mtctr $rounds
1964
1965Ltweak_xts_enc:
1966 ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
1967 vcipher $tweak,$tweak,$rndkey1
1968 lvx $rndkey1,$idx,$key2
1969 addi $idx,$idx,16
1970 ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
1971 vcipher $tweak,$tweak,$rndkey0
1972 lvx $rndkey0,$idx,$key2
1973 addi $idx,$idx,16
1974 bdnz Ltweak_xts_enc
1975
1976 ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
1977 vcipher $tweak,$tweak,$rndkey1
1978 lvx $rndkey1,$idx,$key2
1979 ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
1980 vcipherlast $tweak,$tweak,$rndkey0
1981
1982 li $ivp,0 # don't chain the tweak
1983 b Lxts_enc
1984
1985Lxts_enc_no_key2:
1986 li $idx,-16
1987 and $len,$len,$idx # in "tweak chaining"
1988 # mode only complete
1989 # blocks are processed
1990Lxts_enc:
1991 lvx $inptail,0,$inp
1992 addi $inp,$inp,16
1993
1994 ?lvsl $keyperm,0,$key1 # prepare for unaligned key
1995 lwz $rounds,240($key1)
1996 srwi $rounds,$rounds,1
1997 subi $rounds,$rounds,1
1998 li $idx,16
1999
2000 vslb $eighty7,$seven,$seven # 0x808080..80
2001 vor $eighty7,$eighty7,$seven # 0x878787..87
2002 vspltisb $tmp,1 # 0x010101..01
2003 vsldoi $eighty7,$eighty7,$tmp,15 # 0x870101..01
2004
2005 ${UCMP}i $len,96
2006 bge _aesp8_xts_encrypt6x
2007
2008 andi. $taillen,$len,15
2009 subic r0,$len,32
2010 subi $taillen,$taillen,16
2011 subfe r0,r0,r0
2012 and r0,r0,$taillen
2013 add $inp,$inp,r0
2014
2015 lvx $rndkey0,0,$key1
2016 lvx $rndkey1,$idx,$key1
2017 addi $idx,$idx,16
2018 vperm $inout,$inout,$inptail,$inpperm
2019 ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
2020 vxor $inout,$inout,$tweak
2021 vxor $inout,$inout,$rndkey0
2022 lvx $rndkey0,$idx,$key1
2023 addi $idx,$idx,16
2024 mtctr $rounds
2025 b Loop_xts_enc
2026
2027.align 5
2028Loop_xts_enc:
2029 ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
2030 vcipher $inout,$inout,$rndkey1
2031 lvx $rndkey1,$idx,$key1
2032 addi $idx,$idx,16
2033 ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
2034 vcipher $inout,$inout,$rndkey0
2035 lvx $rndkey0,$idx,$key1
2036 addi $idx,$idx,16
2037 bdnz Loop_xts_enc
2038
2039 ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
2040 vcipher $inout,$inout,$rndkey1
2041 lvx $rndkey1,$idx,$key1
2042 li $idx,16
2043 ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
2044 vxor $rndkey0,$rndkey0,$tweak
2045 vcipherlast $output,$inout,$rndkey0
2046
2047 le?vperm $tmp,$output,$output,$leperm
2048 be?nop
2049 le?stvx_u $tmp,0,$out
2050 be?stvx_u $output,0,$out
2051 addi $out,$out,16
2052
2053 subic. $len,$len,16
2054 beq Lxts_enc_done
2055
2056 vmr $inout,$inptail
2057 lvx $inptail,0,$inp
2058 addi $inp,$inp,16
2059 lvx $rndkey0,0,$key1
2060 lvx $rndkey1,$idx,$key1
2061 addi $idx,$idx,16
2062
2063 subic r0,$len,32
2064 subfe r0,r0,r0
2065 and r0,r0,$taillen
2066 add $inp,$inp,r0
2067
2068 vsrab $tmp,$tweak,$seven # next tweak value
2069 vaddubm $tweak,$tweak,$tweak
2070 vsldoi $tmp,$tmp,$tmp,15
2071 vand $tmp,$tmp,$eighty7
2072 vxor $tweak,$tweak,$tmp
2073
2074 vperm $inout,$inout,$inptail,$inpperm
2075 ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
2076 vxor $inout,$inout,$tweak
2077 vxor $output,$output,$rndkey0 # just in case $len<16
2078 vxor $inout,$inout,$rndkey0
2079 lvx $rndkey0,$idx,$key1
2080 addi $idx,$idx,16
2081
2082 mtctr $rounds
2083 ${UCMP}i $len,16
2084 bge Loop_xts_enc
2085
2086 vxor $output,$output,$tweak
2087 lvsr $inpperm,0,$len # $inpperm is no longer needed
2088 vxor $inptail,$inptail,$inptail # $inptail is no longer needed
2089 vspltisb $tmp,-1
2090 vperm $inptail,$inptail,$tmp,$inpperm
2091 vsel $inout,$inout,$output,$inptail
2092
2093 subi r11,$out,17
2094 subi $out,$out,16
2095 mtctr $len
2096 li $len,16
2097Loop_xts_enc_steal:
2098 lbzu r0,1(r11)
2099 stb r0,16(r11)
2100 bdnz Loop_xts_enc_steal
2101
2102 mtctr $rounds
2103 b Loop_xts_enc # one more time...
2104
2105Lxts_enc_done:
2106 ${UCMP}i $ivp,0
2107 beq Lxts_enc_ret
2108
2109 vsrab $tmp,$tweak,$seven # next tweak value
2110 vaddubm $tweak,$tweak,$tweak
2111 vsldoi $tmp,$tmp,$tmp,15
2112 vand $tmp,$tmp,$eighty7
2113 vxor $tweak,$tweak,$tmp
2114
2115 le?vperm $tweak,$tweak,$tweak,$leperm
2116 stvx_u $tweak,0,$ivp
2117
2118Lxts_enc_ret:
2119 mtspr 256,r12 # restore vrsave
2120 li r3,0
2121 blr
2122 .long 0
2123 .byte 0,12,0x04,0,0x80,6,6,0
2124 .long 0
2125.size .${prefix}_xts_encrypt,.-.${prefix}_xts_encrypt
2126
2127.globl .${prefix}_xts_decrypt
2128 mr $inp,r3 # reassign
2129 li r3,-1
2130 ${UCMP}i $len,16
2131 bltlr-
2132
2133 lis r0,0xfff8
2134 mfspr r12,256 # save vrsave
2135 li r11,0
2136 mtspr 256,r0
2137
2138 andi. r0,$len,15
2139 neg r0,r0
2140 andi. r0,r0,16
2141 sub $len,$len,r0
2142
2143 vspltisb $seven,0x07 # 0x070707..07
2144 le?lvsl $leperm,r11,r11
2145 le?vspltisb $tmp,0x0f
2146 le?vxor $leperm,$leperm,$seven
2147
2148 li $idx,15
2149 lvx $tweak,0,$ivp # load [unaligned] iv
2150 lvsl $inpperm,0,$ivp
2151 lvx $inptail,$idx,$ivp
2152 le?vxor $inpperm,$inpperm,$tmp
2153 vperm $tweak,$tweak,$inptail,$inpperm
2154
2155 neg r11,$inp
2156 lvsr $inpperm,0,r11 # prepare for unaligned load
2157 lvx $inout,0,$inp
2158 addi $inp,$inp,15 # 15 is not typo
2159 le?vxor $inpperm,$inpperm,$tmp
2160
2161 ${UCMP}i $key2,0 # key2==NULL?
2162 beq Lxts_dec_no_key2
2163
2164 ?lvsl $keyperm,0,$key2 # prepare for unaligned key
2165 lwz $rounds,240($key2)
2166 srwi $rounds,$rounds,1
2167 subi $rounds,$rounds,1
2168 li $idx,16
2169
2170 lvx $rndkey0,0,$key2
2171 lvx $rndkey1,$idx,$key2
2172 addi $idx,$idx,16
2173 ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
2174 vxor $tweak,$tweak,$rndkey0
2175 lvx $rndkey0,$idx,$key2
2176 addi $idx,$idx,16
2177 mtctr $rounds
2178
2179Ltweak_xts_dec:
2180 ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
2181 vcipher $tweak,$tweak,$rndkey1
2182 lvx $rndkey1,$idx,$key2
2183 addi $idx,$idx,16
2184 ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
2185 vcipher $tweak,$tweak,$rndkey0
2186 lvx $rndkey0,$idx,$key2
2187 addi $idx,$idx,16
2188 bdnz Ltweak_xts_dec
2189
2190 ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
2191 vcipher $tweak,$tweak,$rndkey1
2192 lvx $rndkey1,$idx,$key2
2193 ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
2194 vcipherlast $tweak,$tweak,$rndkey0
2195
2196 li $ivp,0 # don't chain the tweak
2197 b Lxts_dec
2198
2199Lxts_dec_no_key2:
2200 neg $idx,$len
2201 andi. $idx,$idx,15
2202 add $len,$len,$idx # in "tweak chaining"
2203 # mode only complete
2204 # blocks are processed
2205Lxts_dec:
2206 lvx $inptail,0,$inp
2207 addi $inp,$inp,16
2208
2209 ?lvsl $keyperm,0,$key1 # prepare for unaligned key
2210 lwz $rounds,240($key1)
2211 srwi $rounds,$rounds,1
2212 subi $rounds,$rounds,1
2213 li $idx,16
2214
2215 vslb $eighty7,$seven,$seven # 0x808080..80
2216 vor $eighty7,$eighty7,$seven # 0x878787..87
2217 vspltisb $tmp,1 # 0x010101..01
2218 vsldoi $eighty7,$eighty7,$tmp,15 # 0x870101..01
2219
2220 ${UCMP}i $len,96
2221 bge _aesp8_xts_decrypt6x
2222
2223 lvx $rndkey0,0,$key1
2224 lvx $rndkey1,$idx,$key1
2225 addi $idx,$idx,16
2226 vperm $inout,$inout,$inptail,$inpperm
2227 ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
2228 vxor $inout,$inout,$tweak
2229 vxor $inout,$inout,$rndkey0
2230 lvx $rndkey0,$idx,$key1
2231 addi $idx,$idx,16
2232 mtctr $rounds
2233
2234 ${UCMP}i $len,16
2235 blt Ltail_xts_dec
2236 be?b Loop_xts_dec
2237
2238.align 5
2239Loop_xts_dec:
2240 ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
2241 vncipher $inout,$inout,$rndkey1
2242 lvx $rndkey1,$idx,$key1
2243 addi $idx,$idx,16
2244 ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
2245 vncipher $inout,$inout,$rndkey0
2246 lvx $rndkey0,$idx,$key1
2247 addi $idx,$idx,16
2248 bdnz Loop_xts_dec
2249
2250 ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
2251 vncipher $inout,$inout,$rndkey1
2252 lvx $rndkey1,$idx,$key1
2253 li $idx,16
2254 ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
2255 vxor $rndkey0,$rndkey0,$tweak
2256 vncipherlast $output,$inout,$rndkey0
2257
2258 le?vperm $tmp,$output,$output,$leperm
2259 be?nop
2260 le?stvx_u $tmp,0,$out
2261 be?stvx_u $output,0,$out
2262 addi $out,$out,16
2263
2264 subic. $len,$len,16
2265 beq Lxts_dec_done
2266
2267 vmr $inout,$inptail
2268 lvx $inptail,0,$inp
2269 addi $inp,$inp,16
2270 lvx $rndkey0,0,$key1
2271 lvx $rndkey1,$idx,$key1
2272 addi $idx,$idx,16
2273
2274 vsrab $tmp,$tweak,$seven # next tweak value
2275 vaddubm $tweak,$tweak,$tweak
2276 vsldoi $tmp,$tmp,$tmp,15
2277 vand $tmp,$tmp,$eighty7
2278 vxor $tweak,$tweak,$tmp
2279
2280 vperm $inout,$inout,$inptail,$inpperm
2281 ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
2282 vxor $inout,$inout,$tweak
2283 vxor $inout,$inout,$rndkey0
2284 lvx $rndkey0,$idx,$key1
2285 addi $idx,$idx,16
2286
2287 mtctr $rounds
2288 ${UCMP}i $len,16
2289 bge Loop_xts_dec
2290
2291Ltail_xts_dec:
2292 vsrab $tmp,$tweak,$seven # next tweak value
2293 vaddubm $tweak1,$tweak,$tweak
2294 vsldoi $tmp,$tmp,$tmp,15
2295 vand $tmp,$tmp,$eighty7
2296 vxor $tweak1,$tweak1,$tmp
2297
2298 subi $inp,$inp,16
2299 add $inp,$inp,$len
2300
2301 vxor $inout,$inout,$tweak # :-(
2302 vxor $inout,$inout,$tweak1 # :-)
2303
2304Loop_xts_dec_short:
2305 ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
2306 vncipher $inout,$inout,$rndkey1
2307 lvx $rndkey1,$idx,$key1
2308 addi $idx,$idx,16
2309 ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
2310 vncipher $inout,$inout,$rndkey0
2311 lvx $rndkey0,$idx,$key1
2312 addi $idx,$idx,16
2313 bdnz Loop_xts_dec_short
2314
2315 ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
2316 vncipher $inout,$inout,$rndkey1
2317 lvx $rndkey1,$idx,$key1
2318 li $idx,16
2319 ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
2320 vxor $rndkey0,$rndkey0,$tweak1
2321 vncipherlast $output,$inout,$rndkey0
2322
2323 le?vperm $tmp,$output,$output,$leperm
2324 be?nop
2325 le?stvx_u $tmp,0,$out
2326 be?stvx_u $output,0,$out
2327
2328 vmr $inout,$inptail
2329 lvx $inptail,0,$inp
2330 #addi $inp,$inp,16
2331 lvx $rndkey0,0,$key1
2332 lvx $rndkey1,$idx,$key1
2333 addi $idx,$idx,16
2334 vperm $inout,$inout,$inptail,$inpperm
2335 ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
2336
2337 lvsr $inpperm,0,$len # $inpperm is no longer needed
2338 vxor $inptail,$inptail,$inptail # $inptail is no longer needed
2339 vspltisb $tmp,-1
2340 vperm $inptail,$inptail,$tmp,$inpperm
2341 vsel $inout,$inout,$output,$inptail
2342
2343 vxor $rndkey0,$rndkey0,$tweak
2344 vxor $inout,$inout,$rndkey0
2345 lvx $rndkey0,$idx,$key1
2346 addi $idx,$idx,16
2347
2348 subi r11,$out,1
2349 mtctr $len
2350 li $len,16
2351Loop_xts_dec_steal:
2352 lbzu r0,1(r11)
2353 stb r0,16(r11)
2354 bdnz Loop_xts_dec_steal
2355
2356 mtctr $rounds
2357 b Loop_xts_dec # one more time...
2358
2359Lxts_dec_done:
2360 ${UCMP}i $ivp,0
2361 beq Lxts_dec_ret
2362
2363 vsrab $tmp,$tweak,$seven # next tweak value
2364 vaddubm $tweak,$tweak,$tweak
2365 vsldoi $tmp,$tmp,$tmp,15
2366 vand $tmp,$tmp,$eighty7
2367 vxor $tweak,$tweak,$tmp
2368
2369 le?vperm $tweak,$tweak,$tweak,$leperm
2370 stvx_u $tweak,0,$ivp
2371
2372Lxts_dec_ret:
2373 mtspr 256,r12 # restore vrsave
2374 li r3,0
2375 blr
2376 .long 0
2377 .byte 0,12,0x04,0,0x80,6,6,0
2378 .long 0
2379.size .${prefix}_xts_decrypt,.-.${prefix}_xts_decrypt
2380___
2381#########################################################################
2382{{ # Optimized XTS procedures #
2383my $key_=$key2;
2384my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,3,26..31));
2385 $x00=0 if ($flavour =~ /osx/);
2386my ($in0, $in1, $in2, $in3, $in4, $in5 )=map("v$_",(0..5));
2387my ($out0, $out1, $out2, $out3, $out4, $out5)=map("v$_",(7,12..16));
2388my ($twk0, $twk1, $twk2, $twk3, $twk4, $twk5)=map("v$_",(17..22));
2389my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys
2390 # v26-v31 last 6 round keys
2391my ($keyperm)=($out0); # aliases with "caller", redundant assignment
2392my $taillen=$x70;
2393
2394$code.=<<___;
2395.align 5
2396_aesp8_xts_encrypt6x:
2397 $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
2398 mflr r11
2399 li r7,`$FRAME+8*16+15`
2400 li r3,`$FRAME+8*16+31`
2401 $PUSH r11,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp)
2402 stvx v20,r7,$sp # ABI says so
2403 addi r7,r7,32
2404 stvx v21,r3,$sp
2405 addi r3,r3,32
2406 stvx v22,r7,$sp
2407 addi r7,r7,32
2408 stvx v23,r3,$sp
2409 addi r3,r3,32
2410 stvx v24,r7,$sp
2411 addi r7,r7,32
2412 stvx v25,r3,$sp
2413 addi r3,r3,32
2414 stvx v26,r7,$sp
2415 addi r7,r7,32
2416 stvx v27,r3,$sp
2417 addi r3,r3,32
2418 stvx v28,r7,$sp
2419 addi r7,r7,32
2420 stvx v29,r3,$sp
2421 addi r3,r3,32
2422 stvx v30,r7,$sp
2423 stvx v31,r3,$sp
2424 li r0,-1
2425 stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
2426 li $x10,0x10
2427 $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp)
2428 li $x20,0x20
2429 $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp)
2430 li $x30,0x30
2431 $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp)
2432 li $x40,0x40
2433 $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp)
2434 li $x50,0x50
2435 $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp)
2436 li $x60,0x60
2437 $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp)
2438 li $x70,0x70
2439 mtspr 256,r0
2440
2441 subi $rounds,$rounds,3 # -4 in total
2442
2443 lvx $rndkey0,$x00,$key1 # load key schedule
2444 lvx v30,$x10,$key1
2445 addi $key1,$key1,0x20
2446 lvx v31,$x00,$key1
2447 ?vperm $rndkey0,$rndkey0,v30,$keyperm
2448 addi $key_,$sp,$FRAME+15
2449 mtctr $rounds
2450
2451Load_xts_enc_key:
2452 ?vperm v24,v30,v31,$keyperm
2453 lvx v30,$x10,$key1
2454 addi $key1,$key1,0x20
2455 stvx v24,$x00,$key_ # off-load round[1]
2456 ?vperm v25,v31,v30,$keyperm
2457 lvx v31,$x00,$key1
2458 stvx v25,$x10,$key_ # off-load round[2]
2459 addi $key_,$key_,0x20
2460 bdnz Load_xts_enc_key
2461
2462 lvx v26,$x10,$key1
2463 ?vperm v24,v30,v31,$keyperm
2464 lvx v27,$x20,$key1
2465 stvx v24,$x00,$key_ # off-load round[3]
2466 ?vperm v25,v31,v26,$keyperm
2467 lvx v28,$x30,$key1
2468 stvx v25,$x10,$key_ # off-load round[4]
2469 addi $key_,$sp,$FRAME+15 # rewind $key_
2470 ?vperm v26,v26,v27,$keyperm
2471 lvx v29,$x40,$key1
2472 ?vperm v27,v27,v28,$keyperm
2473 lvx v30,$x50,$key1
2474 ?vperm v28,v28,v29,$keyperm
2475 lvx v31,$x60,$key1
2476 ?vperm v29,v29,v30,$keyperm
2477 lvx $twk5,$x70,$key1 # borrow $twk5
2478 ?vperm v30,v30,v31,$keyperm
2479 lvx v24,$x00,$key_ # pre-load round[1]
2480 ?vperm v31,v31,$twk5,$keyperm
2481 lvx v25,$x10,$key_ # pre-load round[2]
2482
2483 vperm $in0,$inout,$inptail,$inpperm
2484 subi $inp,$inp,31 # undo "caller"
2485 vxor $twk0,$tweak,$rndkey0
2486 vsrab $tmp,$tweak,$seven # next tweak value
2487 vaddubm $tweak,$tweak,$tweak
2488 vsldoi $tmp,$tmp,$tmp,15
2489 vand $tmp,$tmp,$eighty7
2490 vxor $out0,$in0,$twk0
2491 vxor $tweak,$tweak,$tmp
2492
2493 lvx_u $in1,$x10,$inp
2494 vxor $twk1,$tweak,$rndkey0
2495 vsrab $tmp,$tweak,$seven # next tweak value
2496 vaddubm $tweak,$tweak,$tweak
2497 vsldoi $tmp,$tmp,$tmp,15
2498 le?vperm $in1,$in1,$in1,$leperm
2499 vand $tmp,$tmp,$eighty7
2500 vxor $out1,$in1,$twk1
2501 vxor $tweak,$tweak,$tmp
2502
2503 lvx_u $in2,$x20,$inp
2504 andi. $taillen,$len,15
2505 vxor $twk2,$tweak,$rndkey0
2506 vsrab $tmp,$tweak,$seven # next tweak value
2507 vaddubm $tweak,$tweak,$tweak
2508 vsldoi $tmp,$tmp,$tmp,15
2509 le?vperm $in2,$in2,$in2,$leperm
2510 vand $tmp,$tmp,$eighty7
2511 vxor $out2,$in2,$twk2
2512 vxor $tweak,$tweak,$tmp
2513
2514 lvx_u $in3,$x30,$inp
2515 sub $len,$len,$taillen
2516 vxor $twk3,$tweak,$rndkey0
2517 vsrab $tmp,$tweak,$seven # next tweak value
2518 vaddubm $tweak,$tweak,$tweak
2519 vsldoi $tmp,$tmp,$tmp,15
2520 le?vperm $in3,$in3,$in3,$leperm
2521 vand $tmp,$tmp,$eighty7
2522 vxor $out3,$in3,$twk3
2523 vxor $tweak,$tweak,$tmp
2524
2525 lvx_u $in4,$x40,$inp
2526 subi $len,$len,0x60
2527 vxor $twk4,$tweak,$rndkey0
2528 vsrab $tmp,$tweak,$seven # next tweak value
2529 vaddubm $tweak,$tweak,$tweak
2530 vsldoi $tmp,$tmp,$tmp,15
2531 le?vperm $in4,$in4,$in4,$leperm
2532 vand $tmp,$tmp,$eighty7
2533 vxor $out4,$in4,$twk4
2534 vxor $tweak,$tweak,$tmp
2535
2536 lvx_u $in5,$x50,$inp
2537 addi $inp,$inp,0x60
2538 vxor $twk5,$tweak,$rndkey0
2539 vsrab $tmp,$tweak,$seven # next tweak value
2540 vaddubm $tweak,$tweak,$tweak
2541 vsldoi $tmp,$tmp,$tmp,15
2542 le?vperm $in5,$in5,$in5,$leperm
2543 vand $tmp,$tmp,$eighty7
2544 vxor $out5,$in5,$twk5
2545 vxor $tweak,$tweak,$tmp
2546
2547 vxor v31,v31,$rndkey0
2548 mtctr $rounds
2549 b Loop_xts_enc6x
2550
2551.align 5
2552Loop_xts_enc6x:
2553 vcipher $out0,$out0,v24
2554 vcipher $out1,$out1,v24
2555 vcipher $out2,$out2,v24
2556 vcipher $out3,$out3,v24
2557 vcipher $out4,$out4,v24
2558 vcipher $out5,$out5,v24
2559 lvx v24,$x20,$key_ # round[3]
2560 addi $key_,$key_,0x20
2561
2562 vcipher $out0,$out0,v25
2563 vcipher $out1,$out1,v25
2564 vcipher $out2,$out2,v25
2565 vcipher $out3,$out3,v25
2566 vcipher $out4,$out4,v25
2567 vcipher $out5,$out5,v25
2568 lvx v25,$x10,$key_ # round[4]
2569 bdnz Loop_xts_enc6x
2570
2571 subic $len,$len,96 # $len-=96
2572 vxor $in0,$twk0,v31 # xor with last round key
2573 vcipher $out0,$out0,v24
2574 vcipher $out1,$out1,v24
2575 vsrab $tmp,$tweak,$seven # next tweak value
2576 vxor $twk0,$tweak,$rndkey0
2577 vaddubm $tweak,$tweak,$tweak
2578 vcipher $out2,$out2,v24
2579 vcipher $out3,$out3,v24
2580 vsldoi $tmp,$tmp,$tmp,15
2581 vcipher $out4,$out4,v24
2582 vcipher $out5,$out5,v24
2583
2584 subfe. r0,r0,r0 # borrow?-1:0
2585 vand $tmp,$tmp,$eighty7
2586 vcipher $out0,$out0,v25
2587 vcipher $out1,$out1,v25
2588 vxor $tweak,$tweak,$tmp
2589 vcipher $out2,$out2,v25
2590 vcipher $out3,$out3,v25
2591 vxor $in1,$twk1,v31
2592 vsrab $tmp,$tweak,$seven # next tweak value
2593 vxor $twk1,$tweak,$rndkey0
2594 vcipher $out4,$out4,v25
2595 vcipher $out5,$out5,v25
2596
2597 and r0,r0,$len
2598 vaddubm $tweak,$tweak,$tweak
2599 vsldoi $tmp,$tmp,$tmp,15
2600 vcipher $out0,$out0,v26
2601 vcipher $out1,$out1,v26
2602 vand $tmp,$tmp,$eighty7
2603 vcipher $out2,$out2,v26
2604 vcipher $out3,$out3,v26
2605 vxor $tweak,$tweak,$tmp
2606 vcipher $out4,$out4,v26
2607 vcipher $out5,$out5,v26
2608
2609 add $inp,$inp,r0 # $inp is adjusted in such
2610 # way that at exit from the
2611 # loop inX-in5 are loaded
2612 # with last "words"
2613 vxor $in2,$twk2,v31
2614 vsrab $tmp,$tweak,$seven # next tweak value
2615 vxor $twk2,$tweak,$rndkey0
2616 vaddubm $tweak,$tweak,$tweak
2617 vcipher $out0,$out0,v27
2618 vcipher $out1,$out1,v27
2619 vsldoi $tmp,$tmp,$tmp,15
2620 vcipher $out2,$out2,v27
2621 vcipher $out3,$out3,v27
2622 vand $tmp,$tmp,$eighty7
2623 vcipher $out4,$out4,v27
2624 vcipher $out5,$out5,v27
2625
2626 addi $key_,$sp,$FRAME+15 # rewind $key_
2627 vxor $tweak,$tweak,$tmp
2628 vcipher $out0,$out0,v28
2629 vcipher $out1,$out1,v28
2630 vxor $in3,$twk3,v31
2631 vsrab $tmp,$tweak,$seven # next tweak value
2632 vxor $twk3,$tweak,$rndkey0
2633 vcipher $out2,$out2,v28
2634 vcipher $out3,$out3,v28
2635 vaddubm $tweak,$tweak,$tweak
2636 vsldoi $tmp,$tmp,$tmp,15
2637 vcipher $out4,$out4,v28
2638 vcipher $out5,$out5,v28
2639 lvx v24,$x00,$key_ # re-pre-load round[1]
2640 vand $tmp,$tmp,$eighty7
2641
2642 vcipher $out0,$out0,v29
2643 vcipher $out1,$out1,v29
2644 vxor $tweak,$tweak,$tmp
2645 vcipher $out2,$out2,v29
2646 vcipher $out3,$out3,v29
2647 vxor $in4,$twk4,v31
2648 vsrab $tmp,$tweak,$seven # next tweak value
2649 vxor $twk4,$tweak,$rndkey0
2650 vcipher $out4,$out4,v29
2651 vcipher $out5,$out5,v29
2652 lvx v25,$x10,$key_ # re-pre-load round[2]
2653 vaddubm $tweak,$tweak,$tweak
2654 vsldoi $tmp,$tmp,$tmp,15
2655
2656 vcipher $out0,$out0,v30
2657 vcipher $out1,$out1,v30
2658 vand $tmp,$tmp,$eighty7
2659 vcipher $out2,$out2,v30
2660 vcipher $out3,$out3,v30
2661 vxor $tweak,$tweak,$tmp
2662 vcipher $out4,$out4,v30
2663 vcipher $out5,$out5,v30
2664 vxor $in5,$twk5,v31
2665 vsrab $tmp,$tweak,$seven # next tweak value
2666 vxor $twk5,$tweak,$rndkey0
2667
2668 vcipherlast $out0,$out0,$in0
2669 lvx_u $in0,$x00,$inp # load next input block
2670 vaddubm $tweak,$tweak,$tweak
2671 vsldoi $tmp,$tmp,$tmp,15
2672 vcipherlast $out1,$out1,$in1
2673 lvx_u $in1,$x10,$inp
2674 vcipherlast $out2,$out2,$in2
2675 le?vperm $in0,$in0,$in0,$leperm
2676 lvx_u $in2,$x20,$inp
2677 vand $tmp,$tmp,$eighty7
2678 vcipherlast $out3,$out3,$in3
2679 le?vperm $in1,$in1,$in1,$leperm
2680 lvx_u $in3,$x30,$inp
2681 vcipherlast $out4,$out4,$in4
2682 le?vperm $in2,$in2,$in2,$leperm
2683 lvx_u $in4,$x40,$inp
2684 vxor $tweak,$tweak,$tmp
2685 vcipherlast $tmp,$out5,$in5 # last block might be needed
2686 # in stealing mode
2687 le?vperm $in3,$in3,$in3,$leperm
2688 lvx_u $in5,$x50,$inp
2689 addi $inp,$inp,0x60
2690 le?vperm $in4,$in4,$in4,$leperm
2691 le?vperm $in5,$in5,$in5,$leperm
2692
2693 le?vperm $out0,$out0,$out0,$leperm
2694 le?vperm $out1,$out1,$out1,$leperm
2695 stvx_u $out0,$x00,$out # store output
2696 vxor $out0,$in0,$twk0
2697 le?vperm $out2,$out2,$out2,$leperm
2698 stvx_u $out1,$x10,$out
2699 vxor $out1,$in1,$twk1
2700 le?vperm $out3,$out3,$out3,$leperm
2701 stvx_u $out2,$x20,$out
2702 vxor $out2,$in2,$twk2
2703 le?vperm $out4,$out4,$out4,$leperm
2704 stvx_u $out3,$x30,$out
2705 vxor $out3,$in3,$twk3
2706 le?vperm $out5,$tmp,$tmp,$leperm
2707 stvx_u $out4,$x40,$out
2708 vxor $out4,$in4,$twk4
2709 le?stvx_u $out5,$x50,$out
2710 be?stvx_u $tmp, $x50,$out
2711 vxor $out5,$in5,$twk5
2712 addi $out,$out,0x60
2713
2714 mtctr $rounds
2715 beq Loop_xts_enc6x # did $len-=96 borrow?
2716
2717 addic. $len,$len,0x60
2718 beq Lxts_enc6x_zero
2719 cmpwi $len,0x20
2720 blt Lxts_enc6x_one
2721 nop
2722 beq Lxts_enc6x_two
2723 cmpwi $len,0x40
2724 blt Lxts_enc6x_three
2725 nop
2726 beq Lxts_enc6x_four
2727
2728Lxts_enc6x_five:
2729 vxor $out0,$in1,$twk0
2730 vxor $out1,$in2,$twk1
2731 vxor $out2,$in3,$twk2
2732 vxor $out3,$in4,$twk3
2733 vxor $out4,$in5,$twk4
2734
2735 bl _aesp8_xts_enc5x
2736
2737 le?vperm $out0,$out0,$out0,$leperm
2738 vmr $twk0,$twk5 # unused tweak
2739 le?vperm $out1,$out1,$out1,$leperm
2740 stvx_u $out0,$x00,$out # store output
2741 le?vperm $out2,$out2,$out2,$leperm
2742 stvx_u $out1,$x10,$out
2743 le?vperm $out3,$out3,$out3,$leperm
2744 stvx_u $out2,$x20,$out
2745 vxor $tmp,$out4,$twk5 # last block prep for stealing
2746 le?vperm $out4,$out4,$out4,$leperm
2747 stvx_u $out3,$x30,$out
2748 stvx_u $out4,$x40,$out
2749 addi $out,$out,0x50
2750 bne Lxts_enc6x_steal
2751 b Lxts_enc6x_done
2752
2753.align 4
2754Lxts_enc6x_four:
2755 vxor $out0,$in2,$twk0
2756 vxor $out1,$in3,$twk1
2757 vxor $out2,$in4,$twk2
2758 vxor $out3,$in5,$twk3
2759 vxor $out4,$out4,$out4
2760
2761 bl _aesp8_xts_enc5x
2762
2763 le?vperm $out0,$out0,$out0,$leperm
2764 vmr $twk0,$twk4 # unused tweak
2765 le?vperm $out1,$out1,$out1,$leperm
2766 stvx_u $out0,$x00,$out # store output
2767 le?vperm $out2,$out2,$out2,$leperm
2768 stvx_u $out1,$x10,$out
2769 vxor $tmp,$out3,$twk4 # last block prep for stealing
2770 le?vperm $out3,$out3,$out3,$leperm
2771 stvx_u $out2,$x20,$out
2772 stvx_u $out3,$x30,$out
2773 addi $out,$out,0x40
2774 bne Lxts_enc6x_steal
2775 b Lxts_enc6x_done
2776
2777.align 4
2778Lxts_enc6x_three:
2779 vxor $out0,$in3,$twk0
2780 vxor $out1,$in4,$twk1
2781 vxor $out2,$in5,$twk2
2782 vxor $out3,$out3,$out3
2783 vxor $out4,$out4,$out4
2784
2785 bl _aesp8_xts_enc5x
2786
2787 le?vperm $out0,$out0,$out0,$leperm
2788 vmr $twk0,$twk3 # unused tweak
2789 le?vperm $out1,$out1,$out1,$leperm
2790 stvx_u $out0,$x00,$out # store output
2791 vxor $tmp,$out2,$twk3 # last block prep for stealing
2792 le?vperm $out2,$out2,$out2,$leperm
2793 stvx_u $out1,$x10,$out
2794 stvx_u $out2,$x20,$out
2795 addi $out,$out,0x30
2796 bne Lxts_enc6x_steal
2797 b Lxts_enc6x_done
2798
2799.align 4
2800Lxts_enc6x_two:
2801 vxor $out0,$in4,$twk0
2802 vxor $out1,$in5,$twk1
2803 vxor $out2,$out2,$out2
2804 vxor $out3,$out3,$out3
2805 vxor $out4,$out4,$out4
2806
2807 bl _aesp8_xts_enc5x
2808
2809 le?vperm $out0,$out0,$out0,$leperm
2810 vmr $twk0,$twk2 # unused tweak
2811 vxor $tmp,$out1,$twk2 # last block prep for stealing
2812 le?vperm $out1,$out1,$out1,$leperm
2813 stvx_u $out0,$x00,$out # store output
2814 stvx_u $out1,$x10,$out
2815 addi $out,$out,0x20
2816 bne Lxts_enc6x_steal
2817 b Lxts_enc6x_done
2818
2819.align 4
2820Lxts_enc6x_one:
2821 vxor $out0,$in5,$twk0
2822 nop
2823Loop_xts_enc1x:
2824 vcipher $out0,$out0,v24
2825 lvx v24,$x20,$key_ # round[3]
2826 addi $key_,$key_,0x20
2827
2828 vcipher $out0,$out0,v25
2829 lvx v25,$x10,$key_ # round[4]
2830 bdnz Loop_xts_enc1x
2831
2832 add $inp,$inp,$taillen
2833 cmpwi $taillen,0
2834 vcipher $out0,$out0,v24
2835
2836 subi $inp,$inp,16
2837 vcipher $out0,$out0,v25
2838
2839 lvsr $inpperm,0,$taillen
2840 vcipher $out0,$out0,v26
2841
2842 lvx_u $in0,0,$inp
2843 vcipher $out0,$out0,v27
2844
2845 addi $key_,$sp,$FRAME+15 # rewind $key_
2846 vcipher $out0,$out0,v28
2847 lvx v24,$x00,$key_ # re-pre-load round[1]
2848
2849 vcipher $out0,$out0,v29
2850 lvx v25,$x10,$key_ # re-pre-load round[2]
2851 vxor $twk0,$twk0,v31
2852
2853 le?vperm $in0,$in0,$in0,$leperm
2854 vcipher $out0,$out0,v30
2855
2856 vperm $in0,$in0,$in0,$inpperm
2857 vcipherlast $out0,$out0,$twk0
2858
2859 vmr $twk0,$twk1 # unused tweak
2860 vxor $tmp,$out0,$twk1 # last block prep for stealing
2861 le?vperm $out0,$out0,$out0,$leperm
2862 stvx_u $out0,$x00,$out # store output
2863 addi $out,$out,0x10
2864 bne Lxts_enc6x_steal
2865 b Lxts_enc6x_done
2866
2867.align 4
2868Lxts_enc6x_zero:
2869 cmpwi $taillen,0
2870 beq Lxts_enc6x_done
2871
2872 add $inp,$inp,$taillen
2873 subi $inp,$inp,16
2874 lvx_u $in0,0,$inp
2875 lvsr $inpperm,0,$taillen # $in5 is no more
2876 le?vperm $in0,$in0,$in0,$leperm
2877 vperm $in0,$in0,$in0,$inpperm
2878 vxor $tmp,$tmp,$twk0
2879Lxts_enc6x_steal:
2880 vxor $in0,$in0,$twk0
2881 vxor $out0,$out0,$out0
2882 vspltisb $out1,-1
2883 vperm $out0,$out0,$out1,$inpperm
2884 vsel $out0,$in0,$tmp,$out0 # $tmp is last block, remember?
2885
2886 subi r30,$out,17
2887 subi $out,$out,16
2888 mtctr $taillen
2889Loop_xts_enc6x_steal:
2890 lbzu r0,1(r30)
2891 stb r0,16(r30)
2892 bdnz Loop_xts_enc6x_steal
2893
2894 li $taillen,0
2895 mtctr $rounds
2896 b Loop_xts_enc1x # one more time...
2897
2898.align 4
2899Lxts_enc6x_done:
2900 ${UCMP}i $ivp,0
2901 beq Lxts_enc6x_ret
2902
2903 vxor $tweak,$twk0,$rndkey0
2904 le?vperm $tweak,$tweak,$tweak,$leperm
2905 stvx_u $tweak,0,$ivp
2906
2907Lxts_enc6x_ret:
2908 mtlr r11
2909 li r10,`$FRAME+15`
2910 li r11,`$FRAME+31`
2911 stvx $seven,r10,$sp # wipe copies of round keys
2912 addi r10,r10,32
2913 stvx $seven,r11,$sp
2914 addi r11,r11,32
2915 stvx $seven,r10,$sp
2916 addi r10,r10,32
2917 stvx $seven,r11,$sp
2918 addi r11,r11,32
2919 stvx $seven,r10,$sp
2920 addi r10,r10,32
2921 stvx $seven,r11,$sp
2922 addi r11,r11,32
2923 stvx $seven,r10,$sp
2924 addi r10,r10,32
2925 stvx $seven,r11,$sp
2926 addi r11,r11,32
2927
2928 mtspr 256,$vrsave
2929 lvx v20,r10,$sp # ABI says so
2930 addi r10,r10,32
2931 lvx v21,r11,$sp
2932 addi r11,r11,32
2933 lvx v22,r10,$sp
2934 addi r10,r10,32
2935 lvx v23,r11,$sp
2936 addi r11,r11,32
2937 lvx v24,r10,$sp
2938 addi r10,r10,32
2939 lvx v25,r11,$sp
2940 addi r11,r11,32
2941 lvx v26,r10,$sp
2942 addi r10,r10,32
2943 lvx v27,r11,$sp
2944 addi r11,r11,32
2945 lvx v28,r10,$sp
2946 addi r10,r10,32
2947 lvx v29,r11,$sp
2948 addi r11,r11,32
2949 lvx v30,r10,$sp
2950 lvx v31,r11,$sp
2951 $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp)
2952 $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp)
2953 $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp)
2954 $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp)
2955 $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp)
2956 $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp)
2957 addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T`
2958 blr
2959 .long 0
2960 .byte 0,12,0x04,1,0x80,6,6,0
2961 .long 0
2962
2963.align 5
2964_aesp8_xts_enc5x:
2965 vcipher $out0,$out0,v24
2966 vcipher $out1,$out1,v24
2967 vcipher $out2,$out2,v24
2968 vcipher $out3,$out3,v24
2969 vcipher $out4,$out4,v24
2970 lvx v24,$x20,$key_ # round[3]
2971 addi $key_,$key_,0x20
2972
2973 vcipher $out0,$out0,v25
2974 vcipher $out1,$out1,v25
2975 vcipher $out2,$out2,v25
2976 vcipher $out3,$out3,v25
2977 vcipher $out4,$out4,v25
2978 lvx v25,$x10,$key_ # round[4]
2979 bdnz _aesp8_xts_enc5x
2980
2981 add $inp,$inp,$taillen
2982 cmpwi $taillen,0
2983 vcipher $out0,$out0,v24
2984 vcipher $out1,$out1,v24
2985 vcipher $out2,$out2,v24
2986 vcipher $out3,$out3,v24
2987 vcipher $out4,$out4,v24
2988
2989 subi $inp,$inp,16
2990 vcipher $out0,$out0,v25
2991 vcipher $out1,$out1,v25
2992 vcipher $out2,$out2,v25
2993 vcipher $out3,$out3,v25
2994 vcipher $out4,$out4,v25
2995 vxor $twk0,$twk0,v31
2996
2997 vcipher $out0,$out0,v26
2998 lvsr $inpperm,r0,$taillen # $in5 is no more
2999 vcipher $out1,$out1,v26
3000 vcipher $out2,$out2,v26
3001 vcipher $out3,$out3,v26
3002 vcipher $out4,$out4,v26
3003 vxor $in1,$twk1,v31
3004
3005 vcipher $out0,$out0,v27
3006 lvx_u $in0,0,$inp
3007 vcipher $out1,$out1,v27
3008 vcipher $out2,$out2,v27
3009 vcipher $out3,$out3,v27
3010 vcipher $out4,$out4,v27
3011 vxor $in2,$twk2,v31
3012
3013 addi $key_,$sp,$FRAME+15 # rewind $key_
3014 vcipher $out0,$out0,v28
3015 vcipher $out1,$out1,v28
3016 vcipher $out2,$out2,v28
3017 vcipher $out3,$out3,v28
3018 vcipher $out4,$out4,v28
3019 lvx v24,$x00,$key_ # re-pre-load round[1]
3020 vxor $in3,$twk3,v31
3021
3022 vcipher $out0,$out0,v29
3023 le?vperm $in0,$in0,$in0,$leperm
3024 vcipher $out1,$out1,v29
3025 vcipher $out2,$out2,v29
3026 vcipher $out3,$out3,v29
3027 vcipher $out4,$out4,v29
3028 lvx v25,$x10,$key_ # re-pre-load round[2]
3029 vxor $in4,$twk4,v31
3030
3031 vcipher $out0,$out0,v30
3032 vperm $in0,$in0,$in0,$inpperm
3033 vcipher $out1,$out1,v30
3034 vcipher $out2,$out2,v30
3035 vcipher $out3,$out3,v30
3036 vcipher $out4,$out4,v30
3037
3038 vcipherlast $out0,$out0,$twk0
3039 vcipherlast $out1,$out1,$in1
3040 vcipherlast $out2,$out2,$in2
3041 vcipherlast $out3,$out3,$in3
3042 vcipherlast $out4,$out4,$in4
3043 blr
3044 .long 0
3045 .byte 0,12,0x14,0,0,0,0,0
3046
3047.align 5
3048_aesp8_xts_decrypt6x:
3049 $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
3050 mflr r11
3051 li r7,`$FRAME+8*16+15`
3052 li r3,`$FRAME+8*16+31`
3053 $PUSH r11,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp)
3054 stvx v20,r7,$sp # ABI says so
3055 addi r7,r7,32
3056 stvx v21,r3,$sp
3057 addi r3,r3,32
3058 stvx v22,r7,$sp
3059 addi r7,r7,32
3060 stvx v23,r3,$sp
3061 addi r3,r3,32
3062 stvx v24,r7,$sp
3063 addi r7,r7,32
3064 stvx v25,r3,$sp
3065 addi r3,r3,32
3066 stvx v26,r7,$sp
3067 addi r7,r7,32
3068 stvx v27,r3,$sp
3069 addi r3,r3,32
3070 stvx v28,r7,$sp
3071 addi r7,r7,32
3072 stvx v29,r3,$sp
3073 addi r3,r3,32
3074 stvx v30,r7,$sp
3075 stvx v31,r3,$sp
3076 li r0,-1
3077 stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
3078 li $x10,0x10
3079 $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp)
3080 li $x20,0x20
3081 $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp)
3082 li $x30,0x30
3083 $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp)
3084 li $x40,0x40
3085 $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp)
3086 li $x50,0x50
3087 $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp)
3088 li $x60,0x60
3089 $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp)
3090 li $x70,0x70
3091 mtspr 256,r0
3092
3093 subi $rounds,$rounds,3 # -4 in total
3094
3095 lvx $rndkey0,$x00,$key1 # load key schedule
3096 lvx v30,$x10,$key1
3097 addi $key1,$key1,0x20
3098 lvx v31,$x00,$key1
3099 ?vperm $rndkey0,$rndkey0,v30,$keyperm
3100 addi $key_,$sp,$FRAME+15
3101 mtctr $rounds
3102
3103Load_xts_dec_key:
3104 ?vperm v24,v30,v31,$keyperm
3105 lvx v30,$x10,$key1
3106 addi $key1,$key1,0x20
3107 stvx v24,$x00,$key_ # off-load round[1]
3108 ?vperm v25,v31,v30,$keyperm
3109 lvx v31,$x00,$key1
3110 stvx v25,$x10,$key_ # off-load round[2]
3111 addi $key_,$key_,0x20
3112 bdnz Load_xts_dec_key
3113
3114 lvx v26,$x10,$key1
3115 ?vperm v24,v30,v31,$keyperm
3116 lvx v27,$x20,$key1
3117 stvx v24,$x00,$key_ # off-load round[3]
3118 ?vperm v25,v31,v26,$keyperm
3119 lvx v28,$x30,$key1
3120 stvx v25,$x10,$key_ # off-load round[4]
3121 addi $key_,$sp,$FRAME+15 # rewind $key_
3122 ?vperm v26,v26,v27,$keyperm
3123 lvx v29,$x40,$key1
3124 ?vperm v27,v27,v28,$keyperm
3125 lvx v30,$x50,$key1
3126 ?vperm v28,v28,v29,$keyperm
3127 lvx v31,$x60,$key1
3128 ?vperm v29,v29,v30,$keyperm
3129 lvx $twk5,$x70,$key1 # borrow $twk5
3130 ?vperm v30,v30,v31,$keyperm
3131 lvx v24,$x00,$key_ # pre-load round[1]
3132 ?vperm v31,v31,$twk5,$keyperm
3133 lvx v25,$x10,$key_ # pre-load round[2]
3134
3135 vperm $in0,$inout,$inptail,$inpperm
3136 subi $inp,$inp,31 # undo "caller"
3137 vxor $twk0,$tweak,$rndkey0
3138 vsrab $tmp,$tweak,$seven # next tweak value
3139 vaddubm $tweak,$tweak,$tweak
3140 vsldoi $tmp,$tmp,$tmp,15
3141 vand $tmp,$tmp,$eighty7
3142 vxor $out0,$in0,$twk0
3143 vxor $tweak,$tweak,$tmp
3144
3145 lvx_u $in1,$x10,$inp
3146 vxor $twk1,$tweak,$rndkey0
3147 vsrab $tmp,$tweak,$seven # next tweak value
3148 vaddubm $tweak,$tweak,$tweak
3149 vsldoi $tmp,$tmp,$tmp,15
3150 le?vperm $in1,$in1,$in1,$leperm
3151 vand $tmp,$tmp,$eighty7
3152 vxor $out1,$in1,$twk1
3153 vxor $tweak,$tweak,$tmp
3154
3155 lvx_u $in2,$x20,$inp
3156 andi. $taillen,$len,15
3157 vxor $twk2,$tweak,$rndkey0
3158 vsrab $tmp,$tweak,$seven # next tweak value
3159 vaddubm $tweak,$tweak,$tweak
3160 vsldoi $tmp,$tmp,$tmp,15
3161 le?vperm $in2,$in2,$in2,$leperm
3162 vand $tmp,$tmp,$eighty7
3163 vxor $out2,$in2,$twk2
3164 vxor $tweak,$tweak,$tmp
3165
3166 lvx_u $in3,$x30,$inp
3167 sub $len,$len,$taillen
3168 vxor $twk3,$tweak,$rndkey0
3169 vsrab $tmp,$tweak,$seven # next tweak value
3170 vaddubm $tweak,$tweak,$tweak
3171 vsldoi $tmp,$tmp,$tmp,15
3172 le?vperm $in3,$in3,$in3,$leperm
3173 vand $tmp,$tmp,$eighty7
3174 vxor $out3,$in3,$twk3
3175 vxor $tweak,$tweak,$tmp
3176
3177 lvx_u $in4,$x40,$inp
3178 subi $len,$len,0x60
3179 vxor $twk4,$tweak,$rndkey0
3180 vsrab $tmp,$tweak,$seven # next tweak value
3181 vaddubm $tweak,$tweak,$tweak
3182 vsldoi $tmp,$tmp,$tmp,15
3183 le?vperm $in4,$in4,$in4,$leperm
3184 vand $tmp,$tmp,$eighty7
3185 vxor $out4,$in4,$twk4
3186 vxor $tweak,$tweak,$tmp
3187
3188 lvx_u $in5,$x50,$inp
3189 addi $inp,$inp,0x60
3190 vxor $twk5,$tweak,$rndkey0
3191 vsrab $tmp,$tweak,$seven # next tweak value
3192 vaddubm $tweak,$tweak,$tweak
3193 vsldoi $tmp,$tmp,$tmp,15
3194 le?vperm $in5,$in5,$in5,$leperm
3195 vand $tmp,$tmp,$eighty7
3196 vxor $out5,$in5,$twk5
3197 vxor $tweak,$tweak,$tmp
3198
3199 vxor v31,v31,$rndkey0
3200 mtctr $rounds
3201 b Loop_xts_dec6x
3202
3203.align 5
3204Loop_xts_dec6x:
3205 vncipher $out0,$out0,v24
3206 vncipher $out1,$out1,v24
3207 vncipher $out2,$out2,v24
3208 vncipher $out3,$out3,v24
3209 vncipher $out4,$out4,v24
3210 vncipher $out5,$out5,v24
3211 lvx v24,$x20,$key_ # round[3]
3212 addi $key_,$key_,0x20
3213
3214 vncipher $out0,$out0,v25
3215 vncipher $out1,$out1,v25
3216 vncipher $out2,$out2,v25
3217 vncipher $out3,$out3,v25
3218 vncipher $out4,$out4,v25
3219 vncipher $out5,$out5,v25
3220 lvx v25,$x10,$key_ # round[4]
3221 bdnz Loop_xts_dec6x
3222
3223 subic $len,$len,96 # $len-=96
3224 vxor $in0,$twk0,v31 # xor with last round key
3225 vncipher $out0,$out0,v24
3226 vncipher $out1,$out1,v24
3227 vsrab $tmp,$tweak,$seven # next tweak value
3228 vxor $twk0,$tweak,$rndkey0
3229 vaddubm $tweak,$tweak,$tweak
3230 vncipher $out2,$out2,v24
3231 vncipher $out3,$out3,v24
3232 vsldoi $tmp,$tmp,$tmp,15
3233 vncipher $out4,$out4,v24
3234 vncipher $out5,$out5,v24
3235
3236 subfe. r0,r0,r0 # borrow?-1:0
3237 vand $tmp,$tmp,$eighty7
3238 vncipher $out0,$out0,v25
3239 vncipher $out1,$out1,v25
3240 vxor $tweak,$tweak,$tmp
3241 vncipher $out2,$out2,v25
3242 vncipher $out3,$out3,v25
3243 vxor $in1,$twk1,v31
3244 vsrab $tmp,$tweak,$seven # next tweak value
3245 vxor $twk1,$tweak,$rndkey0
3246 vncipher $out4,$out4,v25
3247 vncipher $out5,$out5,v25
3248
3249 and r0,r0,$len
3250 vaddubm $tweak,$tweak,$tweak
3251 vsldoi $tmp,$tmp,$tmp,15
3252 vncipher $out0,$out0,v26
3253 vncipher $out1,$out1,v26
3254 vand $tmp,$tmp,$eighty7
3255 vncipher $out2,$out2,v26
3256 vncipher $out3,$out3,v26
3257 vxor $tweak,$tweak,$tmp
3258 vncipher $out4,$out4,v26
3259 vncipher $out5,$out5,v26
3260
3261 add $inp,$inp,r0 # $inp is adjusted in such
3262 # way that at exit from the
3263 # loop inX-in5 are loaded
3264 # with last "words"
3265 vxor $in2,$twk2,v31
3266 vsrab $tmp,$tweak,$seven # next tweak value
3267 vxor $twk2,$tweak,$rndkey0
3268 vaddubm $tweak,$tweak,$tweak
3269 vncipher $out0,$out0,v27
3270 vncipher $out1,$out1,v27
3271 vsldoi $tmp,$tmp,$tmp,15
3272 vncipher $out2,$out2,v27
3273 vncipher $out3,$out3,v27
3274 vand $tmp,$tmp,$eighty7
3275 vncipher $out4,$out4,v27
3276 vncipher $out5,$out5,v27
3277
3278 addi $key_,$sp,$FRAME+15 # rewind $key_
3279 vxor $tweak,$tweak,$tmp
3280 vncipher $out0,$out0,v28
3281 vncipher $out1,$out1,v28
3282 vxor $in3,$twk3,v31
3283 vsrab $tmp,$tweak,$seven # next tweak value
3284 vxor $twk3,$tweak,$rndkey0
3285 vncipher $out2,$out2,v28
3286 vncipher $out3,$out3,v28
3287 vaddubm $tweak,$tweak,$tweak
3288 vsldoi $tmp,$tmp,$tmp,15
3289 vncipher $out4,$out4,v28
3290 vncipher $out5,$out5,v28
3291 lvx v24,$x00,$key_ # re-pre-load round[1]
3292 vand $tmp,$tmp,$eighty7
3293
3294 vncipher $out0,$out0,v29
3295 vncipher $out1,$out1,v29
3296 vxor $tweak,$tweak,$tmp
3297 vncipher $out2,$out2,v29
3298 vncipher $out3,$out3,v29
3299 vxor $in4,$twk4,v31
3300 vsrab $tmp,$tweak,$seven # next tweak value
3301 vxor $twk4,$tweak,$rndkey0
3302 vncipher $out4,$out4,v29
3303 vncipher $out5,$out5,v29
3304 lvx v25,$x10,$key_ # re-pre-load round[2]
3305 vaddubm $tweak,$tweak,$tweak
3306 vsldoi $tmp,$tmp,$tmp,15
3307
3308 vncipher $out0,$out0,v30
3309 vncipher $out1,$out1,v30
3310 vand $tmp,$tmp,$eighty7
3311 vncipher $out2,$out2,v30
3312 vncipher $out3,$out3,v30
3313 vxor $tweak,$tweak,$tmp
3314 vncipher $out4,$out4,v30
3315 vncipher $out5,$out5,v30
3316 vxor $in5,$twk5,v31
3317 vsrab $tmp,$tweak,$seven # next tweak value
3318 vxor $twk5,$tweak,$rndkey0
3319
3320 vncipherlast $out0,$out0,$in0
3321 lvx_u $in0,$x00,$inp # load next input block
3322 vaddubm $tweak,$tweak,$tweak
3323 vsldoi $tmp,$tmp,$tmp,15
3324 vncipherlast $out1,$out1,$in1
3325 lvx_u $in1,$x10,$inp
3326 vncipherlast $out2,$out2,$in2
3327 le?vperm $in0,$in0,$in0,$leperm
3328 lvx_u $in2,$x20,$inp
3329 vand $tmp,$tmp,$eighty7
3330 vncipherlast $out3,$out3,$in3
3331 le?vperm $in1,$in1,$in1,$leperm
3332 lvx_u $in3,$x30,$inp
3333 vncipherlast $out4,$out4,$in4
3334 le?vperm $in2,$in2,$in2,$leperm
3335 lvx_u $in4,$x40,$inp
3336 vxor $tweak,$tweak,$tmp
3337 vncipherlast $out5,$out5,$in5
3338 le?vperm $in3,$in3,$in3,$leperm
3339 lvx_u $in5,$x50,$inp
3340 addi $inp,$inp,0x60
3341 le?vperm $in4,$in4,$in4,$leperm
3342 le?vperm $in5,$in5,$in5,$leperm
3343
3344 le?vperm $out0,$out0,$out0,$leperm
3345 le?vperm $out1,$out1,$out1,$leperm
3346 stvx_u $out0,$x00,$out # store output
3347 vxor $out0,$in0,$twk0
3348 le?vperm $out2,$out2,$out2,$leperm
3349 stvx_u $out1,$x10,$out
3350 vxor $out1,$in1,$twk1
3351 le?vperm $out3,$out3,$out3,$leperm
3352 stvx_u $out2,$x20,$out
3353 vxor $out2,$in2,$twk2
3354 le?vperm $out4,$out4,$out4,$leperm
3355 stvx_u $out3,$x30,$out
3356 vxor $out3,$in3,$twk3
3357 le?vperm $out5,$out5,$out5,$leperm
3358 stvx_u $out4,$x40,$out
3359 vxor $out4,$in4,$twk4
3360 stvx_u $out5,$x50,$out
3361 vxor $out5,$in5,$twk5
3362 addi $out,$out,0x60
3363
3364 mtctr $rounds
3365 beq Loop_xts_dec6x # did $len-=96 borrow?
3366
3367 addic. $len,$len,0x60
3368 beq Lxts_dec6x_zero
3369 cmpwi $len,0x20
3370 blt Lxts_dec6x_one
3371 nop
3372 beq Lxts_dec6x_two
3373 cmpwi $len,0x40
3374 blt Lxts_dec6x_three
3375 nop
3376 beq Lxts_dec6x_four
3377
3378Lxts_dec6x_five:
3379 vxor $out0,$in1,$twk0
3380 vxor $out1,$in2,$twk1
3381 vxor $out2,$in3,$twk2
3382 vxor $out3,$in4,$twk3
3383 vxor $out4,$in5,$twk4
3384
3385 bl _aesp8_xts_dec5x
3386
3387 le?vperm $out0,$out0,$out0,$leperm
3388 vmr $twk0,$twk5 # unused tweak
3389 vxor $twk1,$tweak,$rndkey0
3390 le?vperm $out1,$out1,$out1,$leperm
3391 stvx_u $out0,$x00,$out # store output
3392 vxor $out0,$in0,$twk1
3393 le?vperm $out2,$out2,$out2,$leperm
3394 stvx_u $out1,$x10,$out
3395 le?vperm $out3,$out3,$out3,$leperm
3396 stvx_u $out2,$x20,$out
3397 le?vperm $out4,$out4,$out4,$leperm
3398 stvx_u $out3,$x30,$out
3399 stvx_u $out4,$x40,$out
3400 addi $out,$out,0x50
3401 bne Lxts_dec6x_steal
3402 b Lxts_dec6x_done
3403
3404.align 4
3405Lxts_dec6x_four:
3406 vxor $out0,$in2,$twk0
3407 vxor $out1,$in3,$twk1
3408 vxor $out2,$in4,$twk2
3409 vxor $out3,$in5,$twk3
3410 vxor $out4,$out4,$out4
3411
3412 bl _aesp8_xts_dec5x
3413
3414 le?vperm $out0,$out0,$out0,$leperm
3415 vmr $twk0,$twk4 # unused tweak
3416 vmr $twk1,$twk5
3417 le?vperm $out1,$out1,$out1,$leperm
3418 stvx_u $out0,$x00,$out # store output
3419 vxor $out0,$in0,$twk5
3420 le?vperm $out2,$out2,$out2,$leperm
3421 stvx_u $out1,$x10,$out
3422 le?vperm $out3,$out3,$out3,$leperm
3423 stvx_u $out2,$x20,$out
3424 stvx_u $out3,$x30,$out
3425 addi $out,$out,0x40
3426 bne Lxts_dec6x_steal
3427 b Lxts_dec6x_done
3428
3429.align 4
3430Lxts_dec6x_three:
3431 vxor $out0,$in3,$twk0
3432 vxor $out1,$in4,$twk1
3433 vxor $out2,$in5,$twk2
3434 vxor $out3,$out3,$out3
3435 vxor $out4,$out4,$out4
3436
3437 bl _aesp8_xts_dec5x
3438
3439 le?vperm $out0,$out0,$out0,$leperm
3440 vmr $twk0,$twk3 # unused tweak
3441 vmr $twk1,$twk4
3442 le?vperm $out1,$out1,$out1,$leperm
3443 stvx_u $out0,$x00,$out # store output
3444 vxor $out0,$in0,$twk4
3445 le?vperm $out2,$out2,$out2,$leperm
3446 stvx_u $out1,$x10,$out
3447 stvx_u $out2,$x20,$out
3448 addi $out,$out,0x30
3449 bne Lxts_dec6x_steal
3450 b Lxts_dec6x_done
3451
3452.align 4
3453Lxts_dec6x_two:
3454 vxor $out0,$in4,$twk0
3455 vxor $out1,$in5,$twk1
3456 vxor $out2,$out2,$out2
3457 vxor $out3,$out3,$out3
3458 vxor $out4,$out4,$out4
3459
3460 bl _aesp8_xts_dec5x
3461
3462 le?vperm $out0,$out0,$out0,$leperm
3463 vmr $twk0,$twk2 # unused tweak
3464 vmr $twk1,$twk3
3465 le?vperm $out1,$out1,$out1,$leperm
3466 stvx_u $out0,$x00,$out # store output
3467 vxor $out0,$in0,$twk3
3468 stvx_u $out1,$x10,$out
3469 addi $out,$out,0x20
3470 bne Lxts_dec6x_steal
3471 b Lxts_dec6x_done
3472
3473.align 4
3474Lxts_dec6x_one:
3475 vxor $out0,$in5,$twk0
3476 nop
3477Loop_xts_dec1x:
3478 vncipher $out0,$out0,v24
3479 lvx v24,$x20,$key_ # round[3]
3480 addi $key_,$key_,0x20
3481
3482 vncipher $out0,$out0,v25
3483 lvx v25,$x10,$key_ # round[4]
3484 bdnz Loop_xts_dec1x
3485
3486 subi r0,$taillen,1
3487 vncipher $out0,$out0,v24
3488
3489 andi. r0,r0,16
3490 cmpwi $taillen,0
3491 vncipher $out0,$out0,v25
3492
3493 sub $inp,$inp,r0
3494 vncipher $out0,$out0,v26
3495
3496 lvx_u $in0,0,$inp
3497 vncipher $out0,$out0,v27
3498
3499 addi $key_,$sp,$FRAME+15 # rewind $key_
3500 vncipher $out0,$out0,v28
3501 lvx v24,$x00,$key_ # re-pre-load round[1]
3502
3503 vncipher $out0,$out0,v29
3504 lvx v25,$x10,$key_ # re-pre-load round[2]
3505 vxor $twk0,$twk0,v31
3506
3507 le?vperm $in0,$in0,$in0,$leperm
3508 vncipher $out0,$out0,v30
3509
3510 mtctr $rounds
3511 vncipherlast $out0,$out0,$twk0
3512
3513 vmr $twk0,$twk1 # unused tweak
3514 vmr $twk1,$twk2
3515 le?vperm $out0,$out0,$out0,$leperm
3516 stvx_u $out0,$x00,$out # store output
3517 addi $out,$out,0x10
3518 vxor $out0,$in0,$twk2
3519 bne Lxts_dec6x_steal
3520 b Lxts_dec6x_done
3521
3522.align 4
3523Lxts_dec6x_zero:
3524 cmpwi $taillen,0
3525 beq Lxts_dec6x_done
3526
3527 lvx_u $in0,0,$inp
3528 le?vperm $in0,$in0,$in0,$leperm
3529 vxor $out0,$in0,$twk1
3530Lxts_dec6x_steal:
3531 vncipher $out0,$out0,v24
3532 lvx v24,$x20,$key_ # round[3]
3533 addi $key_,$key_,0x20
3534
3535 vncipher $out0,$out0,v25
3536 lvx v25,$x10,$key_ # round[4]
3537 bdnz Lxts_dec6x_steal
3538
3539 add $inp,$inp,$taillen
3540 vncipher $out0,$out0,v24
3541
3542 cmpwi $taillen,0
3543 vncipher $out0,$out0,v25
3544
3545 lvx_u $in0,0,$inp
3546 vncipher $out0,$out0,v26
3547
3548 lvsr $inpperm,0,$taillen # $in5 is no more
3549 vncipher $out0,$out0,v27
3550
3551 addi $key_,$sp,$FRAME+15 # rewind $key_
3552 vncipher $out0,$out0,v28
3553 lvx v24,$x00,$key_ # re-pre-load round[1]
3554
3555 vncipher $out0,$out0,v29
3556 lvx v25,$x10,$key_ # re-pre-load round[2]
3557 vxor $twk1,$twk1,v31
3558
3559 le?vperm $in0,$in0,$in0,$leperm
3560 vncipher $out0,$out0,v30
3561
3562 vperm $in0,$in0,$in0,$inpperm
3563 vncipherlast $tmp,$out0,$twk1
3564
3565 le?vperm $out0,$tmp,$tmp,$leperm
3566 le?stvx_u $out0,0,$out
3567 be?stvx_u $tmp,0,$out
3568
3569 vxor $out0,$out0,$out0
3570 vspltisb $out1,-1
3571 vperm $out0,$out0,$out1,$inpperm
3572 vsel $out0,$in0,$tmp,$out0
3573 vxor $out0,$out0,$twk0
3574
3575 subi r30,$out,1
3576 mtctr $taillen
3577Loop_xts_dec6x_steal:
3578 lbzu r0,1(r30)
3579 stb r0,16(r30)
3580 bdnz Loop_xts_dec6x_steal
3581
3582 li $taillen,0
3583 mtctr $rounds
3584 b Loop_xts_dec1x # one more time...
3585
3586.align 4
3587Lxts_dec6x_done:
3588 ${UCMP}i $ivp,0
3589 beq Lxts_dec6x_ret
3590
3591 vxor $tweak,$twk0,$rndkey0
3592 le?vperm $tweak,$tweak,$tweak,$leperm
3593 stvx_u $tweak,0,$ivp
3594
3595Lxts_dec6x_ret:
3596 mtlr r11
3597 li r10,`$FRAME+15`
3598 li r11,`$FRAME+31`
3599 stvx $seven,r10,$sp # wipe copies of round keys
3600 addi r10,r10,32
3601 stvx $seven,r11,$sp
3602 addi r11,r11,32
3603 stvx $seven,r10,$sp
3604 addi r10,r10,32
3605 stvx $seven,r11,$sp
3606 addi r11,r11,32
3607 stvx $seven,r10,$sp
3608 addi r10,r10,32
3609 stvx $seven,r11,$sp
3610 addi r11,r11,32
3611 stvx $seven,r10,$sp
3612 addi r10,r10,32
3613 stvx $seven,r11,$sp
3614 addi r11,r11,32
3615
3616 mtspr 256,$vrsave
3617 lvx v20,r10,$sp # ABI says so
3618 addi r10,r10,32
3619 lvx v21,r11,$sp
3620 addi r11,r11,32
3621 lvx v22,r10,$sp
3622 addi r10,r10,32
3623 lvx v23,r11,$sp
3624 addi r11,r11,32
3625 lvx v24,r10,$sp
3626 addi r10,r10,32
3627 lvx v25,r11,$sp
3628 addi r11,r11,32
3629 lvx v26,r10,$sp
3630 addi r10,r10,32
3631 lvx v27,r11,$sp
3632 addi r11,r11,32
3633 lvx v28,r10,$sp
3634 addi r10,r10,32
3635 lvx v29,r11,$sp
3636 addi r11,r11,32
3637 lvx v30,r10,$sp
3638 lvx v31,r11,$sp
3639 $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp)
3640 $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp)
3641 $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp)
3642 $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp)
3643 $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp)
3644 $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp)
3645 addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T`
3646 blr
3647 .long 0
3648 .byte 0,12,0x04,1,0x80,6,6,0
3649 .long 0
3650
3651.align 5
3652_aesp8_xts_dec5x:
3653 vncipher $out0,$out0,v24
3654 vncipher $out1,$out1,v24
3655 vncipher $out2,$out2,v24
3656 vncipher $out3,$out3,v24
3657 vncipher $out4,$out4,v24
3658 lvx v24,$x20,$key_ # round[3]
3659 addi $key_,$key_,0x20
3660
3661 vncipher $out0,$out0,v25
3662 vncipher $out1,$out1,v25
3663 vncipher $out2,$out2,v25
3664 vncipher $out3,$out3,v25
3665 vncipher $out4,$out4,v25
3666 lvx v25,$x10,$key_ # round[4]
3667 bdnz _aesp8_xts_dec5x
3668
3669 subi r0,$taillen,1
3670 vncipher $out0,$out0,v24
3671 vncipher $out1,$out1,v24
3672 vncipher $out2,$out2,v24
3673 vncipher $out3,$out3,v24
3674 vncipher $out4,$out4,v24
3675
3676 andi. r0,r0,16
3677 cmpwi $taillen,0
3678 vncipher $out0,$out0,v25
3679 vncipher $out1,$out1,v25
3680 vncipher $out2,$out2,v25
3681 vncipher $out3,$out3,v25
3682 vncipher $out4,$out4,v25
3683 vxor $twk0,$twk0,v31
3684
3685 sub $inp,$inp,r0
3686 vncipher $out0,$out0,v26
3687 vncipher $out1,$out1,v26
3688 vncipher $out2,$out2,v26
3689 vncipher $out3,$out3,v26
3690 vncipher $out4,$out4,v26
3691 vxor $in1,$twk1,v31
3692
3693 vncipher $out0,$out0,v27
3694 lvx_u $in0,0,$inp
3695 vncipher $out1,$out1,v27
3696 vncipher $out2,$out2,v27
3697 vncipher $out3,$out3,v27
3698 vncipher $out4,$out4,v27
3699 vxor $in2,$twk2,v31
3700
3701 addi $key_,$sp,$FRAME+15 # rewind $key_
3702 vncipher $out0,$out0,v28
3703 vncipher $out1,$out1,v28
3704 vncipher $out2,$out2,v28
3705 vncipher $out3,$out3,v28
3706 vncipher $out4,$out4,v28
3707 lvx v24,$x00,$key_ # re-pre-load round[1]
3708 vxor $in3,$twk3,v31
3709
3710 vncipher $out0,$out0,v29
3711 le?vperm $in0,$in0,$in0,$leperm
3712 vncipher $out1,$out1,v29
3713 vncipher $out2,$out2,v29
3714 vncipher $out3,$out3,v29
3715 vncipher $out4,$out4,v29
3716 lvx v25,$x10,$key_ # re-pre-load round[2]
3717 vxor $in4,$twk4,v31
3718
3719 vncipher $out0,$out0,v30
3720 vncipher $out1,$out1,v30
3721 vncipher $out2,$out2,v30
3722 vncipher $out3,$out3,v30
3723 vncipher $out4,$out4,v30
3724
3725 vncipherlast $out0,$out0,$twk0
3726 vncipherlast $out1,$out1,$in1
3727 vncipherlast $out2,$out2,$in2
3728 vncipherlast $out3,$out3,$in3
3729 vncipherlast $out4,$out4,$in4
3730 mtctr $rounds
3731 blr
3732 .long 0
3733 .byte 0,12,0x14,0,0,0,0,0
3734___
3735}} }}}
3736
 my $consts=1;
 foreach(split("\n",$code)) {
 	s/\`([^\`]*)\`/eval($1)/geo;
@@ -1898,7 +3757,7 @@ foreach(split("\n",$code)) {
 	if ($flavour =~ /le$/o) {
 	    SWITCH: for($conv) {
 		/\?inv/ && do { @bytes=map($_^0xf,@bytes); last; };
 		/\?rev/ && do { @bytes=reverse(@bytes); last; };
 	    }
 	}
 
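Throughout the XTS routines added above, the recurring five-instruction sequence vsrab/vaddubm/vsldoi/vand/vxor (commented "next tweak value") advances the sector tweak by multiplying it by x in GF(2^128) with the XTS reduction constant 0x87. The snippet below is an illustrative C model only, not part of the patch; the function name is made up.

/* Illustrative sketch only -- a plain C model of the "next tweak value"
 * step performed by the vsrab/vaddubm/vsldoi/vand/vxor sequence above.
 * The function name is hypothetical.
 */
#include <stdint.h>

static void xts_tweak_double(uint8_t t[16])
{
	/* The tweak is a 128-bit little-endian value: byte 0 is least significant. */
	uint8_t carry = t[15] >> 7;
	int i;

	for (i = 15; i > 0; i--)
		t[i] = (uint8_t)((t[i] << 1) | (t[i - 1] >> 7));
	t[0] = (uint8_t)(t[0] << 1);

	if (carry)
		t[0] ^= 0x87;	/* reduce: x^128 == x^7 + x^2 + x + 1 */
}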
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
index e163d5770438..f688c32fbcc7 100644
--- a/drivers/crypto/vmx/vmx.c
+++ b/drivers/crypto/vmx/vmx.c
@@ -31,10 +31,12 @@ extern struct shash_alg p8_ghash_alg;
 extern struct crypto_alg p8_aes_alg;
 extern struct crypto_alg p8_aes_cbc_alg;
 extern struct crypto_alg p8_aes_ctr_alg;
+extern struct crypto_alg p8_aes_xts_alg;
 static struct crypto_alg *algs[] = {
 	&p8_aes_alg,
 	&p8_aes_cbc_alg,
 	&p8_aes_ctr_alg,
+	&p8_aes_xts_alg,
 	NULL,
 };
 
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index e9e5ae521fa6..6e705971d637 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -260,11 +260,12 @@ config USB_CHAOSKEY
 	tristate "ChaosKey random number generator driver support"
 	depends on HW_RANDOM
 	help
-	  Say Y here if you want to connect an AltusMetrum ChaosKey to
-	  your computer's USB port. The ChaosKey is a hardware random
-	  number generator which hooks into the kernel entropy pool to
-	  ensure a large supply of entropy for /dev/random and
-	  /dev/urandom and also provides direct access via /dev/chaoskeyX
+	  Say Y here if you want to connect an AltusMetrum ChaosKey or
+	  Araneus Alea I to your computer's USB port. These devices
+	  are hardware random number generators which hook into the
+	  kernel entropy pool to ensure a large supply of entropy for
+	  /dev/random and /dev/urandom and also provides direct access
+	  via /dev/chaoskeyX
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called chaoskey.
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index 76350e4ee807..6ddd08a32777 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -55,9 +55,13 @@ MODULE_LICENSE("GPL");
 #define CHAOSKEY_VENDOR_ID	0x1d50	/* OpenMoko */
 #define CHAOSKEY_PRODUCT_ID	0x60c6	/* ChaosKey */
 
+#define ALEA_VENDOR_ID		0x12d8	/* Araneus */
+#define ALEA_PRODUCT_ID		0x0001	/* Alea I */
+
 #define CHAOSKEY_BUF_LEN	64	/* max size of USB full speed packet */
 
-#define NAK_TIMEOUT (HZ)	/* stall/wait timeout for device */
+#define NAK_TIMEOUT (HZ)	/* normal stall/wait timeout */
+#define ALEA_FIRST_TIMEOUT (HZ*3)	/* first stall/wait timeout for Alea */
 
 #ifdef CONFIG_USB_DYNAMIC_MINORS
 #define USB_CHAOSKEY_MINOR_BASE 0
@@ -69,6 +73,7 @@ MODULE_LICENSE("GPL");
 
 static const struct usb_device_id chaoskey_table[] = {
 	{ USB_DEVICE(CHAOSKEY_VENDOR_ID, CHAOSKEY_PRODUCT_ID) },
+	{ USB_DEVICE(ALEA_VENDOR_ID, ALEA_PRODUCT_ID) },
 	{ },
 };
 MODULE_DEVICE_TABLE(usb, chaoskey_table);
@@ -84,6 +89,7 @@ struct chaoskey {
 	int		open;		/* open count */
 	bool		present;	/* device not disconnected */
 	bool		reading;	/* ongoing IO */
+	bool		reads_started;	/* track first read for Alea */
 	int		size;		/* size of buf */
 	int		valid;		/* bytes of buf read */
 	int		used;		/* bytes of buf consumed */
@@ -188,6 +194,9 @@ static int chaoskey_probe(struct usb_interface *interface,
 
 	dev->in_ep = in_ep;
 
+	if (udev->descriptor.idVendor != ALEA_VENDOR_ID)
+		dev->reads_started = 1;
+
 	dev->size = size;
 	dev->present = 1;
 
@@ -357,6 +366,7 @@ static int _chaoskey_fill(struct chaoskey *dev)
 {
 	DEFINE_WAIT(wait);
 	int result;
+	bool started;
 
 	usb_dbg(dev->interface, "fill");
 
@@ -389,10 +399,17 @@ static int _chaoskey_fill(struct chaoskey *dev)
 		goto out;
 	}
 
+	/* The first read on the Alea takes a little under 2 seconds.
+	 * Reads after the first read take only a few microseconds
+	 * though. Presumably the entropy-generating circuit needs
+	 * time to ramp up. So, we wait longer on the first read.
+	 */
+	started = dev->reads_started;
+	dev->reads_started = true;
 	result = wait_event_interruptible_timeout(
 		dev->wait_q,
 		!dev->reading,
-		NAK_TIMEOUT);
+		(started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT) );
 
 	if (result < 0)
 		goto out;
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 002b81f6f2bc..7ef015eb3403 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -585,6 +585,16 @@ static inline u32 ioread32(const volatile void __iomem *addr)
 }
 #endif
 
+#ifdef CONFIG_64BIT
+#ifndef ioread64
+#define ioread64 ioread64
+static inline u64 ioread64(const volatile void __iomem *addr)
+{
+	return readq(addr);
+}
+#endif
+#endif /* CONFIG_64BIT */
+
 #ifndef iowrite8
 #define iowrite8 iowrite8
 static inline void iowrite8(u8 value, volatile void __iomem *addr)
@@ -609,11 +619,21 @@ static inline void iowrite32(u32 value, volatile void __iomem *addr)
 }
 #endif
 
+#ifdef CONFIG_64BIT
+#ifndef iowrite64
+#define iowrite64 iowrite64
+static inline void iowrite64(u64 value, volatile void __iomem *addr)
+{
+	writeq(value, addr);
+}
+#endif
+#endif /* CONFIG_64BIT */
+
 #ifndef ioread16be
 #define ioread16be ioread16be
 static inline u16 ioread16be(const volatile void __iomem *addr)
 {
-	return __be16_to_cpu(__raw_readw(addr));
+	return swab16(readw(addr));
 }
 #endif
 
@@ -621,15 +641,25 @@ static inline u16 ioread16be(const volatile void __iomem *addr)
 #define ioread32be ioread32be
 static inline u32 ioread32be(const volatile void __iomem *addr)
 {
-	return __be32_to_cpu(__raw_readl(addr));
+	return swab32(readl(addr));
+}
+#endif
+
+#ifdef CONFIG_64BIT
+#ifndef ioread64be
+#define ioread64be ioread64be
+static inline u64 ioread64be(const volatile void __iomem *addr)
+{
+	return swab64(readq(addr));
 }
 #endif
+#endif /* CONFIG_64BIT */
 
 #ifndef iowrite16be
 #define iowrite16be iowrite16be
 static inline void iowrite16be(u16 value, void volatile __iomem *addr)
 {
-	__raw_writew(__cpu_to_be16(value), addr);
+	writew(swab16(value), addr);
 }
 #endif
 
@@ -637,10 +667,20 @@ static inline void iowrite16be(u16 value, void volatile __iomem *addr)
 #define iowrite32be iowrite32be
 static inline void iowrite32be(u32 value, volatile void __iomem *addr)
 {
-	__raw_writel(__cpu_to_be32(value), addr);
+	writel(swab32(value), addr);
 }
 #endif
 
+#ifdef CONFIG_64BIT
+#ifndef iowrite64be
+#define iowrite64be iowrite64be
+static inline void iowrite64be(u64 value, volatile void __iomem *addr)
+{
+	writeq(swab64(value), addr);
+}
+#endif
+#endif /* CONFIG_64BIT */
+
 #ifndef ioread8_rep
 #define ioread8_rep ioread8_rep
 static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
@@ -668,6 +708,17 @@ static inline void ioread32_rep(const volatile void __iomem *addr,
 }
 #endif
 
+#ifdef CONFIG_64BIT
+#ifndef ioread64_rep
+#define ioread64_rep ioread64_rep
+static inline void ioread64_rep(const volatile void __iomem *addr,
+				void *buffer, unsigned int count)
+{
+	readsq(addr, buffer, count);
+}
+#endif
+#endif /* CONFIG_64BIT */
+
 #ifndef iowrite8_rep
 #define iowrite8_rep iowrite8_rep
 static inline void iowrite8_rep(volatile void __iomem *addr,
@@ -697,6 +748,18 @@ static inline void iowrite32_rep(volatile void __iomem *addr,
 	writesl(addr, buffer, count);
 }
 #endif
+
+#ifdef CONFIG_64BIT
+#ifndef iowrite64_rep
+#define iowrite64_rep iowrite64_rep
+static inline void iowrite64_rep(volatile void __iomem *addr,
+				 const void *buffer,
+				 unsigned int count)
+{
+	writesq(addr, buffer, count);
+}
+#endif
+#endif /* CONFIG_64BIT */
 #endif /* CONFIG_GENERIC_IOMAP */
 
 #ifdef __KERNEL__
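As a hedged usage sketch (not part of the patch), the new 64-bit accessors mirror the existing 16/32-bit ones; a driver on a 64-bit platform could read-modify-write a big-endian 64-bit register as below. The register offset and helper name are invented for illustration.

/* Hedged usage sketch only; DEV_CTRL64 and the helper are hypothetical. */
#include <linux/io.h>

#define DEV_CTRL64	0x18	/* offset of a 64-bit big-endian MMIO register */

static u64 dev_ctrl64_set_bits(void __iomem *base, u64 mask)
{
	u64 old = ioread64be(base + DEV_CTRL64);

	iowrite64be(old | mask, base + DEV_CTRL64);
	return old;
}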
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index d8f8622fa044..650fede33c25 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -30,12 +30,20 @@ extern unsigned int ioread16(void __iomem *);
 extern unsigned int ioread16be(void __iomem *);
 extern unsigned int ioread32(void __iomem *);
 extern unsigned int ioread32be(void __iomem *);
+#ifdef CONFIG_64BIT
+extern u64 ioread64(void __iomem *);
+extern u64 ioread64be(void __iomem *);
+#endif
 
 extern void iowrite8(u8, void __iomem *);
 extern void iowrite16(u16, void __iomem *);
 extern void iowrite16be(u16, void __iomem *);
 extern void iowrite32(u32, void __iomem *);
 extern void iowrite32be(u32, void __iomem *);
+#ifdef CONFIG_64BIT
+extern void iowrite64(u64, void __iomem *);
+extern void iowrite64be(u64, void __iomem *);
+#endif
 
 /*
  * "string" versions of the above. Note that they
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 75174f80a106..12f84327ca36 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -112,11 +112,12 @@ struct aead_request {
  *	    supplied during the decryption operation. This function is also
  *	    responsible for checking the authentication tag size for
  *	    validity.
- * @setkey: see struct ablkcipher_alg
- * @encrypt: see struct ablkcipher_alg
- * @decrypt: see struct ablkcipher_alg
- * @geniv: see struct ablkcipher_alg
- * @ivsize: see struct ablkcipher_alg
+ * @setkey: see struct skcipher_alg
+ * @encrypt: see struct skcipher_alg
+ * @decrypt: see struct skcipher_alg
+ * @geniv: see struct skcipher_alg
+ * @ivsize: see struct skcipher_alg
+ * @chunksize: see struct skcipher_alg
  * @init: Initialize the cryptographic transformation object. This function
  *	  is used to initialize the cryptographic transformation object.
  *	  This function is called only once at the instantiation time, right
@@ -145,6 +146,7 @@ struct aead_alg {
 
 	unsigned int ivsize;
 	unsigned int maxauthsize;
+	unsigned int chunksize;
 
 	struct crypto_alg base;
 };
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index eeafd21afb44..8637cdfe382a 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -244,6 +244,8 @@ static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
 }
 
 int crypto_attr_u32(struct rtattr *rta, u32 *num);
+int crypto_inst_setname(struct crypto_instance *inst, const char *name,
+			struct crypto_alg *alg);
 void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
 			     unsigned int head);
 struct crypto_instance *crypto_alloc_instance(const char *name,
@@ -440,8 +442,10 @@ static inline int crypto_memneq(const void *a, const void *b, size_t size)
 
 static inline void crypto_yield(u32 flags)
 {
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
 	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
 		cond_resched();
+#endif
 }
 
 #endif /* _CRYPTO_ALGAPI_H */
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index 1547f540c920..bc792d5a9e88 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -31,6 +31,7 @@ static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast(
 struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
 						  u32 type, u32 mask);
 struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
+bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm);
 void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);
 
 struct cryptd_ahash {
@@ -48,6 +49,8 @@ struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
 					u32 type, u32 mask);
 struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
 struct shash_desc *cryptd_shash_desc(struct ahash_request *req);
+/* Must be called without moving CPUs. */
+bool cryptd_ahash_queued(struct cryptd_ahash *tfm);
 void cryptd_free_ahash(struct cryptd_ahash *tfm);
 
 struct cryptd_aead {
@@ -64,6 +67,8 @@ struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
 				      u32 type, u32 mask);
 
 struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm);
+/* Must be called without moving CPUs. */
+bool cryptd_aead_queued(struct cryptd_aead *tfm);
 
 void cryptd_free_aead(struct cryptd_aead *tfm);
 
diff --git a/include/crypto/dh.h b/include/crypto/dh.h
new file mode 100644
index 000000000000..5102a8f282e6
--- /dev/null
+++ b/include/crypto/dh.h
@@ -0,0 +1,29 @@
+/*
+ * Diffie-Hellman secret to be used with kpp API along with helper functions
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_DH_
+#define _CRYPTO_DH_
+
+struct dh {
+	void *key;
+	void *p;
+	void *g;
+	unsigned int key_size;
+	unsigned int p_size;
+	unsigned int g_size;
+};
+
+int crypto_dh_key_len(const struct dh *params);
+int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params);
+int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params);
+
+#endif
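A minimal sketch of how a caller might use the helpers declared above to flatten DH parameters into the buffer format a kpp transform expects; the wrapper function, its signature, and the GFP flags are assumptions for illustration, not part of the patch.

/* Minimal sketch, assuming kernel context; pack_dh_params() is hypothetical. */
#include <crypto/dh.h>
#include <linux/slab.h>

static int pack_dh_params(void *key, unsigned int key_size,
			  void *p, unsigned int p_size,
			  void *g, unsigned int g_size,
			  char **bufp, unsigned int *lenp)
{
	struct dh params = {
		.key = key, .key_size = key_size,
		.p = p, .p_size = p_size,
		.g = g, .g_size = g_size,
	};
	unsigned int len = crypto_dh_key_len(&params);
	char *buf = kmalloc(len, GFP_KERNEL);
	int err;

	if (!buf)
		return -ENOMEM;

	err = crypto_dh_encode_key(buf, len, &params);	/* 0 on success */
	if (err) {
		kfree(buf);
		return err;
	}

	*bufp = buf;	/* caller passes this to the kpp setkey, then frees it */
	*lenp = len;
	return 0;
}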
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index d961b2b16f55..61580b19f9f6 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -43,6 +43,7 @@
 #include <linux/random.h>
 #include <linux/scatterlist.h>
 #include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/module.h>
 #include <linux/crypto.h>
 #include <linux/slab.h>
@@ -107,14 +108,25 @@ struct drbg_test_data {
 struct drbg_state {
 	struct mutex drbg_mutex;	/* lock around DRBG */
 	unsigned char *V;	/* internal state 10.1.1.1 1a) */
+	unsigned char *Vbuf;
 	/* hash: static value 10.1.1.1 1b) hmac / ctr: key */
 	unsigned char *C;
+	unsigned char *Cbuf;
 	/* Number of RNG requests since last reseed -- 10.1.1.1 1c) */
 	size_t reseed_ctr;
 	size_t reseed_threshold;
 	/* some memory the DRBG can use for its operation */
 	unsigned char *scratchpad;
+	unsigned char *scratchpadbuf;
 	void *priv_data;	/* Cipher handle */
+
+	struct crypto_skcipher *ctr_handle;	/* CTR mode cipher handle */
+	struct skcipher_request *ctr_req;	/* CTR mode request handle */
+	__u8 *ctr_null_value_buf;		/* CTR mode unaligned buffer */
+	__u8 *ctr_null_value;			/* CTR mode aligned zero buf */
+	struct completion ctr_completion;	/* CTR mode async handler */
+	int ctr_async_err;			/* CTR mode async error */
+
 	bool seeded;		/* DRBG fully seeded? */
 	bool pr;		/* Prediction resistance enabled? */
 	struct work_struct seed_work;	/* asynchronous seeding support */
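The new ctr_handle/ctr_req members let the CTR DRBG drive "ctr(aes)" through the skcipher API instead of doing CTR by hand. The snippet below is a hedged sketch of that general shape only, not the actual crypto/drbg.c code: asynchronous completion handling (ctr_completion/ctr_async_err) is elided, and outlen is assumed to stay within the bounds of the null-value buffer.

/* Hedged sketch only -- not the actual crypto/drbg.c implementation. */
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int drbg_ctr_keystream(struct drbg_state *drbg, u8 *outbuf, u32 outlen)
{
	struct scatterlist sg_in, sg_out;

	/* Encrypting zero bytes in CTR mode yields the raw keystream. */
	sg_init_one(&sg_in, drbg->ctr_null_value, outlen);
	sg_init_one(&sg_out, outbuf, outlen);
	skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out, outlen,
				   drbg->V);	/* V serves as the counter block */
	return crypto_skcipher_encrypt(drbg->ctr_req);
}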
diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h
new file mode 100644
index 000000000000..84bad548d194
--- /dev/null
+++ b/include/crypto/ecdh.h
@@ -0,0 +1,30 @@
+/*
+ * ECDH params to be used with kpp API
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_ECDH_
+#define _CRYPTO_ECDH_
+
+/* Curves IDs */
+#define ECC_CURVE_NIST_P192	0x0001
+#define ECC_CURVE_NIST_P256	0x0002
+
+struct ecdh {
+	unsigned short curve_id;
+	char *key;
+	unsigned short key_size;
+};
+
+int crypto_ecdh_key_len(const struct ecdh *params);
+int crypto_ecdh_encode_key(char *buf, unsigned int len, const struct ecdh *p);
+int crypto_ecdh_decode_key(const char *buf, unsigned int len, struct ecdh *p);
+
+#endif
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index da3864991d4c..6ad8e31d3868 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -159,6 +159,27 @@ static inline struct aead_request *aead_get_backlog(struct aead_queue *queue)
 	return req ? container_of(req, struct aead_request, base) : NULL;
 }
 
+static inline unsigned int crypto_aead_alg_chunksize(struct aead_alg *alg)
+{
+	return alg->chunksize;
+}
+
+/**
+ * crypto_aead_chunksize() - obtain chunk size
+ * @tfm: cipher handle
+ *
+ * The block size is set to one for ciphers such as CCM. However,
+ * you still need to provide incremental updates in multiples of
+ * the underlying block size as the IV does not have sub-block
+ * granularity. This is known in this API as the chunk size.
+ *
+ * Return: chunk size in bytes
+ */
+static inline unsigned int crypto_aead_chunksize(struct crypto_aead *tfm)
+{
+	return crypto_aead_alg_chunksize(crypto_aead_alg(tfm));
+}
+
 int crypto_register_aead(struct aead_alg *alg);
 void crypto_unregister_aead(struct aead_alg *alg);
 int crypto_register_aeads(struct aead_alg *algs, int count);
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 59333635e712..2bcfb931bc5b 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -20,7 +20,7 @@
20struct aead_geniv_ctx { 20struct aead_geniv_ctx {
21 spinlock_t lock; 21 spinlock_t lock;
22 struct crypto_aead *child; 22 struct crypto_aead *child;
23 struct crypto_blkcipher *null; 23 struct crypto_skcipher *sknull;
24 u8 salt[] __attribute__ ((aligned(__alignof__(u32)))); 24 u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
25}; 25};
26 26
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 49dae16f8929..1d4f365d8f03 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -114,14 +114,10 @@ int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
114int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc); 114int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
115int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc); 115int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc);
116 116
117int shash_ahash_mcryptd_update(struct ahash_request *req, 117int ahash_mcryptd_update(struct ahash_request *desc);
118 struct shash_desc *desc); 118int ahash_mcryptd_final(struct ahash_request *desc);
119int shash_ahash_mcryptd_final(struct ahash_request *req, 119int ahash_mcryptd_finup(struct ahash_request *desc);
120 struct shash_desc *desc); 120int ahash_mcryptd_digest(struct ahash_request *desc);
121int shash_ahash_mcryptd_finup(struct ahash_request *req,
122 struct shash_desc *desc);
123int shash_ahash_mcryptd_digest(struct ahash_request *req,
124 struct shash_desc *desc);
125 121
126int crypto_init_shash_ops_async(struct crypto_tfm *tfm); 122int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
127 123
diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h
new file mode 100644
index 000000000000..ad3acf3649be
--- /dev/null
+++ b/include/crypto/internal/kpp.h
@@ -0,0 +1,64 @@
1/*
2 * Key-agreement Protocol Primitives (KPP)
3 *
4 * Copyright (c) 2016, Intel Corporation
5 * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version.
11 *
12 */
13#ifndef _CRYPTO_KPP_INT_H
14#define _CRYPTO_KPP_INT_H
15#include <crypto/kpp.h>
16#include <crypto/algapi.h>
17
18/*
19 * Transform internal helpers.
20 */
21static inline void *kpp_request_ctx(struct kpp_request *req)
22{
23 return req->__ctx;
24}
25
26static inline void *kpp_tfm_ctx(struct crypto_kpp *tfm)
27{
28 return tfm->base.__crt_ctx;
29}
30
31static inline void kpp_request_complete(struct kpp_request *req, int err)
32{
33 req->base.complete(&req->base, err);
34}
35
36static inline const char *kpp_alg_name(struct crypto_kpp *tfm)
37{
38 return crypto_kpp_tfm(tfm)->__crt_alg->cra_name;
39}
40
41/**
42 * crypto_register_kpp() -- Register key-agreement protocol primitives algorithm
43 *
44 * Function registers an implementation of a key-agreement protocol primitive
45 * algorithm
46 *
47 * @alg: algorithm definition
48 *
49 * Return: zero on success; error code in case of error
50 */
51int crypto_register_kpp(struct kpp_alg *alg);
52
53/**
54 * crypto_unregister_kpp() -- Unregister key-agreement protocol primitive
55 * algorithm
56 *
57 * Function unregisters an implementation of a key-agreement protocol primitive
58 * algorithm
59 *
60 * @alg: algorithm definition
61 */
62void crypto_unregister_kpp(struct kpp_alg *alg);
63
64#endif
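
For implementers, a skeleton of what crypto_register_kpp() expects might look roughly like the stub below; every toy_* name is hypothetical and the handlers are placeholders, only struct kpp_alg and the register/unregister calls are taken from this header and from kpp.h:

#include <crypto/internal/kpp.h>
#include <linux/module.h>

static int toy_kpp_set_secret(struct crypto_kpp *tfm, void *buffer,
			      unsigned int len)
{
	/* Decode the packed buffer (see struct kpp_secret in kpp.h). */
	return 0;
}

static int toy_kpp_generate_public_key(struct kpp_request *req)
{
	/* Write the public value to req->dst and update req->dst_len. */
	return -EOPNOTSUPP;
}

static int toy_kpp_compute_shared_secret(struct kpp_request *req)
{
	/* Combine req->src with the stored secret, write to req->dst. */
	return -EOPNOTSUPP;
}

static int toy_kpp_max_size(struct crypto_kpp *tfm)
{
	return 32;	/* largest possible output for the current key */
}

static struct kpp_alg toy_kpp = {
	.set_secret		= toy_kpp_set_secret,
	.generate_public_key	= toy_kpp_generate_public_key,
	.compute_shared_secret	= toy_kpp_compute_shared_secret,
	.max_size		= toy_kpp_max_size,
	.base = {
		.cra_name	 = "toy-kpp",
		.cra_driver_name = "toy-kpp-generic",
		.cra_priority	 = 100,
		.cra_module	 = THIS_MODULE,
	},
};

static int __init toy_kpp_mod_init(void)
{
	return crypto_register_kpp(&toy_kpp);
}

static void __exit toy_kpp_mod_exit(void)
{
	crypto_unregister_kpp(&toy_kpp);
}

module_init(toy_kpp_mod_init);
module_exit(toy_kpp_mod_exit);
MODULE_LICENSE("GPL");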
diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h
index c7585bdecbc2..9e8f1590de98 100644
--- a/include/crypto/internal/rsa.h
+++ b/include/crypto/internal/rsa.h
@@ -12,12 +12,44 @@
12 */ 12 */
13#ifndef _RSA_HELPER_ 13#ifndef _RSA_HELPER_
14#define _RSA_HELPER_ 14#define _RSA_HELPER_
15#include <linux/mpi.h> 15#include <linux/types.h>
16 16
17/**
18 * rsa_key - RSA key structure
19 * @n : RSA modulus raw byte stream
20 * @e : RSA public exponent raw byte stream
21 * @d : RSA private exponent raw byte stream
22 * @p : RSA prime factor p of n raw byte stream
23 * @q : RSA prime factor q of n raw byte stream
24 * @dp : RSA exponent d mod (p - 1) raw byte stream
25 * @dq : RSA exponent d mod (q - 1) raw byte stream
26 * @qinv : RSA CRT coefficient q^(-1) mod p raw byte stream
27 * @n_sz : length in bytes of RSA modulus n
28 * @e_sz : length in bytes of RSA public exponent
29 * @d_sz : length in bytes of RSA private exponent
30 * @p_sz : length in bytes of p field
31 * @q_sz : length in bytes of q field
32 * @dp_sz : length in bytes of dp field
33 * @dq_sz : length in bytes of dq field
34 * @qinv_sz : length in bytes of qinv field
35 */
17struct rsa_key { 36struct rsa_key {
18 MPI n; 37 const u8 *n;
19 MPI e; 38 const u8 *e;
20 MPI d; 39 const u8 *d;
40 const u8 *p;
41 const u8 *q;
42 const u8 *dp;
43 const u8 *dq;
44 const u8 *qinv;
45 size_t n_sz;
46 size_t e_sz;
47 size_t d_sz;
48 size_t p_sz;
49 size_t q_sz;
50 size_t dp_sz;
51 size_t dq_sz;
52 size_t qinv_sz;
21}; 53};
22 54
23int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key, 55int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key,
@@ -26,7 +58,5 @@ int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key,
26int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key, 58int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key,
27 unsigned int key_len); 59 unsigned int key_len);
28 60
29void rsa_free_key(struct rsa_key *rsa_key);
30
31extern struct crypto_template rsa_pkcs1pad_tmpl; 61extern struct crypto_template rsa_pkcs1pad_tmpl;
32#endif 62#endif
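
A consequence of the MPI-to-raw-pointer change worth spelling out: rsa_parse_pub_key() and rsa_parse_priv_key() no longer allocate anything, the parsed fields simply point into the caller's BER-encoded key, which is why rsa_free_key() disappears above. A small hypothetical caller (toy_check_pub_key() is not a real kernel function) could look like this:

#include <crypto/internal/rsa.h>
#include <linux/errno.h>

static int toy_check_pub_key(const void *ber_key, unsigned int ber_len)
{
	struct rsa_key raw = {};
	int err;

	err = rsa_parse_pub_key(&raw, ber_key, ber_len);
	if (err)
		return err;

	/* raw.n and raw.e point into ber_key; nothing was allocated, so
	 * there is nothing to free, but ber_key must stay alive for as
	 * long as the pointers are used.
	 */
	if (raw.n_sz < 64)	/* e.g. refuse moduli shorter than 512 bits */
		return -EINVAL;

	return 0;
}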
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 2cf7a61ece59..a21a95e1a375 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -19,12 +19,46 @@
19 19
20struct rtattr; 20struct rtattr;
21 21
22struct skcipher_instance {
23 void (*free)(struct skcipher_instance *inst);
24 union {
25 struct {
26 char head[offsetof(struct skcipher_alg, base)];
27 struct crypto_instance base;
28 } s;
29 struct skcipher_alg alg;
30 };
31};
32
22struct crypto_skcipher_spawn { 33struct crypto_skcipher_spawn {
23 struct crypto_spawn base; 34 struct crypto_spawn base;
24}; 35};
25 36
26extern const struct crypto_type crypto_givcipher_type; 37extern const struct crypto_type crypto_givcipher_type;
27 38
39static inline struct crypto_instance *skcipher_crypto_instance(
40 struct skcipher_instance *inst)
41{
42 return &inst->s.base;
43}
44
45static inline struct skcipher_instance *skcipher_alg_instance(
46 struct crypto_skcipher *skcipher)
47{
48 return container_of(crypto_skcipher_alg(skcipher),
49 struct skcipher_instance, alg);
50}
51
52static inline void *skcipher_instance_ctx(struct skcipher_instance *inst)
53{
54 return crypto_instance_ctx(skcipher_crypto_instance(inst));
55}
56
57static inline void skcipher_request_complete(struct skcipher_request *req, int err)
58{
59 req->base.complete(&req->base, err);
60}
61
28static inline void crypto_set_skcipher_spawn( 62static inline void crypto_set_skcipher_spawn(
29 struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst) 63 struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst)
30{ 64{
@@ -34,6 +68,12 @@ static inline void crypto_set_skcipher_spawn(
34int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, 68int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
35 u32 type, u32 mask); 69 u32 type, u32 mask);
36 70
71static inline int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn,
72 const char *name, u32 type, u32 mask)
73{
74 return crypto_grab_skcipher(spawn, name, type, mask);
75}
76
37struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask); 77struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask);
38 78
39static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn) 79static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
@@ -41,54 +81,42 @@ static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
41 crypto_drop_spawn(&spawn->base); 81 crypto_drop_spawn(&spawn->base);
42} 82}
43 83
44static inline struct crypto_alg *crypto_skcipher_spawn_alg( 84static inline struct skcipher_alg *crypto_skcipher_spawn_alg(
45 struct crypto_skcipher_spawn *spawn) 85 struct crypto_skcipher_spawn *spawn)
46{ 86{
47 return spawn->base.alg; 87 return container_of(spawn->base.alg, struct skcipher_alg, base);
48} 88}
49 89
50static inline struct crypto_ablkcipher *crypto_spawn_skcipher( 90static inline struct skcipher_alg *crypto_spawn_skcipher_alg(
51 struct crypto_skcipher_spawn *spawn) 91 struct crypto_skcipher_spawn *spawn)
52{ 92{
53 return __crypto_ablkcipher_cast( 93 return crypto_skcipher_spawn_alg(spawn);
54 crypto_spawn_tfm(&spawn->base, crypto_skcipher_type(0),
55 crypto_skcipher_mask(0)));
56} 94}
57 95
58int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req); 96static inline struct crypto_skcipher *crypto_spawn_skcipher(
59int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req); 97 struct crypto_skcipher_spawn *spawn)
60const char *crypto_default_geniv(const struct crypto_alg *alg);
61
62struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
63 struct rtattr **tb, u32 type,
64 u32 mask);
65void skcipher_geniv_free(struct crypto_instance *inst);
66int skcipher_geniv_init(struct crypto_tfm *tfm);
67void skcipher_geniv_exit(struct crypto_tfm *tfm);
68
69static inline struct crypto_ablkcipher *skcipher_geniv_cipher(
70 struct crypto_ablkcipher *geniv)
71{ 98{
72 return crypto_ablkcipher_crt(geniv)->base; 99 return crypto_spawn_tfm2(&spawn->base);
73} 100}
74 101
75static inline int skcipher_enqueue_givcrypt( 102static inline struct crypto_skcipher *crypto_spawn_skcipher2(
76 struct crypto_queue *queue, struct skcipher_givcrypt_request *request) 103 struct crypto_skcipher_spawn *spawn)
77{ 104{
78 return ablkcipher_enqueue_request(queue, &request->creq); 105 return crypto_spawn_skcipher(spawn);
79} 106}
80 107
81static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt( 108static inline void crypto_skcipher_set_reqsize(
82 struct crypto_queue *queue) 109 struct crypto_skcipher *skcipher, unsigned int reqsize)
83{ 110{
84 return skcipher_givcrypt_cast(crypto_dequeue_request(queue)); 111 skcipher->reqsize = reqsize;
85} 112}
86 113
87static inline void *skcipher_givcrypt_reqctx( 114int crypto_register_skcipher(struct skcipher_alg *alg);
88 struct skcipher_givcrypt_request *req) 115void crypto_unregister_skcipher(struct skcipher_alg *alg);
89{ 116int crypto_register_skciphers(struct skcipher_alg *algs, int count);
90 return ablkcipher_request_ctx(&req->creq); 117void crypto_unregister_skciphers(struct skcipher_alg *algs, int count);
91} 118int skcipher_register_instance(struct crypto_template *tmpl,
119 struct skcipher_instance *inst);
92 120
93static inline void ablkcipher_request_complete(struct ablkcipher_request *req, 121static inline void ablkcipher_request_complete(struct ablkcipher_request *req,
94 int err) 122 int err)
@@ -96,12 +124,6 @@ static inline void ablkcipher_request_complete(struct ablkcipher_request *req,
96 req->base.complete(&req->base, err); 124 req->base.complete(&req->base, err);
97} 125}
98 126
99static inline void skcipher_givcrypt_complete(
100 struct skcipher_givcrypt_request *req, int err)
101{
102 ablkcipher_request_complete(&req->creq, err);
103}
104
105static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req) 127static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req)
106{ 128{
107 return req->base.flags; 129 return req->base.flags;
@@ -122,5 +144,31 @@ static inline u32 skcipher_request_flags(struct skcipher_request *req)
122 return req->base.flags; 144 return req->base.flags;
123} 145}
124 146
147static inline unsigned int crypto_skcipher_alg_min_keysize(
148 struct skcipher_alg *alg)
149{
150 if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
151 CRYPTO_ALG_TYPE_BLKCIPHER)
152 return alg->base.cra_blkcipher.min_keysize;
153
154 if (alg->base.cra_ablkcipher.encrypt)
155 return alg->base.cra_ablkcipher.min_keysize;
156
157 return alg->min_keysize;
158}
159
160static inline unsigned int crypto_skcipher_alg_max_keysize(
161 struct skcipher_alg *alg)
162{
163 if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
164 CRYPTO_ALG_TYPE_BLKCIPHER)
165 return alg->base.cra_blkcipher.max_keysize;
166
167 if (alg->base.cra_ablkcipher.encrypt)
168 return alg->base.cra_ablkcipher.max_keysize;
169
170 return alg->max_keysize;
171}
172
125#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */ 173#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */
126 174
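
crypto_register_skcipher() above is new with this series; a stub registration against the skcipher_alg structure (documented in the include/crypto/skcipher.h hunk below) might look roughly as follows, with all toy_ctr_* names hypothetical and the handlers left unimplemented:

#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <linux/module.h>

static int toy_ctr_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int keylen)
{
	/* Expand and store the AES key in the tfm context. */
	return 0;
}

static int toy_ctr_crypt(struct skcipher_request *req)
{
	/* Walk req->src/req->dst in counter mode using req->iv. */
	return -EOPNOTSUPP;
}

static struct skcipher_alg toy_ctr_alg = {
	.setkey		= toy_ctr_setkey,
	.encrypt	= toy_ctr_crypt,
	.decrypt	= toy_ctr_crypt,	/* CTR is its own inverse */
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.chunksize	= AES_BLOCK_SIZE,	/* stream cipher: blocksize 1 */
	.base = {
		.cra_name	 = "ctr(aes)",
		.cra_driver_name = "ctr-aes-toy",
		.cra_priority	 = 100,
		.cra_blocksize	 = 1,
		.cra_ctxsize	 = sizeof(struct crypto_aes_ctx),
		.cra_module	 = THIS_MODULE,
	},
};

static int __init toy_ctr_mod_init(void)
{
	return crypto_register_skcipher(&toy_ctr_alg);
}

static void __exit toy_ctr_mod_exit(void)
{
	crypto_unregister_skcipher(&toy_ctr_alg);
}

module_init(toy_ctr_mod_init);
module_exit(toy_ctr_mod_exit);
MODULE_LICENSE("GPL");

Setting .chunksize to the underlying block size while leaving cra_blocksize at 1 is the pattern the new @chunksize field is meant for.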
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
new file mode 100644
index 000000000000..30791f75c180
--- /dev/null
+++ b/include/crypto/kpp.h
@@ -0,0 +1,330 @@
1/*
2 * Key-agreement Protocol Primitives (KPP)
3 *
4 * Copyright (c) 2016, Intel Corporation
5 * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version.
11 *
12 */
13
14#ifndef _CRYPTO_KPP_
15#define _CRYPTO_KPP_
16#include <linux/crypto.h>
17
18/**
19 * struct kpp_request
20 *
21 * @base: Common attributes for async crypto requests
22 * @src: Source data
23 * @dst: Destination data
24 * @src_len: Size of the input buffer
25 * @dst_len: Size of the output buffer. It needs to be at least
26 * as big as the expected result depending on the operation.
27 * After the operation it will be updated with the actual size of the
28 * result. In case of error where the dst sgl size was insufficient,
29 * it will be updated to the size required for the operation.
30 * @__ctx: Start of private context data
31 */
32struct kpp_request {
33 struct crypto_async_request base;
34 struct scatterlist *src;
35 struct scatterlist *dst;
36 unsigned int src_len;
37 unsigned int dst_len;
38 void *__ctx[] CRYPTO_MINALIGN_ATTR;
39};
40
41/**
42 * struct crypto_kpp - user-instantiated object which encapsulate
43 * algorithms and core processing logic
44 *
45 * @base: Common crypto API algorithm data structure
46 */
47struct crypto_kpp {
48 struct crypto_tfm base;
49};
50
51/**
52 * struct kpp_alg - generic key-agreement protocol primitives
53 *
54 * @set_secret: Function invokes the protocol specific function to
55 * store the secret private key along with parameters.
56 * The implementation knows how to decode the buffer
57 * @generate_public_key: Function generates the public key to be sent to the
58 * counterpart. In case of error, where output is not big
59 * enough req->dst_len will be updated to the size
60 * required
61 * @compute_shared_secret: Function computes the shared secret as defined by
62 * the algorithm. The result is given back to the user.
63 * In case of error, where output is not big enough,
64 * req->dst_len will be updated to the size required
65 * @max_size: Function returns the size of the output buffer
66 * @init: Initialize the object. This is called only once at
67 * instantiation time, in case the cryptographic hardware
68 * needs to be initialized. A software fallback should be
69 * put in place here.
70 * @exit: Undo everything @init did.
71 *
72 * @reqsize: Request context size required by algorithm
73 * implementation
74 * @base: Common crypto API algorithm data structure
75 */
76struct kpp_alg {
77 int (*set_secret)(struct crypto_kpp *tfm, void *buffer,
78 unsigned int len);
79 int (*generate_public_key)(struct kpp_request *req);
80 int (*compute_shared_secret)(struct kpp_request *req);
81
82 int (*max_size)(struct crypto_kpp *tfm);
83
84 int (*init)(struct crypto_kpp *tfm);
85 void (*exit)(struct crypto_kpp *tfm);
86
87 unsigned int reqsize;
88 struct crypto_alg base;
89};
90
91/**
92 * DOC: Generic Key-agreement Protocol Primitives API
93 *
94 * The KPP API is used with the algorithm type
95 * CRYPTO_ALG_TYPE_KPP (listed as type "kpp" in /proc/crypto)
96 */
97
98/**
99 * crypto_alloc_kpp() - allocate KPP tfm handle
100 * @alg_name: is the name of the kpp algorithm (e.g. "dh", "ecdh")
101 * @type: specifies the type of the algorithm
102 * @mask: specifies the mask for the algorithm
103 *
104 * Allocate a handle for a kpp algorithm. The returned struct crypto_kpp
105 * is required for any following API invocation
106 *
107 * Return: allocated handle in case of success; IS_ERR() is true in case of
108 * an error, PTR_ERR() returns the error code.
109 */
110struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask);
111
112static inline struct crypto_tfm *crypto_kpp_tfm(struct crypto_kpp *tfm)
113{
114 return &tfm->base;
115}
116
117static inline struct kpp_alg *__crypto_kpp_alg(struct crypto_alg *alg)
118{
119 return container_of(alg, struct kpp_alg, base);
120}
121
122static inline struct crypto_kpp *__crypto_kpp_tfm(struct crypto_tfm *tfm)
123{
124 return container_of(tfm, struct crypto_kpp, base);
125}
126
127static inline struct kpp_alg *crypto_kpp_alg(struct crypto_kpp *tfm)
128{
129 return __crypto_kpp_alg(crypto_kpp_tfm(tfm)->__crt_alg);
130}
131
132static inline unsigned int crypto_kpp_reqsize(struct crypto_kpp *tfm)
133{
134 return crypto_kpp_alg(tfm)->reqsize;
135}
136
137static inline void kpp_request_set_tfm(struct kpp_request *req,
138 struct crypto_kpp *tfm)
139{
140 req->base.tfm = crypto_kpp_tfm(tfm);
141}
142
143static inline struct crypto_kpp *crypto_kpp_reqtfm(struct kpp_request *req)
144{
145 return __crypto_kpp_tfm(req->base.tfm);
146}
147
148/**
149 * crypto_free_kpp() - free KPP tfm handle
150 *
151 * @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
152 */
153static inline void crypto_free_kpp(struct crypto_kpp *tfm)
154{
155 crypto_destroy_tfm(tfm, crypto_kpp_tfm(tfm));
156}
157
158/**
159 * kpp_request_alloc() - allocates kpp request
160 *
161 * @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
162 * @gfp: allocation flags
163 *
164 * Return: allocated handle in case of success or NULL in case of an error.
165 */
166static inline struct kpp_request *kpp_request_alloc(struct crypto_kpp *tfm,
167 gfp_t gfp)
168{
169 struct kpp_request *req;
170
171 req = kmalloc(sizeof(*req) + crypto_kpp_reqsize(tfm), gfp);
172 if (likely(req))
173 kpp_request_set_tfm(req, tfm);
174
175 return req;
176}
177
178/**
179 * kpp_request_free() - zeroize and free kpp request
180 *
181 * @req: request to free
182 */
183static inline void kpp_request_free(struct kpp_request *req)
184{
185 kzfree(req);
186}
187
188/**
189 * kpp_request_set_callback() - Sets an asynchronous callback.
190 *
191 * Callback will be called when an asynchronous operation on a given
192 * request is finished.
193 *
194 * @req: request that the callback will be set for
195 * @flgs: specify for instance if the operation may backlog
196 * @cmpl: callback which will be called
197 * @data: private data used by the caller
198 */
199static inline void kpp_request_set_callback(struct kpp_request *req,
200 u32 flgs,
201 crypto_completion_t cmpl,
202 void *data)
203{
204 req->base.complete = cmpl;
205 req->base.data = data;
206 req->base.flags = flgs;
207}
208
209/**
210 * kpp_request_set_input() - Sets input buffer
211 *
212 * Sets parameters required by generate_public_key
213 *
214 * @req: kpp request
215 * @input: ptr to input scatter list
216 * @input_len: size of the input scatter list
217 */
218static inline void kpp_request_set_input(struct kpp_request *req,
219 struct scatterlist *input,
220 unsigned int input_len)
221{
222 req->src = input;
223 req->src_len = input_len;
224}
225
226/**
227 * kpp_request_set_output() - Sets output buffer
228 *
229 * Sets parameters required by kpp operation
230 *
231 * @req: kpp request
232 * @output: ptr to output scatter list
233 * @output_len: size of the output scatter list
234 */
235static inline void kpp_request_set_output(struct kpp_request *req,
236 struct scatterlist *output,
237 unsigned int output_len)
238{
239 req->dst = output;
240 req->dst_len = output_len;
241}
242
243enum {
244 CRYPTO_KPP_SECRET_TYPE_UNKNOWN,
245 CRYPTO_KPP_SECRET_TYPE_DH,
246 CRYPTO_KPP_SECRET_TYPE_ECDH,
247};
248
249/**
250 * struct kpp_secret - small header for packing secret buffer
251 *
252 * @type: define type of secret. Each kpp type will define its own
253 * @len: specify the length of the secret, including the header, that
254 * follows the struct
255 */
256struct kpp_secret {
257 unsigned short type;
258 unsigned short len;
259};
260
261/**
262 * crypto_kpp_set_secret() - Invoke kpp operation
263 *
264 * Function invokes the specific kpp operation for a given alg.
265 *
266 * @tfm: tfm handle
267 *
268 * Return: zero on success; error code in case of error
269 */
270static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, void *buffer,
271 unsigned int len)
272{
273 struct kpp_alg *alg = crypto_kpp_alg(tfm);
274
275 return alg->set_secret(tfm, buffer, len);
276}
277
278/**
279 * crypto_kpp_generate_public_key() - Invoke kpp operation
280 *
281 * Function invokes the specific kpp operation for generating the public part
282 * for a given kpp algorithm
283 *
284 * @req: kpp key request
285 *
286 * Return: zero on success; error code in case of error
287 */
288static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
289{
290 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
291 struct kpp_alg *alg = crypto_kpp_alg(tfm);
292
293 return alg->generate_public_key(req);
294}
295
296/**
297 * crypto_kpp_compute_shared_secret() - Invoke kpp operation
298 *
299 * Function invokes the specific kpp operation for computing the shared secret
300 * for a given kpp algorithm.
301 *
302 * @req: kpp key request
303 *
304 * Return: zero on success; error code in case of error
305 */
306static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
307{
308 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
309 struct kpp_alg *alg = crypto_kpp_alg(tfm);
310
311 return alg->compute_shared_secret(req);
312}
313
314/**
315 * crypto_kpp_maxsize() - Get len for output buffer
316 *
317 * Function returns the output buffer size required
318 *
319 * @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
320 *
321 * Return: minimum len for output buffer or error code if key hasn't been set
322 */
323static inline int crypto_kpp_maxsize(struct crypto_kpp *tfm)
324{
325 struct kpp_alg *alg = crypto_kpp_alg(tfm);
326
327 return alg->max_size(tfm);
328}
329
330#endif
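
Pulling the pieces of this header together, a synchronous consumer-side exchange could be sketched as below. example_kpp_exchange() and its buffers are assumptions; the kpp_request helpers, crypto_kpp_*() calls and crypto_alloc_kpp()/crypto_free_kpp() are the ones declared above, and 'secret' is the flat buffer packed by crypto_ecdh_encode_key() earlier in this diff:

#include <crypto/kpp.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_kpp_exchange(void *secret, unsigned int secret_len,
				void *peer_pub, unsigned int peer_pub_len,
				void *shared, unsigned int shared_len)
{
	struct crypto_kpp *tfm;
	struct kpp_request *req = NULL;
	struct scatterlist src, dst;
	void *our_pub = NULL;
	int len, err;

	tfm = crypto_alloc_kpp("ecdh", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* 'secret' is the buffer produced by crypto_ecdh_encode_key(). */
	err = crypto_kpp_set_secret(tfm, secret, secret_len);
	if (err)
		goto out_tfm;

	len = crypto_kpp_maxsize(tfm);
	if (len <= 0) {
		err = len ? len : -EINVAL;
		goto out_tfm;
	}

	our_pub = kmalloc(len, GFP_KERNEL);
	req = kpp_request_alloc(tfm, GFP_KERNEL);
	if (!our_pub || !req) {
		err = -ENOMEM;
		goto out_free;
	}
	/* Synchronous use only; an async caller wires up a completion here. */
	kpp_request_set_callback(req, 0, NULL, NULL);

	/* 1. Produce our public value (to be sent to the peer). */
	sg_init_one(&dst, our_pub, len);
	kpp_request_set_input(req, NULL, 0);
	kpp_request_set_output(req, &dst, len);
	err = crypto_kpp_generate_public_key(req);
	if (err)
		goto out_free;

	/* 2. Combine the peer's public value with our stored secret. */
	sg_init_one(&src, peer_pub, peer_pub_len);
	sg_init_one(&dst, shared, shared_len);
	kpp_request_set_input(req, &src, peer_pub_len);
	kpp_request_set_output(req, &dst, shared_len);
	err = crypto_kpp_compute_shared_secret(req);

out_free:
	kpp_request_free(req);
	kfree(our_pub);
out_tfm:
	crypto_free_kpp(tfm);
	return err;
}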
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
index c23ee1f7ee80..4a53c0d38cd2 100644
--- a/include/crypto/mcryptd.h
+++ b/include/crypto/mcryptd.h
@@ -39,7 +39,7 @@ struct mcryptd_instance_ctx {
39}; 39};
40 40
41struct mcryptd_hash_ctx { 41struct mcryptd_hash_ctx {
42 struct crypto_shash *child; 42 struct crypto_ahash *child;
43 struct mcryptd_alg_state *alg_state; 43 struct mcryptd_alg_state *alg_state;
44}; 44};
45 45
@@ -59,13 +59,13 @@ struct mcryptd_hash_request_ctx {
59 struct crypto_hash_walk walk; 59 struct crypto_hash_walk walk;
60 u8 *out; 60 u8 *out;
61 int flag; 61 int flag;
62 struct shash_desc desc; 62 struct ahash_request areq;
63}; 63};
64 64
65struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name, 65struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
66 u32 type, u32 mask); 66 u32 type, u32 mask);
67struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm); 67struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm);
68struct shash_desc *mcryptd_shash_desc(struct ahash_request *req); 68struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req);
69void mcryptd_free_ahash(struct mcryptd_ahash *tfm); 69void mcryptd_free_ahash(struct mcryptd_ahash *tfm);
70void mcryptd_flusher(struct work_struct *work); 70void mcryptd_flusher(struct work_struct *work);
71 71
diff --git a/include/crypto/null.h b/include/crypto/null.h
index 06dc30d9f56e..3f0c59fb0a61 100644
--- a/include/crypto/null.h
+++ b/include/crypto/null.h
@@ -8,7 +8,17 @@
8#define NULL_DIGEST_SIZE 0 8#define NULL_DIGEST_SIZE 0
9#define NULL_IV_SIZE 0 9#define NULL_IV_SIZE 0
10 10
11struct crypto_blkcipher *crypto_get_default_null_skcipher(void); 11struct crypto_skcipher *crypto_get_default_null_skcipher(void);
12void crypto_put_default_null_skcipher(void); 12void crypto_put_default_null_skcipher(void);
13 13
14static inline struct crypto_skcipher *crypto_get_default_null_skcipher2(void)
15{
16 return crypto_get_default_null_skcipher();
17}
18
19static inline void crypto_put_default_null_skcipher2(void)
20{
21 crypto_put_default_null_skcipher();
22}
23
14#endif 24#endif
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 35f99b68d037..880e6be9e95e 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -16,14 +16,10 @@
16#ifndef _CRYPTO_SCATTERWALK_H 16#ifndef _CRYPTO_SCATTERWALK_H
17#define _CRYPTO_SCATTERWALK_H 17#define _CRYPTO_SCATTERWALK_H
18 18
19#include <asm/kmap_types.h>
20#include <crypto/algapi.h> 19#include <crypto/algapi.h>
21#include <linux/hardirq.h>
22#include <linux/highmem.h> 20#include <linux/highmem.h>
23#include <linux/kernel.h> 21#include <linux/kernel.h>
24#include <linux/mm.h>
25#include <linux/scatterlist.h> 22#include <linux/scatterlist.h>
26#include <linux/sched.h>
27 23
28static inline void scatterwalk_crypto_chain(struct scatterlist *head, 24static inline void scatterwalk_crypto_chain(struct scatterlist *head,
29 struct scatterlist *sg, 25 struct scatterlist *sg,
@@ -83,17 +79,53 @@ static inline void scatterwalk_unmap(void *vaddr)
83 kunmap_atomic(vaddr); 79 kunmap_atomic(vaddr);
84} 80}
85 81
86void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg); 82static inline void scatterwalk_start(struct scatter_walk *walk,
83 struct scatterlist *sg)
84{
85 walk->sg = sg;
86 walk->offset = sg->offset;
87}
88
89static inline void *scatterwalk_map(struct scatter_walk *walk)
90{
91 return kmap_atomic(scatterwalk_page(walk)) +
92 offset_in_page(walk->offset);
93}
94
95static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out,
96 unsigned int more)
97{
98 if (out) {
99 struct page *page;
100
101 page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
102 /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as
103 * PageSlab cannot be optimised away per se due to
104 * use of volatile pointer.
105 */
106 if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page))
107 flush_dcache_page(page);
108 }
109
110 if (more && walk->offset >= walk->sg->offset + walk->sg->length)
111 scatterwalk_start(walk, sg_next(walk->sg));
112}
113
114static inline void scatterwalk_done(struct scatter_walk *walk, int out,
115 int more)
116{
117 if (!more || walk->offset >= walk->sg->offset + walk->sg->length ||
118 !(walk->offset & (PAGE_SIZE - 1)))
119 scatterwalk_pagedone(walk, out, more);
120}
121
87void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, 122void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
88 size_t nbytes, int out); 123 size_t nbytes, int out);
89void *scatterwalk_map(struct scatter_walk *walk); 124void *scatterwalk_map(struct scatter_walk *walk);
90void scatterwalk_done(struct scatter_walk *walk, int out, int more);
91 125
92void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, 126void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
93 unsigned int start, unsigned int nbytes, int out); 127 unsigned int start, unsigned int nbytes, int out);
94 128
95int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes);
96
97struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], 129struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
98 struct scatterlist *src, 130 struct scatterlist *src,
99 unsigned int len); 131 unsigned int len);
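
scatterwalk_map_and_copy(), kept out-of-line above, is the usual way to move a small contiguous region between a flat buffer and a scatterlist. The helper below is a hypothetical illustration (out == 1 writes the buffer into the sg list, out == 0 reads from it), modelled on how AEAD implementations place and fetch authentication tags:

#include <crypto/scatterwalk.h>

static void example_tag_copy(struct scatterlist *dst, unsigned int cryptlen,
			     u8 *tag, unsigned int authsize)
{
	/* Append the tag right after the ciphertext held in 'dst'. */
	scatterwalk_map_and_copy(tag, dst, cryptlen, authsize, 1);

	/* Reading it back for verification uses out == 0. */
	scatterwalk_map_and_copy(tag, dst, cryptlen, authsize, 0);
}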
diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h
new file mode 100644
index 000000000000..f4c9f68f5ffe
--- /dev/null
+++ b/include/crypto/sha3.h
@@ -0,0 +1,29 @@
1/*
2 * Common values for SHA-3 algorithms
3 */
4#ifndef __CRYPTO_SHA3_H__
5#define __CRYPTO_SHA3_H__
6
7#define SHA3_224_DIGEST_SIZE (224 / 8)
8#define SHA3_224_BLOCK_SIZE (200 - 2 * SHA3_224_DIGEST_SIZE)
9
10#define SHA3_256_DIGEST_SIZE (256 / 8)
11#define SHA3_256_BLOCK_SIZE (200 - 2 * SHA3_256_DIGEST_SIZE)
12
13#define SHA3_384_DIGEST_SIZE (384 / 8)
14#define SHA3_384_BLOCK_SIZE (200 - 2 * SHA3_384_DIGEST_SIZE)
15
16#define SHA3_512_DIGEST_SIZE (512 / 8)
17#define SHA3_512_BLOCK_SIZE (200 - 2 * SHA3_512_DIGEST_SIZE)
18
19struct sha3_state {
20 u64 st[25];
21 unsigned int md_len;
22 unsigned int rsiz;
23 unsigned int rsizw;
24
25 unsigned int partial;
26 u8 buf[SHA3_224_BLOCK_SIZE];
27};
28
29#endif
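
For reference, the BLOCK_SIZE macros encode the Keccak rate: the state is 1600 bits (200 bytes) and the capacity is twice the digest size, so the rates work out to 200 - 2*28 = 144 bytes (SHA3-224), 200 - 2*32 = 136 (SHA3-256), 200 - 2*48 = 104 (SHA3-384) and 200 - 2*64 = 72 (SHA3-512); buf[] is dimensioned with SHA3_224_BLOCK_SIZE because that is the largest of the four.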
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 0f987f50bb52..cc4d98a7892e 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -65,86 +65,80 @@ struct crypto_skcipher {
65 struct crypto_tfm base; 65 struct crypto_tfm base;
66}; 66};
67 67
68#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \ 68/**
69 char __##name##_desc[sizeof(struct skcipher_request) + \ 69 * struct skcipher_alg - symmetric key cipher definition
70 crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \ 70 * @min_keysize: Minimum key size supported by the transformation. This is the
71 struct skcipher_request *name = (void *)__##name##_desc 71 * smallest key length supported by this transformation algorithm.
72 72 * This must be set to one of the pre-defined values as this is
73static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm( 73 * not hardware specific. Possible values for this field can be
74 struct skcipher_givcrypt_request *req) 74 * found via git grep "_MIN_KEY_SIZE" include/crypto/
75{ 75 * @max_keysize: Maximum key size supported by the transformation. This is the
76 return crypto_ablkcipher_reqtfm(&req->creq); 76 * largest key length supported by this transformation algorithm.
77} 77 * This must be set to one of the pre-defined values as this is
78 * not hardware specific. Possible values for this field can be
79 * found via git grep "_MAX_KEY_SIZE" include/crypto/
80 * @setkey: Set key for the transformation. This function is used to either
81 * program a supplied key into the hardware or store the key in the
82 * transformation context for programming it later. Note that this
83 * function does modify the transformation context. This function can
84 * be called multiple times during the existence of the transformation
85 * object, so one must make sure the key is properly reprogrammed into
86 * the hardware. This function is also responsible for checking the key
87 * length for validity. In case a software fallback was put in place in
88 * the @cra_init call, this function might need to use the fallback if
89 * the algorithm doesn't support all of the key sizes.
90 * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
91 * the supplied scatterlist containing the blocks of data. The crypto
92 * API consumer is responsible for aligning the entries of the
93 * scatterlist properly and making sure the chunks are correctly
94 * sized. In case a software fallback was put in place in the
95 * @cra_init call, this function might need to use the fallback if
96 * the algorithm doesn't support all of the key sizes. In case the
97 * key was stored in transformation context, the key might need to be
98 * re-programmed into the hardware in this function. This function
99 * shall not modify the transformation context, as this function may
100 * be called in parallel with the same transformation object.
101 * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
102 * and the conditions are exactly the same.
103 * @init: Initialize the cryptographic transformation object. This function
104 * is used to initialize the cryptographic transformation object.
105 * This function is called only once at the instantiation time, right
106 * after the transformation context was allocated. In case the
107 * cryptographic hardware has some special requirements which need to
108 * be handled by software, this function shall check for the precise
109 * requirement of the transformation and put any software fallbacks
110 * in place.
111 * @exit: Deinitialize the cryptographic transformation object. This is a
112 * counterpart to @init, used to remove various changes set in
113 * @init.
114 * @ivsize: IV size applicable for transformation. The consumer must provide an
115 * IV of exactly that size to perform the encrypt or decrypt operation.
116 * @chunksize: Equal to the block size except for stream ciphers such as
117 * CTR where it is set to the underlying block size.
118 * @base: Definition of a generic crypto algorithm.
119 *
120 * All fields except @ivsize are mandatory and must be filled.
121 */
122struct skcipher_alg {
123 int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
124 unsigned int keylen);
125 int (*encrypt)(struct skcipher_request *req);
126 int (*decrypt)(struct skcipher_request *req);
127 int (*init)(struct crypto_skcipher *tfm);
128 void (*exit)(struct crypto_skcipher *tfm);
78 129
79static inline int crypto_skcipher_givencrypt( 130 unsigned int min_keysize;
80 struct skcipher_givcrypt_request *req) 131 unsigned int max_keysize;
81{ 132 unsigned int ivsize;
82 struct ablkcipher_tfm *crt = 133 unsigned int chunksize;
83 crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req));
84 return crt->givencrypt(req);
85};
86 134
87static inline int crypto_skcipher_givdecrypt( 135 struct crypto_alg base;
88 struct skcipher_givcrypt_request *req)
89{
90 struct ablkcipher_tfm *crt =
91 crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req));
92 return crt->givdecrypt(req);
93}; 136};
94 137
95static inline void skcipher_givcrypt_set_tfm( 138#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \
96 struct skcipher_givcrypt_request *req, struct crypto_ablkcipher *tfm) 139 char __##name##_desc[sizeof(struct skcipher_request) + \
97{ 140 crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \
98 req->creq.base.tfm = crypto_ablkcipher_tfm(tfm); 141 struct skcipher_request *name = (void *)__##name##_desc
99}
100
101static inline struct skcipher_givcrypt_request *skcipher_givcrypt_cast(
102 struct crypto_async_request *req)
103{
104 return container_of(ablkcipher_request_cast(req),
105 struct skcipher_givcrypt_request, creq);
106}
107
108static inline struct skcipher_givcrypt_request *skcipher_givcrypt_alloc(
109 struct crypto_ablkcipher *tfm, gfp_t gfp)
110{
111 struct skcipher_givcrypt_request *req;
112
113 req = kmalloc(sizeof(struct skcipher_givcrypt_request) +
114 crypto_ablkcipher_reqsize(tfm), gfp);
115
116 if (likely(req))
117 skcipher_givcrypt_set_tfm(req, tfm);
118
119 return req;
120}
121
122static inline void skcipher_givcrypt_free(struct skcipher_givcrypt_request *req)
123{
124 kfree(req);
125}
126
127static inline void skcipher_givcrypt_set_callback(
128 struct skcipher_givcrypt_request *req, u32 flags,
129 crypto_completion_t compl, void *data)
130{
131 ablkcipher_request_set_callback(&req->creq, flags, compl, data);
132}
133
134static inline void skcipher_givcrypt_set_crypt(
135 struct skcipher_givcrypt_request *req,
136 struct scatterlist *src, struct scatterlist *dst,
137 unsigned int nbytes, void *iv)
138{
139 ablkcipher_request_set_crypt(&req->creq, src, dst, nbytes, iv);
140}
141
142static inline void skcipher_givcrypt_set_giv(
143 struct skcipher_givcrypt_request *req, u8 *giv, u64 seq)
144{
145 req->giv = giv;
146 req->seq = seq;
147}
148 142
149/** 143/**
150 * DOC: Symmetric Key Cipher API 144 * DOC: Symmetric Key Cipher API
@@ -231,12 +225,43 @@ static inline int crypto_has_skcipher(const char *alg_name, u32 type,
231 crypto_skcipher_mask(mask)); 225 crypto_skcipher_mask(mask));
232} 226}
233 227
228/**
229 * crypto_has_skcipher2() - Search for the availability of an skcipher.
230 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
231 * skcipher
232 * @type: specifies the type of the skcipher
233 * @mask: specifies the mask for the skcipher
234 *
235 * Return: true when the skcipher is known to the kernel crypto API; false
236 * otherwise
237 */
238int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask);
239
234static inline const char *crypto_skcipher_driver_name( 240static inline const char *crypto_skcipher_driver_name(
235 struct crypto_skcipher *tfm) 241 struct crypto_skcipher *tfm)
236{ 242{
237 return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); 243 return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm));
238} 244}
239 245
246static inline struct skcipher_alg *crypto_skcipher_alg(
247 struct crypto_skcipher *tfm)
248{
249 return container_of(crypto_skcipher_tfm(tfm)->__crt_alg,
250 struct skcipher_alg, base);
251}
252
253static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg)
254{
255 if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
256 CRYPTO_ALG_TYPE_BLKCIPHER)
257 return alg->base.cra_blkcipher.ivsize;
258
259 if (alg->base.cra_ablkcipher.encrypt)
260 return alg->base.cra_ablkcipher.ivsize;
261
262 return alg->ivsize;
263}
264
240/** 265/**
241 * crypto_skcipher_ivsize() - obtain IV size 266 * crypto_skcipher_ivsize() - obtain IV size
242 * @tfm: cipher handle 267 * @tfm: cipher handle
@@ -251,6 +276,36 @@ static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
251 return tfm->ivsize; 276 return tfm->ivsize;
252} 277}
253 278
279static inline unsigned int crypto_skcipher_alg_chunksize(
280 struct skcipher_alg *alg)
281{
282 if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
283 CRYPTO_ALG_TYPE_BLKCIPHER)
284 return alg->base.cra_blocksize;
285
286 if (alg->base.cra_ablkcipher.encrypt)
287 return alg->base.cra_blocksize;
288
289 return alg->chunksize;
290}
291
292/**
293 * crypto_skcipher_chunksize() - obtain chunk size
294 * @tfm: cipher handle
295 *
296 * The block size is set to one for ciphers such as CTR. However,
297 * you still need to provide incremental updates in multiples of
298 * the underlying block size as the IV does not have sub-block
299 * granularity. This is known in this API as the chunk size.
300 *
301 * Return: chunk size in bytes
302 */
303static inline unsigned int crypto_skcipher_chunksize(
304 struct crypto_skcipher *tfm)
305{
306 return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
307}
308
254/** 309/**
255 * crypto_skcipher_blocksize() - obtain block size of cipher 310 * crypto_skcipher_blocksize() - obtain block size of cipher
256 * @tfm: cipher handle 311 * @tfm: cipher handle
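
On the consumer side, the SKCIPHER_REQUEST_ON_STACK macro relocated above pairs with the existing skcipher_request helpers roughly as follows. This is a sketch, not the canonical usage: it assumes tfm was obtained with crypto_alloc_skcipher() and keyed with crypto_skcipher_setkey(), that the implementation is synchronous (a requirement for on-stack requests), and example_encrypt_in_place() and its buffers are illustrative names:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int example_encrypt_in_place(struct crypto_skcipher *tfm, u8 *buf,
				    unsigned int len, u8 *iv)
{
	SKCIPHER_REQUEST_ON_STACK(req, tfm);
	struct scatterlist sg;
	int err;

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	sg_init_one(&sg, buf, len);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	err = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);	/* wipe the on-stack request state */
	return err;
}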
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 6e28c895c376..7cee5551625b 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -47,16 +47,18 @@
47#define CRYPTO_ALG_TYPE_AEAD 0x00000003 47#define CRYPTO_ALG_TYPE_AEAD 0x00000003
48#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 48#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
49#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 49#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
50#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
50#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 51#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
51#define CRYPTO_ALG_TYPE_DIGEST 0x00000008 52#define CRYPTO_ALG_TYPE_KPP 0x00000008
52#define CRYPTO_ALG_TYPE_HASH 0x00000008
53#define CRYPTO_ALG_TYPE_SHASH 0x00000009
54#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
55#define CRYPTO_ALG_TYPE_RNG 0x0000000c 53#define CRYPTO_ALG_TYPE_RNG 0x0000000c
56#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d 54#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
55#define CRYPTO_ALG_TYPE_DIGEST 0x0000000e
56#define CRYPTO_ALG_TYPE_HASH 0x0000000e
57#define CRYPTO_ALG_TYPE_SHASH 0x0000000e
58#define CRYPTO_ALG_TYPE_AHASH 0x0000000f
57 59
58#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e 60#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
59#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c 61#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
60#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c 62#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
61 63
62#define CRYPTO_ALG_LARVAL 0x00000010 64#define CRYPTO_ALG_LARVAL 0x00000010
@@ -486,8 +488,6 @@ struct ablkcipher_tfm {
486 unsigned int keylen); 488 unsigned int keylen);
487 int (*encrypt)(struct ablkcipher_request *req); 489 int (*encrypt)(struct ablkcipher_request *req);
488 int (*decrypt)(struct ablkcipher_request *req); 490 int (*decrypt)(struct ablkcipher_request *req);
489 int (*givencrypt)(struct skcipher_givcrypt_request *req);
490 int (*givdecrypt)(struct skcipher_givcrypt_request *req);
491 491
492 struct crypto_ablkcipher *base; 492 struct crypto_ablkcipher *base;
493 493
@@ -712,23 +712,6 @@ static inline u32 crypto_skcipher_mask(u32 mask)
712 * state information is unused by the kernel crypto API. 712 * state information is unused by the kernel crypto API.
713 */ 713 */
714 714
715/**
716 * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle
717 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
718 * ablkcipher cipher
719 * @type: specifies the type of the cipher
720 * @mask: specifies the mask for the cipher
721 *
722 * Allocate a cipher handle for an ablkcipher. The returned struct
723 * crypto_ablkcipher is the cipher handle that is required for any subsequent
724 * API invocation for that ablkcipher.
725 *
726 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
727 * of an error, PTR_ERR() returns the error code.
728 */
729struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
730 u32 type, u32 mask);
731
732static inline struct crypto_tfm *crypto_ablkcipher_tfm( 715static inline struct crypto_tfm *crypto_ablkcipher_tfm(
733 struct crypto_ablkcipher *tfm) 716 struct crypto_ablkcipher *tfm)
734{ 717{
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
index 3a5abe95affd..1cc5ffb769af 100644
--- a/include/linux/mpi.h
+++ b/include/linux/mpi.h
@@ -80,8 +80,7 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign);
80int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, 80int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
81 int *sign); 81 int *sign);
82void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign); 82void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign);
83int mpi_set_buffer(MPI a, const void *buffer, unsigned nbytes, int sign); 83int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes,
84int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned *nbytes,
85 int *sign); 84 int *sign);
86 85
87#define log_mpidump g10_log_mpidump 86#define log_mpidump g10_log_mpidump
diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index 2e67bb64c1da..79b5ded2001a 100644
--- a/include/uapi/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -45,6 +45,7 @@ enum crypto_attr_type_t {
45 CRYPTOCFGA_REPORT_RNG, /* struct crypto_report_rng */ 45 CRYPTOCFGA_REPORT_RNG, /* struct crypto_report_rng */
46 CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */ 46 CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */
47 CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */ 47 CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */
48 CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */
48 __CRYPTOCFGA_MAX 49 __CRYPTOCFGA_MAX
49 50
50#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1) 51#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
@@ -107,5 +108,9 @@ struct crypto_report_akcipher {
107 char type[CRYPTO_MAX_NAME]; 108 char type[CRYPTO_MAX_NAME];
108}; 109};
109 110
111struct crypto_report_kpp {
112 char type[CRYPTO_MAX_NAME];
113};
114
110#define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \ 115#define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
111 sizeof(struct crypto_report_blkcipher)) 116 sizeof(struct crypto_report_blkcipher))
diff --git a/lib/digsig.c b/lib/digsig.c
index 07be6c1ef4e2..55b8b2f41a9e 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -104,21 +104,25 @@ static int digsig_verify_rsa(struct key *key,
104 datap = pkh->mpi; 104 datap = pkh->mpi;
105 endp = ukp->data + ukp->datalen; 105 endp = ukp->data + ukp->datalen;
106 106
107 err = -ENOMEM;
108
109 for (i = 0; i < pkh->nmpi; i++) { 107 for (i = 0; i < pkh->nmpi; i++) {
110 unsigned int remaining = endp - datap; 108 unsigned int remaining = endp - datap;
111 pkey[i] = mpi_read_from_buffer(datap, &remaining); 109 pkey[i] = mpi_read_from_buffer(datap, &remaining);
112 if (!pkey[i]) 110 if (IS_ERR(pkey[i])) {
111 err = PTR_ERR(pkey[i]);
113 goto err; 112 goto err;
113 }
114 datap += remaining; 114 datap += remaining;
115 } 115 }
116 116
117 mblen = mpi_get_nbits(pkey[0]); 117 mblen = mpi_get_nbits(pkey[0]);
118 mlen = DIV_ROUND_UP(mblen, 8); 118 mlen = DIV_ROUND_UP(mblen, 8);
119 119
120 if (mlen == 0) 120 if (mlen == 0) {
121 err = -EINVAL;
121 goto err; 122 goto err;
123 }
124
125 err = -ENOMEM;
122 126
123 out1 = kzalloc(mlen, GFP_KERNEL); 127 out1 = kzalloc(mlen, GFP_KERNEL);
124 if (!out1) 128 if (!out1)
@@ -126,8 +130,10 @@ static int digsig_verify_rsa(struct key *key,
126 130
127 nret = siglen; 131 nret = siglen;
128 in = mpi_read_from_buffer(sig, &nret); 132 in = mpi_read_from_buffer(sig, &nret);
129 if (!in) 133 if (IS_ERR(in)) {
134 err = PTR_ERR(in);
130 goto err; 135 goto err;
136 }
131 137
132 res = mpi_alloc(mpi_get_nlimbs(in) * 2); 138 res = mpi_alloc(mpi_get_nlimbs(in) * 2);
133 if (!res) 139 if (!res)
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 747606f9e4a3..c6272ae2015e 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -21,6 +21,7 @@
21#include <linux/bitops.h> 21#include <linux/bitops.h>
22#include <linux/count_zeros.h> 22#include <linux/count_zeros.h>
23#include <linux/byteorder/generic.h> 23#include <linux/byteorder/generic.h>
24#include <linux/scatterlist.h>
24#include <linux/string.h> 25#include <linux/string.h>
25#include "mpi-internal.h" 26#include "mpi-internal.h"
26 27
@@ -50,9 +51,7 @@ MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes)
50 return NULL; 51 return NULL;
51 } 52 }
52 if (nbytes > 0) 53 if (nbytes > 0)
53 nbits -= count_leading_zeros(buffer[0]); 54 nbits -= count_leading_zeros(buffer[0]) - (BITS_PER_LONG - 8);
54 else
55 nbits = 0;
56 55
57 nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB); 56 nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
58 val = mpi_alloc(nlimbs); 57 val = mpi_alloc(nlimbs);
@@ -82,50 +81,30 @@ EXPORT_SYMBOL_GPL(mpi_read_raw_data);
82MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread) 81MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread)
83{ 82{
84 const uint8_t *buffer = xbuffer; 83 const uint8_t *buffer = xbuffer;
85 int i, j; 84 unsigned int nbits, nbytes;
86 unsigned nbits, nbytes, nlimbs, nread = 0; 85 MPI val;
87 mpi_limb_t a;
88 MPI val = NULL;
89 86
90 if (*ret_nread < 2) 87 if (*ret_nread < 2)
91 goto leave; 88 return ERR_PTR(-EINVAL);
92 nbits = buffer[0] << 8 | buffer[1]; 89 nbits = buffer[0] << 8 | buffer[1];
93 90
94 if (nbits > MAX_EXTERN_MPI_BITS) { 91 if (nbits > MAX_EXTERN_MPI_BITS) {
95 pr_info("MPI: mpi too large (%u bits)\n", nbits); 92 pr_info("MPI: mpi too large (%u bits)\n", nbits);
96 goto leave; 93 return ERR_PTR(-EINVAL);
97 } 94 }
98 buffer += 2;
99 nread = 2;
100 95
101 nbytes = DIV_ROUND_UP(nbits, 8); 96 nbytes = DIV_ROUND_UP(nbits, 8);
102 nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB); 97 if (nbytes + 2 > *ret_nread) {
103 val = mpi_alloc(nlimbs); 98 pr_info("MPI: mpi larger than buffer nbytes=%u ret_nread=%u\n",
104 if (!val) 99 nbytes, *ret_nread);
105 return NULL; 100 return ERR_PTR(-EINVAL);
106 i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
107 i %= BYTES_PER_MPI_LIMB;
108 val->nbits = nbits;
109 j = val->nlimbs = nlimbs;
110 val->sign = 0;
111 for (; j > 0; j--) {
112 a = 0;
113 for (; i < BYTES_PER_MPI_LIMB; i++) {
114 if (++nread > *ret_nread) {
115 printk
116 ("MPI: mpi larger than buffer nread=%d ret_nread=%d\n",
117 nread, *ret_nread);
118 goto leave;
119 }
120 a <<= 8;
121 a |= *buffer++;
122 }
123 i = 0;
124 val->d[j - 1] = a;
125 } 101 }
126 102
127leave: 103 val = mpi_read_raw_data(buffer + 2, nbytes);
128 *ret_nread = nread; 104 if (!val)
105 return ERR_PTR(-ENOMEM);
106
107 *ret_nread = nbytes + 2;
129 return val; 108 return val;
130} 109}
131EXPORT_SYMBOL_GPL(mpi_read_from_buffer); 110EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
@@ -250,82 +229,6 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign)
250} 229}
251EXPORT_SYMBOL_GPL(mpi_get_buffer); 230EXPORT_SYMBOL_GPL(mpi_get_buffer);
252 231
253/****************
254 * Use BUFFER to update MPI.
255 */
256int mpi_set_buffer(MPI a, const void *xbuffer, unsigned nbytes, int sign)
257{
258 const uint8_t *buffer = xbuffer, *p;
259 mpi_limb_t alimb;
260 int nlimbs;
261 int i;
262
263 nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
264 if (RESIZE_IF_NEEDED(a, nlimbs) < 0)
265 return -ENOMEM;
266 a->sign = sign;
267
268 for (i = 0, p = buffer + nbytes - 1; p >= buffer + BYTES_PER_MPI_LIMB;) {
269#if BYTES_PER_MPI_LIMB == 4
270 alimb = (mpi_limb_t) *p--;
271 alimb |= (mpi_limb_t) *p-- << 8;
272 alimb |= (mpi_limb_t) *p-- << 16;
273 alimb |= (mpi_limb_t) *p-- << 24;
274#elif BYTES_PER_MPI_LIMB == 8
275 alimb = (mpi_limb_t) *p--;
276 alimb |= (mpi_limb_t) *p-- << 8;
277 alimb |= (mpi_limb_t) *p-- << 16;
278 alimb |= (mpi_limb_t) *p-- << 24;
279 alimb |= (mpi_limb_t) *p-- << 32;
280 alimb |= (mpi_limb_t) *p-- << 40;
281 alimb |= (mpi_limb_t) *p-- << 48;
282 alimb |= (mpi_limb_t) *p-- << 56;
283#else
284#error please implement for this limb size.
285#endif
286 a->d[i++] = alimb;
287 }
288 if (p >= buffer) {
289#if BYTES_PER_MPI_LIMB == 4
290 alimb = *p--;
291 if (p >= buffer)
292 alimb |= (mpi_limb_t) *p-- << 8;
293 if (p >= buffer)
294 alimb |= (mpi_limb_t) *p-- << 16;
295 if (p >= buffer)
296 alimb |= (mpi_limb_t) *p-- << 24;
297#elif BYTES_PER_MPI_LIMB == 8
298 alimb = (mpi_limb_t) *p--;
299 if (p >= buffer)
300 alimb |= (mpi_limb_t) *p-- << 8;
301 if (p >= buffer)
302 alimb |= (mpi_limb_t) *p-- << 16;
303 if (p >= buffer)
304 alimb |= (mpi_limb_t) *p-- << 24;
305 if (p >= buffer)
306 alimb |= (mpi_limb_t) *p-- << 32;
307 if (p >= buffer)
308 alimb |= (mpi_limb_t) *p-- << 40;
309 if (p >= buffer)
310 alimb |= (mpi_limb_t) *p-- << 48;
311 if (p >= buffer)
312 alimb |= (mpi_limb_t) *p-- << 56;
313#else
314#error please implement for this limb size.
315#endif
316 a->d[i++] = alimb;
317 }
318 a->nlimbs = i;
319
320 if (i != nlimbs) {
321 pr_emerg("MPI: mpi_set_buffer: Assertion failed (%d != %d)", i,
322 nlimbs);
323 BUG();
324 }
325 return 0;
326}
327EXPORT_SYMBOL_GPL(mpi_set_buffer);
328
329/** 232/**
330 * mpi_write_to_sgl() - Function exports MPI to an sgl (msb first) 233
331 * 234 *
@@ -335,16 +238,13 @@ EXPORT_SYMBOL_GPL(mpi_set_buffer);
335 * @a: a multi precision integer 238 * @a: a multi precision integer
336 * @sgl: scatterlist to write to. Needs to be at least 239 * @sgl: scatterlist to write to. Needs to be at least
337 * mpi_get_size(a) long. 240 * mpi_get_size(a) long.
338 * @nbytes: in/out param - it has the be set to the maximum number of 241 * @nbytes: the number of bytes to write. Leading bytes will be
339 * bytes that can be written to sgl. This has to be at least 242 * filled with zero.
340 * the size of the integer a. On return it receives the actual
341 * length of the data written on success or the data that would
342 * be written if buffer was too small.
343 * @sign: if not NULL, it will be set to the sign of a. 243 * @sign: if not NULL, it will be set to the sign of a.
344 * 244 *
345 * Return: 0 on success or error code in case of error 245 * Return: 0 on success or error code in case of error
346 */ 246 */
347int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes, 247int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned nbytes,
348 int *sign) 248 int *sign)
349{ 249{
350 u8 *p, *p2; 250 u8 *p, *p2;
@@ -356,55 +256,60 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
356#error please implement for this limb size. 256#error please implement for this limb size.
357#endif 257#endif
358 unsigned int n = mpi_get_size(a); 258 unsigned int n = mpi_get_size(a);
359 int i, x, y = 0, lzeros, buf_len; 259 struct sg_mapping_iter miter;
360 260 int i, x, buf_len;
361 if (!nbytes) 261 int nents;
362 return -EINVAL;
363 262
364 if (sign) 263 if (sign)
365 *sign = a->sign; 264 *sign = a->sign;
366 265
367 lzeros = count_lzeros(a); 266 if (nbytes < n)
368
369 if (*nbytes < n - lzeros) {
370 *nbytes = n - lzeros;
371 return -EOVERFLOW; 267 return -EOVERFLOW;
372 }
373 268
374 *nbytes = n - lzeros; 269 nents = sg_nents_for_len(sgl, nbytes);
375 buf_len = sgl->length; 270 if (nents < 0)
376 p2 = sg_virt(sgl); 271 return -EINVAL;
377 272
378 for (i = a->nlimbs - 1 - lzeros / BYTES_PER_MPI_LIMB, 273 sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);
379 lzeros %= BYTES_PER_MPI_LIMB; 274 sg_miter_next(&miter);
380 i >= 0; i--) { 275 buf_len = miter.length;
276 p2 = miter.addr;
277
278 while (nbytes > n) {
279 i = min_t(unsigned, nbytes - n, buf_len);
280 memset(p2, 0, i);
281 p2 += i;
282 nbytes -= i;
283
284 buf_len -= i;
285 if (!buf_len) {
286 sg_miter_next(&miter);
287 buf_len = miter.length;
288 p2 = miter.addr;
289 }
290 }
291
292 for (i = a->nlimbs - 1; i >= 0; i--) {
381#if BYTES_PER_MPI_LIMB == 4 293#if BYTES_PER_MPI_LIMB == 4
382 alimb = cpu_to_be32(a->d[i]); 294 alimb = a->d[i] ? cpu_to_be32(a->d[i]) : 0;
383#elif BYTES_PER_MPI_LIMB == 8 295#elif BYTES_PER_MPI_LIMB == 8
384 alimb = cpu_to_be64(a->d[i]); 296 alimb = a->d[i] ? cpu_to_be64(a->d[i]) : 0;
385#else 297#else
386#error please implement for this limb size. 298#error please implement for this limb size.
387#endif 299#endif
388 if (lzeros) { 300 p = (u8 *)&alimb;
389 y = lzeros;
390 lzeros = 0;
391 }
392 301
393 p = (u8 *)&alimb + y; 302 for (x = 0; x < sizeof(alimb); x++) {
394
395 for (x = 0; x < sizeof(alimb) - y; x++) {
396 if (!buf_len) {
397 sgl = sg_next(sgl);
398 if (!sgl)
399 return -EINVAL;
400 buf_len = sgl->length;
401 p2 = sg_virt(sgl);
402 }
403 *p2++ = *p++; 303 *p2++ = *p++;
404 buf_len--; 304 if (!--buf_len) {
305 sg_miter_next(&miter);
306 buf_len = miter.length;
307 p2 = miter.addr;
308 }
405 } 309 }
406 y = 0;
407 } 310 }
311
312 sg_miter_stop(&miter);
408 return 0; 313 return 0;
409} 314}
410EXPORT_SYMBOL_GPL(mpi_write_to_sgl); 315EXPORT_SYMBOL_GPL(mpi_write_to_sgl);
@@ -424,19 +329,23 @@ EXPORT_SYMBOL_GPL(mpi_write_to_sgl);
424 */ 329 */
425MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes) 330MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
426{ 331{
427 struct scatterlist *sg; 332 struct sg_mapping_iter miter;
428 int x, i, j, z, lzeros, ents;
429 unsigned int nbits, nlimbs; 333 unsigned int nbits, nlimbs;
334 int x, j, z, lzeros, ents;
335 unsigned int len;
336 const u8 *buff;
430 mpi_limb_t a; 337 mpi_limb_t a;
431 MPI val = NULL; 338 MPI val = NULL;
432 339
433 lzeros = 0; 340 ents = sg_nents_for_len(sgl, nbytes);
434 ents = sg_nents(sgl); 341 if (ents < 0)
342 return NULL;
435 343
436 for_each_sg(sgl, sg, ents, i) { 344 sg_miter_start(&miter, sgl, ents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
437 const u8 *buff = sg_virt(sg);
438 int len = sg->length;
439 345
346 lzeros = 0;
347 len = 0;
348 while (nbytes > 0) {
440 while (len && !*buff) { 349 while (len && !*buff) {
441 lzeros++; 350 lzeros++;
442 len--; 351 len--;
@@ -446,12 +355,14 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
 		if (len && *buff)
 			break;
 
-		ents--;
+		sg_miter_next(&miter);
+		buff = miter.addr;
+		len = miter.length;
+
 		nbytes -= lzeros;
 		lzeros = 0;
 	}
 
-	sgl = sg;
 	nbytes -= lzeros;
 	nbits = nbytes * 8;
 	if (nbits > MAX_EXTERN_MPI_BITS) {
@@ -460,8 +371,7 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
 	}
 
 	if (nbytes > 0)
-		nbits -= count_leading_zeros(*(u8 *)(sg_virt(sgl) + lzeros)) -
-				(BITS_PER_LONG - 8);
+		nbits -= count_leading_zeros(*buff) - (BITS_PER_LONG - 8);
 
 	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
 	val = mpi_alloc(nlimbs);
@@ -480,21 +390,24 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
 	z = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
 	z %= BYTES_PER_MPI_LIMB;
 
-	for_each_sg(sgl, sg, ents, i) {
-		const u8 *buffer = sg_virt(sg) + lzeros;
-		int len = sg->length - lzeros;
-
+	for (;;) {
 		for (x = 0; x < len; x++) {
 			a <<= 8;
-			a |= *buffer++;
+			a |= *buff++;
 			if (((z + x + 1) % BYTES_PER_MPI_LIMB) == 0) {
 				val->d[j--] = a;
 				a = 0;
 			}
 		}
 		z += x;
-		lzeros = 0;
+
+		if (!sg_miter_next(&miter))
+			break;
+
+		buff = miter.addr;
+		len = miter.length;
 	}
+
 	return val;
 }
 EXPORT_SYMBOL_GPL(mpi_read_raw_from_sgl);
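
For reference, a minimal sketch of the sg_mapping_iter pattern the converted MPI helpers rely on. It is illustrative only and not part of the patch; sgl_sum_bytes() is a hypothetical example function and its error handling is reduced to the essentials.

#include <linux/kernel.h>
#include <linux/scatterlist.h>

/*
 * Illustrative only: walk a scatterlist one mapped chunk at a time and
 * sum the first @len bytes, mirroring how mpi_read_raw_from_sgl() now
 * consumes its input.  sg_nents_for_len() checks up front that the list
 * really covers @len bytes.
 */
static int sgl_sum_bytes(struct scatterlist *sgl, unsigned int len, u8 *sum)
{
	struct sg_mapping_iter miter;
	int nents;
	u8 acc = 0;

	nents = sg_nents_for_len(sgl, len);
	if (nents < 0)
		return nents;

	/* SG_MITER_ATOMIC maps with kmap_atomic(), so no sleeping inside */
	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	while (len && sg_miter_next(&miter)) {
		size_t n = min_t(size_t, len, miter.length);
		size_t i;

		for (i = 0; i < n; i++)
			acc += ((const u8 *)miter.addr)[i];
		len -= n;
	}
	sg_miter_stop(&miter);

	*sum = acc;
	return 0;
}
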
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index 9e443fccad4c..c0b3030b5634 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -18,6 +18,7 @@
 #include <keys/user-type.h>
 #include <keys/big_key-type.h>
 #include <crypto/rng.h>
+#include <crypto/skcipher.h>
 
 /*
  * Layout of key payload words.
@@ -74,7 +75,7 @@ static const char big_key_alg_name[] = "ecb(aes)";
  * Crypto algorithms for big_key data encryption
  */
 static struct crypto_rng *big_key_rng;
-static struct crypto_blkcipher *big_key_blkcipher;
+static struct crypto_skcipher *big_key_skcipher;
 
 /*
  * Generate random key to encrypt big_key data
@@ -91,22 +92,26 @@ static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key)
 {
 	int ret = -EINVAL;
 	struct scatterlist sgio;
-	struct blkcipher_desc desc;
+	SKCIPHER_REQUEST_ON_STACK(req, big_key_skcipher);
 
-	if (crypto_blkcipher_setkey(big_key_blkcipher, key, ENC_KEY_SIZE)) {
+	if (crypto_skcipher_setkey(big_key_skcipher, key, ENC_KEY_SIZE)) {
 		ret = -EAGAIN;
 		goto error;
 	}
 
-	desc.flags = 0;
-	desc.tfm = big_key_blkcipher;
+	skcipher_request_set_tfm(req, big_key_skcipher);
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+				      NULL, NULL);
 
 	sg_init_one(&sgio, data, datalen);
+	skcipher_request_set_crypt(req, &sgio, &sgio, datalen, NULL);
 
 	if (op == BIG_KEY_ENC)
-		ret = crypto_blkcipher_encrypt(&desc, &sgio, &sgio, datalen);
+		ret = crypto_skcipher_encrypt(req);
 	else
-		ret = crypto_blkcipher_decrypt(&desc, &sgio, &sgio, datalen);
+		ret = crypto_skcipher_decrypt(req);
+
+	skcipher_request_zero(req);
 
 error:
 	return ret;
@@ -140,7 +145,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
	 *
	 * File content is stored encrypted with randomly generated key.
	 */
-	size_t enclen = ALIGN(datalen, crypto_blkcipher_blocksize(big_key_blkcipher));
+	size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher));
 
	/* prepare aligned data to encrypt */
	data = kmalloc(enclen, GFP_KERNEL);
@@ -288,7 +293,7 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
	struct file *file;
	u8 *data;
	u8 *enckey = (u8 *)key->payload.data[big_key_data];
-	size_t enclen = ALIGN(datalen, crypto_blkcipher_blocksize(big_key_blkcipher));
+	size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher));
 
	data = kmalloc(enclen, GFP_KERNEL);
	if (!data)
@@ -359,9 +364,10 @@ static int __init big_key_crypto_init(void)
 		goto error;
 
	/* init block cipher */
-	big_key_blkcipher = crypto_alloc_blkcipher(big_key_alg_name, 0, 0);
-	if (IS_ERR(big_key_blkcipher)) {
-		big_key_blkcipher = NULL;
+	big_key_skcipher = crypto_alloc_skcipher(big_key_alg_name,
+						 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(big_key_skcipher)) {
+		big_key_skcipher = NULL;
 		ret = -EFAULT;
 		goto error;
 	}
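
For reference, a minimal sketch of the synchronous skcipher calling sequence that big_key now follows. It is illustrative only and not part of the patch; ecb_encrypt_once() is a hypothetical example function, @len is assumed to be a multiple of the cipher block size, and error handling is abbreviated.

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/*
 * Illustrative only: allocate a synchronous skcipher (CRYPTO_ALG_ASYNC in
 * the mask excludes async implementations), key it, and run one in-place
 * ECB encryption with an on-stack request, as big_key_crypt() now does.
 */
static int ecb_encrypt_once(const u8 *key, unsigned int keylen,
			    u8 *buf, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_skcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free;

	{
		SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_tfm(req, tfm);
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      NULL, NULL);
		sg_init_one(&sg, buf, len);
		skcipher_request_set_crypt(req, &sg, &sg, len, NULL);

		ret = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);	/* wipe request state */
	}

out_free:
	crypto_free_skcipher(tfm);
	return ret;
}
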