author	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-04 20:11:08 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-04 20:11:08 -0400
commit	9eb31227cbccd3a37da0f42604f1ab5fc556bc53 (patch)
tree	9aa467e620e002bf01cecdd98e3908e0cc3e7221
parent	527cd20771888443b5d8707debe98f62c7a1f596 (diff)
parent	f444ec106407d600f17fa1a4bd14f84577401dec (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - add AEAD support to crypto engine
   - allow batch registration in simd

  Algorithms:
   - add CFB mode
   - add speck block cipher
   - add sm4 block cipher
   - new test case for crct10dif
   - improve scheduling latency on ARM
   - scatter/gather support to gcm in aesni
   - convert x86 crypto algorithms to skcipher

  Drivers:
   - hmac(sha224/sha256) support in inside-secure
   - aes gcm/ccm support in stm32
   - stm32mp1 support in stm32
   - ccree driver from staging tree
   - gcm support over QI in caam
   - add ks-sa hwrng driver"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (212 commits)
  crypto: ccree - remove unused enums
  crypto: ahash - Fix early termination in hash walk
  crypto: brcm - explicitly cast cipher to hash type
  crypto: talitos - don't leak pointers to authenc keys
  crypto: qat - don't leak pointers to authenc keys
  crypto: picoxcell - don't leak pointers to authenc keys
  crypto: ixp4xx - don't leak pointers to authenc keys
  crypto: chelsio - don't leak pointers to authenc keys
  crypto: caam/qi - don't leak pointers to authenc keys
  crypto: caam - don't leak pointers to authenc keys
  crypto: lrw - Free rctx->ext with kzfree
  crypto: talitos - fix IPsec cipher in length
  crypto: Deduplicate le32_to_cpu_array() and cpu_to_le32_array()
  crypto: doc - clarify hash callbacks state machine
  crypto: api - Keep failed instances alive
  crypto: api - Make crypto_alg_lookup static
  crypto: api - Remove unused crypto_type lookup function
  crypto: chelsio - Remove declaration of static function from header
  crypto: inside-secure - hmac(sha224) support
  crypto: inside-secure - hmac(sha256) support
  ..
-rw-r--r--Documentation/crypto/crypto_engine.rst48
-rw-r--r--Documentation/crypto/devel-algos.rst8
-rw-r--r--Documentation/devicetree/bindings/crypto/arm-cryptocell.txt3
-rw-r--r--Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt6
-rw-r--r--Documentation/devicetree/bindings/rng/imx-rng.txt (renamed from Documentation/devicetree/bindings/rng/imx-rngc.txt)11
-rw-r--r--Documentation/devicetree/bindings/rng/ks-sa-rng.txt21
-rw-r--r--Documentation/devicetree/bindings/rng/omap_rng.txt7
-rw-r--r--Documentation/devicetree/bindings/rng/st,stm32-rng.txt4
-rw-r--r--MAINTAINERS15
-rw-r--r--arch/arm/crypto/Kconfig6
-rw-r--r--arch/arm/crypto/Makefile4
-rw-r--r--arch/arm/crypto/aes-cipher-core.S19
-rw-r--r--arch/arm/crypto/speck-neon-core.S432
-rw-r--r--arch/arm/crypto/speck-neon-glue.c288
-rw-r--r--arch/arm64/crypto/Kconfig6
-rw-r--r--arch/arm64/crypto/Makefile8
-rw-r--r--arch/arm64/crypto/aes-ce-ccm-glue.c47
-rw-r--r--arch/arm64/crypto/aes-glue.c95
-rw-r--r--arch/arm64/crypto/aes-modes.S355
-rw-r--r--arch/arm64/crypto/aes-neonbs-glue.c48
-rw-r--r--arch/arm64/crypto/chacha20-neon-glue.c12
-rw-r--r--arch/arm64/crypto/sha256-glue.c36
-rw-r--r--arch/arm64/crypto/speck-neon-core.S352
-rw-r--r--arch/arm64/crypto/speck-neon-glue.c282
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S1404
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c230
-rw-r--r--arch/x86/crypto/blowfish_glue.c230
-rw-r--r--arch/x86/crypto/camellia_aesni_avx2_glue.c491
-rw-r--r--arch/x86/crypto/camellia_aesni_avx_glue.c495
-rw-r--r--arch/x86/crypto/camellia_glue.c356
-rw-r--r--arch/x86/crypto/cast5_avx_glue.c352
-rw-r--r--arch/x86/crypto/cast6_avx_glue.c489
-rw-r--r--arch/x86/crypto/des3_ede_glue.c238
-rw-r--r--arch/x86/crypto/glue_helper.c391
-rw-r--r--arch/x86/crypto/serpent_avx2_glue.c478
-rw-r--r--arch/x86/crypto/serpent_avx_glue.c518
-rw-r--r--arch/x86/crypto/serpent_sse2_glue.c519
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb.c28
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_ctx.h8
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb.c27
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_ctx.h8
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb.c30
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_ctx.h8
-rw-r--r--arch/x86/crypto/twofish_avx_glue.c493
-rw-r--r--arch/x86/crypto/twofish_glue_3way.c339
-rw-r--r--arch/x86/include/asm/crypto/camellia.h16
-rw-r--r--arch/x86/include/asm/crypto/glue_helper.h75
-rw-r--r--arch/x86/include/asm/crypto/serpent-avx.h17
-rw-r--r--arch/x86/include/asm/crypto/twofish.h19
-rw-r--r--crypto/Kconfig129
-rw-r--r--crypto/Makefile4
-rw-r--r--crypto/ablk_helper.c150
-rw-r--r--crypto/ahash.c25
-rw-r--r--crypto/algapi.c8
-rw-r--r--crypto/api.c34
-rw-r--r--crypto/cfb.c353
-rw-r--r--crypto/crypto_engine.c301
-rw-r--r--crypto/crypto_user.c2
-rw-r--r--crypto/ecc.c23
-rw-r--r--crypto/ecdh.c23
-rw-r--r--crypto/internal.h1
-rw-r--r--crypto/lrw.c154
-rw-r--r--crypto/mcryptd.c34
-rw-r--r--crypto/md4.c17
-rw-r--r--crypto/md5.c17
-rw-r--r--crypto/rsa-pkcs1pad.c2
-rw-r--r--crypto/simd.c50
-rw-r--r--crypto/sm4_generic.c244
-rw-r--r--crypto/speck.c307
-rw-r--r--crypto/tcrypt.c3
-rw-r--r--crypto/testmgr.c45
-rw-r--r--crypto/testmgr.h1882
-rw-r--r--crypto/xts.c72
-rw-r--r--drivers/char/hw_random/Kconfig7
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/bcm2835-rng.c2
-rw-r--r--drivers/char/hw_random/cavium-rng-vf.c2
-rw-r--r--drivers/char/hw_random/cavium-rng.c2
-rw-r--r--drivers/char/hw_random/imx-rngc.c2
-rw-r--r--drivers/char/hw_random/ks-sa-rng.c257
-rw-r--r--drivers/char/hw_random/mxc-rnga.c23
-rw-r--r--drivers/char/hw_random/omap-rng.c22
-rw-r--r--drivers/char/hw_random/stm32-rng.c44
-rw-r--r--drivers/crypto/Kconfig34
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/atmel-aes.c8
-rw-r--r--drivers/crypto/atmel-sha.c9
-rw-r--r--drivers/crypto/atmel-tdes.c9
-rw-r--r--drivers/crypto/bcm/cipher.c4
-rw-r--r--drivers/crypto/bcm/util.c1
-rw-r--r--drivers/crypto/bfin_crc.c743
-rw-r--r--drivers/crypto/bfin_crc.h124
-rw-r--r--drivers/crypto/caam/caamalg.c21
-rw-r--r--drivers/crypto/caam/caamalg_desc.c165
-rw-r--r--drivers/crypto/caam/caamalg_desc.h24
-rw-r--r--drivers/crypto/caam/caamalg_qi.c388
-rw-r--r--drivers/crypto/caam/ctrl.c42
-rw-r--r--drivers/crypto/caam/qi.c11
-rw-r--r--drivers/crypto/cavium/cpt/cptpf_main.c2
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c2
-rw-r--r--drivers/crypto/ccp/ccp-crypto-rsa.c7
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c2
-rw-r--r--drivers/crypto/ccp/ccp-debugfs.c7
-rw-r--r--drivers/crypto/ccp/ccp-dmaengine.c2
-rw-r--r--drivers/crypto/ccp/ccp-ops.c108
-rw-r--r--drivers/crypto/ccp/psp-dev.c15
-rw-r--r--drivers/crypto/ccp/sp-dev.c6
-rw-r--r--drivers/crypto/ccree/Makefile7
-rw-r--r--drivers/crypto/ccree/cc_aead.c2718
-rw-r--r--drivers/crypto/ccree/cc_aead.h109
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c1651
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.h71
-rw-r--r--drivers/crypto/ccree/cc_cipher.c1150
-rw-r--r--drivers/crypto/ccree/cc_cipher.h59
-rw-r--r--drivers/crypto/ccree/cc_crypto_ctx.h133
-rw-r--r--drivers/crypto/ccree/cc_debugfs.c101
-rw-r--r--drivers/crypto/ccree/cc_debugfs.h32
-rw-r--r--drivers/crypto/ccree/cc_driver.c518
-rw-r--r--drivers/crypto/ccree/cc_driver.h208
-rw-r--r--drivers/crypto/ccree/cc_fips.c120
-rw-r--r--drivers/crypto/ccree/cc_fips.h36
-rw-r--r--drivers/crypto/ccree/cc_hash.c2296
-rw-r--r--drivers/crypto/ccree/cc_hash.h109
-rw-r--r--drivers/crypto/ccree/cc_host_regs.h145
-rw-r--r--drivers/crypto/ccree/cc_hw_queue_defs.h576
-rw-r--r--drivers/crypto/ccree/cc_ivgen.c279
-rw-r--r--drivers/crypto/ccree/cc_ivgen.h55
-rw-r--r--drivers/crypto/ccree/cc_kernel_regs.h168
-rw-r--r--drivers/crypto/ccree/cc_lli_defs.h59
-rw-r--r--drivers/crypto/ccree/cc_pm.c122
-rw-r--r--drivers/crypto/ccree/cc_pm.h56
-rw-r--r--drivers/crypto/ccree/cc_request_mgr.c711
-rw-r--r--drivers/crypto/ccree/cc_request_mgr.h51
-rw-r--r--drivers/crypto/ccree/cc_sram_mgr.c120
-rw-r--r--drivers/crypto/ccree/cc_sram_mgr.h65
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c577
-rw-r--r--drivers/crypto/chelsio/chcr_algo.h11
-rw-r--r--drivers/crypto/chelsio/chcr_core.h6
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h31
-rw-r--r--drivers/crypto/chelsio/chcr_ipsec.c5
-rw-r--r--drivers/crypto/inside-secure/safexcel.c114
-rw-r--r--drivers/crypto/inside-secure/safexcel.h22
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c5
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c258
-rw-r--r--drivers/crypto/ixp4xx_crypto.c2
-rw-r--r--drivers/crypto/marvell/cesa.c1
-rw-r--r--drivers/crypto/mxs-dcp.c14
-rw-r--r--drivers/crypto/n2_core.c12
-rw-r--r--drivers/crypto/nx/nx-842-pseries.c5
-rw-r--r--drivers/crypto/omap-aes.c112
-rw-r--r--drivers/crypto/omap-aes.h3
-rw-r--r--drivers/crypto/omap-crypto.c4
-rw-r--r--drivers/crypto/omap-des.c24
-rw-r--r--drivers/crypto/omap-sham.c106
-rw-r--r--drivers/crypto/picoxcell_crypto.c2
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c3
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c9
-rw-r--r--drivers/crypto/s5p-sss.c34
-rw-r--r--drivers/crypto/sahara.c6
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c964
-rw-r--r--drivers/crypto/stm32/stm32-hash.c41
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-core.c1
-rw-r--r--drivers/crypto/talitos.c218
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c14
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c18
-rw-r--r--drivers/crypto/virtio/Kconfig1
-rw-r--r--drivers/crypto/virtio/virtio_crypto_algs.c16
-rw-r--r--drivers/crypto/virtio/virtio_crypto_common.h4
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c3
-rw-r--r--drivers/staging/ccree/Kconfig4
-rw-r--r--drivers/staging/ccree/Makefile2
-rw-r--r--include/crypto/ablk_helper.h32
-rw-r--r--include/crypto/algapi.h1
-rw-r--r--include/crypto/engine.h68
-rw-r--r--include/crypto/hash.h11
-rw-r--r--include/crypto/internal/hash.h5
-rw-r--r--include/crypto/internal/simd.h7
-rw-r--r--include/crypto/lrw.h44
-rw-r--r--include/crypto/sm4.h28
-rw-r--r--include/crypto/speck.h62
-rw-r--r--include/crypto/xts.h17
-rw-r--r--include/linux/byteorder/generic.h17
-rw-r--r--include/linux/crypto.h8
183 files changed, 22264 insertions, 7763 deletions
diff --git a/Documentation/crypto/crypto_engine.rst b/Documentation/crypto/crypto_engine.rst
new file mode 100644
index 000000000000..8272ac92a14f
--- /dev/null
+++ b/Documentation/crypto/crypto_engine.rst
@@ -0,0 +1,48 @@
1=============
2CRYPTO ENGINE
3=============
4
5Overview
6--------
7The crypto engine API (CE) is a crypto queue manager.
8
9Requirement
10-----------
11You have to put the struct crypto_engine_ctx at the start of your tfm_ctx:
12struct your_tfm_ctx {
13 struct crypto_engine_ctx enginectx;
14 ...
15};
16Why: since the CE manages only crypto_async_request, it cannot know the
17underlying request type and so has access only to the TFM.
18Using container_of() to reach __ctx is therefore impossible.
19Furthermore, the crypto engine cannot know the layout of "struct your_tfm_ctx",
20so it must assume that crypto_engine_ctx is at the start of it.
21
22Order of operations
23-------------------
24You have to obtain a struct crypto_engine via crypto_engine_alloc_init()
25and start it via crypto_engine_start().
26
27Before transferring any request, you have to fill the enginectx with:
28- prepare_request: (a function pointer) for any processing needed before handling the request
29- unprepare_request: (a function pointer) to undo what was done in prepare_request
30- do_one_request: (a function pointer) that performs the operation for the current request
31
32Note that those three functions get the crypto_async_request associated with the received request,
33so you need to recover the original request via container_of(areq, struct yourrequesttype_request, base);
34
35When your driver receives a crypto request, you have to transfer it to
36the crypto engine via one of:
37- crypto_transfer_ablkcipher_request_to_engine()
38- crypto_transfer_aead_request_to_engine()
39- crypto_transfer_akcipher_request_to_engine()
40- crypto_transfer_hash_request_to_engine()
41- crypto_transfer_skcipher_request_to_engine()
42
43At the end of request processing, a call to one of the following functions is needed:
44- crypto_finalize_ablkcipher_request
45- crypto_finalize_aead_request
46- crypto_finalize_akcipher_request
47- crypto_finalize_hash_request
48- crypto_finalize_skcipher_request
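[Editor's note] A minimal usage sketch of the API described above -- not part of
the patch; driver names such as my_skcipher_ctx and my_do_one_request are
hypothetical, and the field names follow include/crypto/engine.h as updated by
this series:

    #include <crypto/engine.h>
    #include <crypto/internal/skcipher.h>

    struct my_skcipher_ctx {
            struct crypto_engine_ctx enginectx;     /* must be first */
            /* driver-private key material would follow here */
    };

    /* Called by the engine for each queued request. */
    static int my_do_one_request(struct crypto_engine *engine, void *areq)
    {
            /* The engine hands over the crypto_async_request; recover the
             * original skcipher request via container_of(). */
            struct skcipher_request *req =
                    container_of(areq, struct skcipher_request, base);
            int err = 0;

            /* ... program the hardware and process req here ... */

            /* Tell the engine this request is finished. */
            crypto_finalize_skcipher_request(engine, req, err);
            return 0;
    }

    static int my_skcipher_init_tfm(struct crypto_skcipher *tfm)
    {
            struct my_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

            /* Fill the enginectx before any request is transferred. */
            ctx->enginectx.op.do_one_request = my_do_one_request;
            ctx->enginectx.op.prepare_request = NULL;
            ctx->enginectx.op.unprepare_request = NULL;
            return 0;
    }

At probe time the driver would allocate and start the queue manager with
crypto_engine_alloc_init(dev, true) followed by crypto_engine_start(engine),
and its .encrypt/.decrypt handlers would hand requests over with
crypto_transfer_skcipher_request_to_engine(engine, req).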
diff --git a/Documentation/crypto/devel-algos.rst b/Documentation/crypto/devel-algos.rst
index 66f50d32dcec..c45c6f400dbd 100644
--- a/Documentation/crypto/devel-algos.rst
+++ b/Documentation/crypto/devel-algos.rst
@@ -236,6 +236,14 @@ when used from another part of the kernel.
236 | 236 |
237 '---------------> HASH2 237 '---------------> HASH2
238 238
239Note that it is perfectly legal to "abandon" a request object:
240- call .init() and then (as many times) .update()
241- _not_ call any of .final(), .finup() or .export() at any point in the future
242
243In other words, implementations should mind resource allocation and clean-up.
244No resources related to request objects should remain allocated after a call
245to .init() or .update(), since there might be no chance to free them.
246
239 247
240Specifics Of Asynchronous HASH Transformation 248Specifics Of Asynchronous HASH Transformation
241~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 249~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
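[Editor's note] A minimal sketch of the rule added above -- not part of the
patch; the structure and function names are hypothetical. All per-request state
lives in the request context that the API reserves (sized via
crypto_ahash_set_reqsize()), so an abandoned request leaks nothing:

    #include <crypto/internal/hash.h>
    #include <crypto/sha.h>
    #include <linux/string.h>

    struct my_hash_reqctx {
            u64 byte_count;                 /* total bytes hashed so far */
            unsigned int partial_len;       /* bytes buffered below */
            u8 partial[SHA256_BLOCK_SIZE];  /* partial block buffer */
    };

    static int my_hash_init(struct ahash_request *req)
    {
            struct my_hash_reqctx *rctx = ahash_request_ctx(req);

            /* No kmalloc() stashed for .final(): the caller may legally
             * never invoke .final(), .finup() or .export(). */
            memset(rctx, 0, sizeof(*rctx));
            return 0;
    }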
diff --git a/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt b/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
index cec8d5d74e26..c2598ab27f2e 100644
--- a/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
+++ b/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
@@ -1,7 +1,8 @@
1Arm TrustZone CryptoCell cryptographic engine 1Arm TrustZone CryptoCell cryptographic engine
2 2
3Required properties: 3Required properties:
4- compatible: Should be "arm,cryptocell-712-ree". 4- compatible: Should be one of: "arm,cryptocell-712-ree",
5 "arm,cryptocell-710-ree" or "arm,cryptocell-630p-ree".
5- reg: Base physical address of the engine and length of memory mapped region. 6- reg: Base physical address of the engine and length of memory mapped region.
6- interrupts: Interrupt number for the device. 7- interrupts: Interrupt number for the device.
7 8
diff --git a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
index 30c3ce6b502e..5dba55cdfa63 100644
--- a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
+++ b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
@@ -8,7 +8,11 @@ Required properties:
8- interrupt-names: Should be "ring0", "ring1", "ring2", "ring3", "eip", "mem". 8- interrupt-names: Should be "ring0", "ring1", "ring2", "ring3", "eip", "mem".
9 9
10Optional properties: 10Optional properties:
11- clocks: Reference to the crypto engine clock. 11- clocks: Reference to the crypto engine clocks, the second clock is
12 needed for the Armada 7K/8K SoCs.
13- clock-names: mandatory if there is a second clock, in this case the
14 name must be "core" for the first clock and "reg" for
15 the second one.
12 16
13Example: 17Example:
14 18
diff --git a/Documentation/devicetree/bindings/rng/imx-rngc.txt b/Documentation/devicetree/bindings/rng/imx-rng.txt
index 93c7174a7bed..405c2b00ccb0 100644
--- a/Documentation/devicetree/bindings/rng/imx-rngc.txt
+++ b/Documentation/devicetree/bindings/rng/imx-rng.txt
@@ -1,15 +1,14 @@
1Freescale RNGC (Random Number Generator Version C) 1Freescale RNGA/RNGB/RNGC (Random Number Generator Versions A, B and C)
2
3The driver also supports version B, which is mostly compatible
4to version C.
5 2
6Required properties: 3Required properties:
7- compatible : should be one of 4- compatible : should be one of
5 "fsl,imx21-rnga"
6 "fsl,imx31-rnga" (backward compatible with "fsl,imx21-rnga")
8 "fsl,imx25-rngb" 7 "fsl,imx25-rngb"
9 "fsl,imx35-rngc" 8 "fsl,imx35-rngc"
10- reg : offset and length of the register set of this block 9- reg : offset and length of the register set of this block
11- interrupts : the interrupt number for the RNGC block 10- interrupts : the interrupt number for the RNG block
12- clocks : the RNGC clk source 11- clocks : the RNG clk source
13 12
14Example: 13Example:
15 14
diff --git a/Documentation/devicetree/bindings/rng/ks-sa-rng.txt b/Documentation/devicetree/bindings/rng/ks-sa-rng.txt
new file mode 100644
index 000000000000..b7a65b487901
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/ks-sa-rng.txt
@@ -0,0 +1,21 @@
1Keystone SoC Hardware Random Number Generator (HWRNG) Module
2
3On Keystone SoCs, the HWRNG module is a submodule of the Security Accelerator.
4
5- compatible: should be "ti,keystone-rng"
6- ti,syscon-sa-cfg: phandle to syscon node of the SA configuration registers.
7 These registers are shared between the hwrng and crypto drivers.
8- clocks: phandle to the reference clocks for the subsystem
9- clock-names: functional clock name. Should be set to "fck"
10- reg: HWRNG module register space
11
12Example:
13/* K2HK */
14
15rng@24000 {
16 compatible = "ti,keystone-rng";
17 ti,syscon-sa-cfg = <&sa_config>;
18 clocks = <&clksa>;
19 clock-names = "fck";
20 reg = <0x24000 0x1000>;
21};
diff --git a/Documentation/devicetree/bindings/rng/omap_rng.txt b/Documentation/devicetree/bindings/rng/omap_rng.txt
index 9cf7876ab434..ea434ce50f36 100644
--- a/Documentation/devicetree/bindings/rng/omap_rng.txt
+++ b/Documentation/devicetree/bindings/rng/omap_rng.txt
@@ -13,7 +13,12 @@ Required properties:
13- interrupts : the interrupt number for the RNG module. 13- interrupts : the interrupt number for the RNG module.
14 Used for "ti,omap4-rng" and "inside-secure,safexcel-eip76" 14 Used for "ti,omap4-rng" and "inside-secure,safexcel-eip76"
15- clocks: the trng clock source. Only mandatory for the 15- clocks: the trng clock source. Only mandatory for the
16 "inside-secure,safexcel-eip76" compatible. 16 "inside-secure,safexcel-eip76" compatible, the second clock is
17 needed for the Armada 7K/8K SoCs
18- clock-names: mandatory if there is a second clock, in this case the
19 name must be "core" for the first clock and "reg" for the second
20 one
21
17 22
18Example: 23Example:
19/* AM335x */ 24/* AM335x */
diff --git a/Documentation/devicetree/bindings/rng/st,stm32-rng.txt b/Documentation/devicetree/bindings/rng/st,stm32-rng.txt
index 47f04176f93b..1dfa7d51e006 100644
--- a/Documentation/devicetree/bindings/rng/st,stm32-rng.txt
+++ b/Documentation/devicetree/bindings/rng/st,stm32-rng.txt
@@ -11,6 +11,10 @@ Required properties:
11- interrupts : The designated IRQ line for the RNG 11- interrupts : The designated IRQ line for the RNG
12- clocks : The clock needed to enable the RNG 12- clocks : The clock needed to enable the RNG
13 13
14Optional properties:
15- resets : The reset to properly start RNG
16- clock-error-detect : Enable the clock detection management
17
14Example: 18Example:
15 19
16 rng: rng@50060800 { 20 rng: rng@50060800 {
diff --git a/MAINTAINERS b/MAINTAINERS
index 2328eed6aea9..9d42bb8bb120 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3252,12 +3252,11 @@ F: drivers/net/ieee802154/cc2520.c
3252F: include/linux/spi/cc2520.h 3252F: include/linux/spi/cc2520.h
3253F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt 3253F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
3254 3254
3255CCREE ARM TRUSTZONE CRYPTOCELL 700 REE DRIVER 3255CCREE ARM TRUSTZONE CRYPTOCELL REE DRIVER
3256M: Gilad Ben-Yossef <gilad@benyossef.com> 3256M: Gilad Ben-Yossef <gilad@benyossef.com>
3257L: linux-crypto@vger.kernel.org 3257L: linux-crypto@vger.kernel.org
3258L: driverdev-devel@linuxdriverproject.org
3259S: Supported 3258S: Supported
3260F: drivers/staging/ccree/ 3259F: drivers/crypto/ccree/
3261W: https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family 3260W: https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family
3262 3261
3263CEC FRAMEWORK 3262CEC FRAMEWORK
@@ -6962,7 +6961,7 @@ F: drivers/input/input-mt.c
6962K: \b(ABS|SYN)_MT_ 6961K: \b(ABS|SYN)_MT_
6963 6962
6964INSIDE SECURE CRYPTO DRIVER 6963INSIDE SECURE CRYPTO DRIVER
6965M: Antoine Tenart <antoine.tenart@free-electrons.com> 6964M: Antoine Tenart <antoine.tenart@bootlin.com>
6966F: drivers/crypto/inside-secure/ 6965F: drivers/crypto/inside-secure/
6967S: Maintained 6966S: Maintained
6968L: linux-crypto@vger.kernel.org 6967L: linux-crypto@vger.kernel.org
@@ -7200,6 +7199,14 @@ L: linux-rdma@vger.kernel.org
7200S: Supported 7199S: Supported
7201F: drivers/infiniband/hw/i40iw/ 7200F: drivers/infiniband/hw/i40iw/
7202 7201
7202INTEL SHA MULTIBUFFER DRIVER
7203M: Megha Dey <megha.dey@linux.intel.com>
7204R: Tim Chen <tim.c.chen@linux.intel.com>
7205L: linux-crypto@vger.kernel.org
7206S: Supported
7207F: arch/x86/crypto/sha*-mb
7208F: crypto/mcryptd.c
7209
7203INTEL TELEMETRY DRIVER 7210INTEL TELEMETRY DRIVER
7204M: Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com> 7211M: Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>
7205L: platform-driver-x86@vger.kernel.org 7212L: platform-driver-x86@vger.kernel.org
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index b8e69fe282b8..925d1364727a 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -121,4 +121,10 @@ config CRYPTO_CHACHA20_NEON
121 select CRYPTO_BLKCIPHER 121 select CRYPTO_BLKCIPHER
122 select CRYPTO_CHACHA20 122 select CRYPTO_CHACHA20
123 123
124config CRYPTO_SPECK_NEON
125 tristate "NEON accelerated Speck cipher algorithms"
126 depends on KERNEL_MODE_NEON
127 select CRYPTO_BLKCIPHER
128 select CRYPTO_SPECK
129
124endif 130endif
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 30ef8e291271..3304e671918d 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
10obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o 10obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
11obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o 11obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
12obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o 12obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o
13obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o
13 14
14ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o 15ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
15ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o 16ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
@@ -53,7 +54,9 @@ ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
53crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o 54crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
54crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o 55crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o
55chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o 56chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o
57speck-neon-y := speck-neon-core.o speck-neon-glue.o
56 58
59ifdef REGENERATE_ARM_CRYPTO
57quiet_cmd_perl = PERL $@ 60quiet_cmd_perl = PERL $@
58 cmd_perl = $(PERL) $(<) > $(@) 61 cmd_perl = $(PERL) $(<) > $(@)
59 62
@@ -62,5 +65,6 @@ $(src)/sha256-core.S_shipped: $(src)/sha256-armv4.pl
62 65
63$(src)/sha512-core.S_shipped: $(src)/sha512-armv4.pl 66$(src)/sha512-core.S_shipped: $(src)/sha512-armv4.pl
64 $(call cmd,perl) 67 $(call cmd,perl)
68endif
65 69
66.PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S 70.PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S
diff --git a/arch/arm/crypto/aes-cipher-core.S b/arch/arm/crypto/aes-cipher-core.S
index 54b384084637..184d6c2d15d5 100644
--- a/arch/arm/crypto/aes-cipher-core.S
+++ b/arch/arm/crypto/aes-cipher-core.S
@@ -174,6 +174,16 @@
174 .ltorg 174 .ltorg
175 .endm 175 .endm
176 176
177ENTRY(__aes_arm_encrypt)
178 do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2
179ENDPROC(__aes_arm_encrypt)
180
181 .align 5
182ENTRY(__aes_arm_decrypt)
183 do_crypt iround, crypto_it_tab, __aes_arm_inverse_sbox, 0
184ENDPROC(__aes_arm_decrypt)
185
186 .section ".rodata", "a"
177 .align L1_CACHE_SHIFT 187 .align L1_CACHE_SHIFT
178 .type __aes_arm_inverse_sbox, %object 188 .type __aes_arm_inverse_sbox, %object
179__aes_arm_inverse_sbox: 189__aes_arm_inverse_sbox:
@@ -210,12 +220,3 @@ __aes_arm_inverse_sbox:
210 .byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26 220 .byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
211 .byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d 221 .byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
212 .size __aes_arm_inverse_sbox, . - __aes_arm_inverse_sbox 222 .size __aes_arm_inverse_sbox, . - __aes_arm_inverse_sbox
213
214ENTRY(__aes_arm_encrypt)
215 do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2
216ENDPROC(__aes_arm_encrypt)
217
218 .align 5
219ENTRY(__aes_arm_decrypt)
220 do_crypt iround, crypto_it_tab, __aes_arm_inverse_sbox, 0
221ENDPROC(__aes_arm_decrypt)
diff --git a/arch/arm/crypto/speck-neon-core.S b/arch/arm/crypto/speck-neon-core.S
new file mode 100644
index 000000000000..3c1e203e53b9
--- /dev/null
+++ b/arch/arm/crypto/speck-neon-core.S
@@ -0,0 +1,432 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
4 *
5 * Copyright (c) 2018 Google, Inc
6 *
7 * Author: Eric Biggers <ebiggers@google.com>
8 */
9
10#include <linux/linkage.h>
11
12 .text
13 .fpu neon
14
15 // arguments
16 ROUND_KEYS .req r0 // const {u64,u32} *round_keys
17 NROUNDS .req r1 // int nrounds
18 DST .req r2 // void *dst
19 SRC .req r3 // const void *src
20 NBYTES .req r4 // unsigned int nbytes
21 TWEAK .req r5 // void *tweak
22
23 // registers which hold the data being encrypted/decrypted
24 X0 .req q0
25 X0_L .req d0
26 X0_H .req d1
27 Y0 .req q1
28 Y0_H .req d3
29 X1 .req q2
30 X1_L .req d4
31 X1_H .req d5
32 Y1 .req q3
33 Y1_H .req d7
34 X2 .req q4
35 X2_L .req d8
36 X2_H .req d9
37 Y2 .req q5
38 Y2_H .req d11
39 X3 .req q6
40 X3_L .req d12
41 X3_H .req d13
42 Y3 .req q7
43 Y3_H .req d15
44
45 // the round key, duplicated in all lanes
46 ROUND_KEY .req q8
47 ROUND_KEY_L .req d16
48 ROUND_KEY_H .req d17
49
50 // index vector for vtbl-based 8-bit rotates
51 ROTATE_TABLE .req d18
52
53 // multiplication table for updating XTS tweaks
54 GF128MUL_TABLE .req d19
55 GF64MUL_TABLE .req d19
56
57 // current XTS tweak value(s)
58 TWEAKV .req q10
59 TWEAKV_L .req d20
60 TWEAKV_H .req d21
61
62 TMP0 .req q12
63 TMP0_L .req d24
64 TMP0_H .req d25
65 TMP1 .req q13
66 TMP2 .req q14
67 TMP3 .req q15
68
69 .align 4
70.Lror64_8_table:
71 .byte 1, 2, 3, 4, 5, 6, 7, 0
72.Lror32_8_table:
73 .byte 1, 2, 3, 0, 5, 6, 7, 4
74.Lrol64_8_table:
75 .byte 7, 0, 1, 2, 3, 4, 5, 6
76.Lrol32_8_table:
77 .byte 3, 0, 1, 2, 7, 4, 5, 6
78.Lgf128mul_table:
79 .byte 0, 0x87
80 .fill 14
81.Lgf64mul_table:
82 .byte 0, 0x1b, (0x1b << 1), (0x1b << 1) ^ 0x1b
83 .fill 12
84
85/*
86 * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
87 *
88 * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
89 * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
90 * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
91 *
92 * The 8-bit rotates are implemented using vtbl instead of vshr + vsli because
93 * the vtbl approach is faster on some processors and the same speed on others.
94 */
95.macro _speck_round_128bytes n
96
97 // x = ror(x, 8)
98 vtbl.8 X0_L, {X0_L}, ROTATE_TABLE
99 vtbl.8 X0_H, {X0_H}, ROTATE_TABLE
100 vtbl.8 X1_L, {X1_L}, ROTATE_TABLE
101 vtbl.8 X1_H, {X1_H}, ROTATE_TABLE
102 vtbl.8 X2_L, {X2_L}, ROTATE_TABLE
103 vtbl.8 X2_H, {X2_H}, ROTATE_TABLE
104 vtbl.8 X3_L, {X3_L}, ROTATE_TABLE
105 vtbl.8 X3_H, {X3_H}, ROTATE_TABLE
106
107 // x += y
108 vadd.u\n X0, Y0
109 vadd.u\n X1, Y1
110 vadd.u\n X2, Y2
111 vadd.u\n X3, Y3
112
113 // x ^= k
114 veor X0, ROUND_KEY
115 veor X1, ROUND_KEY
116 veor X2, ROUND_KEY
117 veor X3, ROUND_KEY
118
119 // y = rol(y, 3)
120 vshl.u\n TMP0, Y0, #3
121 vshl.u\n TMP1, Y1, #3
122 vshl.u\n TMP2, Y2, #3
123 vshl.u\n TMP3, Y3, #3
124 vsri.u\n TMP0, Y0, #(\n - 3)
125 vsri.u\n TMP1, Y1, #(\n - 3)
126 vsri.u\n TMP2, Y2, #(\n - 3)
127 vsri.u\n TMP3, Y3, #(\n - 3)
128
129 // y ^= x
130 veor Y0, TMP0, X0
131 veor Y1, TMP1, X1
132 veor Y2, TMP2, X2
133 veor Y3, TMP3, X3
134.endm
135
136/*
137 * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
138 *
139 * This is the inverse of _speck_round_128bytes().
140 */
141.macro _speck_unround_128bytes n
142
143 // y ^= x
144 veor TMP0, Y0, X0
145 veor TMP1, Y1, X1
146 veor TMP2, Y2, X2
147 veor TMP3, Y3, X3
148
149 // y = ror(y, 3)
150 vshr.u\n Y0, TMP0, #3
151 vshr.u\n Y1, TMP1, #3
152 vshr.u\n Y2, TMP2, #3
153 vshr.u\n Y3, TMP3, #3
154 vsli.u\n Y0, TMP0, #(\n - 3)
155 vsli.u\n Y1, TMP1, #(\n - 3)
156 vsli.u\n Y2, TMP2, #(\n - 3)
157 vsli.u\n Y3, TMP3, #(\n - 3)
158
159 // x ^= k
160 veor X0, ROUND_KEY
161 veor X1, ROUND_KEY
162 veor X2, ROUND_KEY
163 veor X3, ROUND_KEY
164
165 // x -= y
166 vsub.u\n X0, Y0
167 vsub.u\n X1, Y1
168 vsub.u\n X2, Y2
169 vsub.u\n X3, Y3
170
171 // x = rol(x, 8);
172 vtbl.8 X0_L, {X0_L}, ROTATE_TABLE
173 vtbl.8 X0_H, {X0_H}, ROTATE_TABLE
174 vtbl.8 X1_L, {X1_L}, ROTATE_TABLE
175 vtbl.8 X1_H, {X1_H}, ROTATE_TABLE
176 vtbl.8 X2_L, {X2_L}, ROTATE_TABLE
177 vtbl.8 X2_H, {X2_H}, ROTATE_TABLE
178 vtbl.8 X3_L, {X3_L}, ROTATE_TABLE
179 vtbl.8 X3_H, {X3_H}, ROTATE_TABLE
180.endm
181
182.macro _xts128_precrypt_one dst_reg, tweak_buf, tmp
183
184 // Load the next source block
185 vld1.8 {\dst_reg}, [SRC]!
186
187 // Save the current tweak in the tweak buffer
188 vst1.8 {TWEAKV}, [\tweak_buf:128]!
189
190 // XOR the next source block with the current tweak
191 veor \dst_reg, TWEAKV
192
193 /*
194 * Calculate the next tweak by multiplying the current one by x,
195 * modulo p(x) = x^128 + x^7 + x^2 + x + 1.
196 */
197 vshr.u64 \tmp, TWEAKV, #63
198 vshl.u64 TWEAKV, #1
199 veor TWEAKV_H, \tmp\()_L
200 vtbl.8 \tmp\()_H, {GF128MUL_TABLE}, \tmp\()_H
201 veor TWEAKV_L, \tmp\()_H
202.endm
203
204.macro _xts64_precrypt_two dst_reg, tweak_buf, tmp
205
206 // Load the next two source blocks
207 vld1.8 {\dst_reg}, [SRC]!
208
209 // Save the current two tweaks in the tweak buffer
210 vst1.8 {TWEAKV}, [\tweak_buf:128]!
211
212 // XOR the next two source blocks with the current two tweaks
213 veor \dst_reg, TWEAKV
214
215 /*
216 * Calculate the next two tweaks by multiplying the current ones by x^2,
217 * modulo p(x) = x^64 + x^4 + x^3 + x + 1.
218 */
219 vshr.u64 \tmp, TWEAKV, #62
220 vshl.u64 TWEAKV, #2
221 vtbl.8 \tmp\()_L, {GF64MUL_TABLE}, \tmp\()_L
222 vtbl.8 \tmp\()_H, {GF64MUL_TABLE}, \tmp\()_H
223 veor TWEAKV, \tmp
224.endm
225
226/*
227 * _speck_xts_crypt() - Speck-XTS encryption/decryption
228 *
229 * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
230 * using Speck-XTS, specifically the variant with a block size of '2n' and round
231 * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
232 * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
233 * nonzero multiple of 128.
234 */
235.macro _speck_xts_crypt n, decrypting
236 push {r4-r7}
237 mov r7, sp
238
239 /*
240 * The first four parameters were passed in registers r0-r3. Load the
241 * additional parameters, which were passed on the stack.
242 */
243 ldr NBYTES, [sp, #16]
244 ldr TWEAK, [sp, #20]
245
246 /*
247 * If decrypting, modify the ROUND_KEYS parameter to point to the last
248 * round key rather than the first, since for decryption the round keys
249 * are used in reverse order.
250 */
251.if \decrypting
252.if \n == 64
253 add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #3
254 sub ROUND_KEYS, #8
255.else
256 add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #2
257 sub ROUND_KEYS, #4
258.endif
259.endif
260
261 // Load the index vector for vtbl-based 8-bit rotates
262.if \decrypting
263 ldr r12, =.Lrol\n\()_8_table
264.else
265 ldr r12, =.Lror\n\()_8_table
266.endif
267 vld1.8 {ROTATE_TABLE}, [r12:64]
268
269 // One-time XTS preparation
270
271 /*
272 * Allocate stack space to store 128 bytes worth of tweaks. For
273 * performance, this space is aligned to a 16-byte boundary so that we
274 * can use the load/store instructions that declare 16-byte alignment.
275 */
276 sub sp, #128
277 bic sp, #0xf
278
279.if \n == 64
280 // Load first tweak
281 vld1.8 {TWEAKV}, [TWEAK]
282
283 // Load GF(2^128) multiplication table
284 ldr r12, =.Lgf128mul_table
285 vld1.8 {GF128MUL_TABLE}, [r12:64]
286.else
287 // Load first tweak
288 vld1.8 {TWEAKV_L}, [TWEAK]
289
290 // Load GF(2^64) multiplication table
291 ldr r12, =.Lgf64mul_table
292 vld1.8 {GF64MUL_TABLE}, [r12:64]
293
294 // Calculate second tweak, packing it together with the first
295 vshr.u64 TMP0_L, TWEAKV_L, #63
296 vtbl.u8 TMP0_L, {GF64MUL_TABLE}, TMP0_L
297 vshl.u64 TWEAKV_H, TWEAKV_L, #1
298 veor TWEAKV_H, TMP0_L
299.endif
300
301.Lnext_128bytes_\@:
302
303 /*
304 * Load the source blocks into {X,Y}[0-3], XOR them with their XTS tweak
305 * values, and save the tweaks on the stack for later. Then
306 * de-interleave the 'x' and 'y' elements of each block, i.e. make it so
307 * that the X[0-3] registers contain only the second halves of blocks,
308 * and the Y[0-3] registers contain only the first halves of blocks.
309 * (Speck uses the order (y, x) rather than the more intuitive (x, y).)
310 */
311 mov r12, sp
312.if \n == 64
313 _xts128_precrypt_one X0, r12, TMP0
314 _xts128_precrypt_one Y0, r12, TMP0
315 _xts128_precrypt_one X1, r12, TMP0
316 _xts128_precrypt_one Y1, r12, TMP0
317 _xts128_precrypt_one X2, r12, TMP0
318 _xts128_precrypt_one Y2, r12, TMP0
319 _xts128_precrypt_one X3, r12, TMP0
320 _xts128_precrypt_one Y3, r12, TMP0
321 vswp X0_L, Y0_H
322 vswp X1_L, Y1_H
323 vswp X2_L, Y2_H
324 vswp X3_L, Y3_H
325.else
326 _xts64_precrypt_two X0, r12, TMP0
327 _xts64_precrypt_two Y0, r12, TMP0
328 _xts64_precrypt_two X1, r12, TMP0
329 _xts64_precrypt_two Y1, r12, TMP0
330 _xts64_precrypt_two X2, r12, TMP0
331 _xts64_precrypt_two Y2, r12, TMP0
332 _xts64_precrypt_two X3, r12, TMP0
333 _xts64_precrypt_two Y3, r12, TMP0
334 vuzp.32 Y0, X0
335 vuzp.32 Y1, X1
336 vuzp.32 Y2, X2
337 vuzp.32 Y3, X3
338.endif
339
340 // Do the cipher rounds
341
342 mov r12, ROUND_KEYS
343 mov r6, NROUNDS
344
345.Lnext_round_\@:
346.if \decrypting
347.if \n == 64
348 vld1.64 ROUND_KEY_L, [r12]
349 sub r12, #8
350 vmov ROUND_KEY_H, ROUND_KEY_L
351.else
352 vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]
353 sub r12, #4
354.endif
355 _speck_unround_128bytes \n
356.else
357.if \n == 64
358 vld1.64 ROUND_KEY_L, [r12]!
359 vmov ROUND_KEY_H, ROUND_KEY_L
360.else
361 vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]!
362.endif
363 _speck_round_128bytes \n
364.endif
365 subs r6, r6, #1
366 bne .Lnext_round_\@
367
368 // Re-interleave the 'x' and 'y' elements of each block
369.if \n == 64
370 vswp X0_L, Y0_H
371 vswp X1_L, Y1_H
372 vswp X2_L, Y2_H
373 vswp X3_L, Y3_H
374.else
375 vzip.32 Y0, X0
376 vzip.32 Y1, X1
377 vzip.32 Y2, X2
378 vzip.32 Y3, X3
379.endif
380
381 // XOR the encrypted/decrypted blocks with the tweaks we saved earlier
382 mov r12, sp
383 vld1.8 {TMP0, TMP1}, [r12:128]!
384 vld1.8 {TMP2, TMP3}, [r12:128]!
385 veor X0, TMP0
386 veor Y0, TMP1
387 veor X1, TMP2
388 veor Y1, TMP3
389 vld1.8 {TMP0, TMP1}, [r12:128]!
390 vld1.8 {TMP2, TMP3}, [r12:128]!
391 veor X2, TMP0
392 veor Y2, TMP1
393 veor X3, TMP2
394 veor Y3, TMP3
395
396 // Store the ciphertext in the destination buffer
397 vst1.8 {X0, Y0}, [DST]!
398 vst1.8 {X1, Y1}, [DST]!
399 vst1.8 {X2, Y2}, [DST]!
400 vst1.8 {X3, Y3}, [DST]!
401
402 // Continue if there are more 128-byte chunks remaining, else return
403 subs NBYTES, #128
404 bne .Lnext_128bytes_\@
405
406 // Store the next tweak
407.if \n == 64
408 vst1.8 {TWEAKV}, [TWEAK]
409.else
410 vst1.8 {TWEAKV_L}, [TWEAK]
411.endif
412
413 mov sp, r7
414 pop {r4-r7}
415 bx lr
416.endm
417
418ENTRY(speck128_xts_encrypt_neon)
419 _speck_xts_crypt n=64, decrypting=0
420ENDPROC(speck128_xts_encrypt_neon)
421
422ENTRY(speck128_xts_decrypt_neon)
423 _speck_xts_crypt n=64, decrypting=1
424ENDPROC(speck128_xts_decrypt_neon)
425
426ENTRY(speck64_xts_encrypt_neon)
427 _speck_xts_crypt n=32, decrypting=0
428ENDPROC(speck64_xts_encrypt_neon)
429
430ENTRY(speck64_xts_decrypt_neon)
431 _speck_xts_crypt n=32, decrypting=1
432ENDPROC(speck64_xts_decrypt_neon)
diff --git a/arch/arm/crypto/speck-neon-glue.c b/arch/arm/crypto/speck-neon-glue.c
new file mode 100644
index 000000000000..f012c3ea998f
--- /dev/null
+++ b/arch/arm/crypto/speck-neon-glue.c
@@ -0,0 +1,288 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
4 *
5 * Copyright (c) 2018 Google, Inc
6 *
7 * Note: the NIST recommendation for XTS only specifies a 128-bit block size,
8 * but a 64-bit version (needed for Speck64) is fairly straightforward; the math
9 * is just done in GF(2^64) instead of GF(2^128), with the reducing polynomial
10 * x^64 + x^4 + x^3 + x + 1 from the original XEX paper (Rogaway, 2004:
11 * "Efficient Instantiations of Tweakable Blockciphers and Refinements to Modes
12 * OCB and PMAC"), represented as 0x1B.
13 */
14
15#include <asm/hwcap.h>
16#include <asm/neon.h>
17#include <asm/simd.h>
18#include <crypto/algapi.h>
19#include <crypto/gf128mul.h>
20#include <crypto/internal/skcipher.h>
21#include <crypto/speck.h>
22#include <crypto/xts.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25
26/* The assembly functions only handle multiples of 128 bytes */
27#define SPECK_NEON_CHUNK_SIZE 128
28
29/* Speck128 */
30
31struct speck128_xts_tfm_ctx {
32 struct speck128_tfm_ctx main_key;
33 struct speck128_tfm_ctx tweak_key;
34};
35
36asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
37 void *dst, const void *src,
38 unsigned int nbytes, void *tweak);
39
40asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
41 void *dst, const void *src,
42 unsigned int nbytes, void *tweak);
43
44typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
45 u8 *, const u8 *);
46typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
47 const void *, unsigned int, void *);
48
49static __always_inline int
50__speck128_xts_crypt(struct skcipher_request *req,
51 speck128_crypt_one_t crypt_one,
52 speck128_xts_crypt_many_t crypt_many)
53{
54 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
55 const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
56 struct skcipher_walk walk;
57 le128 tweak;
58 int err;
59
60 err = skcipher_walk_virt(&walk, req, true);
61
62 crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
63
64 while (walk.nbytes > 0) {
65 unsigned int nbytes = walk.nbytes;
66 u8 *dst = walk.dst.virt.addr;
67 const u8 *src = walk.src.virt.addr;
68
69 if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
70 unsigned int count;
71
72 count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
73 kernel_neon_begin();
74 (*crypt_many)(ctx->main_key.round_keys,
75 ctx->main_key.nrounds,
76 dst, src, count, &tweak);
77 kernel_neon_end();
78 dst += count;
79 src += count;
80 nbytes -= count;
81 }
82
83 /* Handle any remainder with generic code */
84 while (nbytes >= sizeof(tweak)) {
85 le128_xor((le128 *)dst, (const le128 *)src, &tweak);
86 (*crypt_one)(&ctx->main_key, dst, dst);
87 le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
88 gf128mul_x_ble(&tweak, &tweak);
89
90 dst += sizeof(tweak);
91 src += sizeof(tweak);
92 nbytes -= sizeof(tweak);
93 }
94 err = skcipher_walk_done(&walk, nbytes);
95 }
96
97 return err;
98}
99
100static int speck128_xts_encrypt(struct skcipher_request *req)
101{
102 return __speck128_xts_crypt(req, crypto_speck128_encrypt,
103 speck128_xts_encrypt_neon);
104}
105
106static int speck128_xts_decrypt(struct skcipher_request *req)
107{
108 return __speck128_xts_crypt(req, crypto_speck128_decrypt,
109 speck128_xts_decrypt_neon);
110}
111
112static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
113 unsigned int keylen)
114{
115 struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
116 int err;
117
118 err = xts_verify_key(tfm, key, keylen);
119 if (err)
120 return err;
121
122 keylen /= 2;
123
124 err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
125 if (err)
126 return err;
127
128 return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
129}
130
131/* Speck64 */
132
133struct speck64_xts_tfm_ctx {
134 struct speck64_tfm_ctx main_key;
135 struct speck64_tfm_ctx tweak_key;
136};
137
138asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
139 void *dst, const void *src,
140 unsigned int nbytes, void *tweak);
141
142asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
143 void *dst, const void *src,
144 unsigned int nbytes, void *tweak);
145
146typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
147 u8 *, const u8 *);
148typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
149 const void *, unsigned int, void *);
150
151static __always_inline int
152__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one,
153 speck64_xts_crypt_many_t crypt_many)
154{
155 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
156 const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
157 struct skcipher_walk walk;
158 __le64 tweak;
159 int err;
160
161 err = skcipher_walk_virt(&walk, req, true);
162
163 crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
164
165 while (walk.nbytes > 0) {
166 unsigned int nbytes = walk.nbytes;
167 u8 *dst = walk.dst.virt.addr;
168 const u8 *src = walk.src.virt.addr;
169
170 if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
171 unsigned int count;
172
173 count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
174 kernel_neon_begin();
175 (*crypt_many)(ctx->main_key.round_keys,
176 ctx->main_key.nrounds,
177 dst, src, count, &tweak);
178 kernel_neon_end();
179 dst += count;
180 src += count;
181 nbytes -= count;
182 }
183
184 /* Handle any remainder with generic code */
185 while (nbytes >= sizeof(tweak)) {
186 *(__le64 *)dst = *(__le64 *)src ^ tweak;
187 (*crypt_one)(&ctx->main_key, dst, dst);
188 *(__le64 *)dst ^= tweak;
189 tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
190 ((tweak & cpu_to_le64(1ULL << 63)) ?
191 0x1B : 0));
192 dst += sizeof(tweak);
193 src += sizeof(tweak);
194 nbytes -= sizeof(tweak);
195 }
196 err = skcipher_walk_done(&walk, nbytes);
197 }
198
199 return err;
200}
201
202static int speck64_xts_encrypt(struct skcipher_request *req)
203{
204 return __speck64_xts_crypt(req, crypto_speck64_encrypt,
205 speck64_xts_encrypt_neon);
206}
207
208static int speck64_xts_decrypt(struct skcipher_request *req)
209{
210 return __speck64_xts_crypt(req, crypto_speck64_decrypt,
211 speck64_xts_decrypt_neon);
212}
213
214static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
215 unsigned int keylen)
216{
217 struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
218 int err;
219
220 err = xts_verify_key(tfm, key, keylen);
221 if (err)
222 return err;
223
224 keylen /= 2;
225
226 err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
227 if (err)
228 return err;
229
230 return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
231}
232
233static struct skcipher_alg speck_algs[] = {
234 {
235 .base.cra_name = "xts(speck128)",
236 .base.cra_driver_name = "xts-speck128-neon",
237 .base.cra_priority = 300,
238 .base.cra_blocksize = SPECK128_BLOCK_SIZE,
239 .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
240 .base.cra_alignmask = 7,
241 .base.cra_module = THIS_MODULE,
242 .min_keysize = 2 * SPECK128_128_KEY_SIZE,
243 .max_keysize = 2 * SPECK128_256_KEY_SIZE,
244 .ivsize = SPECK128_BLOCK_SIZE,
245 .walksize = SPECK_NEON_CHUNK_SIZE,
246 .setkey = speck128_xts_setkey,
247 .encrypt = speck128_xts_encrypt,
248 .decrypt = speck128_xts_decrypt,
249 }, {
250 .base.cra_name = "xts(speck64)",
251 .base.cra_driver_name = "xts-speck64-neon",
252 .base.cra_priority = 300,
253 .base.cra_blocksize = SPECK64_BLOCK_SIZE,
254 .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
255 .base.cra_alignmask = 7,
256 .base.cra_module = THIS_MODULE,
257 .min_keysize = 2 * SPECK64_96_KEY_SIZE,
258 .max_keysize = 2 * SPECK64_128_KEY_SIZE,
259 .ivsize = SPECK64_BLOCK_SIZE,
260 .walksize = SPECK_NEON_CHUNK_SIZE,
261 .setkey = speck64_xts_setkey,
262 .encrypt = speck64_xts_encrypt,
263 .decrypt = speck64_xts_decrypt,
264 }
265};
266
267static int __init speck_neon_module_init(void)
268{
269 if (!(elf_hwcap & HWCAP_NEON))
270 return -ENODEV;
271 return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
272}
273
274static void __exit speck_neon_module_exit(void)
275{
276 crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
277}
278
279module_init(speck_neon_module_init);
280module_exit(speck_neon_module_exit);
281
282MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
283MODULE_LICENSE("GPL");
284MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
285MODULE_ALIAS_CRYPTO("xts(speck128)");
286MODULE_ALIAS_CRYPTO("xts-speck128-neon");
287MODULE_ALIAS_CRYPTO("xts(speck64)");
288MODULE_ALIAS_CRYPTO("xts-speck64-neon");
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 285c36c7b408..cb5a243110c4 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -113,4 +113,10 @@ config CRYPTO_AES_ARM64_BS
113 select CRYPTO_AES_ARM64 113 select CRYPTO_AES_ARM64
114 select CRYPTO_SIMD 114 select CRYPTO_SIMD
115 115
116config CRYPTO_SPECK_NEON
117 tristate "NEON accelerated Speck cipher algorithms"
118 depends on KERNEL_MODE_NEON
119 select CRYPTO_BLKCIPHER
120 select CRYPTO_SPECK
121
116endif 122endif
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index cee9b8d9830b..8df9f326f449 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -53,20 +53,21 @@ sha512-arm64-y := sha512-glue.o sha512-core.o
53obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o 53obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o
54chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o 54chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o
55 55
56obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o
57speck-neon-y := speck-neon-core.o speck-neon-glue.o
58
56obj-$(CONFIG_CRYPTO_AES_ARM64) += aes-arm64.o 59obj-$(CONFIG_CRYPTO_AES_ARM64) += aes-arm64.o
57aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o 60aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o
58 61
59obj-$(CONFIG_CRYPTO_AES_ARM64_BS) += aes-neon-bs.o 62obj-$(CONFIG_CRYPTO_AES_ARM64_BS) += aes-neon-bs.o
60aes-neon-bs-y := aes-neonbs-core.o aes-neonbs-glue.o 63aes-neon-bs-y := aes-neonbs-core.o aes-neonbs-glue.o
61 64
62AFLAGS_aes-ce.o := -DINTERLEAVE=4
63AFLAGS_aes-neon.o := -DINTERLEAVE=4
64
65CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS 65CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS
66 66
67$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE 67$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
68 $(call if_changed_rule,cc_o_c) 68 $(call if_changed_rule,cc_o_c)
69 69
70ifdef REGENERATE_ARM64_CRYPTO
70quiet_cmd_perlasm = PERLASM $@ 71quiet_cmd_perlasm = PERLASM $@
71 cmd_perlasm = $(PERL) $(<) void $(@) 72 cmd_perlasm = $(PERL) $(<) void $(@)
72 73
@@ -75,5 +76,6 @@ $(src)/sha256-core.S_shipped: $(src)/sha512-armv8.pl
75 76
76$(src)/sha512-core.S_shipped: $(src)/sha512-armv8.pl 77$(src)/sha512-core.S_shipped: $(src)/sha512-armv8.pl
77 $(call cmd,perlasm) 78 $(call cmd,perlasm)
79endif
78 80
79.PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S 81.PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index a1254036f2b1..68b11aa690e4 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -107,11 +107,13 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
107} 107}
108 108
109static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[], 109static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
110 u32 abytes, u32 *macp, bool use_neon) 110 u32 abytes, u32 *macp)
111{ 111{
112 if (likely(use_neon)) { 112 if (may_use_simd()) {
113 kernel_neon_begin();
113 ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc, 114 ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
114 num_rounds(key)); 115 num_rounds(key));
116 kernel_neon_end();
115 } else { 117 } else {
116 if (*macp > 0 && *macp < AES_BLOCK_SIZE) { 118 if (*macp > 0 && *macp < AES_BLOCK_SIZE) {
117 int added = min(abytes, AES_BLOCK_SIZE - *macp); 119 int added = min(abytes, AES_BLOCK_SIZE - *macp);
@@ -143,8 +145,7 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
143 } 145 }
144} 146}
145 147
146static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[], 148static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
147 bool use_neon)
148{ 149{
149 struct crypto_aead *aead = crypto_aead_reqtfm(req); 150 struct crypto_aead *aead = crypto_aead_reqtfm(req);
150 struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead); 151 struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
@@ -163,7 +164,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[],
163 ltag.len = 6; 164 ltag.len = 6;
164 } 165 }
165 166
166 ccm_update_mac(ctx, mac, (u8 *)&ltag, ltag.len, &macp, use_neon); 167 ccm_update_mac(ctx, mac, (u8 *)&ltag, ltag.len, &macp);
167 scatterwalk_start(&walk, req->src); 168 scatterwalk_start(&walk, req->src);
168 169
169 do { 170 do {
@@ -175,7 +176,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[],
175 n = scatterwalk_clamp(&walk, len); 176 n = scatterwalk_clamp(&walk, len);
176 } 177 }
177 p = scatterwalk_map(&walk); 178 p = scatterwalk_map(&walk);
178 ccm_update_mac(ctx, mac, p, n, &macp, use_neon); 179 ccm_update_mac(ctx, mac, p, n, &macp);
179 len -= n; 180 len -= n;
180 181
181 scatterwalk_unmap(p); 182 scatterwalk_unmap(p);
@@ -242,43 +243,42 @@ static int ccm_encrypt(struct aead_request *req)
242 u8 __aligned(8) mac[AES_BLOCK_SIZE]; 243 u8 __aligned(8) mac[AES_BLOCK_SIZE];
243 u8 buf[AES_BLOCK_SIZE]; 244 u8 buf[AES_BLOCK_SIZE];
244 u32 len = req->cryptlen; 245 u32 len = req->cryptlen;
245 bool use_neon = may_use_simd();
246 int err; 246 int err;
247 247
248 err = ccm_init_mac(req, mac, len); 248 err = ccm_init_mac(req, mac, len);
249 if (err) 249 if (err)
250 return err; 250 return err;
251 251
252 if (likely(use_neon))
253 kernel_neon_begin();
254
255 if (req->assoclen) 252 if (req->assoclen)
256 ccm_calculate_auth_mac(req, mac, use_neon); 253 ccm_calculate_auth_mac(req, mac);
257 254
258 /* preserve the original iv for the final round */ 255 /* preserve the original iv for the final round */
259 memcpy(buf, req->iv, AES_BLOCK_SIZE); 256 memcpy(buf, req->iv, AES_BLOCK_SIZE);
260 257
261 err = skcipher_walk_aead_encrypt(&walk, req, true); 258 err = skcipher_walk_aead_encrypt(&walk, req, true);
262 259
263 if (likely(use_neon)) { 260 if (may_use_simd()) {
264 while (walk.nbytes) { 261 while (walk.nbytes) {
265 u32 tail = walk.nbytes % AES_BLOCK_SIZE; 262 u32 tail = walk.nbytes % AES_BLOCK_SIZE;
266 263
267 if (walk.nbytes == walk.total) 264 if (walk.nbytes == walk.total)
268 tail = 0; 265 tail = 0;
269 266
267 kernel_neon_begin();
270 ce_aes_ccm_encrypt(walk.dst.virt.addr, 268 ce_aes_ccm_encrypt(walk.dst.virt.addr,
271 walk.src.virt.addr, 269 walk.src.virt.addr,
272 walk.nbytes - tail, ctx->key_enc, 270 walk.nbytes - tail, ctx->key_enc,
273 num_rounds(ctx), mac, walk.iv); 271 num_rounds(ctx), mac, walk.iv);
272 kernel_neon_end();
274 273
275 err = skcipher_walk_done(&walk, tail); 274 err = skcipher_walk_done(&walk, tail);
276 } 275 }
277 if (!err) 276 if (!err) {
277 kernel_neon_begin();
278 ce_aes_ccm_final(mac, buf, ctx->key_enc, 278 ce_aes_ccm_final(mac, buf, ctx->key_enc,
279 num_rounds(ctx)); 279 num_rounds(ctx));
280 280 kernel_neon_end();
281 kernel_neon_end(); 281 }
282 } else { 282 } else {
283 err = ccm_crypt_fallback(&walk, mac, buf, ctx, true); 283 err = ccm_crypt_fallback(&walk, mac, buf, ctx, true);
284 } 284 }
@@ -301,43 +301,42 @@ static int ccm_decrypt(struct aead_request *req)
301 u8 __aligned(8) mac[AES_BLOCK_SIZE]; 301 u8 __aligned(8) mac[AES_BLOCK_SIZE];
302 u8 buf[AES_BLOCK_SIZE]; 302 u8 buf[AES_BLOCK_SIZE];
303 u32 len = req->cryptlen - authsize; 303 u32 len = req->cryptlen - authsize;
304 bool use_neon = may_use_simd();
305 int err; 304 int err;
306 305
307 err = ccm_init_mac(req, mac, len); 306 err = ccm_init_mac(req, mac, len);
308 if (err) 307 if (err)
309 return err; 308 return err;
310 309
311 if (likely(use_neon))
312 kernel_neon_begin();
313
314 if (req->assoclen) 310 if (req->assoclen)
315 ccm_calculate_auth_mac(req, mac, use_neon); 311 ccm_calculate_auth_mac(req, mac);
316 312
317 /* preserve the original iv for the final round */ 313 /* preserve the original iv for the final round */
318 memcpy(buf, req->iv, AES_BLOCK_SIZE); 314 memcpy(buf, req->iv, AES_BLOCK_SIZE);
319 315
320 err = skcipher_walk_aead_decrypt(&walk, req, true); 316 err = skcipher_walk_aead_decrypt(&walk, req, true);
321 317
322 if (likely(use_neon)) { 318 if (may_use_simd()) {
323 while (walk.nbytes) { 319 while (walk.nbytes) {
324 u32 tail = walk.nbytes % AES_BLOCK_SIZE; 320 u32 tail = walk.nbytes % AES_BLOCK_SIZE;
325 321
326 if (walk.nbytes == walk.total) 322 if (walk.nbytes == walk.total)
327 tail = 0; 323 tail = 0;
328 324
325 kernel_neon_begin();
329 ce_aes_ccm_decrypt(walk.dst.virt.addr, 326 ce_aes_ccm_decrypt(walk.dst.virt.addr,
330 walk.src.virt.addr, 327 walk.src.virt.addr,
331 walk.nbytes - tail, ctx->key_enc, 328 walk.nbytes - tail, ctx->key_enc,
332 num_rounds(ctx), mac, walk.iv); 329 num_rounds(ctx), mac, walk.iv);
330 kernel_neon_end();
333 331
334 err = skcipher_walk_done(&walk, tail); 332 err = skcipher_walk_done(&walk, tail);
335 } 333 }
336 if (!err) 334 if (!err) {
335 kernel_neon_begin();
337 ce_aes_ccm_final(mac, buf, ctx->key_enc, 336 ce_aes_ccm_final(mac, buf, ctx->key_enc,
338 num_rounds(ctx)); 337 num_rounds(ctx));
339 338 kernel_neon_end();
340 kernel_neon_end(); 339 }
341 } else { 340 } else {
342 err = ccm_crypt_fallback(&walk, mac, buf, ctx, false); 341 err = ccm_crypt_fallback(&walk, mac, buf, ctx, false);
343 } 342 }
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 2fa850e86aa8..253188fb8cb0 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -64,17 +64,17 @@ MODULE_LICENSE("GPL v2");
64 64
65/* defined in aes-modes.S */ 65/* defined in aes-modes.S */
66asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], 66asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
67 int rounds, int blocks, int first); 67 int rounds, int blocks);
68asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], 68asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
69 int rounds, int blocks, int first); 69 int rounds, int blocks);
70 70
71asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], 71asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[],
72 int rounds, int blocks, u8 iv[], int first); 72 int rounds, int blocks, u8 iv[]);
73asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], 73asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
74 int rounds, int blocks, u8 iv[], int first); 74 int rounds, int blocks, u8 iv[]);
75 75
76asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], 76asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
77 int rounds, int blocks, u8 ctr[], int first); 77 int rounds, int blocks, u8 ctr[]);
78 78
79asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[], 79asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[],
80 int rounds, int blocks, u8 const rk2[], u8 iv[], 80 int rounds, int blocks, u8 const rk2[], u8 iv[],
@@ -133,19 +133,19 @@ static int ecb_encrypt(struct skcipher_request *req)
133{ 133{
134 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 134 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
135 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); 135 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
136 int err, first, rounds = 6 + ctx->key_length / 4; 136 int err, rounds = 6 + ctx->key_length / 4;
137 struct skcipher_walk walk; 137 struct skcipher_walk walk;
138 unsigned int blocks; 138 unsigned int blocks;
139 139
140 err = skcipher_walk_virt(&walk, req, true); 140 err = skcipher_walk_virt(&walk, req, false);
141 141
142 kernel_neon_begin(); 142 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
143 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { 143 kernel_neon_begin();
144 aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 144 aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
145 (u8 *)ctx->key_enc, rounds, blocks, first); 145 (u8 *)ctx->key_enc, rounds, blocks);
146 kernel_neon_end();
146 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 147 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
147 } 148 }
148 kernel_neon_end();
149 return err; 149 return err;
150} 150}
151 151
@@ -153,19 +153,19 @@ static int ecb_decrypt(struct skcipher_request *req)
153{ 153{
154 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 154 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
155 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); 155 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
156 int err, first, rounds = 6 + ctx->key_length / 4; 156 int err, rounds = 6 + ctx->key_length / 4;
157 struct skcipher_walk walk; 157 struct skcipher_walk walk;
158 unsigned int blocks; 158 unsigned int blocks;
159 159
160 err = skcipher_walk_virt(&walk, req, true); 160 err = skcipher_walk_virt(&walk, req, false);
161 161
162 kernel_neon_begin(); 162 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
163 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { 163 kernel_neon_begin();
164 aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr, 164 aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
165 (u8 *)ctx->key_dec, rounds, blocks, first); 165 (u8 *)ctx->key_dec, rounds, blocks);
166 kernel_neon_end();
166 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 167 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
167 } 168 }
168 kernel_neon_end();
169 return err; 169 return err;
170} 170}
171 171
@@ -173,20 +173,19 @@ static int cbc_encrypt(struct skcipher_request *req)
173{ 173{
174 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 174 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
175 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); 175 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
176 int err, first, rounds = 6 + ctx->key_length / 4; 176 int err, rounds = 6 + ctx->key_length / 4;
177 struct skcipher_walk walk; 177 struct skcipher_walk walk;
178 unsigned int blocks; 178 unsigned int blocks;
179 179
180 err = skcipher_walk_virt(&walk, req, true); 180 err = skcipher_walk_virt(&walk, req, false);
181 181
182 kernel_neon_begin(); 182 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
183 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { 183 kernel_neon_begin();
184 aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 184 aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
185 (u8 *)ctx->key_enc, rounds, blocks, walk.iv, 185 (u8 *)ctx->key_enc, rounds, blocks, walk.iv);
186 first); 186 kernel_neon_end();
187 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 187 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
188 } 188 }
189 kernel_neon_end();
190 return err; 189 return err;
191} 190}
192 191
@@ -194,20 +193,19 @@ static int cbc_decrypt(struct skcipher_request *req)
194{ 193{
195 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 194 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
196 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); 195 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
197 int err, first, rounds = 6 + ctx->key_length / 4; 196 int err, rounds = 6 + ctx->key_length / 4;
198 struct skcipher_walk walk; 197 struct skcipher_walk walk;
199 unsigned int blocks; 198 unsigned int blocks;
200 199
201 err = skcipher_walk_virt(&walk, req, true); 200 err = skcipher_walk_virt(&walk, req, false);
202 201
203 kernel_neon_begin(); 202 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
204 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { 203 kernel_neon_begin();
205 aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr, 204 aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
206 (u8 *)ctx->key_dec, rounds, blocks, walk.iv, 205 (u8 *)ctx->key_dec, rounds, blocks, walk.iv);
207 first); 206 kernel_neon_end();
208 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 207 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
209 } 208 }
210 kernel_neon_end();
211 return err; 209 return err;
212} 210}
213 211
@@ -215,20 +213,18 @@ static int ctr_encrypt(struct skcipher_request *req)
215{ 213{
216 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 214 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
217 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm); 215 struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
218 int err, first, rounds = 6 + ctx->key_length / 4; 216 int err, rounds = 6 + ctx->key_length / 4;
219 struct skcipher_walk walk; 217 struct skcipher_walk walk;
220 int blocks; 218 int blocks;
221 219
222 err = skcipher_walk_virt(&walk, req, true); 220 err = skcipher_walk_virt(&walk, req, false);
223 221
224 first = 1;
225 kernel_neon_begin();
226 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) { 222 while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
223 kernel_neon_begin();
227 aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 224 aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
228 (u8 *)ctx->key_enc, rounds, blocks, walk.iv, 225 (u8 *)ctx->key_enc, rounds, blocks, walk.iv);
229 first);
230 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 226 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
231 first = 0; 227 kernel_neon_end();
232 } 228 }
233 if (walk.nbytes) { 229 if (walk.nbytes) {
234 u8 __aligned(8) tail[AES_BLOCK_SIZE]; 230 u8 __aligned(8) tail[AES_BLOCK_SIZE];
@@ -241,12 +237,13 @@ static int ctr_encrypt(struct skcipher_request *req)
241 */ 237 */
242 blocks = -1; 238 blocks = -1;
243 239
240 kernel_neon_begin();
244 aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc, rounds, 241 aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc, rounds,
245 blocks, walk.iv, first); 242 blocks, walk.iv);
243 kernel_neon_end();
246 crypto_xor_cpy(tdst, tsrc, tail, nbytes); 244 crypto_xor_cpy(tdst, tsrc, tail, nbytes);
247 err = skcipher_walk_done(&walk, 0); 245 err = skcipher_walk_done(&walk, 0);
248 } 246 }
249 kernel_neon_end();
250 247
251 return err; 248 return err;
252} 249}
@@ -270,16 +267,16 @@ static int xts_encrypt(struct skcipher_request *req)
270 struct skcipher_walk walk; 267 struct skcipher_walk walk;
271 unsigned int blocks; 268 unsigned int blocks;
272 269
273 err = skcipher_walk_virt(&walk, req, true); 270 err = skcipher_walk_virt(&walk, req, false);
274 271
275 kernel_neon_begin();
276 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { 272 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
273 kernel_neon_begin();
277 aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 274 aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
278 (u8 *)ctx->key1.key_enc, rounds, blocks, 275 (u8 *)ctx->key1.key_enc, rounds, blocks,
279 (u8 *)ctx->key2.key_enc, walk.iv, first); 276 (u8 *)ctx->key2.key_enc, walk.iv, first);
277 kernel_neon_end();
280 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 278 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
281 } 279 }
282 kernel_neon_end();
283 280
284 return err; 281 return err;
285} 282}
@@ -292,16 +289,16 @@ static int xts_decrypt(struct skcipher_request *req)
292 struct skcipher_walk walk; 289 struct skcipher_walk walk;
293 unsigned int blocks; 290 unsigned int blocks;
294 291
295 err = skcipher_walk_virt(&walk, req, true); 292 err = skcipher_walk_virt(&walk, req, false);
296 293
297 kernel_neon_begin();
298 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { 294 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
295 kernel_neon_begin();
299 aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr, 296 aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
300 (u8 *)ctx->key1.key_dec, rounds, blocks, 297 (u8 *)ctx->key1.key_dec, rounds, blocks,
301 (u8 *)ctx->key2.key_enc, walk.iv, first); 298 (u8 *)ctx->key2.key_enc, walk.iv, first);
299 kernel_neon_end();
302 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 300 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
303 } 301 }
304 kernel_neon_end();
305 302
306 return err; 303 return err;
307} 304}
@@ -425,7 +422,7 @@ static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
425 422
426 /* encrypt the zero vector */ 423 /* encrypt the zero vector */
427 kernel_neon_begin(); 424 kernel_neon_begin();
428 aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, rk, rounds, 1, 1); 425 aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, rk, rounds, 1);
429 kernel_neon_end(); 426 kernel_neon_end();
430 427
431 cmac_gf128_mul_by_x(consts, consts); 428 cmac_gf128_mul_by_x(consts, consts);
@@ -454,8 +451,8 @@ static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key,
454 return err; 451 return err;
455 452
456 kernel_neon_begin(); 453 kernel_neon_begin();
457 aes_ecb_encrypt(key, ks[0], rk, rounds, 1, 1); 454 aes_ecb_encrypt(key, ks[0], rk, rounds, 1);
458 aes_ecb_encrypt(ctx->consts, ks[1], rk, rounds, 2, 0); 455 aes_ecb_encrypt(ctx->consts, ks[1], rk, rounds, 2);
459 kernel_neon_end(); 456 kernel_neon_end();
460 457
461 return cbcmac_setkey(tfm, key, sizeof(key)); 458 return cbcmac_setkey(tfm, key, sizeof(key));
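
The cmac_setkey() hunk above encrypts an all-zero block with the new, shorter aes_ecb_encrypt() prototype and then derives the two CMAC subkeys by doubling that value in GF(2^128), via cmac_gf128_mul_by_x(). For reference, the doubling step written out in plain C, treating the block as a big-endian bit string with the standard 0x87 reduction; this is an illustration only, not the kernel's implementation:

    static void gf128_double_sketch(u8 out[16], const u8 in[16])
    {
            int carry = in[0] & 0x80;       /* top bit of the 128-bit value */
            int i;

            for (i = 0; i < 15; i++)
                    out[i] = (in[i] << 1) | (in[i + 1] >> 7);
            out[15] = in[15] << 1;

            if (carry)
                    out[15] ^= 0x87;        /* reduce mod x^128 + x^7 + x^2 + x + 1 */
    }
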
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index 2674d43d1384..a68412e1e3a4 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -13,127 +13,39 @@
13 .text 13 .text
14 .align 4 14 .align 4
15 15
16/*
17 * There are several ways to instantiate this code:
18 * - no interleave, all inline
19 * - 2-way interleave, 2x calls out of line (-DINTERLEAVE=2)
20 * - 2-way interleave, all inline (-DINTERLEAVE=2 -DINTERLEAVE_INLINE)
21 * - 4-way interleave, 4x calls out of line (-DINTERLEAVE=4)
22 * - 4-way interleave, all inline (-DINTERLEAVE=4 -DINTERLEAVE_INLINE)
23 *
24 * Macros imported by this code:
25 * - enc_prepare - setup NEON registers for encryption
26 * - dec_prepare - setup NEON registers for decryption
27 * - enc_switch_key - change to new key after having prepared for encryption
28 * - encrypt_block - encrypt a single block
29 * - decrypt block - decrypt a single block
30 * - encrypt_block2x - encrypt 2 blocks in parallel (if INTERLEAVE == 2)
31 * - decrypt_block2x - decrypt 2 blocks in parallel (if INTERLEAVE == 2)
32 * - encrypt_block4x - encrypt 4 blocks in parallel (if INTERLEAVE == 4)
33 * - decrypt_block4x - decrypt 4 blocks in parallel (if INTERLEAVE == 4)
34 */
35
36#if defined(INTERLEAVE) && !defined(INTERLEAVE_INLINE)
37#define FRAME_PUSH stp x29, x30, [sp,#-16]! ; mov x29, sp
38#define FRAME_POP ldp x29, x30, [sp],#16
39
40#if INTERLEAVE == 2
41
42aes_encrypt_block2x:
43 encrypt_block2x v0, v1, w3, x2, x6, w7
44 ret
45ENDPROC(aes_encrypt_block2x)
46
47aes_decrypt_block2x:
48 decrypt_block2x v0, v1, w3, x2, x6, w7
49 ret
50ENDPROC(aes_decrypt_block2x)
51
52#elif INTERLEAVE == 4
53
54aes_encrypt_block4x: 16aes_encrypt_block4x:
55 encrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7 17 encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
56 ret 18 ret
57ENDPROC(aes_encrypt_block4x) 19ENDPROC(aes_encrypt_block4x)
58 20
59aes_decrypt_block4x: 21aes_decrypt_block4x:
60 decrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7 22 decrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
61 ret 23 ret
62ENDPROC(aes_decrypt_block4x) 24ENDPROC(aes_decrypt_block4x)
63 25
64#else
65#error INTERLEAVE should equal 2 or 4
66#endif
67
68 .macro do_encrypt_block2x
69 bl aes_encrypt_block2x
70 .endm
71
72 .macro do_decrypt_block2x
73 bl aes_decrypt_block2x
74 .endm
75
76 .macro do_encrypt_block4x
77 bl aes_encrypt_block4x
78 .endm
79
80 .macro do_decrypt_block4x
81 bl aes_decrypt_block4x
82 .endm
83
84#else
85#define FRAME_PUSH
86#define FRAME_POP
87
88 .macro do_encrypt_block2x
89 encrypt_block2x v0, v1, w3, x2, x6, w7
90 .endm
91
92 .macro do_decrypt_block2x
93 decrypt_block2x v0, v1, w3, x2, x6, w7
94 .endm
95
96 .macro do_encrypt_block4x
97 encrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
98 .endm
99
100 .macro do_decrypt_block4x
101 decrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
102 .endm
103
104#endif
105
106 /* 26 /*
107 * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, 27 * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
108 * int blocks, int first) 28 * int blocks)
109 * aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, 29 * aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
110 * int blocks, int first) 30 * int blocks)
111 */ 31 */
112 32
113AES_ENTRY(aes_ecb_encrypt) 33AES_ENTRY(aes_ecb_encrypt)
114 FRAME_PUSH 34 stp x29, x30, [sp, #-16]!
115 cbz w5, .LecbencloopNx 35 mov x29, sp
116 36
117 enc_prepare w3, x2, x5 37 enc_prepare w3, x2, x5
118 38
119.LecbencloopNx: 39.LecbencloopNx:
120#if INTERLEAVE >= 2 40 subs w4, w4, #4
121 subs w4, w4, #INTERLEAVE
122 bmi .Lecbenc1x 41 bmi .Lecbenc1x
123#if INTERLEAVE == 2
124 ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 pt blocks */
125 do_encrypt_block2x
126 st1 {v0.16b-v1.16b}, [x0], #32
127#else
128 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */ 42 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
129 do_encrypt_block4x 43 bl aes_encrypt_block4x
130 st1 {v0.16b-v3.16b}, [x0], #64 44 st1 {v0.16b-v3.16b}, [x0], #64
131#endif
132 b .LecbencloopNx 45 b .LecbencloopNx
133.Lecbenc1x: 46.Lecbenc1x:
134 adds w4, w4, #INTERLEAVE 47 adds w4, w4, #4
135 beq .Lecbencout 48 beq .Lecbencout
136#endif
137.Lecbencloop: 49.Lecbencloop:
138 ld1 {v0.16b}, [x1], #16 /* get next pt block */ 50 ld1 {v0.16b}, [x1], #16 /* get next pt block */
139 encrypt_block v0, w3, x2, x5, w6 51 encrypt_block v0, w3, x2, x5, w6
@@ -141,35 +53,27 @@ AES_ENTRY(aes_ecb_encrypt)
141 subs w4, w4, #1 53 subs w4, w4, #1
142 bne .Lecbencloop 54 bne .Lecbencloop
143.Lecbencout: 55.Lecbencout:
144 FRAME_POP 56 ldp x29, x30, [sp], #16
145 ret 57 ret
146AES_ENDPROC(aes_ecb_encrypt) 58AES_ENDPROC(aes_ecb_encrypt)
147 59
148 60
149AES_ENTRY(aes_ecb_decrypt) 61AES_ENTRY(aes_ecb_decrypt)
150 FRAME_PUSH 62 stp x29, x30, [sp, #-16]!
151 cbz w5, .LecbdecloopNx 63 mov x29, sp
152 64
153 dec_prepare w3, x2, x5 65 dec_prepare w3, x2, x5
154 66
155.LecbdecloopNx: 67.LecbdecloopNx:
156#if INTERLEAVE >= 2 68 subs w4, w4, #4
157 subs w4, w4, #INTERLEAVE
158 bmi .Lecbdec1x 69 bmi .Lecbdec1x
159#if INTERLEAVE == 2
160 ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
161 do_decrypt_block2x
162 st1 {v0.16b-v1.16b}, [x0], #32
163#else
164 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ 70 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
165 do_decrypt_block4x 71 bl aes_decrypt_block4x
166 st1 {v0.16b-v3.16b}, [x0], #64 72 st1 {v0.16b-v3.16b}, [x0], #64
167#endif
168 b .LecbdecloopNx 73 b .LecbdecloopNx
169.Lecbdec1x: 74.Lecbdec1x:
170 adds w4, w4, #INTERLEAVE 75 adds w4, w4, #4
171 beq .Lecbdecout 76 beq .Lecbdecout
172#endif
173.Lecbdecloop: 77.Lecbdecloop:
174 ld1 {v0.16b}, [x1], #16 /* get next ct block */ 78 ld1 {v0.16b}, [x1], #16 /* get next ct block */
175 decrypt_block v0, w3, x2, x5, w6 79 decrypt_block v0, w3, x2, x5, w6
@@ -177,62 +81,68 @@ AES_ENTRY(aes_ecb_decrypt)
177 subs w4, w4, #1 81 subs w4, w4, #1
178 bne .Lecbdecloop 82 bne .Lecbdecloop
179.Lecbdecout: 83.Lecbdecout:
180 FRAME_POP 84 ldp x29, x30, [sp], #16
181 ret 85 ret
182AES_ENDPROC(aes_ecb_decrypt) 86AES_ENDPROC(aes_ecb_decrypt)
183 87
184 88
185 /* 89 /*
186 * aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, 90 * aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
187 * int blocks, u8 iv[], int first) 91 * int blocks, u8 iv[])
188 * aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, 92 * aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
189 * int blocks, u8 iv[], int first) 93 * int blocks, u8 iv[])
190 */ 94 */
191 95
192AES_ENTRY(aes_cbc_encrypt) 96AES_ENTRY(aes_cbc_encrypt)
193 cbz w6, .Lcbcencloop 97 ld1 {v4.16b}, [x5] /* get iv */
194
195 ld1 {v0.16b}, [x5] /* get iv */
196 enc_prepare w3, x2, x6 98 enc_prepare w3, x2, x6
197 99
198.Lcbcencloop: 100.Lcbcencloop4x:
199 ld1 {v1.16b}, [x1], #16 /* get next pt block */ 101 subs w4, w4, #4
200 eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */ 102 bmi .Lcbcenc1x
103 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
104 eor v0.16b, v0.16b, v4.16b /* ..and xor with iv */
201 encrypt_block v0, w3, x2, x6, w7 105 encrypt_block v0, w3, x2, x6, w7
202 st1 {v0.16b}, [x0], #16 106 eor v1.16b, v1.16b, v0.16b
107 encrypt_block v1, w3, x2, x6, w7
108 eor v2.16b, v2.16b, v1.16b
109 encrypt_block v2, w3, x2, x6, w7
110 eor v3.16b, v3.16b, v2.16b
111 encrypt_block v3, w3, x2, x6, w7
112 st1 {v0.16b-v3.16b}, [x0], #64
113 mov v4.16b, v3.16b
114 b .Lcbcencloop4x
115.Lcbcenc1x:
116 adds w4, w4, #4
117 beq .Lcbcencout
118.Lcbcencloop:
119 ld1 {v0.16b}, [x1], #16 /* get next pt block */
120 eor v4.16b, v4.16b, v0.16b /* ..and xor with iv */
121 encrypt_block v4, w3, x2, x6, w7
122 st1 {v4.16b}, [x0], #16
203 subs w4, w4, #1 123 subs w4, w4, #1
204 bne .Lcbcencloop 124 bne .Lcbcencloop
205 st1 {v0.16b}, [x5] /* return iv */ 125.Lcbcencout:
126 st1 {v4.16b}, [x5] /* return iv */
206 ret 127 ret
207AES_ENDPROC(aes_cbc_encrypt) 128AES_ENDPROC(aes_cbc_encrypt)
208 129
209 130
210AES_ENTRY(aes_cbc_decrypt) 131AES_ENTRY(aes_cbc_decrypt)
211 FRAME_PUSH 132 stp x29, x30, [sp, #-16]!
212 cbz w6, .LcbcdecloopNx 133 mov x29, sp
213 134
214 ld1 {v7.16b}, [x5] /* get iv */ 135 ld1 {v7.16b}, [x5] /* get iv */
215 dec_prepare w3, x2, x6 136 dec_prepare w3, x2, x6
216 137
217.LcbcdecloopNx: 138.LcbcdecloopNx:
218#if INTERLEAVE >= 2 139 subs w4, w4, #4
219 subs w4, w4, #INTERLEAVE
220 bmi .Lcbcdec1x 140 bmi .Lcbcdec1x
221#if INTERLEAVE == 2
222 ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
223 mov v2.16b, v0.16b
224 mov v3.16b, v1.16b
225 do_decrypt_block2x
226 eor v0.16b, v0.16b, v7.16b
227 eor v1.16b, v1.16b, v2.16b
228 mov v7.16b, v3.16b
229 st1 {v0.16b-v1.16b}, [x0], #32
230#else
231 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ 141 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
232 mov v4.16b, v0.16b 142 mov v4.16b, v0.16b
233 mov v5.16b, v1.16b 143 mov v5.16b, v1.16b
234 mov v6.16b, v2.16b 144 mov v6.16b, v2.16b
235 do_decrypt_block4x 145 bl aes_decrypt_block4x
236 sub x1, x1, #16 146 sub x1, x1, #16
237 eor v0.16b, v0.16b, v7.16b 147 eor v0.16b, v0.16b, v7.16b
238 eor v1.16b, v1.16b, v4.16b 148 eor v1.16b, v1.16b, v4.16b
@@ -240,12 +150,10 @@ AES_ENTRY(aes_cbc_decrypt)
240 eor v2.16b, v2.16b, v5.16b 150 eor v2.16b, v2.16b, v5.16b
241 eor v3.16b, v3.16b, v6.16b 151 eor v3.16b, v3.16b, v6.16b
242 st1 {v0.16b-v3.16b}, [x0], #64 152 st1 {v0.16b-v3.16b}, [x0], #64
243#endif
244 b .LcbcdecloopNx 153 b .LcbcdecloopNx
245.Lcbcdec1x: 154.Lcbcdec1x:
246 adds w4, w4, #INTERLEAVE 155 adds w4, w4, #4
247 beq .Lcbcdecout 156 beq .Lcbcdecout
248#endif
249.Lcbcdecloop: 157.Lcbcdecloop:
250 ld1 {v1.16b}, [x1], #16 /* get next ct block */ 158 ld1 {v1.16b}, [x1], #16 /* get next ct block */
251 mov v0.16b, v1.16b /* ...and copy to v0 */ 159 mov v0.16b, v1.16b /* ...and copy to v0 */
@@ -256,49 +164,33 @@ AES_ENTRY(aes_cbc_decrypt)
256 subs w4, w4, #1 164 subs w4, w4, #1
257 bne .Lcbcdecloop 165 bne .Lcbcdecloop
258.Lcbcdecout: 166.Lcbcdecout:
259 FRAME_POP
260 st1 {v7.16b}, [x5] /* return iv */ 167 st1 {v7.16b}, [x5] /* return iv */
168 ldp x29, x30, [sp], #16
261 ret 169 ret
262AES_ENDPROC(aes_cbc_decrypt) 170AES_ENDPROC(aes_cbc_decrypt)
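
A side note on the two routines above: aes_cbc_encrypt() still encrypts serially even in its new 4-block loop, because CBC encryption chains each plaintext block with the previous ciphertext block, whereas aes_cbc_decrypt() can genuinely decrypt four blocks in parallel (bl aes_decrypt_block4x) since only the final XORs depend on earlier ciphertext. The dependency in scalar C, where xor_block() and aes_encrypt_one() are hypothetical helpers used purely for illustration:

    static void cbc_encrypt_sketch(u8 *ct, const u8 *pt, unsigned int nblocks,
                                   const struct crypto_aes_ctx *ctx, u8 iv[16])
    {
            const u8 *prev = iv;

            while (nblocks--) {
                    u8 tmp[16];

                    xor_block(tmp, pt, prev);       /* XOR with IV / previous ciphertext */
                    aes_encrypt_one(ct, tmp, ctx);  /* this output feeds the next block */
                    prev = ct;
                    pt += 16;
                    ct += 16;
            }
    }
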
263 171
264 172
265 /* 173 /*
266 * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, 174 * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
267 * int blocks, u8 ctr[], int first) 175 * int blocks, u8 ctr[])
268 */ 176 */
269 177
270AES_ENTRY(aes_ctr_encrypt) 178AES_ENTRY(aes_ctr_encrypt)
271 FRAME_PUSH 179 stp x29, x30, [sp, #-16]!
272 cbz w6, .Lctrnotfirst /* 1st time around? */ 180 mov x29, sp
181
273 enc_prepare w3, x2, x6 182 enc_prepare w3, x2, x6
274 ld1 {v4.16b}, [x5] 183 ld1 {v4.16b}, [x5]
275 184
276.Lctrnotfirst: 185 umov x6, v4.d[1] /* keep swabbed ctr in reg */
277 umov x8, v4.d[1] /* keep swabbed ctr in reg */ 186 rev x6, x6
278 rev x8, x8 187 cmn w6, w4 /* 32 bit overflow? */
279#if INTERLEAVE >= 2
280 cmn w8, w4 /* 32 bit overflow? */
281 bcs .Lctrloop 188 bcs .Lctrloop
282.LctrloopNx: 189.LctrloopNx:
283 subs w4, w4, #INTERLEAVE 190 subs w4, w4, #4
284 bmi .Lctr1x 191 bmi .Lctr1x
285#if INTERLEAVE == 2
286 mov v0.8b, v4.8b
287 mov v1.8b, v4.8b
288 rev x7, x8
289 add x8, x8, #1
290 ins v0.d[1], x7
291 rev x7, x8
292 add x8, x8, #1
293 ins v1.d[1], x7
294 ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */
295 do_encrypt_block2x
296 eor v0.16b, v0.16b, v2.16b
297 eor v1.16b, v1.16b, v3.16b
298 st1 {v0.16b-v1.16b}, [x0], #32
299#else
300 ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */ 192 ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
301 dup v7.4s, w8 193 dup v7.4s, w6
302 mov v0.16b, v4.16b 194 mov v0.16b, v4.16b
303 add v7.4s, v7.4s, v8.4s 195 add v7.4s, v7.4s, v8.4s
304 mov v1.16b, v4.16b 196 mov v1.16b, v4.16b
@@ -309,29 +201,27 @@ AES_ENTRY(aes_ctr_encrypt)
309 mov v2.s[3], v8.s[1] 201 mov v2.s[3], v8.s[1]
310 mov v3.s[3], v8.s[2] 202 mov v3.s[3], v8.s[2]
311 ld1 {v5.16b-v7.16b}, [x1], #48 /* get 3 input blocks */ 203 ld1 {v5.16b-v7.16b}, [x1], #48 /* get 3 input blocks */
312 do_encrypt_block4x 204 bl aes_encrypt_block4x
313 eor v0.16b, v5.16b, v0.16b 205 eor v0.16b, v5.16b, v0.16b
314 ld1 {v5.16b}, [x1], #16 /* get 1 input block */ 206 ld1 {v5.16b}, [x1], #16 /* get 1 input block */
315 eor v1.16b, v6.16b, v1.16b 207 eor v1.16b, v6.16b, v1.16b
316 eor v2.16b, v7.16b, v2.16b 208 eor v2.16b, v7.16b, v2.16b
317 eor v3.16b, v5.16b, v3.16b 209 eor v3.16b, v5.16b, v3.16b
318 st1 {v0.16b-v3.16b}, [x0], #64 210 st1 {v0.16b-v3.16b}, [x0], #64
319 add x8, x8, #INTERLEAVE 211 add x6, x6, #4
320#endif 212 rev x7, x6
321 rev x7, x8
322 ins v4.d[1], x7 213 ins v4.d[1], x7
323 cbz w4, .Lctrout 214 cbz w4, .Lctrout
324 b .LctrloopNx 215 b .LctrloopNx
325.Lctr1x: 216.Lctr1x:
326 adds w4, w4, #INTERLEAVE 217 adds w4, w4, #4
327 beq .Lctrout 218 beq .Lctrout
328#endif
329.Lctrloop: 219.Lctrloop:
330 mov v0.16b, v4.16b 220 mov v0.16b, v4.16b
331 encrypt_block v0, w3, x2, x6, w7 221 encrypt_block v0, w3, x2, x8, w7
332 222
333 adds x8, x8, #1 /* increment BE ctr */ 223 adds x6, x6, #1 /* increment BE ctr */
334 rev x7, x8 224 rev x7, x6
335 ins v4.d[1], x7 225 ins v4.d[1], x7
336 bcs .Lctrcarry /* overflow? */ 226 bcs .Lctrcarry /* overflow? */
337 227
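
For readers less used to the assembly: the CTR path keeps the low 64 bits of the big-endian counter byte-swapped in a general-purpose register (now x6 rather than x8), increments it there, and only branches to .Lctrcarry when that 64-bit half wraps. Roughly the same bookkeeping in C, using the kernel's unaligned byte-order helpers; a sketch for illustration only:

    /* needs <asm/unaligned.h> for get/put_unaligned_be64() */
    static void ctr_increment_sketch(u8 ctr[16])
    {
            u64 lo = get_unaligned_be64(ctr + 8) + 1;

            put_unaligned_be64(lo, ctr + 8);
            if (unlikely(lo == 0))          /* low half wrapped: carry into high half */
                    put_unaligned_be64(get_unaligned_be64(ctr) + 1, ctr);
    }
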
@@ -345,12 +235,12 @@ AES_ENTRY(aes_ctr_encrypt)
345 235
346.Lctrout: 236.Lctrout:
347 st1 {v4.16b}, [x5] /* return next CTR value */ 237 st1 {v4.16b}, [x5] /* return next CTR value */
348 FRAME_POP 238 ldp x29, x30, [sp], #16
349 ret 239 ret
350 240
351.Lctrtailblock: 241.Lctrtailblock:
352 st1 {v0.16b}, [x0] 242 st1 {v0.16b}, [x0]
353 FRAME_POP 243 ldp x29, x30, [sp], #16
354 ret 244 ret
355 245
356.Lctrcarry: 246.Lctrcarry:
@@ -384,39 +274,26 @@ CPU_LE( .quad 1, 0x87 )
384CPU_BE( .quad 0x87, 1 ) 274CPU_BE( .quad 0x87, 1 )
385 275
386AES_ENTRY(aes_xts_encrypt) 276AES_ENTRY(aes_xts_encrypt)
387 FRAME_PUSH 277 stp x29, x30, [sp, #-16]!
388 cbz w7, .LxtsencloopNx 278 mov x29, sp
389 279
390 ld1 {v4.16b}, [x6] 280 ld1 {v4.16b}, [x6]
391 enc_prepare w3, x5, x6 281 cbz w7, .Lxtsencnotfirst
392 encrypt_block v4, w3, x5, x6, w7 /* first tweak */ 282
393 enc_switch_key w3, x2, x6 283 enc_prepare w3, x5, x8
284 encrypt_block v4, w3, x5, x8, w7 /* first tweak */
285 enc_switch_key w3, x2, x8
394 ldr q7, .Lxts_mul_x 286 ldr q7, .Lxts_mul_x
395 b .LxtsencNx 287 b .LxtsencNx
396 288
289.Lxtsencnotfirst:
290 enc_prepare w3, x2, x8
397.LxtsencloopNx: 291.LxtsencloopNx:
398 ldr q7, .Lxts_mul_x 292 ldr q7, .Lxts_mul_x
399 next_tweak v4, v4, v7, v8 293 next_tweak v4, v4, v7, v8
400.LxtsencNx: 294.LxtsencNx:
401#if INTERLEAVE >= 2 295 subs w4, w4, #4
402 subs w4, w4, #INTERLEAVE
403 bmi .Lxtsenc1x 296 bmi .Lxtsenc1x
404#if INTERLEAVE == 2
405 ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 pt blocks */
406 next_tweak v5, v4, v7, v8
407 eor v0.16b, v0.16b, v4.16b
408 eor v1.16b, v1.16b, v5.16b
409 do_encrypt_block2x
410 eor v0.16b, v0.16b, v4.16b
411 eor v1.16b, v1.16b, v5.16b
412 st1 {v0.16b-v1.16b}, [x0], #32
413 cbz w4, .LxtsencoutNx
414 next_tweak v4, v5, v7, v8
415 b .LxtsencNx
416.LxtsencoutNx:
417 mov v4.16b, v5.16b
418 b .Lxtsencout
419#else
420 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */ 297 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
421 next_tweak v5, v4, v7, v8 298 next_tweak v5, v4, v7, v8
422 eor v0.16b, v0.16b, v4.16b 299 eor v0.16b, v0.16b, v4.16b
@@ -425,7 +302,7 @@ AES_ENTRY(aes_xts_encrypt)
425 eor v2.16b, v2.16b, v6.16b 302 eor v2.16b, v2.16b, v6.16b
426 next_tweak v7, v6, v7, v8 303 next_tweak v7, v6, v7, v8
427 eor v3.16b, v3.16b, v7.16b 304 eor v3.16b, v3.16b, v7.16b
428 do_encrypt_block4x 305 bl aes_encrypt_block4x
429 eor v3.16b, v3.16b, v7.16b 306 eor v3.16b, v3.16b, v7.16b
430 eor v0.16b, v0.16b, v4.16b 307 eor v0.16b, v0.16b, v4.16b
431 eor v1.16b, v1.16b, v5.16b 308 eor v1.16b, v1.16b, v5.16b
@@ -434,15 +311,13 @@ AES_ENTRY(aes_xts_encrypt)
434 mov v4.16b, v7.16b 311 mov v4.16b, v7.16b
435 cbz w4, .Lxtsencout 312 cbz w4, .Lxtsencout
436 b .LxtsencloopNx 313 b .LxtsencloopNx
437#endif
438.Lxtsenc1x: 314.Lxtsenc1x:
439 adds w4, w4, #INTERLEAVE 315 adds w4, w4, #4
440 beq .Lxtsencout 316 beq .Lxtsencout
441#endif
442.Lxtsencloop: 317.Lxtsencloop:
443 ld1 {v1.16b}, [x1], #16 318 ld1 {v1.16b}, [x1], #16
444 eor v0.16b, v1.16b, v4.16b 319 eor v0.16b, v1.16b, v4.16b
445 encrypt_block v0, w3, x2, x6, w7 320 encrypt_block v0, w3, x2, x8, w7
446 eor v0.16b, v0.16b, v4.16b 321 eor v0.16b, v0.16b, v4.16b
447 st1 {v0.16b}, [x0], #16 322 st1 {v0.16b}, [x0], #16
448 subs w4, w4, #1 323 subs w4, w4, #1
@@ -450,45 +325,33 @@ AES_ENTRY(aes_xts_encrypt)
450 next_tweak v4, v4, v7, v8 325 next_tweak v4, v4, v7, v8
451 b .Lxtsencloop 326 b .Lxtsencloop
452.Lxtsencout: 327.Lxtsencout:
453 FRAME_POP 328 st1 {v4.16b}, [x6]
329 ldp x29, x30, [sp], #16
454 ret 330 ret
455AES_ENDPROC(aes_xts_encrypt) 331AES_ENDPROC(aes_xts_encrypt)
456 332
457 333
458AES_ENTRY(aes_xts_decrypt) 334AES_ENTRY(aes_xts_decrypt)
459 FRAME_PUSH 335 stp x29, x30, [sp, #-16]!
460 cbz w7, .LxtsdecloopNx 336 mov x29, sp
461 337
462 ld1 {v4.16b}, [x6] 338 ld1 {v4.16b}, [x6]
463 enc_prepare w3, x5, x6 339 cbz w7, .Lxtsdecnotfirst
464 encrypt_block v4, w3, x5, x6, w7 /* first tweak */ 340
465 dec_prepare w3, x2, x6 341 enc_prepare w3, x5, x8
342 encrypt_block v4, w3, x5, x8, w7 /* first tweak */
343 dec_prepare w3, x2, x8
466 ldr q7, .Lxts_mul_x 344 ldr q7, .Lxts_mul_x
467 b .LxtsdecNx 345 b .LxtsdecNx
468 346
347.Lxtsdecnotfirst:
348 dec_prepare w3, x2, x8
469.LxtsdecloopNx: 349.LxtsdecloopNx:
470 ldr q7, .Lxts_mul_x 350 ldr q7, .Lxts_mul_x
471 next_tweak v4, v4, v7, v8 351 next_tweak v4, v4, v7, v8
472.LxtsdecNx: 352.LxtsdecNx:
473#if INTERLEAVE >= 2 353 subs w4, w4, #4
474 subs w4, w4, #INTERLEAVE
475 bmi .Lxtsdec1x 354 bmi .Lxtsdec1x
476#if INTERLEAVE == 2
477 ld1 {v0.16b-v1.16b}, [x1], #32 /* get 2 ct blocks */
478 next_tweak v5, v4, v7, v8
479 eor v0.16b, v0.16b, v4.16b
480 eor v1.16b, v1.16b, v5.16b
481 do_decrypt_block2x
482 eor v0.16b, v0.16b, v4.16b
483 eor v1.16b, v1.16b, v5.16b
484 st1 {v0.16b-v1.16b}, [x0], #32
485 cbz w4, .LxtsdecoutNx
486 next_tweak v4, v5, v7, v8
487 b .LxtsdecNx
488.LxtsdecoutNx:
489 mov v4.16b, v5.16b
490 b .Lxtsdecout
491#else
492 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ 355 ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
493 next_tweak v5, v4, v7, v8 356 next_tweak v5, v4, v7, v8
494 eor v0.16b, v0.16b, v4.16b 357 eor v0.16b, v0.16b, v4.16b
@@ -497,7 +360,7 @@ AES_ENTRY(aes_xts_decrypt)
497 eor v2.16b, v2.16b, v6.16b 360 eor v2.16b, v2.16b, v6.16b
498 next_tweak v7, v6, v7, v8 361 next_tweak v7, v6, v7, v8
499 eor v3.16b, v3.16b, v7.16b 362 eor v3.16b, v3.16b, v7.16b
500 do_decrypt_block4x 363 bl aes_decrypt_block4x
501 eor v3.16b, v3.16b, v7.16b 364 eor v3.16b, v3.16b, v7.16b
502 eor v0.16b, v0.16b, v4.16b 365 eor v0.16b, v0.16b, v4.16b
503 eor v1.16b, v1.16b, v5.16b 366 eor v1.16b, v1.16b, v5.16b
@@ -506,15 +369,13 @@ AES_ENTRY(aes_xts_decrypt)
506 mov v4.16b, v7.16b 369 mov v4.16b, v7.16b
507 cbz w4, .Lxtsdecout 370 cbz w4, .Lxtsdecout
508 b .LxtsdecloopNx 371 b .LxtsdecloopNx
509#endif
510.Lxtsdec1x: 372.Lxtsdec1x:
511 adds w4, w4, #INTERLEAVE 373 adds w4, w4, #4
512 beq .Lxtsdecout 374 beq .Lxtsdecout
513#endif
514.Lxtsdecloop: 375.Lxtsdecloop:
515 ld1 {v1.16b}, [x1], #16 376 ld1 {v1.16b}, [x1], #16
516 eor v0.16b, v1.16b, v4.16b 377 eor v0.16b, v1.16b, v4.16b
517 decrypt_block v0, w3, x2, x6, w7 378 decrypt_block v0, w3, x2, x8, w7
518 eor v0.16b, v0.16b, v4.16b 379 eor v0.16b, v0.16b, v4.16b
519 st1 {v0.16b}, [x0], #16 380 st1 {v0.16b}, [x0], #16
520 subs w4, w4, #1 381 subs w4, w4, #1
@@ -522,7 +383,8 @@ AES_ENTRY(aes_xts_decrypt)
522 next_tweak v4, v4, v7, v8 383 next_tweak v4, v4, v7, v8
523 b .Lxtsdecloop 384 b .Lxtsdecloop
524.Lxtsdecout: 385.Lxtsdecout:
525 FRAME_POP 386 st1 {v4.16b}, [x6]
387 ldp x29, x30, [sp], #16
526 ret 388 ret
527AES_ENDPROC(aes_xts_decrypt) 389AES_ENDPROC(aes_xts_decrypt)
528 390
@@ -533,8 +395,28 @@ AES_ENDPROC(aes_xts_decrypt)
533AES_ENTRY(aes_mac_update) 395AES_ENTRY(aes_mac_update)
534 ld1 {v0.16b}, [x4] /* get dg */ 396 ld1 {v0.16b}, [x4] /* get dg */
535 enc_prepare w2, x1, x7 397 enc_prepare w2, x1, x7
536 cbnz w5, .Lmacenc 398 cbz w5, .Lmacloop4x
399
400 encrypt_block v0, w2, x1, x7, w8
537 401
402.Lmacloop4x:
403 subs w3, w3, #4
404 bmi .Lmac1x
405 ld1 {v1.16b-v4.16b}, [x0], #64 /* get next pt block */
406 eor v0.16b, v0.16b, v1.16b /* ..and xor with dg */
407 encrypt_block v0, w2, x1, x7, w8
408 eor v0.16b, v0.16b, v2.16b
409 encrypt_block v0, w2, x1, x7, w8
410 eor v0.16b, v0.16b, v3.16b
411 encrypt_block v0, w2, x1, x7, w8
412 eor v0.16b, v0.16b, v4.16b
413 cmp w3, wzr
414 csinv x5, x6, xzr, eq
415 cbz w5, .Lmacout
416 encrypt_block v0, w2, x1, x7, w8
417 b .Lmacloop4x
418.Lmac1x:
419 add w3, w3, #4
538.Lmacloop: 420.Lmacloop:
539 cbz w3, .Lmacout 421 cbz w3, .Lmacout
540 ld1 {v1.16b}, [x0], #16 /* get next pt block */ 422 ld1 {v1.16b}, [x0], #16 /* get next pt block */
@@ -544,7 +426,6 @@ AES_ENTRY(aes_mac_update)
544 csinv x5, x6, xzr, eq 426 csinv x5, x6, xzr, eq
545 cbz w5, .Lmacout 427 cbz w5, .Lmacout
546 428
547.Lmacenc:
548 encrypt_block v0, w2, x1, x7, w8 429 encrypt_block v0, w2, x1, x7, w8
549 b .Lmacloop 430 b .Lmacloop
550 431
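
The aes_mac_update() changes above are still plain CBC-MAC underneath: the digest is XORed with each input block and re-encrypted, and the new .Lmacloop4x merely folds four blocks into the chain per loop iteration. A scalar sketch of the core recurrence, ignoring the driver's deferred handling of the final block (aes_encrypt_one() is a hypothetical helper, not a kernel symbol):

    static void cbcmac_update_sketch(u8 dg[16], const u8 *in, int nblocks,
                                     const struct crypto_aes_ctx *ctx)
    {
            int i;

            while (nblocks--) {
                    for (i = 0; i < 16; i++)
                            dg[i] ^= *in++;         /* fold next block into the digest */
                    aes_encrypt_one(dg, dg, ctx);   /* dg = E_K(dg) */
            }
    }
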
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index c55d68ccb89f..e7a95a566462 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -46,10 +46,9 @@ asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
46 46
47/* borrowed from aes-neon-blk.ko */ 47/* borrowed from aes-neon-blk.ko */
48asmlinkage void neon_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[], 48asmlinkage void neon_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
49 int rounds, int blocks, int first); 49 int rounds, int blocks);
50asmlinkage void neon_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[], 50asmlinkage void neon_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
51 int rounds, int blocks, u8 iv[], 51 int rounds, int blocks, u8 iv[]);
52 int first);
53 52
54struct aesbs_ctx { 53struct aesbs_ctx {
55 u8 rk[13 * (8 * AES_BLOCK_SIZE) + 32]; 54 u8 rk[13 * (8 * AES_BLOCK_SIZE) + 32];
@@ -100,9 +99,8 @@ static int __ecb_crypt(struct skcipher_request *req,
100 struct skcipher_walk walk; 99 struct skcipher_walk walk;
101 int err; 100 int err;
102 101
103 err = skcipher_walk_virt(&walk, req, true); 102 err = skcipher_walk_virt(&walk, req, false);
104 103
105 kernel_neon_begin();
106 while (walk.nbytes >= AES_BLOCK_SIZE) { 104 while (walk.nbytes >= AES_BLOCK_SIZE) {
107 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; 105 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
108 106
@@ -110,12 +108,13 @@ static int __ecb_crypt(struct skcipher_request *req,
110 blocks = round_down(blocks, 108 blocks = round_down(blocks,
111 walk.stride / AES_BLOCK_SIZE); 109 walk.stride / AES_BLOCK_SIZE);
112 110
111 kernel_neon_begin();
113 fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk, 112 fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
114 ctx->rounds, blocks); 113 ctx->rounds, blocks);
114 kernel_neon_end();
115 err = skcipher_walk_done(&walk, 115 err = skcipher_walk_done(&walk,
116 walk.nbytes - blocks * AES_BLOCK_SIZE); 116 walk.nbytes - blocks * AES_BLOCK_SIZE);
117 } 117 }
118 kernel_neon_end();
119 118
120 return err; 119 return err;
121} 120}
@@ -157,22 +156,21 @@ static int cbc_encrypt(struct skcipher_request *req)
157 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 156 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
158 struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); 157 struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
159 struct skcipher_walk walk; 158 struct skcipher_walk walk;
160 int err, first = 1; 159 int err;
161 160
162 err = skcipher_walk_virt(&walk, req, true); 161 err = skcipher_walk_virt(&walk, req, false);
163 162
164 kernel_neon_begin();
165 while (walk.nbytes >= AES_BLOCK_SIZE) { 163 while (walk.nbytes >= AES_BLOCK_SIZE) {
166 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; 164 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
167 165
168 /* fall back to the non-bitsliced NEON implementation */ 166 /* fall back to the non-bitsliced NEON implementation */
167 kernel_neon_begin();
169 neon_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 168 neon_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
170 ctx->enc, ctx->key.rounds, blocks, walk.iv, 169 ctx->enc, ctx->key.rounds, blocks,
171 first); 170 walk.iv);
171 kernel_neon_end();
172 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE); 172 err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
173 first = 0;
174 } 173 }
175 kernel_neon_end();
176 return err; 174 return err;
177} 175}
178 176
@@ -183,9 +181,8 @@ static int cbc_decrypt(struct skcipher_request *req)
183 struct skcipher_walk walk; 181 struct skcipher_walk walk;
184 int err; 182 int err;
185 183
186 err = skcipher_walk_virt(&walk, req, true); 184 err = skcipher_walk_virt(&walk, req, false);
187 185
188 kernel_neon_begin();
189 while (walk.nbytes >= AES_BLOCK_SIZE) { 186 while (walk.nbytes >= AES_BLOCK_SIZE) {
190 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; 187 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
191 188
@@ -193,13 +190,14 @@ static int cbc_decrypt(struct skcipher_request *req)
193 blocks = round_down(blocks, 190 blocks = round_down(blocks,
194 walk.stride / AES_BLOCK_SIZE); 191 walk.stride / AES_BLOCK_SIZE);
195 192
193 kernel_neon_begin();
196 aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr, 194 aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
197 ctx->key.rk, ctx->key.rounds, blocks, 195 ctx->key.rk, ctx->key.rounds, blocks,
198 walk.iv); 196 walk.iv);
197 kernel_neon_end();
199 err = skcipher_walk_done(&walk, 198 err = skcipher_walk_done(&walk,
200 walk.nbytes - blocks * AES_BLOCK_SIZE); 199 walk.nbytes - blocks * AES_BLOCK_SIZE);
201 } 200 }
202 kernel_neon_end();
203 201
204 return err; 202 return err;
205} 203}
@@ -231,9 +229,8 @@ static int ctr_encrypt(struct skcipher_request *req)
231 u8 buf[AES_BLOCK_SIZE]; 229 u8 buf[AES_BLOCK_SIZE];
232 int err; 230 int err;
233 231
234 err = skcipher_walk_virt(&walk, req, true); 232 err = skcipher_walk_virt(&walk, req, false);
235 233
236 kernel_neon_begin();
237 while (walk.nbytes > 0) { 234 while (walk.nbytes > 0) {
238 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; 235 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
239 u8 *final = (walk.total % AES_BLOCK_SIZE) ? buf : NULL; 236 u8 *final = (walk.total % AES_BLOCK_SIZE) ? buf : NULL;
@@ -244,8 +241,10 @@ static int ctr_encrypt(struct skcipher_request *req)
244 final = NULL; 241 final = NULL;
245 } 242 }
246 243
244 kernel_neon_begin();
247 aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 245 aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
248 ctx->rk, ctx->rounds, blocks, walk.iv, final); 246 ctx->rk, ctx->rounds, blocks, walk.iv, final);
247 kernel_neon_end();
249 248
250 if (final) { 249 if (final) {
251 u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE; 250 u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
@@ -260,8 +259,6 @@ static int ctr_encrypt(struct skcipher_request *req)
260 err = skcipher_walk_done(&walk, 259 err = skcipher_walk_done(&walk,
261 walk.nbytes - blocks * AES_BLOCK_SIZE); 260 walk.nbytes - blocks * AES_BLOCK_SIZE);
262 } 261 }
263 kernel_neon_end();
264
265 return err; 262 return err;
266} 263}
267 264
@@ -306,12 +303,11 @@ static int __xts_crypt(struct skcipher_request *req,
306 struct skcipher_walk walk; 303 struct skcipher_walk walk;
307 int err; 304 int err;
308 305
309 err = skcipher_walk_virt(&walk, req, true); 306 err = skcipher_walk_virt(&walk, req, false);
310 307
311 kernel_neon_begin(); 308 kernel_neon_begin();
312 309 neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, ctx->key.rounds, 1);
313 neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, 310 kernel_neon_end();
314 ctx->key.rounds, 1, 1);
315 311
316 while (walk.nbytes >= AES_BLOCK_SIZE) { 312 while (walk.nbytes >= AES_BLOCK_SIZE) {
317 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; 313 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -320,13 +316,13 @@ static int __xts_crypt(struct skcipher_request *req,
320 blocks = round_down(blocks, 316 blocks = round_down(blocks,
321 walk.stride / AES_BLOCK_SIZE); 317 walk.stride / AES_BLOCK_SIZE);
322 318
319 kernel_neon_begin();
323 fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk, 320 fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk,
324 ctx->key.rounds, blocks, walk.iv); 321 ctx->key.rounds, blocks, walk.iv);
322 kernel_neon_end();
325 err = skcipher_walk_done(&walk, 323 err = skcipher_walk_done(&walk,
326 walk.nbytes - blocks * AES_BLOCK_SIZE); 324 walk.nbytes - blocks * AES_BLOCK_SIZE);
327 } 325 }
328 kernel_neon_end();
329
330 return err; 326 return err;
331} 327}
332 328
diff --git a/arch/arm64/crypto/chacha20-neon-glue.c b/arch/arm64/crypto/chacha20-neon-glue.c
index cbdb75d15cd0..727579c93ded 100644
--- a/arch/arm64/crypto/chacha20-neon-glue.c
+++ b/arch/arm64/crypto/chacha20-neon-glue.c
@@ -37,12 +37,19 @@ static void chacha20_doneon(u32 *state, u8 *dst, const u8 *src,
37 u8 buf[CHACHA20_BLOCK_SIZE]; 37 u8 buf[CHACHA20_BLOCK_SIZE];
38 38
39 while (bytes >= CHACHA20_BLOCK_SIZE * 4) { 39 while (bytes >= CHACHA20_BLOCK_SIZE * 4) {
40 kernel_neon_begin();
40 chacha20_4block_xor_neon(state, dst, src); 41 chacha20_4block_xor_neon(state, dst, src);
42 kernel_neon_end();
41 bytes -= CHACHA20_BLOCK_SIZE * 4; 43 bytes -= CHACHA20_BLOCK_SIZE * 4;
42 src += CHACHA20_BLOCK_SIZE * 4; 44 src += CHACHA20_BLOCK_SIZE * 4;
43 dst += CHACHA20_BLOCK_SIZE * 4; 45 dst += CHACHA20_BLOCK_SIZE * 4;
44 state[12] += 4; 46 state[12] += 4;
45 } 47 }
48
49 if (!bytes)
50 return;
51
52 kernel_neon_begin();
46 while (bytes >= CHACHA20_BLOCK_SIZE) { 53 while (bytes >= CHACHA20_BLOCK_SIZE) {
47 chacha20_block_xor_neon(state, dst, src); 54 chacha20_block_xor_neon(state, dst, src);
48 bytes -= CHACHA20_BLOCK_SIZE; 55 bytes -= CHACHA20_BLOCK_SIZE;
@@ -55,6 +62,7 @@ static void chacha20_doneon(u32 *state, u8 *dst, const u8 *src,
55 chacha20_block_xor_neon(state, buf, buf); 62 chacha20_block_xor_neon(state, buf, buf);
56 memcpy(dst, buf, bytes); 63 memcpy(dst, buf, bytes);
57 } 64 }
65 kernel_neon_end();
58} 66}
59 67
60static int chacha20_neon(struct skcipher_request *req) 68static int chacha20_neon(struct skcipher_request *req)
@@ -68,11 +76,10 @@ static int chacha20_neon(struct skcipher_request *req)
68 if (!may_use_simd() || req->cryptlen <= CHACHA20_BLOCK_SIZE) 76 if (!may_use_simd() || req->cryptlen <= CHACHA20_BLOCK_SIZE)
69 return crypto_chacha20_crypt(req); 77 return crypto_chacha20_crypt(req);
70 78
71 err = skcipher_walk_virt(&walk, req, true); 79 err = skcipher_walk_virt(&walk, req, false);
72 80
73 crypto_chacha20_init(state, ctx, walk.iv); 81 crypto_chacha20_init(state, ctx, walk.iv);
74 82
75 kernel_neon_begin();
76 while (walk.nbytes > 0) { 83 while (walk.nbytes > 0) {
77 unsigned int nbytes = walk.nbytes; 84 unsigned int nbytes = walk.nbytes;
78 85
@@ -83,7 +90,6 @@ static int chacha20_neon(struct skcipher_request *req)
83 nbytes); 90 nbytes);
84 err = skcipher_walk_done(&walk, walk.nbytes - nbytes); 91 err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
85 } 92 }
86 kernel_neon_end();
87 93
88 return err; 94 return err;
89} 95}
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index b064d925fe2a..e8880ccdc71f 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -89,21 +89,32 @@ static struct shash_alg algs[] = { {
89static int sha256_update_neon(struct shash_desc *desc, const u8 *data, 89static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
90 unsigned int len) 90 unsigned int len)
91{ 91{
92 /* 92 struct sha256_state *sctx = shash_desc_ctx(desc);
93 * Stacking and unstacking a substantial slice of the NEON register 93
94 * file may significantly affect performance for small updates when
95 * executing in interrupt context, so fall back to the scalar code
96 * in that case.
97 */
98 if (!may_use_simd()) 94 if (!may_use_simd())
99 return sha256_base_do_update(desc, data, len, 95 return sha256_base_do_update(desc, data, len,
100 (sha256_block_fn *)sha256_block_data_order); 96 (sha256_block_fn *)sha256_block_data_order);
101 97
102 kernel_neon_begin(); 98 while (len > 0) {
103 sha256_base_do_update(desc, data, len, 99 unsigned int chunk = len;
104 (sha256_block_fn *)sha256_block_neon); 100
105 kernel_neon_end(); 101 /*
102 * Don't hog the CPU for the entire time it takes to process all
103 * input when running on a preemptible kernel, but process the
104 * data block by block instead.
105 */
106 if (IS_ENABLED(CONFIG_PREEMPT) &&
107 chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
108 chunk = SHA256_BLOCK_SIZE -
109 sctx->count % SHA256_BLOCK_SIZE;
106 110
111 kernel_neon_begin();
112 sha256_base_do_update(desc, data, chunk,
113 (sha256_block_fn *)sha256_block_neon);
114 kernel_neon_end();
115 data += chunk;
116 len -= chunk;
117 }
107 return 0; 118 return 0;
108} 119}
109 120
@@ -117,10 +128,9 @@ static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
117 sha256_base_do_finalize(desc, 128 sha256_base_do_finalize(desc,
118 (sha256_block_fn *)sha256_block_data_order); 129 (sha256_block_fn *)sha256_block_data_order);
119 } else { 130 } else {
120 kernel_neon_begin();
121 if (len) 131 if (len)
122 sha256_base_do_update(desc, data, len, 132 sha256_update_neon(desc, data, len);
123 (sha256_block_fn *)sha256_block_neon); 133 kernel_neon_begin();
124 sha256_base_do_finalize(desc, 134 sha256_base_do_finalize(desc,
125 (sha256_block_fn *)sha256_block_neon); 135 (sha256_block_fn *)sha256_block_neon);
126 kernel_neon_end(); 136 kernel_neon_end();
diff --git a/arch/arm64/crypto/speck-neon-core.S b/arch/arm64/crypto/speck-neon-core.S
new file mode 100644
index 000000000000..b14463438b09
--- /dev/null
+++ b/arch/arm64/crypto/speck-neon-core.S
@@ -0,0 +1,352 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * ARM64 NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
4 *
5 * Copyright (c) 2018 Google, Inc
6 *
7 * Author: Eric Biggers <ebiggers@google.com>
8 */
9
10#include <linux/linkage.h>
11
12 .text
13
14 // arguments
15 ROUND_KEYS .req x0 // const {u64,u32} *round_keys
16 NROUNDS .req w1 // int nrounds
17 NROUNDS_X .req x1
18 DST .req x2 // void *dst
19 SRC .req x3 // const void *src
20 NBYTES .req w4 // unsigned int nbytes
21 TWEAK .req x5 // void *tweak
22
23 // registers which hold the data being encrypted/decrypted
24 // (underscores avoid a naming collision with ARM64 registers x0-x3)
25 X_0 .req v0
26 Y_0 .req v1
27 X_1 .req v2
28 Y_1 .req v3
29 X_2 .req v4
30 Y_2 .req v5
31 X_3 .req v6
32 Y_3 .req v7
33
34 // the round key, duplicated in all lanes
35 ROUND_KEY .req v8
36
37 // index vector for tbl-based 8-bit rotates
38 ROTATE_TABLE .req v9
39 ROTATE_TABLE_Q .req q9
40
41 // temporary registers
42 TMP0 .req v10
43 TMP1 .req v11
44 TMP2 .req v12
45 TMP3 .req v13
46
47 // multiplication table for updating XTS tweaks
48 GFMUL_TABLE .req v14
49 GFMUL_TABLE_Q .req q14
50
51 // next XTS tweak value(s)
52 TWEAKV_NEXT .req v15
53
54 // XTS tweaks for the blocks currently being encrypted/decrypted
55 TWEAKV0 .req v16
56 TWEAKV1 .req v17
57 TWEAKV2 .req v18
58 TWEAKV3 .req v19
59 TWEAKV4 .req v20
60 TWEAKV5 .req v21
61 TWEAKV6 .req v22
62 TWEAKV7 .req v23
63
64 .align 4
65.Lror64_8_table:
66 .octa 0x080f0e0d0c0b0a090007060504030201
67.Lror32_8_table:
68 .octa 0x0c0f0e0d080b0a090407060500030201
69.Lrol64_8_table:
70 .octa 0x0e0d0c0b0a09080f0605040302010007
71.Lrol32_8_table:
72 .octa 0x0e0d0c0f0a09080b0605040702010003
73.Lgf128mul_table:
74 .octa 0x00000000000000870000000000000001
75.Lgf64mul_table:
76 .octa 0x0000000000000000000000002d361b00
77
78/*
79 * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
80 *
81 * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
82 * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
83 * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
84 * 'lanes' is the lane specifier: "2d" for Speck128 or "4s" for Speck64.
85 */
86.macro _speck_round_128bytes n, lanes
87
88 // x = ror(x, 8)
89 tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
90 tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
91 tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
92 tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
93
94 // x += y
95 add X_0.\lanes, X_0.\lanes, Y_0.\lanes
96 add X_1.\lanes, X_1.\lanes, Y_1.\lanes
97 add X_2.\lanes, X_2.\lanes, Y_2.\lanes
98 add X_3.\lanes, X_3.\lanes, Y_3.\lanes
99
100 // x ^= k
101 eor X_0.16b, X_0.16b, ROUND_KEY.16b
102 eor X_1.16b, X_1.16b, ROUND_KEY.16b
103 eor X_2.16b, X_2.16b, ROUND_KEY.16b
104 eor X_3.16b, X_3.16b, ROUND_KEY.16b
105
106 // y = rol(y, 3)
107 shl TMP0.\lanes, Y_0.\lanes, #3
108 shl TMP1.\lanes, Y_1.\lanes, #3
109 shl TMP2.\lanes, Y_2.\lanes, #3
110 shl TMP3.\lanes, Y_3.\lanes, #3
111 sri TMP0.\lanes, Y_0.\lanes, #(\n - 3)
112 sri TMP1.\lanes, Y_1.\lanes, #(\n - 3)
113 sri TMP2.\lanes, Y_2.\lanes, #(\n - 3)
114 sri TMP3.\lanes, Y_3.\lanes, #(\n - 3)
115
116 // y ^= x
117 eor Y_0.16b, TMP0.16b, X_0.16b
118 eor Y_1.16b, TMP1.16b, X_1.16b
119 eor Y_2.16b, TMP2.16b, X_2.16b
120 eor Y_3.16b, TMP3.16b, X_3.16b
121.endm
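
The macro above performs one Speck round on eight Speck128 blocks (or sixteen Speck64 blocks) at once; per lane it is the textbook Speck round function. In scalar C, for the 64-bit-word (Speck128) case, purely for comparison with the vector code (the generic C cipher added elsewhere in this series is the authoritative implementation):

    static void speck128_round_sketch(u64 *x, u64 *y, u64 k)
    {
            *x = (*x >> 8) | (*x << 56);    /* x = ror(x, 8) */
            *x += *y;                       /* x += y        */
            *x ^= k;                        /* x ^= k        */
            *y = (*y << 3) | (*y >> 61);    /* y = rol(y, 3) */
            *y ^= *x;                       /* y ^= x        */
    }
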
122
123/*
124 * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
125 *
126 * This is the inverse of _speck_round_128bytes().
127 */
128.macro _speck_unround_128bytes n, lanes
129
130 // y ^= x
131 eor TMP0.16b, Y_0.16b, X_0.16b
132 eor TMP1.16b, Y_1.16b, X_1.16b
133 eor TMP2.16b, Y_2.16b, X_2.16b
134 eor TMP3.16b, Y_3.16b, X_3.16b
135
136 // y = ror(y, 3)
137 ushr Y_0.\lanes, TMP0.\lanes, #3
138 ushr Y_1.\lanes, TMP1.\lanes, #3
139 ushr Y_2.\lanes, TMP2.\lanes, #3
140 ushr Y_3.\lanes, TMP3.\lanes, #3
141 sli Y_0.\lanes, TMP0.\lanes, #(\n - 3)
142 sli Y_1.\lanes, TMP1.\lanes, #(\n - 3)
143 sli Y_2.\lanes, TMP2.\lanes, #(\n - 3)
144 sli Y_3.\lanes, TMP3.\lanes, #(\n - 3)
145
146 // x ^= k
147 eor X_0.16b, X_0.16b, ROUND_KEY.16b
148 eor X_1.16b, X_1.16b, ROUND_KEY.16b
149 eor X_2.16b, X_2.16b, ROUND_KEY.16b
150 eor X_3.16b, X_3.16b, ROUND_KEY.16b
151
152 // x -= y
153 sub X_0.\lanes, X_0.\lanes, Y_0.\lanes
154 sub X_1.\lanes, X_1.\lanes, Y_1.\lanes
155 sub X_2.\lanes, X_2.\lanes, Y_2.\lanes
156 sub X_3.\lanes, X_3.\lanes, Y_3.\lanes
157
158 // x = rol(x, 8)
159 tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
160 tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
161 tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
162 tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
163.endm
164
165.macro _next_xts_tweak next, cur, tmp, n
166.if \n == 64
167 /*
168 * Calculate the next tweak by multiplying the current one by x,
169 * modulo p(x) = x^128 + x^7 + x^2 + x + 1.
170 */
171 sshr \tmp\().2d, \cur\().2d, #63
172 and \tmp\().16b, \tmp\().16b, GFMUL_TABLE.16b
173 shl \next\().2d, \cur\().2d, #1
174 ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
175 eor \next\().16b, \next\().16b, \tmp\().16b
176.else
177 /*
178 * Calculate the next two tweaks by multiplying the current ones by x^2,
179 * modulo p(x) = x^64 + x^4 + x^3 + x + 1.
180 */
181 ushr \tmp\().2d, \cur\().2d, #62
182 shl \next\().2d, \cur\().2d, #2
183 tbl \tmp\().16b, {GFMUL_TABLE.16b}, \tmp\().16b
184 eor \next\().16b, \next\().16b, \tmp\().16b
185.endif
186.endm
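
The \n == 64 branch of the macro above is the usual XTS tweak update: multiplication by x in GF(2^128) with reduction by x^128 + x^7 + x^2 + x + 1. With the tweak held as two little-endian 64-bit halves, the same update in scalar C looks like this (illustration only; the sshr/and pair in the assembly plays the role of the conditional 0x87 below):

    static void xts_next_tweak_sketch(u64 t[2])     /* t[0] = low half, t[1] = high half */
    {
            u64 carry = (t[1] >> 63) ? 0x87 : 0;    /* reduction term if the top bit is set */

            t[1] = (t[1] << 1) | (t[0] >> 63);
            t[0] = (t[0] << 1) ^ carry;
    }
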
187
188/*
189 * _speck_xts_crypt() - Speck-XTS encryption/decryption
190 *
191 * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
192 * using Speck-XTS, specifically the variant with a block size of '2n' and round
193 * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
194 * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
195 * nonzero multiple of 128.
196 */
197.macro _speck_xts_crypt n, lanes, decrypting
198
199 /*
200 * If decrypting, modify the ROUND_KEYS parameter to point to the last
201 * round key rather than the first, since for decryption the round keys
202 * are used in reverse order.
203 */
204.if \decrypting
205 mov NROUNDS, NROUNDS /* zero the high 32 bits */
206.if \n == 64
207 add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #3
208 sub ROUND_KEYS, ROUND_KEYS, #8
209.else
210 add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #2
211 sub ROUND_KEYS, ROUND_KEYS, #4
212.endif
213.endif
214
215 // Load the index vector for tbl-based 8-bit rotates
216.if \decrypting
217 ldr ROTATE_TABLE_Q, .Lrol\n\()_8_table
218.else
219 ldr ROTATE_TABLE_Q, .Lror\n\()_8_table
220.endif
221
222 // One-time XTS preparation
223.if \n == 64
224 // Load first tweak
225 ld1 {TWEAKV0.16b}, [TWEAK]
226
227 // Load GF(2^128) multiplication table
228 ldr GFMUL_TABLE_Q, .Lgf128mul_table
229.else
230 // Load first tweak
231 ld1 {TWEAKV0.8b}, [TWEAK]
232
233 // Load GF(2^64) multiplication table
234 ldr GFMUL_TABLE_Q, .Lgf64mul_table
235
236 // Calculate second tweak, packing it together with the first
237 ushr TMP0.2d, TWEAKV0.2d, #63
238 shl TMP1.2d, TWEAKV0.2d, #1
239 tbl TMP0.8b, {GFMUL_TABLE.16b}, TMP0.8b
240 eor TMP0.8b, TMP0.8b, TMP1.8b
241 mov TWEAKV0.d[1], TMP0.d[0]
242.endif
243
244.Lnext_128bytes_\@:
245
246 // Calculate XTS tweaks for next 128 bytes
247 _next_xts_tweak TWEAKV1, TWEAKV0, TMP0, \n
248 _next_xts_tweak TWEAKV2, TWEAKV1, TMP0, \n
249 _next_xts_tweak TWEAKV3, TWEAKV2, TMP0, \n
250 _next_xts_tweak TWEAKV4, TWEAKV3, TMP0, \n
251 _next_xts_tweak TWEAKV5, TWEAKV4, TMP0, \n
252 _next_xts_tweak TWEAKV6, TWEAKV5, TMP0, \n
253 _next_xts_tweak TWEAKV7, TWEAKV6, TMP0, \n
254 _next_xts_tweak TWEAKV_NEXT, TWEAKV7, TMP0, \n
255
256 // Load the next source blocks into {X,Y}[0-3]
257 ld1 {X_0.16b-Y_1.16b}, [SRC], #64
258 ld1 {X_2.16b-Y_3.16b}, [SRC], #64
259
260 // XOR the source blocks with their XTS tweaks
261 eor TMP0.16b, X_0.16b, TWEAKV0.16b
262 eor Y_0.16b, Y_0.16b, TWEAKV1.16b
263 eor TMP1.16b, X_1.16b, TWEAKV2.16b
264 eor Y_1.16b, Y_1.16b, TWEAKV3.16b
265 eor TMP2.16b, X_2.16b, TWEAKV4.16b
266 eor Y_2.16b, Y_2.16b, TWEAKV5.16b
267 eor TMP3.16b, X_3.16b, TWEAKV6.16b
268 eor Y_3.16b, Y_3.16b, TWEAKV7.16b
269
270 /*
271 * De-interleave the 'x' and 'y' elements of each block, i.e. make it so
272 * that the X[0-3] registers contain only the second halves of blocks,
273 * and the Y[0-3] registers contain only the first halves of blocks.
274 * (Speck uses the order (y, x) rather than the more intuitive (x, y).)
275 */
276 uzp2 X_0.\lanes, TMP0.\lanes, Y_0.\lanes
277 uzp1 Y_0.\lanes, TMP0.\lanes, Y_0.\lanes
278 uzp2 X_1.\lanes, TMP1.\lanes, Y_1.\lanes
279 uzp1 Y_1.\lanes, TMP1.\lanes, Y_1.\lanes
280 uzp2 X_2.\lanes, TMP2.\lanes, Y_2.\lanes
281 uzp1 Y_2.\lanes, TMP2.\lanes, Y_2.\lanes
282 uzp2 X_3.\lanes, TMP3.\lanes, Y_3.\lanes
283 uzp1 Y_3.\lanes, TMP3.\lanes, Y_3.\lanes
284
285 // Do the cipher rounds
286 mov x6, ROUND_KEYS
287 mov w7, NROUNDS
288.Lnext_round_\@:
289.if \decrypting
290 ld1r {ROUND_KEY.\lanes}, [x6]
291 sub x6, x6, #( \n / 8 )
292 _speck_unround_128bytes \n, \lanes
293.else
294 ld1r {ROUND_KEY.\lanes}, [x6], #( \n / 8 )
295 _speck_round_128bytes \n, \lanes
296.endif
297 subs w7, w7, #1
298 bne .Lnext_round_\@
299
300 // Re-interleave the 'x' and 'y' elements of each block
301 zip1 TMP0.\lanes, Y_0.\lanes, X_0.\lanes
302 zip2 Y_0.\lanes, Y_0.\lanes, X_0.\lanes
303 zip1 TMP1.\lanes, Y_1.\lanes, X_1.\lanes
304 zip2 Y_1.\lanes, Y_1.\lanes, X_1.\lanes
305 zip1 TMP2.\lanes, Y_2.\lanes, X_2.\lanes
306 zip2 Y_2.\lanes, Y_2.\lanes, X_2.\lanes
307 zip1 TMP3.\lanes, Y_3.\lanes, X_3.\lanes
308 zip2 Y_3.\lanes, Y_3.\lanes, X_3.\lanes
309
310 // XOR the encrypted/decrypted blocks with the tweaks calculated earlier
311 eor X_0.16b, TMP0.16b, TWEAKV0.16b
312 eor Y_0.16b, Y_0.16b, TWEAKV1.16b
313 eor X_1.16b, TMP1.16b, TWEAKV2.16b
314 eor Y_1.16b, Y_1.16b, TWEAKV3.16b
315 eor X_2.16b, TMP2.16b, TWEAKV4.16b
316 eor Y_2.16b, Y_2.16b, TWEAKV5.16b
317 eor X_3.16b, TMP3.16b, TWEAKV6.16b
318 eor Y_3.16b, Y_3.16b, TWEAKV7.16b
319 mov TWEAKV0.16b, TWEAKV_NEXT.16b
320
321 // Store the ciphertext in the destination buffer
322 st1 {X_0.16b-Y_1.16b}, [DST], #64
323 st1 {X_2.16b-Y_3.16b}, [DST], #64
324
325 // Continue if there are more 128-byte chunks remaining
326 subs NBYTES, NBYTES, #128
327 bne .Lnext_128bytes_\@
328
329 // Store the next tweak and return
330.if \n == 64
331 st1 {TWEAKV_NEXT.16b}, [TWEAK]
332.else
333 st1 {TWEAKV_NEXT.8b}, [TWEAK]
334.endif
335 ret
336.endm
337
338ENTRY(speck128_xts_encrypt_neon)
339 _speck_xts_crypt n=64, lanes=2d, decrypting=0
340ENDPROC(speck128_xts_encrypt_neon)
341
342ENTRY(speck128_xts_decrypt_neon)
343 _speck_xts_crypt n=64, lanes=2d, decrypting=1
344ENDPROC(speck128_xts_decrypt_neon)
345
346ENTRY(speck64_xts_encrypt_neon)
347 _speck_xts_crypt n=32, lanes=4s, decrypting=0
348ENDPROC(speck64_xts_encrypt_neon)
349
350ENTRY(speck64_xts_decrypt_neon)
351 _speck_xts_crypt n=32, lanes=4s, decrypting=1
352ENDPROC(speck64_xts_decrypt_neon)
diff --git a/arch/arm64/crypto/speck-neon-glue.c b/arch/arm64/crypto/speck-neon-glue.c
new file mode 100644
index 000000000000..6e233aeb4ff4
--- /dev/null
+++ b/arch/arm64/crypto/speck-neon-glue.c
@@ -0,0 +1,282 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
4 * (64-bit version; based on the 32-bit version)
5 *
6 * Copyright (c) 2018 Google, Inc
7 */
8
9#include <asm/hwcap.h>
10#include <asm/neon.h>
11#include <asm/simd.h>
12#include <crypto/algapi.h>
13#include <crypto/gf128mul.h>
14#include <crypto/internal/skcipher.h>
15#include <crypto/speck.h>
16#include <crypto/xts.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19
20/* The assembly functions only handle multiples of 128 bytes */
21#define SPECK_NEON_CHUNK_SIZE 128
22
23/* Speck128 */
24
25struct speck128_xts_tfm_ctx {
26 struct speck128_tfm_ctx main_key;
27 struct speck128_tfm_ctx tweak_key;
28};
29
30asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
31 void *dst, const void *src,
32 unsigned int nbytes, void *tweak);
33
34asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
35 void *dst, const void *src,
36 unsigned int nbytes, void *tweak);
37
38typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
39 u8 *, const u8 *);
40typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
41 const void *, unsigned int, void *);
42
43static __always_inline int
44__speck128_xts_crypt(struct skcipher_request *req,
45 speck128_crypt_one_t crypt_one,
46 speck128_xts_crypt_many_t crypt_many)
47{
48 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
49 const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
50 struct skcipher_walk walk;
51 le128 tweak;
52 int err;
53
54 err = skcipher_walk_virt(&walk, req, true);
55
56 crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
57
58 while (walk.nbytes > 0) {
59 unsigned int nbytes = walk.nbytes;
60 u8 *dst = walk.dst.virt.addr;
61 const u8 *src = walk.src.virt.addr;
62
63 if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
64 unsigned int count;
65
66 count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
67 kernel_neon_begin();
68 (*crypt_many)(ctx->main_key.round_keys,
69 ctx->main_key.nrounds,
70 dst, src, count, &tweak);
71 kernel_neon_end();
72 dst += count;
73 src += count;
74 nbytes -= count;
75 }
76
77 /* Handle any remainder with generic code */
78 while (nbytes >= sizeof(tweak)) {
79 le128_xor((le128 *)dst, (const le128 *)src, &tweak);
80 (*crypt_one)(&ctx->main_key, dst, dst);
81 le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
82 gf128mul_x_ble(&tweak, &tweak);
83
84 dst += sizeof(tweak);
85 src += sizeof(tweak);
86 nbytes -= sizeof(tweak);
87 }
88 err = skcipher_walk_done(&walk, nbytes);
89 }
90
91 return err;
92}
93
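The fallback loop above is the textbook XTS recipe applied one 16-byte block at a time; the NEON entry points simply perform eight of these steps per 128-byte chunk with the tweaks held in vector registers. A minimal sketch of a single step, written as a hypothetical helper (speck128_xts_one_block does not exist in this file) using only functions the file already includes:

	static void speck128_xts_one_block(const struct speck128_tfm_ctx *key,
					   u8 *dst, const u8 *src, le128 *tweak)
	{
		le128_xor((le128 *)dst, (const le128 *)src, tweak); /* PP = P xor T   */
		crypto_speck128_encrypt(key, dst, dst);             /* CC = E(K1, PP) */
		le128_xor((le128 *)dst, (const le128 *)dst, tweak); /* C  = CC xor T  */
		gf128mul_x_ble(tweak, tweak);                       /* T  = T * x     */
	}

The decrypt path is identical except that crypto_speck128_decrypt replaces the middle step; the tweak chain itself always runs forward.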
94static int speck128_xts_encrypt(struct skcipher_request *req)
95{
96 return __speck128_xts_crypt(req, crypto_speck128_encrypt,
97 speck128_xts_encrypt_neon);
98}
99
100static int speck128_xts_decrypt(struct skcipher_request *req)
101{
102 return __speck128_xts_crypt(req, crypto_speck128_decrypt,
103 speck128_xts_decrypt_neon);
104}
105
106static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
107 unsigned int keylen)
108{
109 struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
110 int err;
111
112 err = xts_verify_key(tfm, key, keylen);
113 if (err)
114 return err;
115
116 keylen /= 2;
117
118 err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
119 if (err)
120 return err;
121
122 return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
123}
124
125/* Speck64 */
126
127struct speck64_xts_tfm_ctx {
128 struct speck64_tfm_ctx main_key;
129 struct speck64_tfm_ctx tweak_key;
130};
131
132asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
133 void *dst, const void *src,
134 unsigned int nbytes, void *tweak);
135
136asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
137 void *dst, const void *src,
138 unsigned int nbytes, void *tweak);
139
140typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
141 u8 *, const u8 *);
142typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
143 const void *, unsigned int, void *);
144
145static __always_inline int
146__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one,
147 speck64_xts_crypt_many_t crypt_many)
148{
149 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
150 const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
151 struct skcipher_walk walk;
152 __le64 tweak;
153 int err;
154
155 err = skcipher_walk_virt(&walk, req, true);
156
157 crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
158
159 while (walk.nbytes > 0) {
160 unsigned int nbytes = walk.nbytes;
161 u8 *dst = walk.dst.virt.addr;
162 const u8 *src = walk.src.virt.addr;
163
164 if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
165 unsigned int count;
166
167 count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
168 kernel_neon_begin();
169 (*crypt_many)(ctx->main_key.round_keys,
170 ctx->main_key.nrounds,
171 dst, src, count, &tweak);
172 kernel_neon_end();
173 dst += count;
174 src += count;
175 nbytes -= count;
176 }
177
178 /* Handle any remainder with generic code */
179 while (nbytes >= sizeof(tweak)) {
180 *(__le64 *)dst = *(__le64 *)src ^ tweak;
181 (*crypt_one)(&ctx->main_key, dst, dst);
182 *(__le64 *)dst ^= tweak;
183 tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
184 ((tweak & cpu_to_le64(1ULL << 63)) ?
185 0x1B : 0));
186 dst += sizeof(tweak);
187 src += sizeof(tweak);
188 nbytes -= sizeof(tweak);
189 }
190 err = skcipher_walk_done(&walk, nbytes);
191 }
192
193 return err;
194}
195
196static int speck64_xts_encrypt(struct skcipher_request *req)
197{
198 return __speck64_xts_crypt(req, crypto_speck64_encrypt,
199 speck64_xts_encrypt_neon);
200}
201
202static int speck64_xts_decrypt(struct skcipher_request *req)
203{
204 return __speck64_xts_crypt(req, crypto_speck64_decrypt,
205 speck64_xts_decrypt_neon);
206}
207
208static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
209 unsigned int keylen)
210{
211 struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
212 int err;
213
214 err = xts_verify_key(tfm, key, keylen);
215 if (err)
216 return err;
217
218 keylen /= 2;
219
220 err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
221 if (err)
222 return err;
223
224 return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
225}
226
227static struct skcipher_alg speck_algs[] = {
228 {
229 .base.cra_name = "xts(speck128)",
230 .base.cra_driver_name = "xts-speck128-neon",
231 .base.cra_priority = 300,
232 .base.cra_blocksize = SPECK128_BLOCK_SIZE,
233 .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
234 .base.cra_alignmask = 7,
235 .base.cra_module = THIS_MODULE,
236 .min_keysize = 2 * SPECK128_128_KEY_SIZE,
237 .max_keysize = 2 * SPECK128_256_KEY_SIZE,
238 .ivsize = SPECK128_BLOCK_SIZE,
239 .walksize = SPECK_NEON_CHUNK_SIZE,
240 .setkey = speck128_xts_setkey,
241 .encrypt = speck128_xts_encrypt,
242 .decrypt = speck128_xts_decrypt,
243 }, {
244 .base.cra_name = "xts(speck64)",
245 .base.cra_driver_name = "xts-speck64-neon",
246 .base.cra_priority = 300,
247 .base.cra_blocksize = SPECK64_BLOCK_SIZE,
248 .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
249 .base.cra_alignmask = 7,
250 .base.cra_module = THIS_MODULE,
251 .min_keysize = 2 * SPECK64_96_KEY_SIZE,
252 .max_keysize = 2 * SPECK64_128_KEY_SIZE,
253 .ivsize = SPECK64_BLOCK_SIZE,
254 .walksize = SPECK_NEON_CHUNK_SIZE,
255 .setkey = speck64_xts_setkey,
256 .encrypt = speck64_xts_encrypt,
257 .decrypt = speck64_xts_decrypt,
258 }
259};
260
261static int __init speck_neon_module_init(void)
262{
263 if (!(elf_hwcap & HWCAP_ASIMD))
264 return -ENODEV;
265 return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
266}
267
268static void __exit speck_neon_module_exit(void)
269{
270 crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
271}
272
273module_init(speck_neon_module_init);
274module_exit(speck_neon_module_exit);
275
276MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
277MODULE_LICENSE("GPL");
278MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
279MODULE_ALIAS_CRYPTO("xts(speck128)");
280MODULE_ALIAS_CRYPTO("xts-speck128-neon");
281MODULE_ALIAS_CRYPTO("xts(speck64)");
282MODULE_ALIAS_CRYPTO("xts-speck64-neon");
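With the algorithms registered, any kernel user can reach them through the generic skcipher API by name. The following is an illustrative caller sketch, not part of this patch: the zeroed 64-byte key, the 4096-byte buffer and the function name are placeholders, and a synchronous wait via crypto_wait_req() is assumed.

	#include <crypto/skcipher.h>
	#include <linux/crypto.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	static int speck128_xts_example(void)
	{
		u8 key[64] = { 0 };     /* main key || tweak key (placeholder only) */
		u8 iv[16] = { 0 };      /* XTS sector tweak */
		struct crypto_skcipher *tfm;
		struct skcipher_request *req = NULL;
		struct scatterlist sg;
		DECLARE_CRYPTO_WAIT(wait);
		u8 *buf = NULL;
		int err;

		tfm = crypto_alloc_skcipher("xts(speck128)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		buf = kzalloc(4096, GFP_KERNEL);        /* 32 full 128-byte chunks */
		if (!buf) {
			err = -ENOMEM;
			goto out;
		}

		err = crypto_skcipher_setkey(tfm, key, sizeof(key));
		if (err)
			goto out;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out;
		}

		sg_init_one(&sg, buf, 4096);
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP,
					      crypto_req_done, &wait);
		skcipher_request_set_crypt(req, &sg, &sg, 4096, iv);

		/* The request is chunk-aligned, so the NEON fast path covers all
		 * of it whenever may_use_simd() allows. */
		err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	out:
		skcipher_request_free(req);
		kfree(buf);
		crypto_free_skcipher(tfm);
		return err;
	}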
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 12e8484a8ee7..e762ef417562 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -94,23 +94,30 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
94 94
95 95
96#define STACK_OFFSET 8*3 96#define STACK_OFFSET 8*3
97#define HashKey 16*0 // store HashKey <<1 mod poly here 97
98#define HashKey_2 16*1 // store HashKey^2 <<1 mod poly here 98#define AadHash 16*0
99#define HashKey_3 16*2 // store HashKey^3 <<1 mod poly here 99#define AadLen 16*1
100#define HashKey_4 16*3 // store HashKey^4 <<1 mod poly here 100#define InLen (16*1)+8
101#define HashKey_k 16*4 // store XOR of High 64 bits and Low 64 101#define PBlockEncKey 16*2
102#define OrigIV 16*3
103#define CurCount 16*4
104#define PBlockLen 16*5
105#define HashKey 16*6 // store HashKey <<1 mod poly here
106#define HashKey_2 16*7 // store HashKey^2 <<1 mod poly here
107#define HashKey_3 16*8 // store HashKey^3 <<1 mod poly here
108#define HashKey_4 16*9 // store HashKey^4 <<1 mod poly here
109#define HashKey_k 16*10 // store XOR of High 64 bits and Low 64
102 // bits of HashKey <<1 mod poly here 110 // bits of HashKey <<1 mod poly here
103 //(for Karatsuba purposes) 111 //(for Karatsuba purposes)
104#define HashKey_2_k 16*5 // store XOR of High 64 bits and Low 64 112#define HashKey_2_k 16*11 // store XOR of High 64 bits and Low 64
105 // bits of HashKey^2 <<1 mod poly here 113 // bits of HashKey^2 <<1 mod poly here
106 // (for Karatsuba purposes) 114 // (for Karatsuba purposes)
107#define HashKey_3_k 16*6 // store XOR of High 64 bits and Low 64 115#define HashKey_3_k 16*12 // store XOR of High 64 bits and Low 64
108 // bits of HashKey^3 <<1 mod poly here 116 // bits of HashKey^3 <<1 mod poly here
109 // (for Karatsuba purposes) 117 // (for Karatsuba purposes)
110#define HashKey_4_k 16*7 // store XOR of High 64 bits and Low 64 118#define HashKey_4_k 16*13 // store XOR of High 64 bits and Low 64
111 // bits of HashKey^4 <<1 mod poly here 119 // bits of HashKey^4 <<1 mod poly here
112 // (for Karatsuba purposes) 120 // (for Karatsuba purposes)
113#define VARIABLE_OFFSET 16*8
114 121
115#define arg1 rdi 122#define arg1 rdi
116#define arg2 rsi 123#define arg2 rsi
@@ -118,10 +125,11 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
118#define arg4 rcx 125#define arg4 rcx
119#define arg5 r8 126#define arg5 r8
120#define arg6 r9 127#define arg6 r9
121#define arg7 STACK_OFFSET+8(%r14) 128#define arg7 STACK_OFFSET+8(%rsp)
122#define arg8 STACK_OFFSET+16(%r14) 129#define arg8 STACK_OFFSET+16(%rsp)
123#define arg9 STACK_OFFSET+24(%r14) 130#define arg9 STACK_OFFSET+24(%rsp)
124#define arg10 STACK_OFFSET+32(%r14) 131#define arg10 STACK_OFFSET+32(%rsp)
132#define arg11 STACK_OFFSET+40(%rsp)
125#define keysize 2*15*16(%arg1) 133#define keysize 2*15*16(%arg1)
126#endif 134#endif
127 135
@@ -171,6 +179,332 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
171#define TKEYP T1 179#define TKEYP T1
172#endif 180#endif
173 181
182.macro FUNC_SAVE
183 push %r12
184 push %r13
185 push %r14
186#
187# states of %xmm registers %xmm6:%xmm15 not saved
188# all %xmm registers are clobbered
189#
190.endm
191
192
193.macro FUNC_RESTORE
194 pop %r14
195 pop %r13
196 pop %r12
197.endm
198
199# Precompute hashkeys.
200# Input: Hash subkey.
201# Output: HashKeys stored in gcm_context_data. Only needs to be called
202# once per key.
203# clobbers r12, and tmp xmm registers.
204.macro PRECOMPUTE SUBKEY TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 TMP7
205 mov \SUBKEY, %r12
206 movdqu (%r12), \TMP3
207 movdqa SHUF_MASK(%rip), \TMP2
208 PSHUFB_XMM \TMP2, \TMP3
209
210 # precompute HashKey<<1 mod poly from the HashKey (required for GHASH)
211
212 movdqa \TMP3, \TMP2
213 psllq $1, \TMP3
214 psrlq $63, \TMP2
215 movdqa \TMP2, \TMP1
216 pslldq $8, \TMP2
217 psrldq $8, \TMP1
218 por \TMP2, \TMP3
219
220 # reduce HashKey<<1
221
222 pshufd $0x24, \TMP1, \TMP2
223 pcmpeqd TWOONE(%rip), \TMP2
224 pand POLY(%rip), \TMP2
225 pxor \TMP2, \TMP3
226 movdqa \TMP3, HashKey(%arg2)
227
228 movdqa \TMP3, \TMP5
229 pshufd $78, \TMP3, \TMP1
230 pxor \TMP3, \TMP1
231 movdqa \TMP1, HashKey_k(%arg2)
232
233 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
234# TMP5 = HashKey^2<<1 (mod poly)
235 movdqa \TMP5, HashKey_2(%arg2)
236# HashKey_2 = HashKey^2<<1 (mod poly)
237 pshufd $78, \TMP5, \TMP1
238 pxor \TMP5, \TMP1
239 movdqa \TMP1, HashKey_2_k(%arg2)
240
241 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
242# TMP5 = HashKey^3<<1 (mod poly)
243 movdqa \TMP5, HashKey_3(%arg2)
244 pshufd $78, \TMP5, \TMP1
245 pxor \TMP5, \TMP1
246 movdqa \TMP1, HashKey_3_k(%arg2)
247
248 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
249# TMP5 = HashKey^4<<1 (mod poly)
250 movdqa \TMP5, HashKey_4(%arg2)
251 pshufd $78, \TMP5, \TMP1
252 pxor \TMP5, \TMP1
253 movdqa \TMP1, HashKey_4_k(%arg2)
254.endm
255
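Read back in GHASH terms, the stores above cache everything the 4-way multiply needs, derived once per key from the raw hash subkey H passed in SUBKEY. Roughly, and following the comments in the offset definitions earlier:

	HashKey      =  H <<1 mod poly                    (the form GHASH_MUL consumes)
	HashKey_i    =  HashKey^i <<1 mod poly            for i = 2, 3, 4
	HashKey_i_k  =  high 64 bits XOR low 64 bits of HashKey_i   (Karatsuba)

The *_k entries exist so the middle Karatsuba product, (a1 + a0) * (b1 + b0), can be formed with a single PCLMULQDQ against a precomputed operand instead of an extra multiply per block.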
256# GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding.
257# Clobbers rax, r10-r13 and xmm0-xmm6, %xmm13
258.macro GCM_INIT Iv SUBKEY AAD AADLEN
259 mov \AADLEN, %r11
260 mov %r11, AadLen(%arg2) # ctx_data.aad_length = aad_length
261 xor %r11, %r11
262 mov %r11, InLen(%arg2) # ctx_data.in_length = 0
263 mov %r11, PBlockLen(%arg2) # ctx_data.partial_block_length = 0
264 mov %r11, PBlockEncKey(%arg2) # ctx_data.partial_block_enc_key = 0
265 mov \Iv, %rax
266 movdqu (%rax), %xmm0
267 movdqu %xmm0, OrigIV(%arg2) # ctx_data.orig_IV = iv
268
269 movdqa SHUF_MASK(%rip), %xmm2
270 PSHUFB_XMM %xmm2, %xmm0
271 movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv
272
273 PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7
274 movdqa HashKey(%arg2), %xmm13
275
276 CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \
277 %xmm4, %xmm5, %xmm6
278.endm
279
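The AadHash, AadLen, InLen, PBlockEncKey, OrigIV, CurCount, PBlockLen and HashKey offsets that GCM_INIT fills in all index one per-request context block passed in %arg2. A C-level view of that layout, reconstructed here from the offsets and the ctx_data.* comments above (the authoritative struct gcm_context_data lives in the glue code; the aad_hash and hash_keys names are assumptions):

	struct gcm_context_data {
		u8  aad_hash[16];               /* AadHash        16*0      */
		u64 aad_length;                 /* AadLen         16*1      */
		u64 in_length;                  /* InLen          16*1 + 8  */
		u8  partial_block_enc_key[16];  /* PBlockEncKey   16*2      */
		u8  orig_IV[16];                /* OrigIV         16*3      */
		u8  current_counter[16];        /* CurCount       16*4      */
		u64 partial_block_length;       /* PBlockLen      16*5      */
		u64 unused;                     /* pad out to 16*6          */
		u8  hash_keys[8][16];           /* HashKey .. HashKey_4_k,
						   16*6 .. 16*13           */
	};

Keeping this state per request, rather than in the old VARIABLE_OFFSET area on the stack, is what lets the glue code split one GCM operation across several calls.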
280# GCM_ENC_DEC Encodes/Decodes given data. Assumes that the passed gcm_context
281# struct has been initialized by GCM_INIT.
282# Requires the input data be at least 1 byte long because of READ_PARTIAL_BLOCK
283# Clobbers rax, r10-r13, and xmm0-xmm15
284.macro GCM_ENC_DEC operation
285 movdqu AadHash(%arg2), %xmm8
286 movdqu HashKey(%arg2), %xmm13
287 add %arg5, InLen(%arg2)
288
289 xor %r11, %r11 # initialise the data pointer offset as zero
290 PARTIAL_BLOCK %arg3 %arg4 %arg5 %r11 %xmm8 \operation
291
292 sub %r11, %arg5 # sub partial block data used
293 mov %arg5, %r13 # save the number of bytes
294
295 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
296 mov %r13, %r12
297 # Encrypt/Decrypt first few blocks
298
299 and $(3<<4), %r12
300 jz _initial_num_blocks_is_0_\@
301 cmp $(2<<4), %r12
302 jb _initial_num_blocks_is_1_\@
303 je _initial_num_blocks_is_2_\@
304_initial_num_blocks_is_3_\@:
305 INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
306%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, \operation
307 sub $48, %r13
308 jmp _initial_blocks_\@
309_initial_num_blocks_is_2_\@:
310 INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
311%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, \operation
312 sub $32, %r13
313 jmp _initial_blocks_\@
314_initial_num_blocks_is_1_\@:
315 INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
316%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, \operation
317 sub $16, %r13
318 jmp _initial_blocks_\@
319_initial_num_blocks_is_0_\@:
320 INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
321%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, \operation
322_initial_blocks_\@:
323
324 # Main loop - Encrypt/Decrypt remaining blocks
325
326 cmp $0, %r13
327 je _zero_cipher_left_\@
328 sub $64, %r13
329 je _four_cipher_left_\@
330_crypt_by_4_\@:
331 GHASH_4_ENCRYPT_4_PARALLEL_\operation %xmm9, %xmm10, %xmm11, %xmm12, \
332 %xmm13, %xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, \
333 %xmm7, %xmm8, enc
334 add $64, %r11
335 sub $64, %r13
336 jne _crypt_by_4_\@
337_four_cipher_left_\@:
338 GHASH_LAST_4 %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \
339%xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8
340_zero_cipher_left_\@:
341 movdqu %xmm8, AadHash(%arg2)
342 movdqu %xmm0, CurCount(%arg2)
343
344 mov %arg5, %r13
345 and $15, %r13 # %r13 = arg5 (mod 16)
346 je _multiple_of_16_bytes_\@
347
348 mov %r13, PBlockLen(%arg2)
349
350 # Handle the last <16 Byte block separately
351 paddd ONE(%rip), %xmm0 # INCR CNT to get Yn
352 movdqu %xmm0, CurCount(%arg2)
353 movdqa SHUF_MASK(%rip), %xmm10
354 PSHUFB_XMM %xmm10, %xmm0
355
356 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn)
357 movdqu %xmm0, PBlockEncKey(%arg2)
358
359 cmp $16, %arg5
360 jge _large_enough_update_\@
361
362 lea (%arg4,%r11,1), %r10
363 mov %r13, %r12
364 READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
365 jmp _data_read_\@
366
367_large_enough_update_\@:
368 sub $16, %r11
369 add %r13, %r11
370
371 # receive the last <16 Byte block
372 movdqu (%arg4, %r11, 1), %xmm1
373
374 sub %r13, %r11
375 add $16, %r11
376
377 lea SHIFT_MASK+16(%rip), %r12
378 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
379 # (r13 is the number of bytes in plaintext mod 16)
380 sub %r13, %r12
381 # get the appropriate shuffle mask
382 movdqu (%r12), %xmm2
383 # shift right 16-r13 bytes
384 PSHUFB_XMM %xmm2, %xmm1
385
386_data_read_\@:
387 lea ALL_F+16(%rip), %r12
388 sub %r13, %r12
389
390.ifc \operation, dec
391 movdqa %xmm1, %xmm2
392.endif
393 pxor %xmm1, %xmm0 # XOR Encrypt(K, Yn)
394 movdqu (%r12), %xmm1
395 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
396 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
397.ifc \operation, dec
398 pand %xmm1, %xmm2
399 movdqa SHUF_MASK(%rip), %xmm10
400 PSHUFB_XMM %xmm10 ,%xmm2
401
402 pxor %xmm2, %xmm8
403.else
404 movdqa SHUF_MASK(%rip), %xmm10
405 PSHUFB_XMM %xmm10,%xmm0
406
407 pxor %xmm0, %xmm8
408.endif
409
410 movdqu %xmm8, AadHash(%arg2)
411.ifc \operation, enc
412 # GHASH computation for the last <16 byte block
413 movdqa SHUF_MASK(%rip), %xmm10
414 # shuffle xmm0 back to output as ciphertext
415 PSHUFB_XMM %xmm10, %xmm0
416.endif
417
418 # Output %r13 bytes
419 MOVQ_R64_XMM %xmm0, %rax
420 cmp $8, %r13
421 jle _less_than_8_bytes_left_\@
422 mov %rax, (%arg3 , %r11, 1)
423 add $8, %r11
424 psrldq $8, %xmm0
425 MOVQ_R64_XMM %xmm0, %rax
426 sub $8, %r13
427_less_than_8_bytes_left_\@:
428 mov %al, (%arg3, %r11, 1)
429 add $1, %r11
430 shr $8, %rax
431 sub $1, %r13
432 jne _less_than_8_bytes_left_\@
433_multiple_of_16_bytes_\@:
434.endm
435
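The and $-16 / and $(3<<4) arithmetic above peels off just enough single blocks that the remaining full blocks form a multiple of four for the main loop. As a C sketch (assuming len is the byte count left after PARTIAL_BLOCK has consumed any carried-over data):

	/* Blocks handled by INITIAL_BLOCKS_ENC_DEC: (len / 16) mod 4 */
	static unsigned int gcm_num_initial_blocks(unsigned long len)
	{
		unsigned long full_bytes = len & ~15UL;     /* and $-16,    %r13 */

		return (full_bytes & (3 << 4)) >> 4;        /* and $(3<<4), %r12 */
	}

A result of 0 jumps straight to the main loop; otherwise the initial-blocks path runs first, leaving a multiple of 64 bytes (possibly zero) for the 4-way loop.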
436# GCM_COMPLETE Finishes update of tag of last partial block
437# Output: Authentication Tag (AUTH_TAG)
438# Clobbers rax, r10-r12, and xmm0, xmm1, xmm5-xmm15
439.macro GCM_COMPLETE AUTHTAG AUTHTAGLEN
440 movdqu AadHash(%arg2), %xmm8
441 movdqu HashKey(%arg2), %xmm13
442
443 mov PBlockLen(%arg2), %r12
444
445 cmp $0, %r12
446 je _partial_done\@
447
448 GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
449
450_partial_done\@:
451 mov AadLen(%arg2), %r12 # %r12 = aadLen (number of bytes)
452 shl $3, %r12 # convert into number of bits
453 movd %r12d, %xmm15 # len(A) in %xmm15
454 mov InLen(%arg2), %r12
455 shl $3, %r12 # len(C) in bits (*8)
456 MOVQ_R64_XMM %r12, %xmm1
457
458 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
459 pxor %xmm1, %xmm15 # %xmm15 = len(A)||len(C)
460 pxor %xmm15, %xmm8
461 GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
462 # final GHASH computation
463 movdqa SHUF_MASK(%rip), %xmm10
464 PSHUFB_XMM %xmm10, %xmm8
465
466 movdqu OrigIV(%arg2), %xmm0 # %xmm0 = Y0
467 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Y0)
468 pxor %xmm8, %xmm0
469_return_T_\@:
470 mov \AUTHTAG, %r10 # %r10 = authTag
471 mov \AUTHTAGLEN, %r11 # %r11 = auth_tag_len
472 cmp $16, %r11
473 je _T_16_\@
474 cmp $8, %r11
475 jl _T_4_\@
476_T_8_\@:
477 MOVQ_R64_XMM %xmm0, %rax
478 mov %rax, (%r10)
479 add $8, %r10
480 sub $8, %r11
481 psrldq $8, %xmm0
482 cmp $0, %r11
483 je _return_T_done_\@
484_T_4_\@:
485 movd %xmm0, %eax
486 mov %eax, (%r10)
487 add $4, %r10
488 sub $4, %r11
489 psrldq $4, %xmm0
490 cmp $0, %r11
491 je _return_T_done_\@
492_T_123_\@:
493 movd %xmm0, %eax
494 cmp $2, %r11
495 jl _T_1_\@
496 mov %ax, (%r10)
497 cmp $2, %r11
498 je _return_T_done_\@
499 add $2, %r10
500 sar $16, %eax
501_T_1_\@:
502 mov %al, (%r10)
503 jmp _return_T_done_\@
504_T_16_\@:
505 movdqu %xmm0, (%r10)
506_return_T_done_\@:
507.endm
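In the notation of the GCM specification, what this macro finishes is, roughly:

	S = GHASH_H( AAD, C, len(AAD) || len(C) )      with both lengths in bits
	T = E(K, Y0) XOR S                             truncated to auth_tag_len bytes

AadHash carries the running S between update calls, and OrigIV preserves Y0 so the tag can be produced without disturbing the CurCount value used for encryption.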
174 508
175#ifdef __x86_64__ 509#ifdef __x86_64__
176/* GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0) 510/* GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0)
@@ -264,232 +598,188 @@ _read_next_byte_lt8_\@:
264_done_read_partial_block_\@: 598_done_read_partial_block_\@:
265.endm 599.endm
266 600
267/* 601# CALC_AAD_HASH: Calculates the hash of the data which will not be encrypted.
268* if a = number of total plaintext bytes 602# clobbers r10-11, xmm14
269* b = floor(a/16) 603.macro CALC_AAD_HASH HASHKEY AAD AADLEN TMP1 TMP2 TMP3 TMP4 TMP5 \
270* num_initial_blocks = b mod 4 604 TMP6 TMP7
271* encrypt the initial num_initial_blocks blocks and apply ghash on 605 MOVADQ SHUF_MASK(%rip), %xmm14
272* the ciphertext 606 mov \AAD, %r10 # %r10 = AAD
273* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers 607 mov \AADLEN, %r11 # %r11 = aadLen
274* are clobbered 608 pxor \TMP7, \TMP7
275* arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified 609 pxor \TMP6, \TMP6
276*/
277
278
279.macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
280XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
281 MOVADQ SHUF_MASK(%rip), %xmm14
282 mov arg7, %r10 # %r10 = AAD
283 mov arg8, %r11 # %r11 = aadLen
284 pxor %xmm\i, %xmm\i
285 pxor \XMM2, \XMM2
286 610
287 cmp $16, %r11 611 cmp $16, %r11
288 jl _get_AAD_rest\num_initial_blocks\operation 612 jl _get_AAD_rest\@
289_get_AAD_blocks\num_initial_blocks\operation: 613_get_AAD_blocks\@:
290 movdqu (%r10), %xmm\i 614 movdqu (%r10), \TMP7
291 PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data 615 PSHUFB_XMM %xmm14, \TMP7 # byte-reflect the AAD data
292 pxor %xmm\i, \XMM2 616 pxor \TMP7, \TMP6
293 GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 617 GHASH_MUL \TMP6, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
294 add $16, %r10 618 add $16, %r10
295 sub $16, %r11 619 sub $16, %r11
296 cmp $16, %r11 620 cmp $16, %r11
297 jge _get_AAD_blocks\num_initial_blocks\operation 621 jge _get_AAD_blocks\@
298 622
299 movdqu \XMM2, %xmm\i 623 movdqu \TMP6, \TMP7
300 624
301 /* read the last <16B of AAD */ 625 /* read the last <16B of AAD */
302_get_AAD_rest\num_initial_blocks\operation: 626_get_AAD_rest\@:
303 cmp $0, %r11 627 cmp $0, %r11
304 je _get_AAD_done\num_initial_blocks\operation 628 je _get_AAD_done\@
305
306 READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i
307 PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
308 pxor \XMM2, %xmm\i
309 GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
310 629
311_get_AAD_done\num_initial_blocks\operation: 630 READ_PARTIAL_BLOCK %r10, %r11, \TMP1, \TMP7
312 xor %r11, %r11 # initialise the data pointer offset as zero 631 PSHUFB_XMM %xmm14, \TMP7 # byte-reflect the AAD data
313 # start AES for num_initial_blocks blocks 632 pxor \TMP6, \TMP7
314 633 GHASH_MUL \TMP7, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
315 mov %arg5, %rax # %rax = *Y0 634 movdqu \TMP7, \TMP6
316 movdqu (%rax), \XMM0 # XMM0 = Y0
317 PSHUFB_XMM %xmm14, \XMM0
318 635
319.if (\i == 5) || (\i == 6) || (\i == 7) 636_get_AAD_done\@:
320 MOVADQ ONE(%RIP),\TMP1 637 movdqu \TMP6, AadHash(%arg2)
321 MOVADQ (%arg1),\TMP2 638.endm
322.irpc index, \i_seq
323 paddd \TMP1, \XMM0 # INCR Y0
324 movdqa \XMM0, %xmm\index
325 PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap
326 pxor \TMP2, %xmm\index
327.endr
328 lea 0x10(%arg1),%r10
329 mov keysize,%eax
330 shr $2,%eax # 128->4, 192->6, 256->8
331 add $5,%eax # 128->9, 192->11, 256->13
332
333aes_loop_initial_dec\num_initial_blocks:
334 MOVADQ (%r10),\TMP1
335.irpc index, \i_seq
336 AESENC \TMP1, %xmm\index
337.endr
338 add $16,%r10
339 sub $1,%eax
340 jnz aes_loop_initial_dec\num_initial_blocks
341
342 MOVADQ (%r10), \TMP1
343.irpc index, \i_seq
344 AESENCLAST \TMP1, %xmm\index # Last Round
345.endr
346.irpc index, \i_seq
347 movdqu (%arg3 , %r11, 1), \TMP1
348 pxor \TMP1, %xmm\index
349 movdqu %xmm\index, (%arg2 , %r11, 1)
350 # write back plaintext/ciphertext for num_initial_blocks
351 add $16, %r11
352
353 movdqa \TMP1, %xmm\index
354 PSHUFB_XMM %xmm14, %xmm\index
355 # prepare plaintext/ciphertext for GHASH computation
356.endr
357.endif
358
359 # apply GHASH on num_initial_blocks blocks
360 639
361.if \i == 5 640# PARTIAL_BLOCK: Handles encryption/decryption and the tag partial blocks
362 pxor %xmm5, %xmm6 641# between update calls.
363 GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 642# Requires the input data be at least 1 byte long due to READ_PARTIAL_BLOCK
364 pxor %xmm6, %xmm7 643# Outputs encrypted bytes, and updates hash and partial info in gcm_context_data
365 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 644# Clobbers rax, r10, r12, r13, xmm0-6, xmm9-13
366 pxor %xmm7, %xmm8 645.macro PARTIAL_BLOCK CYPH_PLAIN_OUT PLAIN_CYPH_IN PLAIN_CYPH_LEN DATA_OFFSET \
367 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 646 AAD_HASH operation
368.elseif \i == 6 647 mov PBlockLen(%arg2), %r13
369 pxor %xmm6, %xmm7 648 cmp $0, %r13
370 GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 649 je _partial_block_done_\@ # Leave Macro if no partial blocks
371 pxor %xmm7, %xmm8 650 # Read in input data without over reading
372 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 651 cmp $16, \PLAIN_CYPH_LEN
373.elseif \i == 7 652 jl _fewer_than_16_bytes_\@
374 pxor %xmm7, %xmm8 653 movups (\PLAIN_CYPH_IN), %xmm1 # If more than 16 bytes, just fill xmm
375 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 654 jmp _data_read_\@
655
656_fewer_than_16_bytes_\@:
657 lea (\PLAIN_CYPH_IN, \DATA_OFFSET, 1), %r10
658 mov \PLAIN_CYPH_LEN, %r12
659 READ_PARTIAL_BLOCK %r10 %r12 %xmm0 %xmm1
660
661 mov PBlockLen(%arg2), %r13
662
663_data_read_\@: # Finished reading in data
664
665 movdqu PBlockEncKey(%arg2), %xmm9
666 movdqu HashKey(%arg2), %xmm13
667
668 lea SHIFT_MASK(%rip), %r12
669
670 # adjust the shuffle mask pointer to be able to shift r13 bytes
671 # (r13 is the number of bytes already in the partial block)
672 add %r13, %r12
673 movdqu (%r12), %xmm2 # get the appropriate shuffle mask
674 PSHUFB_XMM %xmm2, %xmm9 # shift right r13 bytes
675
676.ifc \operation, dec
677 movdqa %xmm1, %xmm3
678 pxor %xmm1, %xmm9 # Cyphertext XOR E(K, Yn)
679
680 mov \PLAIN_CYPH_LEN, %r10
681 add %r13, %r10
682 # Set r10 to be the amount of data left in CYPH_PLAIN_IN after filling
683 sub $16, %r10
684 # Determine if the partial block is not being filled and
685 # shift mask accordingly
686 jge _no_extra_mask_1_\@
687 sub %r10, %r12
688_no_extra_mask_1_\@:
689
690 movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
691 # get the appropriate mask to mask out bottom r13 bytes of xmm9
692 pand %xmm1, %xmm9 # mask out bottom r13 bytes of xmm9
693
694 pand %xmm1, %xmm3
695 movdqa SHUF_MASK(%rip), %xmm10
696 PSHUFB_XMM %xmm10, %xmm3
697 PSHUFB_XMM %xmm2, %xmm3
698 pxor %xmm3, \AAD_HASH
699
700 cmp $0, %r10
701 jl _partial_incomplete_1_\@
702
703 # GHASH computation for the last <16 Byte block
704 GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
705 xor %rax,%rax
706
707 mov %rax, PBlockLen(%arg2)
708 jmp _dec_done_\@
709_partial_incomplete_1_\@:
710 add \PLAIN_CYPH_LEN, PBlockLen(%arg2)
711_dec_done_\@:
712 movdqu \AAD_HASH, AadHash(%arg2)
713.else
714 pxor %xmm1, %xmm9 # Plaintext XOR E(K, Yn)
715
716 mov \PLAIN_CYPH_LEN, %r10
717 add %r13, %r10
718 # Set r10 to be the amount of data left in CYPH_PLAIN_IN after filling
719 sub $16, %r10
720 # Determine if the partial block is not being filled and
721 # shift mask accordingly
722 jge _no_extra_mask_2_\@
723 sub %r10, %r12
724_no_extra_mask_2_\@:
725
726 movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
727 # get the appropriate mask to mask out bottom r13 bytes of xmm9
728 pand %xmm1, %xmm9
729
730 movdqa SHUF_MASK(%rip), %xmm1
731 PSHUFB_XMM %xmm1, %xmm9
732 PSHUFB_XMM %xmm2, %xmm9
733 pxor %xmm9, \AAD_HASH
734
735 cmp $0, %r10
736 jl _partial_incomplete_2_\@
737
738 # GHASH computation for the last <16 Byte block
739 GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
740 xor %rax,%rax
741
742 mov %rax, PBlockLen(%arg2)
743 jmp _encode_done_\@
744_partial_incomplete_2_\@:
745 add \PLAIN_CYPH_LEN, PBlockLen(%arg2)
746_encode_done_\@:
747 movdqu \AAD_HASH, AadHash(%arg2)
748
749 movdqa SHUF_MASK(%rip), %xmm10
750 # shuffle xmm9 back to output as ciphertext
751 PSHUFB_XMM %xmm10, %xmm9
752 PSHUFB_XMM %xmm2, %xmm9
376.endif 753.endif
377 cmp $64, %r13 754 # output encrypted Bytes
378 jl _initial_blocks_done\num_initial_blocks\operation 755 cmp $0, %r10
379 # no need for precomputed values 756 jl _partial_fill_\@
380/* 757 mov %r13, %r12
381* 758 mov $16, %r13
382* Precomputations for HashKey parallel with encryption of first 4 blocks. 759 # Set r13 to be the number of bytes to write out
383* Haskey_i_k holds XORed values of the low and high parts of the Haskey_i 760 sub %r12, %r13
384*/ 761 jmp _count_set_\@
385 MOVADQ ONE(%rip), \TMP1 762_partial_fill_\@:
386 paddd \TMP1, \XMM0 # INCR Y0 763 mov \PLAIN_CYPH_LEN, %r13
387 MOVADQ \XMM0, \XMM1 764_count_set_\@:
388 PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap 765 movdqa %xmm9, %xmm0
389 766 MOVQ_R64_XMM %xmm0, %rax
390 paddd \TMP1, \XMM0 # INCR Y0 767 cmp $8, %r13
391 MOVADQ \XMM0, \XMM2 768 jle _less_than_8_bytes_left_\@
392 PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
393
394 paddd \TMP1, \XMM0 # INCR Y0
395 MOVADQ \XMM0, \XMM3
396 PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
397
398 paddd \TMP1, \XMM0 # INCR Y0
399 MOVADQ \XMM0, \XMM4
400 PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
401
402 MOVADQ 0(%arg1),\TMP1
403 pxor \TMP1, \XMM1
404 pxor \TMP1, \XMM2
405 pxor \TMP1, \XMM3
406 pxor \TMP1, \XMM4
407 movdqa \TMP3, \TMP5
408 pshufd $78, \TMP3, \TMP1
409 pxor \TMP3, \TMP1
410 movdqa \TMP1, HashKey_k(%rsp)
411 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
412# TMP5 = HashKey^2<<1 (mod poly)
413 movdqa \TMP5, HashKey_2(%rsp)
414# HashKey_2 = HashKey^2<<1 (mod poly)
415 pshufd $78, \TMP5, \TMP1
416 pxor \TMP5, \TMP1
417 movdqa \TMP1, HashKey_2_k(%rsp)
418.irpc index, 1234 # do 4 rounds
419 movaps 0x10*\index(%arg1), \TMP1
420 AESENC \TMP1, \XMM1
421 AESENC \TMP1, \XMM2
422 AESENC \TMP1, \XMM3
423 AESENC \TMP1, \XMM4
424.endr
425 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
426# TMP5 = HashKey^3<<1 (mod poly)
427 movdqa \TMP5, HashKey_3(%rsp)
428 pshufd $78, \TMP5, \TMP1
429 pxor \TMP5, \TMP1
430 movdqa \TMP1, HashKey_3_k(%rsp)
431.irpc index, 56789 # do next 5 rounds
432 movaps 0x10*\index(%arg1), \TMP1
433 AESENC \TMP1, \XMM1
434 AESENC \TMP1, \XMM2
435 AESENC \TMP1, \XMM3
436 AESENC \TMP1, \XMM4
437.endr
438 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
439# TMP5 = HashKey^3<<1 (mod poly)
440 movdqa \TMP5, HashKey_4(%rsp)
441 pshufd $78, \TMP5, \TMP1
442 pxor \TMP5, \TMP1
443 movdqa \TMP1, HashKey_4_k(%rsp)
444 lea 0xa0(%arg1),%r10
445 mov keysize,%eax
446 shr $2,%eax # 128->4, 192->6, 256->8
447 sub $4,%eax # 128->0, 192->2, 256->4
448 jz aes_loop_pre_dec_done\num_initial_blocks
449
450aes_loop_pre_dec\num_initial_blocks:
451 MOVADQ (%r10),\TMP2
452.irpc index, 1234
453 AESENC \TMP2, %xmm\index
454.endr
455 add $16,%r10
456 sub $1,%eax
457 jnz aes_loop_pre_dec\num_initial_blocks
458
459aes_loop_pre_dec_done\num_initial_blocks:
460 MOVADQ (%r10), \TMP2
461 AESENCLAST \TMP2, \XMM1
462 AESENCLAST \TMP2, \XMM2
463 AESENCLAST \TMP2, \XMM3
464 AESENCLAST \TMP2, \XMM4
465 movdqu 16*0(%arg3 , %r11 , 1), \TMP1
466 pxor \TMP1, \XMM1
467 movdqu \XMM1, 16*0(%arg2 , %r11 , 1)
468 movdqa \TMP1, \XMM1
469 movdqu 16*1(%arg3 , %r11 , 1), \TMP1
470 pxor \TMP1, \XMM2
471 movdqu \XMM2, 16*1(%arg2 , %r11 , 1)
472 movdqa \TMP1, \XMM2
473 movdqu 16*2(%arg3 , %r11 , 1), \TMP1
474 pxor \TMP1, \XMM3
475 movdqu \XMM3, 16*2(%arg2 , %r11 , 1)
476 movdqa \TMP1, \XMM3
477 movdqu 16*3(%arg3 , %r11 , 1), \TMP1
478 pxor \TMP1, \XMM4
479 movdqu \XMM4, 16*3(%arg2 , %r11 , 1)
480 movdqa \TMP1, \XMM4
481 add $64, %r11
482 PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
483 pxor \XMMDst, \XMM1
484# combine GHASHed value with the corresponding ciphertext
485 PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
486 PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
487 PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
488
489_initial_blocks_done\num_initial_blocks\operation:
490
491.endm
492 769
770 mov %rax, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
771 add $8, \DATA_OFFSET
772 psrldq $8, %xmm0
773 MOVQ_R64_XMM %xmm0, %rax
774 sub $8, %r13
775_less_than_8_bytes_left_\@:
776 movb %al, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
777 add $1, \DATA_OFFSET
778 shr $8, %rax
779 sub $1, %r13
780 jne _less_than_8_bytes_left_\@
781_partial_block_done_\@:
782.endm # PARTIAL_BLOCK
493 783
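PARTIAL_BLOCK is easiest to read as bookkeeping on partial_block_length: it consumes just enough of the new input to extend, or finish and GHASH, the block carried over from the previous update call, and everything after that is left for the 16-byte-aligned paths in GCM_ENC_DEC. A rough C sketch of that accounting (hypothetical helper; assumes <linux/kernel.h> for min_t):

	static unsigned int gcm_partial_consumed(u64 *partial_block_length, u64 len)
	{
		u64 used;

		if (*partial_block_length == 0)
			return 0;                          /* nothing carried over */

		used = min_t(u64, len, 16 - *partial_block_length);
		*partial_block_length += used;
		if (*partial_block_length == 16)
			*partial_block_length = 0;         /* block completed and
							      folded into AadHash */

		return used;                               /* bytes output by this macro */
	}

The shuffle-mask gymnastics in the assembly largely exist to do this byte counting with unaligned 16-byte loads and stores while keeping the saved encrypted counter block (PBlockEncKey) lined up with the right input bytes.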
494/* 784/*
495* if a = number of total plaintext bytes 785* if a = number of total plaintext bytes
@@ -499,49 +789,19 @@ _initial_blocks_done\num_initial_blocks\operation:
499* the ciphertext 789* the ciphertext
500* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers 790* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
501* are clobbered 791* are clobbered
502* arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified 792* arg1, %arg2, %arg3 are used as a pointer only, not modified
503*/ 793*/
504 794
505 795
506.macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \ 796.macro INITIAL_BLOCKS_ENC_DEC TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
507XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation 797 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
508 MOVADQ SHUF_MASK(%rip), %xmm14 798 MOVADQ SHUF_MASK(%rip), %xmm14
509 mov arg7, %r10 # %r10 = AAD
510 mov arg8, %r11 # %r11 = aadLen
511 pxor %xmm\i, %xmm\i
512 pxor \XMM2, \XMM2
513 799
514 cmp $16, %r11 800 movdqu AadHash(%arg2), %xmm\i # XMM0 = Y0
515 jl _get_AAD_rest\num_initial_blocks\operation
516_get_AAD_blocks\num_initial_blocks\operation:
517 movdqu (%r10), %xmm\i
518 PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
519 pxor %xmm\i, \XMM2
520 GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
521 add $16, %r10
522 sub $16, %r11
523 cmp $16, %r11
524 jge _get_AAD_blocks\num_initial_blocks\operation
525 801
526 movdqu \XMM2, %xmm\i
527
528 /* read the last <16B of AAD */
529_get_AAD_rest\num_initial_blocks\operation:
530 cmp $0, %r11
531 je _get_AAD_done\num_initial_blocks\operation
532
533 READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i
534 PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
535 pxor \XMM2, %xmm\i
536 GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
537
538_get_AAD_done\num_initial_blocks\operation:
539 xor %r11, %r11 # initialise the data pointer offset as zero
540 # start AES for num_initial_blocks blocks 802 # start AES for num_initial_blocks blocks
541 803
542 mov %arg5, %rax # %rax = *Y0 804 movdqu CurCount(%arg2), \XMM0 # XMM0 = Y0
543 movdqu (%rax), \XMM0 # XMM0 = Y0
544 PSHUFB_XMM %xmm14, \XMM0
545 805
546.if (\i == 5) || (\i == 6) || (\i == 7) 806.if (\i == 5) || (\i == 6) || (\i == 7)
547 807
@@ -549,7 +809,11 @@ _get_AAD_done\num_initial_blocks\operation:
549 MOVADQ 0(%arg1),\TMP2 809 MOVADQ 0(%arg1),\TMP2
550.irpc index, \i_seq 810.irpc index, \i_seq
551 paddd \TMP1, \XMM0 # INCR Y0 811 paddd \TMP1, \XMM0 # INCR Y0
812.ifc \operation, dec
813 movdqa \XMM0, %xmm\index
814.else
552 MOVADQ \XMM0, %xmm\index 815 MOVADQ \XMM0, %xmm\index
816.endif
553 PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap 817 PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap
554 pxor \TMP2, %xmm\index 818 pxor \TMP2, %xmm\index
555.endr 819.endr
@@ -558,25 +822,29 @@ _get_AAD_done\num_initial_blocks\operation:
558 shr $2,%eax # 128->4, 192->6, 256->8 822 shr $2,%eax # 128->4, 192->6, 256->8
559 add $5,%eax # 128->9, 192->11, 256->13 823 add $5,%eax # 128->9, 192->11, 256->13
560 824
561aes_loop_initial_enc\num_initial_blocks: 825aes_loop_initial_\@:
562 MOVADQ (%r10),\TMP1 826 MOVADQ (%r10),\TMP1
563.irpc index, \i_seq 827.irpc index, \i_seq
564 AESENC \TMP1, %xmm\index 828 AESENC \TMP1, %xmm\index
565.endr 829.endr
566 add $16,%r10 830 add $16,%r10
567 sub $1,%eax 831 sub $1,%eax
568 jnz aes_loop_initial_enc\num_initial_blocks 832 jnz aes_loop_initial_\@
569 833
570 MOVADQ (%r10), \TMP1 834 MOVADQ (%r10), \TMP1
571.irpc index, \i_seq 835.irpc index, \i_seq
572 AESENCLAST \TMP1, %xmm\index # Last Round 836 AESENCLAST \TMP1, %xmm\index # Last Round
573.endr 837.endr
574.irpc index, \i_seq 838.irpc index, \i_seq
575 movdqu (%arg3 , %r11, 1), \TMP1 839 movdqu (%arg4 , %r11, 1), \TMP1
576 pxor \TMP1, %xmm\index 840 pxor \TMP1, %xmm\index
577 movdqu %xmm\index, (%arg2 , %r11, 1) 841 movdqu %xmm\index, (%arg3 , %r11, 1)
578 # write back plaintext/ciphertext for num_initial_blocks 842 # write back plaintext/ciphertext for num_initial_blocks
579 add $16, %r11 843 add $16, %r11
844
845.ifc \operation, dec
846 movdqa \TMP1, %xmm\index
847.endif
580 PSHUFB_XMM %xmm14, %xmm\index 848 PSHUFB_XMM %xmm14, %xmm\index
581 849
582 # prepare plaintext/ciphertext for GHASH computation 850 # prepare plaintext/ciphertext for GHASH computation
@@ -602,7 +870,7 @@ aes_loop_initial_enc\num_initial_blocks:
602 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 870 GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
603.endif 871.endif
604 cmp $64, %r13 872 cmp $64, %r13
605 jl _initial_blocks_done\num_initial_blocks\operation 873 jl _initial_blocks_done\@
606 # no need for precomputed values 874 # no need for precomputed values
607/* 875/*
608* 876*
@@ -631,17 +899,6 @@ aes_loop_initial_enc\num_initial_blocks:
631 pxor \TMP1, \XMM2 899 pxor \TMP1, \XMM2
632 pxor \TMP1, \XMM3 900 pxor \TMP1, \XMM3
633 pxor \TMP1, \XMM4 901 pxor \TMP1, \XMM4
634 movdqa \TMP3, \TMP5
635 pshufd $78, \TMP3, \TMP1
636 pxor \TMP3, \TMP1
637 movdqa \TMP1, HashKey_k(%rsp)
638 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
639# TMP5 = HashKey^2<<1 (mod poly)
640 movdqa \TMP5, HashKey_2(%rsp)
641# HashKey_2 = HashKey^2<<1 (mod poly)
642 pshufd $78, \TMP5, \TMP1
643 pxor \TMP5, \TMP1
644 movdqa \TMP1, HashKey_2_k(%rsp)
645.irpc index, 1234 # do 4 rounds 902.irpc index, 1234 # do 4 rounds
646 movaps 0x10*\index(%arg1), \TMP1 903 movaps 0x10*\index(%arg1), \TMP1
647 AESENC \TMP1, \XMM1 904 AESENC \TMP1, \XMM1
@@ -649,12 +906,6 @@ aes_loop_initial_enc\num_initial_blocks:
649 AESENC \TMP1, \XMM3 906 AESENC \TMP1, \XMM3
650 AESENC \TMP1, \XMM4 907 AESENC \TMP1, \XMM4
651.endr 908.endr
652 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
653# TMP5 = HashKey^3<<1 (mod poly)
654 movdqa \TMP5, HashKey_3(%rsp)
655 pshufd $78, \TMP5, \TMP1
656 pxor \TMP5, \TMP1
657 movdqa \TMP1, HashKey_3_k(%rsp)
658.irpc index, 56789 # do next 5 rounds 909.irpc index, 56789 # do next 5 rounds
659 movaps 0x10*\index(%arg1), \TMP1 910 movaps 0x10*\index(%arg1), \TMP1
660 AESENC \TMP1, \XMM1 911 AESENC \TMP1, \XMM1
@@ -662,45 +913,56 @@ aes_loop_initial_enc\num_initial_blocks:
662 AESENC \TMP1, \XMM3 913 AESENC \TMP1, \XMM3
663 AESENC \TMP1, \XMM4 914 AESENC \TMP1, \XMM4
664.endr 915.endr
665 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
666# TMP5 = HashKey^3<<1 (mod poly)
667 movdqa \TMP5, HashKey_4(%rsp)
668 pshufd $78, \TMP5, \TMP1
669 pxor \TMP5, \TMP1
670 movdqa \TMP1, HashKey_4_k(%rsp)
671 lea 0xa0(%arg1),%r10 916 lea 0xa0(%arg1),%r10
672 mov keysize,%eax 917 mov keysize,%eax
673 shr $2,%eax # 128->4, 192->6, 256->8 918 shr $2,%eax # 128->4, 192->6, 256->8
674 sub $4,%eax # 128->0, 192->2, 256->4 919 sub $4,%eax # 128->0, 192->2, 256->4
675 jz aes_loop_pre_enc_done\num_initial_blocks 920 jz aes_loop_pre_done\@
676 921
677aes_loop_pre_enc\num_initial_blocks: 922aes_loop_pre_\@:
678 MOVADQ (%r10),\TMP2 923 MOVADQ (%r10),\TMP2
679.irpc index, 1234 924.irpc index, 1234
680 AESENC \TMP2, %xmm\index 925 AESENC \TMP2, %xmm\index
681.endr 926.endr
682 add $16,%r10 927 add $16,%r10
683 sub $1,%eax 928 sub $1,%eax
684 jnz aes_loop_pre_enc\num_initial_blocks 929 jnz aes_loop_pre_\@
685 930
686aes_loop_pre_enc_done\num_initial_blocks: 931aes_loop_pre_done\@:
687 MOVADQ (%r10), \TMP2 932 MOVADQ (%r10), \TMP2
688 AESENCLAST \TMP2, \XMM1 933 AESENCLAST \TMP2, \XMM1
689 AESENCLAST \TMP2, \XMM2 934 AESENCLAST \TMP2, \XMM2
690 AESENCLAST \TMP2, \XMM3 935 AESENCLAST \TMP2, \XMM3
691 AESENCLAST \TMP2, \XMM4 936 AESENCLAST \TMP2, \XMM4
692 movdqu 16*0(%arg3 , %r11 , 1), \TMP1 937 movdqu 16*0(%arg4 , %r11 , 1), \TMP1
693 pxor \TMP1, \XMM1 938 pxor \TMP1, \XMM1
694 movdqu 16*1(%arg3 , %r11 , 1), \TMP1 939.ifc \operation, dec
940 movdqu \XMM1, 16*0(%arg3 , %r11 , 1)
941 movdqa \TMP1, \XMM1
942.endif
943 movdqu 16*1(%arg4 , %r11 , 1), \TMP1
695 pxor \TMP1, \XMM2 944 pxor \TMP1, \XMM2
696 movdqu 16*2(%arg3 , %r11 , 1), \TMP1 945.ifc \operation, dec
946 movdqu \XMM2, 16*1(%arg3 , %r11 , 1)
947 movdqa \TMP1, \XMM2
948.endif
949 movdqu 16*2(%arg4 , %r11 , 1), \TMP1
697 pxor \TMP1, \XMM3 950 pxor \TMP1, \XMM3
698 movdqu 16*3(%arg3 , %r11 , 1), \TMP1 951.ifc \operation, dec
952 movdqu \XMM3, 16*2(%arg3 , %r11 , 1)
953 movdqa \TMP1, \XMM3
954.endif
955 movdqu 16*3(%arg4 , %r11 , 1), \TMP1
699 pxor \TMP1, \XMM4 956 pxor \TMP1, \XMM4
700 movdqu \XMM1, 16*0(%arg2 , %r11 , 1) 957.ifc \operation, dec
701 movdqu \XMM2, 16*1(%arg2 , %r11 , 1) 958 movdqu \XMM4, 16*3(%arg3 , %r11 , 1)
702 movdqu \XMM3, 16*2(%arg2 , %r11 , 1) 959 movdqa \TMP1, \XMM4
703 movdqu \XMM4, 16*3(%arg2 , %r11 , 1) 960.else
961 movdqu \XMM1, 16*0(%arg3 , %r11 , 1)
962 movdqu \XMM2, 16*1(%arg3 , %r11 , 1)
963 movdqu \XMM3, 16*2(%arg3 , %r11 , 1)
964 movdqu \XMM4, 16*3(%arg3 , %r11 , 1)
965.endif
704 966
705 add $64, %r11 967 add $64, %r11
706 PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap 968 PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
@@ -710,14 +972,14 @@ aes_loop_pre_enc_done\num_initial_blocks:
710 PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap 972 PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
711 PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap 973 PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
712 974
713_initial_blocks_done\num_initial_blocks\operation: 975_initial_blocks_done\@:
714 976
715.endm 977.endm
716 978
717/* 979/*
718* encrypt 4 blocks at a time 980* encrypt 4 blocks at a time
719* ghash the 4 previously encrypted ciphertext blocks 981* ghash the 4 previously encrypted ciphertext blocks
720* arg1, %arg2, %arg3 are used as pointers only, not modified 982* arg1, %arg3, %arg4 are used as pointers only, not modified
721* %r11 is the data offset value 983* %r11 is the data offset value
722*/ 984*/
723.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \ 985.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \
@@ -735,7 +997,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
735 pshufd $78, \XMM5, \TMP6 997 pshufd $78, \XMM5, \TMP6
736 pxor \XMM5, \TMP6 998 pxor \XMM5, \TMP6
737 paddd ONE(%rip), \XMM0 # INCR CNT 999 paddd ONE(%rip), \XMM0 # INCR CNT
738 movdqa HashKey_4(%rsp), \TMP5 1000 movdqa HashKey_4(%arg2), \TMP5
739 PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 1001 PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
740 movdqa \XMM0, \XMM1 1002 movdqa \XMM0, \XMM1
741 paddd ONE(%rip), \XMM0 # INCR CNT 1003 paddd ONE(%rip), \XMM0 # INCR CNT
@@ -754,7 +1016,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
754 pxor (%arg1), \XMM2 1016 pxor (%arg1), \XMM2
755 pxor (%arg1), \XMM3 1017 pxor (%arg1), \XMM3
756 pxor (%arg1), \XMM4 1018 pxor (%arg1), \XMM4
757 movdqa HashKey_4_k(%rsp), \TMP5 1019 movdqa HashKey_4_k(%arg2), \TMP5
758 PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) 1020 PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
759 movaps 0x10(%arg1), \TMP1 1021 movaps 0x10(%arg1), \TMP1
760 AESENC \TMP1, \XMM1 # Round 1 1022 AESENC \TMP1, \XMM1 # Round 1
@@ -769,7 +1031,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
769 movdqa \XMM6, \TMP1 1031 movdqa \XMM6, \TMP1
770 pshufd $78, \XMM6, \TMP2 1032 pshufd $78, \XMM6, \TMP2
771 pxor \XMM6, \TMP2 1033 pxor \XMM6, \TMP2
772 movdqa HashKey_3(%rsp), \TMP5 1034 movdqa HashKey_3(%arg2), \TMP5
773 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 1035 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
774 movaps 0x30(%arg1), \TMP3 1036 movaps 0x30(%arg1), \TMP3
775 AESENC \TMP3, \XMM1 # Round 3 1037 AESENC \TMP3, \XMM1 # Round 3
@@ -782,7 +1044,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
782 AESENC \TMP3, \XMM2 1044 AESENC \TMP3, \XMM2
783 AESENC \TMP3, \XMM3 1045 AESENC \TMP3, \XMM3
784 AESENC \TMP3, \XMM4 1046 AESENC \TMP3, \XMM4
785 movdqa HashKey_3_k(%rsp), \TMP5 1047 movdqa HashKey_3_k(%arg2), \TMP5
786 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) 1048 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
787 movaps 0x50(%arg1), \TMP3 1049 movaps 0x50(%arg1), \TMP3
788 AESENC \TMP3, \XMM1 # Round 5 1050 AESENC \TMP3, \XMM1 # Round 5
@@ -796,7 +1058,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
796 movdqa \XMM7, \TMP1 1058 movdqa \XMM7, \TMP1
797 pshufd $78, \XMM7, \TMP2 1059 pshufd $78, \XMM7, \TMP2
798 pxor \XMM7, \TMP2 1060 pxor \XMM7, \TMP2
799 movdqa HashKey_2(%rsp ), \TMP5 1061 movdqa HashKey_2(%arg2), \TMP5
800 1062
801 # Multiply TMP5 * HashKey using karatsuba 1063 # Multiply TMP5 * HashKey using karatsuba
802 1064
@@ -812,7 +1074,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
812 AESENC \TMP3, \XMM2 1074 AESENC \TMP3, \XMM2
813 AESENC \TMP3, \XMM3 1075 AESENC \TMP3, \XMM3
814 AESENC \TMP3, \XMM4 1076 AESENC \TMP3, \XMM4
815 movdqa HashKey_2_k(%rsp), \TMP5 1077 movdqa HashKey_2_k(%arg2), \TMP5
816 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) 1078 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
817 movaps 0x80(%arg1), \TMP3 1079 movaps 0x80(%arg1), \TMP3
818 AESENC \TMP3, \XMM1 # Round 8 1080 AESENC \TMP3, \XMM1 # Round 8
@@ -830,7 +1092,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
830 movdqa \XMM8, \TMP1 1092 movdqa \XMM8, \TMP1
831 pshufd $78, \XMM8, \TMP2 1093 pshufd $78, \XMM8, \TMP2
832 pxor \XMM8, \TMP2 1094 pxor \XMM8, \TMP2
833 movdqa HashKey(%rsp), \TMP5 1095 movdqa HashKey(%arg2), \TMP5
834 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 1096 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
835 movaps 0x90(%arg1), \TMP3 1097 movaps 0x90(%arg1), \TMP3
836 AESENC \TMP3, \XMM1 # Round 9 1098 AESENC \TMP3, \XMM1 # Round 9
@@ -842,37 +1104,37 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
842 mov keysize,%eax 1104 mov keysize,%eax
843 shr $2,%eax # 128->4, 192->6, 256->8 1105 shr $2,%eax # 128->4, 192->6, 256->8
844 sub $4,%eax # 128->0, 192->2, 256->4 1106 sub $4,%eax # 128->0, 192->2, 256->4
845 jz aes_loop_par_enc_done 1107 jz aes_loop_par_enc_done\@
846 1108
847aes_loop_par_enc: 1109aes_loop_par_enc\@:
848 MOVADQ (%r10),\TMP3 1110 MOVADQ (%r10),\TMP3
849.irpc index, 1234 1111.irpc index, 1234
850 AESENC \TMP3, %xmm\index 1112 AESENC \TMP3, %xmm\index
851.endr 1113.endr
852 add $16,%r10 1114 add $16,%r10
853 sub $1,%eax 1115 sub $1,%eax
854 jnz aes_loop_par_enc 1116 jnz aes_loop_par_enc\@
855 1117
856aes_loop_par_enc_done: 1118aes_loop_par_enc_done\@:
857 MOVADQ (%r10), \TMP3 1119 MOVADQ (%r10), \TMP3
858 AESENCLAST \TMP3, \XMM1 # Round 10 1120 AESENCLAST \TMP3, \XMM1 # Round 10
859 AESENCLAST \TMP3, \XMM2 1121 AESENCLAST \TMP3, \XMM2
860 AESENCLAST \TMP3, \XMM3 1122 AESENCLAST \TMP3, \XMM3
861 AESENCLAST \TMP3, \XMM4 1123 AESENCLAST \TMP3, \XMM4
862 movdqa HashKey_k(%rsp), \TMP5 1124 movdqa HashKey_k(%arg2), \TMP5
863 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) 1125 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
864 movdqu (%arg3,%r11,1), \TMP3 1126 movdqu (%arg4,%r11,1), \TMP3
865 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK 1127 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
866 movdqu 16(%arg3,%r11,1), \TMP3 1128 movdqu 16(%arg4,%r11,1), \TMP3
867 pxor \TMP3, \XMM2 # Ciphertext/Plaintext XOR EK 1129 pxor \TMP3, \XMM2 # Ciphertext/Plaintext XOR EK
868 movdqu 32(%arg3,%r11,1), \TMP3 1130 movdqu 32(%arg4,%r11,1), \TMP3
869 pxor \TMP3, \XMM3 # Ciphertext/Plaintext XOR EK 1131 pxor \TMP3, \XMM3 # Ciphertext/Plaintext XOR EK
870 movdqu 48(%arg3,%r11,1), \TMP3 1132 movdqu 48(%arg4,%r11,1), \TMP3
871 pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK 1133 pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK
872 movdqu \XMM1, (%arg2,%r11,1) # Write to the ciphertext buffer 1134 movdqu \XMM1, (%arg3,%r11,1) # Write to the ciphertext buffer
873 movdqu \XMM2, 16(%arg2,%r11,1) # Write to the ciphertext buffer 1135 movdqu \XMM2, 16(%arg3,%r11,1) # Write to the ciphertext buffer
874 movdqu \XMM3, 32(%arg2,%r11,1) # Write to the ciphertext buffer 1136 movdqu \XMM3, 32(%arg3,%r11,1) # Write to the ciphertext buffer
875 movdqu \XMM4, 48(%arg2,%r11,1) # Write to the ciphertext buffer 1137 movdqu \XMM4, 48(%arg3,%r11,1) # Write to the ciphertext buffer
876 PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap 1138 PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap
877 PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap 1139 PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap
878 PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap 1140 PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap
@@ -925,7 +1187,7 @@ aes_loop_par_enc_done:
925/* 1187/*
926* decrypt 4 blocks at a time 1188* decrypt 4 blocks at a time
927* ghash the 4 previously decrypted ciphertext blocks 1189* ghash the 4 previously decrypted ciphertext blocks
928* arg1, %arg2, %arg3 are used as pointers only, not modified 1190* arg1, %arg3, %arg4 are used as pointers only, not modified
929* %r11 is the data offset value 1191* %r11 is the data offset value
930*/ 1192*/
931.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \ 1193.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \
@@ -943,7 +1205,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
943 pshufd $78, \XMM5, \TMP6 1205 pshufd $78, \XMM5, \TMP6
944 pxor \XMM5, \TMP6 1206 pxor \XMM5, \TMP6
945 paddd ONE(%rip), \XMM0 # INCR CNT 1207 paddd ONE(%rip), \XMM0 # INCR CNT
946 movdqa HashKey_4(%rsp), \TMP5 1208 movdqa HashKey_4(%arg2), \TMP5
947 PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 1209 PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
948 movdqa \XMM0, \XMM1 1210 movdqa \XMM0, \XMM1
949 paddd ONE(%rip), \XMM0 # INCR CNT 1211 paddd ONE(%rip), \XMM0 # INCR CNT
@@ -962,7 +1224,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
962 pxor (%arg1), \XMM2 1224 pxor (%arg1), \XMM2
963 pxor (%arg1), \XMM3 1225 pxor (%arg1), \XMM3
964 pxor (%arg1), \XMM4 1226 pxor (%arg1), \XMM4
965 movdqa HashKey_4_k(%rsp), \TMP5 1227 movdqa HashKey_4_k(%arg2), \TMP5
966 PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) 1228 PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
967 movaps 0x10(%arg1), \TMP1 1229 movaps 0x10(%arg1), \TMP1
968 AESENC \TMP1, \XMM1 # Round 1 1230 AESENC \TMP1, \XMM1 # Round 1
@@ -977,7 +1239,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
977 movdqa \XMM6, \TMP1 1239 movdqa \XMM6, \TMP1
978 pshufd $78, \XMM6, \TMP2 1240 pshufd $78, \XMM6, \TMP2
979 pxor \XMM6, \TMP2 1241 pxor \XMM6, \TMP2
980 movdqa HashKey_3(%rsp), \TMP5 1242 movdqa HashKey_3(%arg2), \TMP5
981 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 1243 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
982 movaps 0x30(%arg1), \TMP3 1244 movaps 0x30(%arg1), \TMP3
983 AESENC \TMP3, \XMM1 # Round 3 1245 AESENC \TMP3, \XMM1 # Round 3
@@ -990,7 +1252,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
990 AESENC \TMP3, \XMM2 1252 AESENC \TMP3, \XMM2
991 AESENC \TMP3, \XMM3 1253 AESENC \TMP3, \XMM3
992 AESENC \TMP3, \XMM4 1254 AESENC \TMP3, \XMM4
993 movdqa HashKey_3_k(%rsp), \TMP5 1255 movdqa HashKey_3_k(%arg2), \TMP5
994 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) 1256 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
995 movaps 0x50(%arg1), \TMP3 1257 movaps 0x50(%arg1), \TMP3
996 AESENC \TMP3, \XMM1 # Round 5 1258 AESENC \TMP3, \XMM1 # Round 5
@@ -1004,7 +1266,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
1004 movdqa \XMM7, \TMP1 1266 movdqa \XMM7, \TMP1
1005 pshufd $78, \XMM7, \TMP2 1267 pshufd $78, \XMM7, \TMP2
1006 pxor \XMM7, \TMP2 1268 pxor \XMM7, \TMP2
1007 movdqa HashKey_2(%rsp ), \TMP5 1269 movdqa HashKey_2(%arg2), \TMP5
1008 1270
1009 # Multiply TMP5 * HashKey using karatsuba 1271 # Multiply TMP5 * HashKey using karatsuba
1010 1272
@@ -1020,7 +1282,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
1020 AESENC \TMP3, \XMM2 1282 AESENC \TMP3, \XMM2
1021 AESENC \TMP3, \XMM3 1283 AESENC \TMP3, \XMM3
1022 AESENC \TMP3, \XMM4 1284 AESENC \TMP3, \XMM4
1023 movdqa HashKey_2_k(%rsp), \TMP5 1285 movdqa HashKey_2_k(%arg2), \TMP5
1024 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) 1286 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1025 movaps 0x80(%arg1), \TMP3 1287 movaps 0x80(%arg1), \TMP3
1026 AESENC \TMP3, \XMM1 # Round 8 1288 AESENC \TMP3, \XMM1 # Round 8
@@ -1038,7 +1300,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
1038 movdqa \XMM8, \TMP1 1300 movdqa \XMM8, \TMP1
1039 pshufd $78, \XMM8, \TMP2 1301 pshufd $78, \XMM8, \TMP2
1040 pxor \XMM8, \TMP2 1302 pxor \XMM8, \TMP2
1041 movdqa HashKey(%rsp), \TMP5 1303 movdqa HashKey(%arg2), \TMP5
1042 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 1304 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1043 movaps 0x90(%arg1), \TMP3 1305 movaps 0x90(%arg1), \TMP3
1044 AESENC \TMP3, \XMM1 # Round 9 1306 AESENC \TMP3, \XMM1 # Round 9
@@ -1050,40 +1312,40 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
1050 mov keysize,%eax 1312 mov keysize,%eax
1051 shr $2,%eax # 128->4, 192->6, 256->8 1313 shr $2,%eax # 128->4, 192->6, 256->8
1052 sub $4,%eax # 128->0, 192->2, 256->4 1314 sub $4,%eax # 128->0, 192->2, 256->4
1053 jz aes_loop_par_dec_done 1315 jz aes_loop_par_dec_done\@
1054 1316
1055aes_loop_par_dec: 1317aes_loop_par_dec\@:
1056 MOVADQ (%r10),\TMP3 1318 MOVADQ (%r10),\TMP3
1057.irpc index, 1234 1319.irpc index, 1234
1058 AESENC \TMP3, %xmm\index 1320 AESENC \TMP3, %xmm\index
1059.endr 1321.endr
1060 add $16,%r10 1322 add $16,%r10
1061 sub $1,%eax 1323 sub $1,%eax
1062 jnz aes_loop_par_dec 1324 jnz aes_loop_par_dec\@
1063 1325
1064aes_loop_par_dec_done: 1326aes_loop_par_dec_done\@:
1065 MOVADQ (%r10), \TMP3 1327 MOVADQ (%r10), \TMP3
1066 AESENCLAST \TMP3, \XMM1 # last round 1328 AESENCLAST \TMP3, \XMM1 # last round
1067 AESENCLAST \TMP3, \XMM2 1329 AESENCLAST \TMP3, \XMM2
1068 AESENCLAST \TMP3, \XMM3 1330 AESENCLAST \TMP3, \XMM3
1069 AESENCLAST \TMP3, \XMM4 1331 AESENCLAST \TMP3, \XMM4
1070 movdqa HashKey_k(%rsp), \TMP5 1332 movdqa HashKey_k(%arg2), \TMP5
1071 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) 1333 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1072 movdqu (%arg3,%r11,1), \TMP3 1334 movdqu (%arg4,%r11,1), \TMP3
1073 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK 1335 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
1074 movdqu \XMM1, (%arg2,%r11,1) # Write to plaintext buffer 1336 movdqu \XMM1, (%arg3,%r11,1) # Write to plaintext buffer
1075 movdqa \TMP3, \XMM1 1337 movdqa \TMP3, \XMM1
1076 movdqu 16(%arg3,%r11,1), \TMP3 1338 movdqu 16(%arg4,%r11,1), \TMP3
1077 pxor \TMP3, \XMM2 # Ciphertext/Plaintext XOR EK 1339 pxor \TMP3, \XMM2 # Ciphertext/Plaintext XOR EK
1078 movdqu \XMM2, 16(%arg2,%r11,1) # Write to plaintext buffer 1340 movdqu \XMM2, 16(%arg3,%r11,1) # Write to plaintext buffer
1079 movdqa \TMP3, \XMM2 1341 movdqa \TMP3, \XMM2
1080 movdqu 32(%arg3,%r11,1), \TMP3 1342 movdqu 32(%arg4,%r11,1), \TMP3
1081 pxor \TMP3, \XMM3 # Ciphertext/Plaintext XOR EK 1343 pxor \TMP3, \XMM3 # Ciphertext/Plaintext XOR EK
1082 movdqu \XMM3, 32(%arg2,%r11,1) # Write to plaintext buffer 1344 movdqu \XMM3, 32(%arg3,%r11,1) # Write to plaintext buffer
1083 movdqa \TMP3, \XMM3 1345 movdqa \TMP3, \XMM3
1084 movdqu 48(%arg3,%r11,1), \TMP3 1346 movdqu 48(%arg4,%r11,1), \TMP3
1085 pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK 1347 pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK
1086 movdqu \XMM4, 48(%arg2,%r11,1) # Write to plaintext buffer 1348 movdqu \XMM4, 48(%arg3,%r11,1) # Write to plaintext buffer
1087 movdqa \TMP3, \XMM4 1349 movdqa \TMP3, \XMM4
1088 PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap 1350 PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap
1089 PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap 1351 PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap
@@ -1143,10 +1405,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
1143 movdqa \XMM1, \TMP6 1405 movdqa \XMM1, \TMP6
1144 pshufd $78, \XMM1, \TMP2 1406 pshufd $78, \XMM1, \TMP2
1145 pxor \XMM1, \TMP2 1407 pxor \XMM1, \TMP2
1146 movdqa HashKey_4(%rsp), \TMP5 1408 movdqa HashKey_4(%arg2), \TMP5
1147 PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1 1409 PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1
1148 PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0 1410 PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0
1149 movdqa HashKey_4_k(%rsp), \TMP4 1411 movdqa HashKey_4_k(%arg2), \TMP4
1150 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) 1412 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1151 movdqa \XMM1, \XMMDst 1413 movdqa \XMM1, \XMMDst
1152 movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1 1414 movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1
@@ -1156,10 +1418,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
1156 movdqa \XMM2, \TMP1 1418 movdqa \XMM2, \TMP1
1157 pshufd $78, \XMM2, \TMP2 1419 pshufd $78, \XMM2, \TMP2
1158 pxor \XMM2, \TMP2 1420 pxor \XMM2, \TMP2
1159 movdqa HashKey_3(%rsp), \TMP5 1421 movdqa HashKey_3(%arg2), \TMP5
1160 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 1422 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1161 PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0 1423 PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0
1162 movdqa HashKey_3_k(%rsp), \TMP4 1424 movdqa HashKey_3_k(%arg2), \TMP4
1163 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) 1425 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1164 pxor \TMP1, \TMP6 1426 pxor \TMP1, \TMP6
1165 pxor \XMM2, \XMMDst 1427 pxor \XMM2, \XMMDst
@@ -1171,10 +1433,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
1171 movdqa \XMM3, \TMP1 1433 movdqa \XMM3, \TMP1
1172 pshufd $78, \XMM3, \TMP2 1434 pshufd $78, \XMM3, \TMP2
1173 pxor \XMM3, \TMP2 1435 pxor \XMM3, \TMP2
1174 movdqa HashKey_2(%rsp), \TMP5 1436 movdqa HashKey_2(%arg2), \TMP5
1175 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 1437 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1176 PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0 1438 PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0
1177 movdqa HashKey_2_k(%rsp), \TMP4 1439 movdqa HashKey_2_k(%arg2), \TMP4
1178 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) 1440 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1179 pxor \TMP1, \TMP6 1441 pxor \TMP1, \TMP6
1180 pxor \XMM3, \XMMDst 1442 pxor \XMM3, \XMMDst
@@ -1184,10 +1446,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
1184 movdqa \XMM4, \TMP1 1446 movdqa \XMM4, \TMP1
1185 pshufd $78, \XMM4, \TMP2 1447 pshufd $78, \XMM4, \TMP2
1186 pxor \XMM4, \TMP2 1448 pxor \XMM4, \TMP2
1187 movdqa HashKey(%rsp), \TMP5 1449 movdqa HashKey(%arg2), \TMP5
1188 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 1450 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1189 PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0 1451 PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0
1190 movdqa HashKey_k(%rsp), \TMP4 1452 movdqa HashKey_k(%arg2), \TMP4
1191 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) 1453 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1192 pxor \TMP1, \TMP6 1454 pxor \TMP1, \TMP6
1193 pxor \XMM4, \XMMDst 1455 pxor \XMM4, \XMMDst
@@ -1256,6 +1518,8 @@ _esb_loop_\@:
1256.endm 1518.endm
1257/***************************************************************************** 1519/*****************************************************************************
1258* void aesni_gcm_dec(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. 1520* void aesni_gcm_dec(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
1521* struct gcm_context_data *data
1522* // Context data
1259 * u8 *out, // Plaintext output. Decrypt in-place is allowed. 1523 * u8 *out, // Plaintext output. Decrypt in-place is allowed.
1260* const u8 *in, // Ciphertext input 1524* const u8 *in, // Ciphertext input
1261* u64 plaintext_len, // Length of data in bytes for decryption. 1525* u64 plaintext_len, // Length of data in bytes for decryption.
@@ -1333,195 +1597,20 @@ _esb_loop_\@:
1333* 1597*
1334*****************************************************************************/ 1598*****************************************************************************/
1335ENTRY(aesni_gcm_dec) 1599ENTRY(aesni_gcm_dec)
1336 push %r12 1600 FUNC_SAVE
1337 push %r13
1338 push %r14
1339 mov %rsp, %r14
1340/*
1341* states of %xmm registers %xmm6:%xmm15 not saved
1342* all %xmm registers are clobbered
1343*/
1344 sub $VARIABLE_OFFSET, %rsp
1345 and $~63, %rsp # align rsp to 64 bytes
1346 mov %arg6, %r12
1347 movdqu (%r12), %xmm13 # %xmm13 = HashKey
1348 movdqa SHUF_MASK(%rip), %xmm2
1349 PSHUFB_XMM %xmm2, %xmm13
1350
1351
1352# Precompute HashKey<<1 (mod poly) from the hash key (required for GHASH)
1353
1354 movdqa %xmm13, %xmm2
1355 psllq $1, %xmm13
1356 psrlq $63, %xmm2
1357 movdqa %xmm2, %xmm1
1358 pslldq $8, %xmm2
1359 psrldq $8, %xmm1
1360 por %xmm2, %xmm13
1361
1362 # Reduction
1363
1364 pshufd $0x24, %xmm1, %xmm2
1365 pcmpeqd TWOONE(%rip), %xmm2
1366 pand POLY(%rip), %xmm2
1367 pxor %xmm2, %xmm13 # %xmm13 holds the HashKey<<1 (mod poly)
1368
1369
1370 # Decrypt first few blocks
1371
1372 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
1373 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
1374 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
1375 mov %r13, %r12
1376 and $(3<<4), %r12
1377 jz _initial_num_blocks_is_0_decrypt
1378 cmp $(2<<4), %r12
1379 jb _initial_num_blocks_is_1_decrypt
1380 je _initial_num_blocks_is_2_decrypt
1381_initial_num_blocks_is_3_decrypt:
1382 INITIAL_BLOCKS_DEC 3, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
1383%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, dec
1384 sub $48, %r13
1385 jmp _initial_blocks_decrypted
1386_initial_num_blocks_is_2_decrypt:
1387 INITIAL_BLOCKS_DEC 2, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
1388%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, dec
1389 sub $32, %r13
1390 jmp _initial_blocks_decrypted
1391_initial_num_blocks_is_1_decrypt:
1392 INITIAL_BLOCKS_DEC 1, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
1393%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, dec
1394 sub $16, %r13
1395 jmp _initial_blocks_decrypted
1396_initial_num_blocks_is_0_decrypt:
1397 INITIAL_BLOCKS_DEC 0, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
1398%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, dec
1399_initial_blocks_decrypted:
1400 cmp $0, %r13
1401 je _zero_cipher_left_decrypt
1402 sub $64, %r13
1403 je _four_cipher_left_decrypt
1404_decrypt_by_4:
1405 GHASH_4_ENCRYPT_4_PARALLEL_DEC %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, \
1406%xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, dec
1407 add $64, %r11
1408 sub $64, %r13
1409 jne _decrypt_by_4
1410_four_cipher_left_decrypt:
1411 GHASH_LAST_4 %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \
1412%xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8
1413_zero_cipher_left_decrypt:
1414 mov %arg4, %r13
1415 and $15, %r13 # %r13 = arg4 (mod 16)
1416 je _multiple_of_16_bytes_decrypt
1417
1418 # Handle the last <16 byte block separately
1419
1420 paddd ONE(%rip), %xmm0 # increment CNT to get Yn
1421 movdqa SHUF_MASK(%rip), %xmm10
1422 PSHUFB_XMM %xmm10, %xmm0
1423
1424 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn)
1425
1426 lea (%arg3,%r11,1), %r10
1427 mov %r13, %r12
1428 READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
1429
1430 lea ALL_F+16(%rip), %r12
1431 sub %r13, %r12
1432 movdqa %xmm1, %xmm2
1433 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
1434 movdqu (%r12), %xmm1
1435 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
1436 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
1437 pand %xmm1, %xmm2
1438 movdqa SHUF_MASK(%rip), %xmm10
1439 PSHUFB_XMM %xmm10 ,%xmm2
1440
1441 pxor %xmm2, %xmm8
1442 GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
1443
1444 # output %r13 bytes
1445 MOVQ_R64_XMM %xmm0, %rax
1446 cmp $8, %r13
1447 jle _less_than_8_bytes_left_decrypt
1448 mov %rax, (%arg2 , %r11, 1)
1449 add $8, %r11
1450 psrldq $8, %xmm0
1451 MOVQ_R64_XMM %xmm0, %rax
1452 sub $8, %r13
1453_less_than_8_bytes_left_decrypt:
1454 mov %al, (%arg2, %r11, 1)
1455 add $1, %r11
1456 shr $8, %rax
1457 sub $1, %r13
1458 jne _less_than_8_bytes_left_decrypt
1459_multiple_of_16_bytes_decrypt:
1460 mov arg8, %r12 # %r12 = aadLen (number of bytes)
1461 shl $3, %r12 # convert into number of bits
1462 movd %r12d, %xmm15 # len(A) in %xmm15
1463 shl $3, %arg4 # len(C) in bits (*128)
1464 MOVQ_R64_XMM %arg4, %xmm1
1465 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
1466 pxor %xmm1, %xmm15 # %xmm15 = len(A)||len(C)
1467 pxor %xmm15, %xmm8
1468 GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
1469 # final GHASH computation
1470 movdqa SHUF_MASK(%rip), %xmm10
1471 PSHUFB_XMM %xmm10, %xmm8
1472 1601
1473 mov %arg5, %rax # %rax = *Y0 1602 GCM_INIT %arg6, arg7, arg8, arg9
1474 movdqu (%rax), %xmm0 # %xmm0 = Y0 1603 GCM_ENC_DEC dec
1475 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Y0) 1604 GCM_COMPLETE arg10, arg11
1476 pxor %xmm8, %xmm0 1605 FUNC_RESTORE
1477_return_T_decrypt:
1478 mov arg9, %r10 # %r10 = authTag
1479 mov arg10, %r11 # %r11 = auth_tag_len
1480 cmp $16, %r11
1481 je _T_16_decrypt
1482 cmp $8, %r11
1483 jl _T_4_decrypt
1484_T_8_decrypt:
1485 MOVQ_R64_XMM %xmm0, %rax
1486 mov %rax, (%r10)
1487 add $8, %r10
1488 sub $8, %r11
1489 psrldq $8, %xmm0
1490 cmp $0, %r11
1491 je _return_T_done_decrypt
1492_T_4_decrypt:
1493 movd %xmm0, %eax
1494 mov %eax, (%r10)
1495 add $4, %r10
1496 sub $4, %r11
1497 psrldq $4, %xmm0
1498 cmp $0, %r11
1499 je _return_T_done_decrypt
1500_T_123_decrypt:
1501 movd %xmm0, %eax
1502 cmp $2, %r11
1503 jl _T_1_decrypt
1504 mov %ax, (%r10)
1505 cmp $2, %r11
1506 je _return_T_done_decrypt
1507 add $2, %r10
1508 sar $16, %eax
1509_T_1_decrypt:
1510 mov %al, (%r10)
1511 jmp _return_T_done_decrypt
1512_T_16_decrypt:
1513 movdqu %xmm0, (%r10)
1514_return_T_done_decrypt:
1515 mov %r14, %rsp
1516 pop %r14
1517 pop %r13
1518 pop %r12
1519 ret 1606 ret
1520ENDPROC(aesni_gcm_dec) 1607ENDPROC(aesni_gcm_dec)
1521 1608
1522 1609
1523/***************************************************************************** 1610/*****************************************************************************
1524* void aesni_gcm_enc(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. 1611* void aesni_gcm_enc(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
1612* struct gcm_context_data *data
1613* // Context data
1525* u8 *out, // Ciphertext output. Encrypt in-place is allowed. 1614* u8 *out, // Ciphertext output. Encrypt in-place is allowed.
1526* const u8 *in, // Plaintext input 1615* const u8 *in, // Plaintext input
1527* u64 plaintext_len, // Length of data in bytes for encryption. 1616* u64 plaintext_len, // Length of data in bytes for encryption.
@@ -1596,195 +1685,78 @@ ENDPROC(aesni_gcm_dec)
1596* poly = x^128 + x^127 + x^126 + x^121 + 1 1685* poly = x^128 + x^127 + x^126 + x^121 + 1
1597***************************************************************************/ 1686***************************************************************************/
1598ENTRY(aesni_gcm_enc) 1687ENTRY(aesni_gcm_enc)
1599 push %r12 1688 FUNC_SAVE
1600 push %r13
1601 push %r14
1602 mov %rsp, %r14
1603#
1604# states of %xmm registers %xmm6:%xmm15 not saved
1605# all %xmm registers are clobbered
1606#
1607 sub $VARIABLE_OFFSET, %rsp
1608 and $~63, %rsp
1609 mov %arg6, %r12
1610 movdqu (%r12), %xmm13
1611 movdqa SHUF_MASK(%rip), %xmm2
1612 PSHUFB_XMM %xmm2, %xmm13
1613
1614
1615# precompute HashKey<<1 mod poly from the HashKey (required for GHASH)
1616
1617 movdqa %xmm13, %xmm2
1618 psllq $1, %xmm13
1619 psrlq $63, %xmm2
1620 movdqa %xmm2, %xmm1
1621 pslldq $8, %xmm2
1622 psrldq $8, %xmm1
1623 por %xmm2, %xmm13
1624
1625 # reduce HashKey<<1
1626
1627 pshufd $0x24, %xmm1, %xmm2
1628 pcmpeqd TWOONE(%rip), %xmm2
1629 pand POLY(%rip), %xmm2
1630 pxor %xmm2, %xmm13
1631 movdqa %xmm13, HashKey(%rsp)
1632 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
1633 and $-16, %r13
1634 mov %r13, %r12
1635 1689
1636 # Encrypt first few blocks 1690 GCM_INIT %arg6, arg7, arg8, arg9
1691 GCM_ENC_DEC enc
1637 1692
1638 and $(3<<4), %r12 1693 GCM_COMPLETE arg10, arg11
1639 jz _initial_num_blocks_is_0_encrypt 1694 FUNC_RESTORE
1640 cmp $(2<<4), %r12 1695 ret
1641 jb _initial_num_blocks_is_1_encrypt 1696ENDPROC(aesni_gcm_enc)
1642 je _initial_num_blocks_is_2_encrypt
1643_initial_num_blocks_is_3_encrypt:
1644 INITIAL_BLOCKS_ENC 3, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
1645%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, enc
1646 sub $48, %r13
1647 jmp _initial_blocks_encrypted
1648_initial_num_blocks_is_2_encrypt:
1649 INITIAL_BLOCKS_ENC 2, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
1650%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, enc
1651 sub $32, %r13
1652 jmp _initial_blocks_encrypted
1653_initial_num_blocks_is_1_encrypt:
1654 INITIAL_BLOCKS_ENC 1, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
1655%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, enc
1656 sub $16, %r13
1657 jmp _initial_blocks_encrypted
1658_initial_num_blocks_is_0_encrypt:
1659 INITIAL_BLOCKS_ENC 0, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
1660%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, enc
1661_initial_blocks_encrypted:
1662
1663 # Main loop - Encrypt remaining blocks
1664
1665 cmp $0, %r13
1666 je _zero_cipher_left_encrypt
1667 sub $64, %r13
1668 je _four_cipher_left_encrypt
1669_encrypt_by_4_encrypt:
1670 GHASH_4_ENCRYPT_4_PARALLEL_ENC %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, \
1671%xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, enc
1672 add $64, %r11
1673 sub $64, %r13
1674 jne _encrypt_by_4_encrypt
1675_four_cipher_left_encrypt:
1676 GHASH_LAST_4 %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \
1677%xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8
1678_zero_cipher_left_encrypt:
1679 mov %arg4, %r13
1680 and $15, %r13 # %r13 = arg4 (mod 16)
1681 je _multiple_of_16_bytes_encrypt
1682
1683 # Handle the last <16 Byte block separately
1684 paddd ONE(%rip), %xmm0 # INCR CNT to get Yn
1685 movdqa SHUF_MASK(%rip), %xmm10
1686 PSHUFB_XMM %xmm10, %xmm0
1687
1688 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn)
1689
1690 lea (%arg3,%r11,1), %r10
1691 mov %r13, %r12
1692 READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
1693
1694 lea ALL_F+16(%rip), %r12
1695 sub %r13, %r12
1696 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
1697 movdqu (%r12), %xmm1
1698 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
1699 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
1700 movdqa SHUF_MASK(%rip), %xmm10
1701 PSHUFB_XMM %xmm10,%xmm0
1702 1697
1703 pxor %xmm0, %xmm8 1698/*****************************************************************************
1704 GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 1699* void aesni_gcm_init(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
1705 # GHASH computation for the last <16 byte block 1700* struct gcm_context_data *data,
1706 movdqa SHUF_MASK(%rip), %xmm10 1701* // context data
1707 PSHUFB_XMM %xmm10, %xmm0 1702* u8 *iv, // Pre-counter block j0: 4 byte salt (from Security Association)
1703* // concatenated with 8 byte Initialisation Vector (from IPSec ESP Payload)
1704* // concatenated with 0x00000001. 16-byte aligned pointer.
1705* u8 *hash_subkey, // H, the Hash sub key input. Data starts on a 16-byte boundary.
1706* const u8 *aad, // Additional Authentication Data (AAD)
1707* u64 aad_len) // Length of AAD in bytes.
1708*/
1709ENTRY(aesni_gcm_init)
1710 FUNC_SAVE
1711 GCM_INIT %arg3, %arg4,%arg5, %arg6
1712 FUNC_RESTORE
1713 ret
1714ENDPROC(aesni_gcm_init)
1708 1715
1709 # shuffle xmm0 back to output as ciphertext 1716/*****************************************************************************
1717* void aesni_gcm_enc_update(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
1718* struct gcm_context_data *data,
1719* // context data
1720* u8 *out, // Ciphertext output. Encrypt in-place is allowed.
1721* const u8 *in, // Plaintext input
1722* u64 plaintext_len, // Length of data in bytes for encryption.
1723*/
1724ENTRY(aesni_gcm_enc_update)
1725 FUNC_SAVE
1726 GCM_ENC_DEC enc
1727 FUNC_RESTORE
1728 ret
1729ENDPROC(aesni_gcm_enc_update)
1710 1730
1711 # Output %r13 bytes 1731/*****************************************************************************
1712 MOVQ_R64_XMM %xmm0, %rax 1732* void aesni_gcm_dec_update(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
1713 cmp $8, %r13 1733* struct gcm_context_data *data,
1714 jle _less_than_8_bytes_left_encrypt 1734* // context data
1715 mov %rax, (%arg2 , %r11, 1) 1735 * u8 *out, // Plaintext output. Decrypt in-place is allowed.
 1716 add $8, %r11 1736 * const u8 *in, // Ciphertext input
 1717 psrldq $8, %xmm0 1737 * u64 plaintext_len, // Length of data in bytes for decryption.
1718 MOVQ_R64_XMM %xmm0, %rax 1738*/
1719 sub $8, %r13 1739ENTRY(aesni_gcm_dec_update)
1720_less_than_8_bytes_left_encrypt: 1740 FUNC_SAVE
1721 mov %al, (%arg2, %r11, 1) 1741 GCM_ENC_DEC dec
1722 add $1, %r11 1742 FUNC_RESTORE
1723 shr $8, %rax 1743 ret
1724 sub $1, %r13 1744ENDPROC(aesni_gcm_dec_update)
1725 jne _less_than_8_bytes_left_encrypt
1726_multiple_of_16_bytes_encrypt:
1727 mov arg8, %r12 # %r12 = aadLen (number of bytes)
1728 shl $3, %r12
1729 movd %r12d, %xmm15 # len(A) in %xmm15
1730 shl $3, %arg4 # len(C) in bits (*128)
1731 MOVQ_R64_XMM %arg4, %xmm1
1732 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
1733 pxor %xmm1, %xmm15 # %xmm15 = len(A)||len(C)
1734 pxor %xmm15, %xmm8
1735 GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
1736 # final GHASH computation
1737 movdqa SHUF_MASK(%rip), %xmm10
1738 PSHUFB_XMM %xmm10, %xmm8 # perform a 16 byte swap
1739 1745
1740 mov %arg5, %rax # %rax = *Y0 1746/*****************************************************************************
1741 movdqu (%rax), %xmm0 # %xmm0 = Y0 1747* void aesni_gcm_finalize(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
1742 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm15 # Encrypt(K, Y0) 1748* struct gcm_context_data *data,
1743 pxor %xmm8, %xmm0 1749* // context data
1744_return_T_encrypt: 1750* u8 *auth_tag, // Authenticated Tag output.
1745 mov arg9, %r10 # %r10 = authTag 1751* u64 auth_tag_len); // Authenticated Tag Length in bytes. Valid values are 16 (most likely),
1746 mov arg10, %r11 # %r11 = auth_tag_len 1752* // 12 or 8.
1747 cmp $16, %r11 1753*/
1748 je _T_16_encrypt 1754ENTRY(aesni_gcm_finalize)
1749 cmp $8, %r11 1755 FUNC_SAVE
1750 jl _T_4_encrypt 1756 GCM_COMPLETE %arg3 %arg4
1751_T_8_encrypt: 1757 FUNC_RESTORE
1752 MOVQ_R64_XMM %xmm0, %rax
1753 mov %rax, (%r10)
1754 add $8, %r10
1755 sub $8, %r11
1756 psrldq $8, %xmm0
1757 cmp $0, %r11
1758 je _return_T_done_encrypt
1759_T_4_encrypt:
1760 movd %xmm0, %eax
1761 mov %eax, (%r10)
1762 add $4, %r10
1763 sub $4, %r11
1764 psrldq $4, %xmm0
1765 cmp $0, %r11
1766 je _return_T_done_encrypt
1767_T_123_encrypt:
1768 movd %xmm0, %eax
1769 cmp $2, %r11
1770 jl _T_1_encrypt
1771 mov %ax, (%r10)
1772 cmp $2, %r11
1773 je _return_T_done_encrypt
1774 add $2, %r10
1775 sar $16, %eax
1776_T_1_encrypt:
1777 mov %al, (%r10)
1778 jmp _return_T_done_encrypt
1779_T_16_encrypt:
1780 movdqu %xmm0, (%r10)
1781_return_T_done_encrypt:
1782 mov %r14, %rsp
1783 pop %r14
1784 pop %r13
1785 pop %r12
1786 ret 1758 ret
1787ENDPROC(aesni_gcm_enc) 1759ENDPROC(aesni_gcm_finalize)
1788 1760
1789#endif 1761#endif
1790 1762
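With the bodies of aesni_gcm_enc and aesni_gcm_dec collapsed into the GCM_INIT / GCM_ENC_DEC / GCM_COMPLETE macros, the same machinery is now also exported piecewise as aesni_gcm_init(), aesni_gcm_enc_update(), aesni_gcm_dec_update() and aesni_gcm_finalize(). A minimal sketch of how a caller drives the split interface over one contiguous buffer — the wrapper name and buffer set-up are hypothetical, the call sequence mirrors the glue-code hunks further down:

/*
 * Sketch only: one-shot encryption through the new three-stage entry
 * points.  aes_ctx, iv, hash_subkey, aad and the src/dst buffers are
 * assumed to have been prepared by the caller.
 */
static void gcm_enc_oneshot_sketch(void *aes_ctx, u8 *iv, u8 *hash_subkey,
                                   const u8 *aad, unsigned long aad_len,
                                   u8 *dst, const u8 *src, unsigned long len,
                                   u8 *tag, unsigned long tag_len)
{
        struct gcm_context_data data AESNI_ALIGN_ATTR;

        kernel_fpu_begin();                     /* the asm clobbers XMM state */
        aesni_gcm_init(aes_ctx, &data, iv, hash_subkey, aad, aad_len);
        /* may be called repeatedly, e.g. once per scatterlist segment;
         * the GHASH/counter state is carried in 'data' between calls */
        aesni_gcm_enc_update(aes_ctx, &data, dst, src, len);
        aesni_gcm_finalize(aes_ctx, &data, tag, tag_len);
        kernel_fpu_end();
}

The scatter/gather path added to the glue code below does exactly this, calling the update entry point once per mapped segment, which is the point of keeping the GCM state in gcm_context_data rather than on the asm stack.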
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 34cf1c1f8c98..acbe7e8336d8 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -72,6 +72,21 @@ struct aesni_xts_ctx {
72 u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR; 72 u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
73}; 73};
74 74
75#define GCM_BLOCK_LEN 16
76
77struct gcm_context_data {
78 /* init, update and finalize context data */
79 u8 aad_hash[GCM_BLOCK_LEN];
80 u64 aad_length;
81 u64 in_length;
82 u8 partial_block_enc_key[GCM_BLOCK_LEN];
83 u8 orig_IV[GCM_BLOCK_LEN];
84 u8 current_counter[GCM_BLOCK_LEN];
85 u64 partial_block_len;
86 u64 unused;
87 u8 hash_keys[GCM_BLOCK_LEN * 8];
88};
89
75asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, 90asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
76 unsigned int key_len); 91 unsigned int key_len);
77asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out, 92asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
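gcm_context_data replaces the scratch area the old asm carved out of its own stack frame (the removed "sub $VARIABLE_OFFSET, %rsp" sequence above): the expanded hash keys and the running GHASH state now live in a caller-supplied structure, which is what makes the split init/update/finalize interface possible. Because the asm reads HashKey_* from it with aligned loads (the movdqa HashKey_*(%arg2) changes earlier in this diff), callers keep it 16-byte aligned. A sketch of the intended declaration, assuming AESNI_ALIGN_ATTR expands to a 16-byte alignment attribute in this file:

        /* one context per request, on the caller's stack; 16-byte alignment
         * is required because the asm uses movdqa on the embedded hash keys */
        struct gcm_context_data data AESNI_ALIGN_ATTR;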
@@ -105,6 +120,7 @@ asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
105 120
106/* asmlinkage void aesni_gcm_enc() 121/* asmlinkage void aesni_gcm_enc()
107 * void *ctx, AES Key schedule. Starts on a 16 byte boundary. 122 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
123 * struct gcm_context_data. May be uninitialized.
108 * u8 *out, Ciphertext output. Encrypt in-place is allowed. 124 * u8 *out, Ciphertext output. Encrypt in-place is allowed.
109 * const u8 *in, Plaintext input 125 * const u8 *in, Plaintext input
110 * unsigned long plaintext_len, Length of data in bytes for encryption. 126 * unsigned long plaintext_len, Length of data in bytes for encryption.
@@ -117,13 +133,15 @@ asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
117 * unsigned long auth_tag_len), Authenticated Tag Length in bytes. 133 * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
118 * Valid values are 16 (most likely), 12 or 8. 134 * Valid values are 16 (most likely), 12 or 8.
119 */ 135 */
120asmlinkage void aesni_gcm_enc(void *ctx, u8 *out, 136asmlinkage void aesni_gcm_enc(void *ctx,
137 struct gcm_context_data *gdata, u8 *out,
121 const u8 *in, unsigned long plaintext_len, u8 *iv, 138 const u8 *in, unsigned long plaintext_len, u8 *iv,
122 u8 *hash_subkey, const u8 *aad, unsigned long aad_len, 139 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
123 u8 *auth_tag, unsigned long auth_tag_len); 140 u8 *auth_tag, unsigned long auth_tag_len);
124 141
125/* asmlinkage void aesni_gcm_dec() 142/* asmlinkage void aesni_gcm_dec()
126 * void *ctx, AES Key schedule. Starts on a 16 byte boundary. 143 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
144 * struct gcm_context_data. May be uninitialized.
127 * u8 *out, Plaintext output. Decrypt in-place is allowed. 145 * u8 *out, Plaintext output. Decrypt in-place is allowed.
128 * const u8 *in, Ciphertext input 146 * const u8 *in, Ciphertext input
129 * unsigned long ciphertext_len, Length of data in bytes for decryption. 147 * unsigned long ciphertext_len, Length of data in bytes for decryption.
@@ -137,11 +155,28 @@ asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
137 * unsigned long auth_tag_len) Authenticated Tag Length in bytes. 155 * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
138 * Valid values are 16 (most likely), 12 or 8. 156 * Valid values are 16 (most likely), 12 or 8.
139 */ 157 */
140asmlinkage void aesni_gcm_dec(void *ctx, u8 *out, 158asmlinkage void aesni_gcm_dec(void *ctx,
159 struct gcm_context_data *gdata, u8 *out,
141 const u8 *in, unsigned long ciphertext_len, u8 *iv, 160 const u8 *in, unsigned long ciphertext_len, u8 *iv,
142 u8 *hash_subkey, const u8 *aad, unsigned long aad_len, 161 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
143 u8 *auth_tag, unsigned long auth_tag_len); 162 u8 *auth_tag, unsigned long auth_tag_len);
144 163
164/* Scatter / Gather routines, with args similar to above */
165asmlinkage void aesni_gcm_init(void *ctx,
166 struct gcm_context_data *gdata,
167 u8 *iv,
168 u8 *hash_subkey, const u8 *aad,
169 unsigned long aad_len);
170asmlinkage void aesni_gcm_enc_update(void *ctx,
171 struct gcm_context_data *gdata, u8 *out,
172 const u8 *in, unsigned long plaintext_len);
173asmlinkage void aesni_gcm_dec_update(void *ctx,
174 struct gcm_context_data *gdata, u8 *out,
175 const u8 *in,
176 unsigned long ciphertext_len);
177asmlinkage void aesni_gcm_finalize(void *ctx,
178 struct gcm_context_data *gdata,
179 u8 *auth_tag, unsigned long auth_tag_len);
145 180
146#ifdef CONFIG_AS_AVX 181#ifdef CONFIG_AS_AVX
147asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv, 182asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
@@ -167,15 +202,17 @@ asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
167 const u8 *aad, unsigned long aad_len, 202 const u8 *aad, unsigned long aad_len,
168 u8 *auth_tag, unsigned long auth_tag_len); 203 u8 *auth_tag, unsigned long auth_tag_len);
169 204
170static void aesni_gcm_enc_avx(void *ctx, u8 *out, 205static void aesni_gcm_enc_avx(void *ctx,
206 struct gcm_context_data *data, u8 *out,
171 const u8 *in, unsigned long plaintext_len, u8 *iv, 207 const u8 *in, unsigned long plaintext_len, u8 *iv,
172 u8 *hash_subkey, const u8 *aad, unsigned long aad_len, 208 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
173 u8 *auth_tag, unsigned long auth_tag_len) 209 u8 *auth_tag, unsigned long auth_tag_len)
174{ 210{
175 struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; 211 struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
176 if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)){ 212 if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)){
177 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad, 213 aesni_gcm_enc(ctx, data, out, in,
178 aad_len, auth_tag, auth_tag_len); 214 plaintext_len, iv, hash_subkey, aad,
215 aad_len, auth_tag, auth_tag_len);
179 } else { 216 } else {
180 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); 217 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
181 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad, 218 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
@@ -183,15 +220,17 @@ static void aesni_gcm_enc_avx(void *ctx, u8 *out,
183 } 220 }
184} 221}
185 222
186static void aesni_gcm_dec_avx(void *ctx, u8 *out, 223static void aesni_gcm_dec_avx(void *ctx,
224 struct gcm_context_data *data, u8 *out,
187 const u8 *in, unsigned long ciphertext_len, u8 *iv, 225 const u8 *in, unsigned long ciphertext_len, u8 *iv,
188 u8 *hash_subkey, const u8 *aad, unsigned long aad_len, 226 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
189 u8 *auth_tag, unsigned long auth_tag_len) 227 u8 *auth_tag, unsigned long auth_tag_len)
190{ 228{
191 struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; 229 struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
192 if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) { 230 if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
193 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad, 231 aesni_gcm_dec(ctx, data, out, in,
194 aad_len, auth_tag, auth_tag_len); 232 ciphertext_len, iv, hash_subkey, aad,
233 aad_len, auth_tag, auth_tag_len);
195 } else { 234 } else {
196 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); 235 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
197 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad, 236 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
@@ -218,15 +257,17 @@ asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
218 const u8 *aad, unsigned long aad_len, 257 const u8 *aad, unsigned long aad_len,
219 u8 *auth_tag, unsigned long auth_tag_len); 258 u8 *auth_tag, unsigned long auth_tag_len);
220 259
221static void aesni_gcm_enc_avx2(void *ctx, u8 *out, 260static void aesni_gcm_enc_avx2(void *ctx,
261 struct gcm_context_data *data, u8 *out,
222 const u8 *in, unsigned long plaintext_len, u8 *iv, 262 const u8 *in, unsigned long plaintext_len, u8 *iv,
223 u8 *hash_subkey, const u8 *aad, unsigned long aad_len, 263 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
224 u8 *auth_tag, unsigned long auth_tag_len) 264 u8 *auth_tag, unsigned long auth_tag_len)
225{ 265{
226 struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; 266 struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
227 if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) { 267 if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
228 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad, 268 aesni_gcm_enc(ctx, data, out, in,
229 aad_len, auth_tag, auth_tag_len); 269 plaintext_len, iv, hash_subkey, aad,
270 aad_len, auth_tag, auth_tag_len);
230 } else if (plaintext_len < AVX_GEN4_OPTSIZE) { 271 } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
231 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); 272 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
232 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad, 273 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
@@ -238,15 +279,17 @@ static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
238 } 279 }
239} 280}
240 281
241static void aesni_gcm_dec_avx2(void *ctx, u8 *out, 282static void aesni_gcm_dec_avx2(void *ctx,
283 struct gcm_context_data *data, u8 *out,
242 const u8 *in, unsigned long ciphertext_len, u8 *iv, 284 const u8 *in, unsigned long ciphertext_len, u8 *iv,
243 u8 *hash_subkey, const u8 *aad, unsigned long aad_len, 285 u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
244 u8 *auth_tag, unsigned long auth_tag_len) 286 u8 *auth_tag, unsigned long auth_tag_len)
245{ 287{
246 struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; 288 struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
247 if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) { 289 if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
248 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, 290 aesni_gcm_dec(ctx, data, out, in,
249 aad, aad_len, auth_tag, auth_tag_len); 291 ciphertext_len, iv, hash_subkey,
292 aad, aad_len, auth_tag, auth_tag_len);
250 } else if (ciphertext_len < AVX_GEN4_OPTSIZE) { 293 } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
251 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); 294 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
252 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad, 295 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
@@ -259,15 +302,19 @@ static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
259} 302}
260#endif 303#endif
261 304
262static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out, 305static void (*aesni_gcm_enc_tfm)(void *ctx,
263 const u8 *in, unsigned long plaintext_len, u8 *iv, 306 struct gcm_context_data *data, u8 *out,
264 u8 *hash_subkey, const u8 *aad, unsigned long aad_len, 307 const u8 *in, unsigned long plaintext_len,
265 u8 *auth_tag, unsigned long auth_tag_len); 308 u8 *iv, u8 *hash_subkey, const u8 *aad,
309 unsigned long aad_len, u8 *auth_tag,
310 unsigned long auth_tag_len);
266 311
267static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out, 312static void (*aesni_gcm_dec_tfm)(void *ctx,
268 const u8 *in, unsigned long ciphertext_len, u8 *iv, 313 struct gcm_context_data *data, u8 *out,
269 u8 *hash_subkey, const u8 *aad, unsigned long aad_len, 314 const u8 *in, unsigned long ciphertext_len,
270 u8 *auth_tag, unsigned long auth_tag_len); 315 u8 *iv, u8 *hash_subkey, const u8 *aad,
316 unsigned long aad_len, u8 *auth_tag,
317 unsigned long auth_tag_len);
271 318
272static inline struct 319static inline struct
273aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm) 320aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
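The aesni_gcm_enc_tfm/dec_tfm pointers keep the old one-shot prototype (now with the extra context argument) so the AVX and AVX2 paths are untouched by this series. The actual selection happens in this file's module init path, outside the hunks shown here; purely for orientation, a hypothetical sketch of that dispatch — boot_cpu_has() and the feature flags are real, the surrounding function is made up:

static void gcm_select_tfm_sketch(void)
{
        /* hypothetical: mirrors the CPU-feature selection done at module
         * init in this file, which is not part of this diff */
        if (boot_cpu_has(X86_FEATURE_AVX2)) {
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
        } else if (boot_cpu_has(X86_FEATURE_AVX)) {
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
        } else {
                aesni_gcm_enc_tfm = aesni_gcm_enc;      /* plain SSE */
                aesni_gcm_dec_tfm = aesni_gcm_dec;
        }
}

This is why the new scatter/gather path in the hunks below is gated on "aesni_gcm_enc_tfm == aesni_gcm_enc": it only ever replaces the plain SSE one-shot path, never the AVX ones.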
@@ -744,6 +791,127 @@ static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
744 return 0; 791 return 0;
745} 792}
746 793
794static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
795 unsigned int assoclen, u8 *hash_subkey,
796 u8 *iv, void *aes_ctx)
797{
798 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
799 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
800 struct gcm_context_data data AESNI_ALIGN_ATTR;
801 struct scatter_walk dst_sg_walk = {};
802 unsigned long left = req->cryptlen;
803 unsigned long len, srclen, dstlen;
804 struct scatter_walk assoc_sg_walk;
805 struct scatter_walk src_sg_walk;
806 struct scatterlist src_start[2];
807 struct scatterlist dst_start[2];
808 struct scatterlist *src_sg;
809 struct scatterlist *dst_sg;
810 u8 *src, *dst, *assoc;
811 u8 *assocmem = NULL;
812 u8 authTag[16];
813
814 if (!enc)
815 left -= auth_tag_len;
816
817 /* Linearize assoc, if not already linear */
818 if (req->src->length >= assoclen && req->src->length &&
819 (!PageHighMem(sg_page(req->src)) ||
820 req->src->offset + req->src->length < PAGE_SIZE)) {
821 scatterwalk_start(&assoc_sg_walk, req->src);
822 assoc = scatterwalk_map(&assoc_sg_walk);
823 } else {
824 /* assoc can be any length, so must be on heap */
825 assocmem = kmalloc(assoclen, GFP_ATOMIC);
826 if (unlikely(!assocmem))
827 return -ENOMEM;
828 assoc = assocmem;
829
830 scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
831 }
832
833 src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
834 scatterwalk_start(&src_sg_walk, src_sg);
835 if (req->src != req->dst) {
836 dst_sg = scatterwalk_ffwd(dst_start, req->dst, req->assoclen);
837 scatterwalk_start(&dst_sg_walk, dst_sg);
838 }
839
840 kernel_fpu_begin();
841 aesni_gcm_init(aes_ctx, &data, iv,
842 hash_subkey, assoc, assoclen);
843 if (req->src != req->dst) {
844 while (left) {
845 src = scatterwalk_map(&src_sg_walk);
846 dst = scatterwalk_map(&dst_sg_walk);
847 srclen = scatterwalk_clamp(&src_sg_walk, left);
848 dstlen = scatterwalk_clamp(&dst_sg_walk, left);
849 len = min(srclen, dstlen);
850 if (len) {
851 if (enc)
852 aesni_gcm_enc_update(aes_ctx, &data,
853 dst, src, len);
854 else
855 aesni_gcm_dec_update(aes_ctx, &data,
856 dst, src, len);
857 }
858 left -= len;
859
860 scatterwalk_unmap(src);
861 scatterwalk_unmap(dst);
862 scatterwalk_advance(&src_sg_walk, len);
863 scatterwalk_advance(&dst_sg_walk, len);
864 scatterwalk_done(&src_sg_walk, 0, left);
865 scatterwalk_done(&dst_sg_walk, 1, left);
866 }
867 } else {
868 while (left) {
869 dst = src = scatterwalk_map(&src_sg_walk);
870 len = scatterwalk_clamp(&src_sg_walk, left);
871 if (len) {
872 if (enc)
873 aesni_gcm_enc_update(aes_ctx, &data,
874 src, src, len);
875 else
876 aesni_gcm_dec_update(aes_ctx, &data,
877 src, src, len);
878 }
879 left -= len;
880 scatterwalk_unmap(src);
881 scatterwalk_advance(&src_sg_walk, len);
882 scatterwalk_done(&src_sg_walk, 1, left);
883 }
884 }
885 aesni_gcm_finalize(aes_ctx, &data, authTag, auth_tag_len);
886 kernel_fpu_end();
887
888 if (!assocmem)
889 scatterwalk_unmap(assoc);
890 else
891 kfree(assocmem);
892
893 if (!enc) {
894 u8 authTagMsg[16];
895
896 /* Copy out original authTag */
897 scatterwalk_map_and_copy(authTagMsg, req->src,
898 req->assoclen + req->cryptlen -
899 auth_tag_len,
900 auth_tag_len, 0);
901
902 /* Compare generated tag with passed in tag. */
903 return crypto_memneq(authTagMsg, authTag, auth_tag_len) ?
904 -EBADMSG : 0;
905 }
906
907 /* Copy in the authTag */
908 scatterwalk_map_and_copy(authTag, req->dst,
909 req->assoclen + req->cryptlen,
910 auth_tag_len, 1);
911
912 return 0;
913}
914
747static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen, 915static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
748 u8 *hash_subkey, u8 *iv, void *aes_ctx) 916 u8 *hash_subkey, u8 *iv, void *aes_ctx)
749{ 917{
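The in-place branch of gcmaes_crypt_by_sg() is the clearest picture of the scatterwalk protocol the new update entry points enable. Condensed from the hunk above, with the role of each helper spelled out (error handling and the kernel_fpu_begin/end bracketing omitted):

        while (left) {
                /* map the current scatterlist page, get a usable address */
                dst = src = scatterwalk_map(&src_sg_walk);
                /* clamp to what is contiguous before a page/entry boundary */
                len = scatterwalk_clamp(&src_sg_walk, left);
                if (len)
                        aesni_gcm_enc_update(aes_ctx, &data, src, src, len);
                left -= len;
                scatterwalk_unmap(src);
                /* move the walk position forward by the bytes consumed */
                scatterwalk_advance(&src_sg_walk, len);
                /* '1' marks this as an output page (flush if needed);
                 * a non-zero 'left' tells the walk more data follows */
                scatterwalk_done(&src_sg_walk, 1, left);
        }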
@@ -753,7 +921,14 @@ static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
753 unsigned long auth_tag_len = crypto_aead_authsize(tfm); 921 unsigned long auth_tag_len = crypto_aead_authsize(tfm);
754 struct scatter_walk src_sg_walk; 922 struct scatter_walk src_sg_walk;
755 struct scatter_walk dst_sg_walk = {}; 923 struct scatter_walk dst_sg_walk = {};
924 struct gcm_context_data data AESNI_ALIGN_ATTR;
756 925
926 if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
927 aesni_gcm_enc_tfm == aesni_gcm_enc ||
928 req->cryptlen < AVX_GEN2_OPTSIZE) {
929 return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
930 aes_ctx);
931 }
757 if (sg_is_last(req->src) && 932 if (sg_is_last(req->src) &&
758 (!PageHighMem(sg_page(req->src)) || 933 (!PageHighMem(sg_page(req->src)) ||
759 req->src->offset + req->src->length <= PAGE_SIZE) && 934 req->src->offset + req->src->length <= PAGE_SIZE) &&
@@ -782,7 +957,7 @@ static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
782 } 957 }
783 958
784 kernel_fpu_begin(); 959 kernel_fpu_begin();
785 aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv, 960 aesni_gcm_enc_tfm(aes_ctx, &data, dst, src, req->cryptlen, iv,
786 hash_subkey, assoc, assoclen, 961 hash_subkey, assoc, assoclen,
787 dst + req->cryptlen, auth_tag_len); 962 dst + req->cryptlen, auth_tag_len);
788 kernel_fpu_end(); 963 kernel_fpu_end();
@@ -817,8 +992,15 @@ static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
817 u8 authTag[16]; 992 u8 authTag[16];
818 struct scatter_walk src_sg_walk; 993 struct scatter_walk src_sg_walk;
819 struct scatter_walk dst_sg_walk = {}; 994 struct scatter_walk dst_sg_walk = {};
995 struct gcm_context_data data AESNI_ALIGN_ATTR;
820 int retval = 0; 996 int retval = 0;
821 997
998 if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
999 aesni_gcm_enc_tfm == aesni_gcm_enc ||
1000 req->cryptlen < AVX_GEN2_OPTSIZE) {
1001 return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
1002 aes_ctx);
1003 }
822 tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len); 1004 tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
823 1005
824 if (sg_is_last(req->src) && 1006 if (sg_is_last(req->src) &&
@@ -849,7 +1031,7 @@ static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
849 1031
850 1032
851 kernel_fpu_begin(); 1033 kernel_fpu_begin();
852 aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv, 1034 aesni_gcm_dec_tfm(aes_ctx, &data, dst, src, tempCipherLen, iv,
853 hash_subkey, assoc, assoclen, 1035 hash_subkey, assoc, assoclen,
854 authTag, auth_tag_len); 1036 authTag, auth_tag_len);
855 kernel_fpu_end(); 1037 kernel_fpu_end();
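For completeness, requests reach gcmaes_encrypt()/gcmaes_decrypt() — and from there the by-sg path above — as ordinary AEAD requests. A hedged usage sketch: the wrapper name and single-buffer layout are made up, the crypto API calls are the standard ones, and whether this driver actually services the request depends on CPU features and algorithm priorities:

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int gcm_aes_encrypt_sketch(const u8 *key, unsigned int keylen,
                                  u8 *buf, unsigned int assoclen,
                                  unsigned int ptlen, u8 iv[12])
{
        struct crypto_aead *tfm;
        struct aead_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        int err;

        tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_aead_setkey(tfm, key, keylen);
        if (!err)
                err = crypto_aead_setauthsize(tfm, 16);
        if (err)
                goto out_free_tfm;

        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        /* assumed buffer layout: assoc || plaintext || room for 16-byte tag */
        sg_init_one(&sg, buf, assoclen + ptlen + 16);
        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  crypto_req_done, &wait);
        aead_request_set_ad(req, assoclen);
        aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

        err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

        aead_request_free(req);
out_free_tfm:
        crypto_free_aead(tfm);
        return err;
}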
diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c
index f9eca34301e2..3e0c07cc9124 100644
--- a/arch/x86/crypto/blowfish_glue.c
+++ b/arch/x86/crypto/blowfish_glue.c
@@ -25,13 +25,13 @@
25 * 25 *
26 */ 26 */
27 27
28#include <asm/processor.h> 28#include <crypto/algapi.h>
29#include <crypto/blowfish.h> 29#include <crypto/blowfish.h>
30#include <crypto/internal/skcipher.h>
30#include <linux/crypto.h> 31#include <linux/crypto.h>
31#include <linux/init.h> 32#include <linux/init.h>
32#include <linux/module.h> 33#include <linux/module.h>
33#include <linux/types.h> 34#include <linux/types.h>
34#include <crypto/algapi.h>
35 35
36/* regular block cipher functions */ 36/* regular block cipher functions */
37asmlinkage void __blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src, 37asmlinkage void __blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src,
@@ -77,20 +77,28 @@ static void blowfish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
77 blowfish_dec_blk(crypto_tfm_ctx(tfm), dst, src); 77 blowfish_dec_blk(crypto_tfm_ctx(tfm), dst, src);
78} 78}
79 79
80static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, 80static int blowfish_setkey_skcipher(struct crypto_skcipher *tfm,
81 const u8 *key, unsigned int keylen)
82{
83 return blowfish_setkey(&tfm->base, key, keylen);
84}
85
86static int ecb_crypt(struct skcipher_request *req,
81 void (*fn)(struct bf_ctx *, u8 *, const u8 *), 87 void (*fn)(struct bf_ctx *, u8 *, const u8 *),
82 void (*fn_4way)(struct bf_ctx *, u8 *, const u8 *)) 88 void (*fn_4way)(struct bf_ctx *, u8 *, const u8 *))
83{ 89{
84 struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
85 unsigned int bsize = BF_BLOCK_SIZE; 90 unsigned int bsize = BF_BLOCK_SIZE;
91 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
92 struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
93 struct skcipher_walk walk;
86 unsigned int nbytes; 94 unsigned int nbytes;
87 int err; 95 int err;
88 96
89 err = blkcipher_walk_virt(desc, walk); 97 err = skcipher_walk_virt(&walk, req, false);
90 98
91 while ((nbytes = walk->nbytes)) { 99 while ((nbytes = walk.nbytes)) {
92 u8 *wsrc = walk->src.virt.addr; 100 u8 *wsrc = walk.src.virt.addr;
93 u8 *wdst = walk->dst.virt.addr; 101 u8 *wdst = walk.dst.virt.addr;
94 102
95 /* Process four block batch */ 103 /* Process four block batch */
96 if (nbytes >= bsize * 4) { 104 if (nbytes >= bsize * 4) {
@@ -116,34 +124,25 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
116 } while (nbytes >= bsize); 124 } while (nbytes >= bsize);
117 125
118done: 126done:
119 err = blkcipher_walk_done(desc, walk, nbytes); 127 err = skcipher_walk_done(&walk, nbytes);
120 } 128 }
121 129
122 return err; 130 return err;
123} 131}
124 132
125static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 133static int ecb_encrypt(struct skcipher_request *req)
126 struct scatterlist *src, unsigned int nbytes)
127{ 134{
128 struct blkcipher_walk walk; 135 return ecb_crypt(req, blowfish_enc_blk, blowfish_enc_blk_4way);
129
130 blkcipher_walk_init(&walk, dst, src, nbytes);
131 return ecb_crypt(desc, &walk, blowfish_enc_blk, blowfish_enc_blk_4way);
132} 136}
133 137
134static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 138static int ecb_decrypt(struct skcipher_request *req)
135 struct scatterlist *src, unsigned int nbytes)
136{ 139{
137 struct blkcipher_walk walk; 140 return ecb_crypt(req, blowfish_dec_blk, blowfish_dec_blk_4way);
138
139 blkcipher_walk_init(&walk, dst, src, nbytes);
140 return ecb_crypt(desc, &walk, blowfish_dec_blk, blowfish_dec_blk_4way);
141} 141}
142 142
143static unsigned int __cbc_encrypt(struct blkcipher_desc *desc, 143static unsigned int __cbc_encrypt(struct bf_ctx *ctx,
144 struct blkcipher_walk *walk) 144 struct skcipher_walk *walk)
145{ 145{
146 struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
147 unsigned int bsize = BF_BLOCK_SIZE; 146 unsigned int bsize = BF_BLOCK_SIZE;
148 unsigned int nbytes = walk->nbytes; 147 unsigned int nbytes = walk->nbytes;
149 u64 *src = (u64 *)walk->src.virt.addr; 148 u64 *src = (u64 *)walk->src.virt.addr;
@@ -164,27 +163,27 @@ static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
164 return nbytes; 163 return nbytes;
165} 164}
166 165
167static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 166static int cbc_encrypt(struct skcipher_request *req)
168 struct scatterlist *src, unsigned int nbytes)
169{ 167{
170 struct blkcipher_walk walk; 168 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
169 struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
170 struct skcipher_walk walk;
171 unsigned int nbytes;
171 int err; 172 int err;
172 173
173 blkcipher_walk_init(&walk, dst, src, nbytes); 174 err = skcipher_walk_virt(&walk, req, false);
174 err = blkcipher_walk_virt(desc, &walk);
175 175
176 while ((nbytes = walk.nbytes)) { 176 while ((nbytes = walk.nbytes)) {
177 nbytes = __cbc_encrypt(desc, &walk); 177 nbytes = __cbc_encrypt(ctx, &walk);
178 err = blkcipher_walk_done(desc, &walk, nbytes); 178 err = skcipher_walk_done(&walk, nbytes);
179 } 179 }
180 180
181 return err; 181 return err;
182} 182}
183 183
184static unsigned int __cbc_decrypt(struct blkcipher_desc *desc, 184static unsigned int __cbc_decrypt(struct bf_ctx *ctx,
185 struct blkcipher_walk *walk) 185 struct skcipher_walk *walk)
186{ 186{
187 struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
188 unsigned int bsize = BF_BLOCK_SIZE; 187 unsigned int bsize = BF_BLOCK_SIZE;
189 unsigned int nbytes = walk->nbytes; 188 unsigned int nbytes = walk->nbytes;
190 u64 *src = (u64 *)walk->src.virt.addr; 189 u64 *src = (u64 *)walk->src.virt.addr;
@@ -245,24 +244,25 @@ done:
245 return nbytes; 244 return nbytes;
246} 245}
247 246
248static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 247static int cbc_decrypt(struct skcipher_request *req)
249 struct scatterlist *src, unsigned int nbytes)
250{ 248{
251 struct blkcipher_walk walk; 249 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
250 struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
251 struct skcipher_walk walk;
252 unsigned int nbytes;
252 int err; 253 int err;
253 254
254 blkcipher_walk_init(&walk, dst, src, nbytes); 255 err = skcipher_walk_virt(&walk, req, false);
255 err = blkcipher_walk_virt(desc, &walk);
256 256
257 while ((nbytes = walk.nbytes)) { 257 while ((nbytes = walk.nbytes)) {
258 nbytes = __cbc_decrypt(desc, &walk); 258 nbytes = __cbc_decrypt(ctx, &walk);
259 err = blkcipher_walk_done(desc, &walk, nbytes); 259 err = skcipher_walk_done(&walk, nbytes);
260 } 260 }
261 261
262 return err; 262 return err;
263} 263}
264 264
265static void ctr_crypt_final(struct bf_ctx *ctx, struct blkcipher_walk *walk) 265static void ctr_crypt_final(struct bf_ctx *ctx, struct skcipher_walk *walk)
266{ 266{
267 u8 *ctrblk = walk->iv; 267 u8 *ctrblk = walk->iv;
268 u8 keystream[BF_BLOCK_SIZE]; 268 u8 keystream[BF_BLOCK_SIZE];
@@ -276,10 +276,8 @@ static void ctr_crypt_final(struct bf_ctx *ctx, struct blkcipher_walk *walk)
276 crypto_inc(ctrblk, BF_BLOCK_SIZE); 276 crypto_inc(ctrblk, BF_BLOCK_SIZE);
277} 277}
278 278
279static unsigned int __ctr_crypt(struct blkcipher_desc *desc, 279static unsigned int __ctr_crypt(struct bf_ctx *ctx, struct skcipher_walk *walk)
280 struct blkcipher_walk *walk)
281{ 280{
282 struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
283 unsigned int bsize = BF_BLOCK_SIZE; 281 unsigned int bsize = BF_BLOCK_SIZE;
284 unsigned int nbytes = walk->nbytes; 282 unsigned int nbytes = walk->nbytes;
285 u64 *src = (u64 *)walk->src.virt.addr; 283 u64 *src = (u64 *)walk->src.virt.addr;
@@ -332,29 +330,30 @@ done:
332 return nbytes; 330 return nbytes;
333} 331}
334 332
335static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, 333static int ctr_crypt(struct skcipher_request *req)
336 struct scatterlist *src, unsigned int nbytes)
337{ 334{
338 struct blkcipher_walk walk; 335 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
336 struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
337 struct skcipher_walk walk;
338 unsigned int nbytes;
339 int err; 339 int err;
340 340
341 blkcipher_walk_init(&walk, dst, src, nbytes); 341 err = skcipher_walk_virt(&walk, req, false);
342 err = blkcipher_walk_virt_block(desc, &walk, BF_BLOCK_SIZE);
343 342
344 while ((nbytes = walk.nbytes) >= BF_BLOCK_SIZE) { 343 while ((nbytes = walk.nbytes) >= BF_BLOCK_SIZE) {
345 nbytes = __ctr_crypt(desc, &walk); 344 nbytes = __ctr_crypt(ctx, &walk);
346 err = blkcipher_walk_done(desc, &walk, nbytes); 345 err = skcipher_walk_done(&walk, nbytes);
347 } 346 }
348 347
349 if (walk.nbytes) { 348 if (nbytes) {
350 ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk); 349 ctr_crypt_final(ctx, &walk);
351 err = blkcipher_walk_done(desc, &walk, 0); 350 err = skcipher_walk_done(&walk, 0);
352 } 351 }
353 352
354 return err; 353 return err;
355} 354}
356 355
357static struct crypto_alg bf_algs[4] = { { 356static struct crypto_alg bf_cipher_alg = {
358 .cra_name = "blowfish", 357 .cra_name = "blowfish",
359 .cra_driver_name = "blowfish-asm", 358 .cra_driver_name = "blowfish-asm",
360 .cra_priority = 200, 359 .cra_priority = 200,
@@ -372,66 +371,50 @@ static struct crypto_alg bf_algs[4] = { {
372 .cia_decrypt = blowfish_decrypt, 371 .cia_decrypt = blowfish_decrypt,
373 } 372 }
374 } 373 }
375}, { 374};
376 .cra_name = "ecb(blowfish)", 375
377 .cra_driver_name = "ecb-blowfish-asm", 376static struct skcipher_alg bf_skcipher_algs[] = {
378 .cra_priority = 300, 377 {
379 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 378 .base.cra_name = "ecb(blowfish)",
380 .cra_blocksize = BF_BLOCK_SIZE, 379 .base.cra_driver_name = "ecb-blowfish-asm",
381 .cra_ctxsize = sizeof(struct bf_ctx), 380 .base.cra_priority = 300,
382 .cra_alignmask = 0, 381 .base.cra_blocksize = BF_BLOCK_SIZE,
383 .cra_type = &crypto_blkcipher_type, 382 .base.cra_ctxsize = sizeof(struct bf_ctx),
384 .cra_module = THIS_MODULE, 383 .base.cra_module = THIS_MODULE,
385 .cra_u = { 384 .min_keysize = BF_MIN_KEY_SIZE,
386 .blkcipher = { 385 .max_keysize = BF_MAX_KEY_SIZE,
387 .min_keysize = BF_MIN_KEY_SIZE, 386 .setkey = blowfish_setkey_skcipher,
388 .max_keysize = BF_MAX_KEY_SIZE, 387 .encrypt = ecb_encrypt,
389 .setkey = blowfish_setkey, 388 .decrypt = ecb_decrypt,
390 .encrypt = ecb_encrypt, 389 }, {
391 .decrypt = ecb_decrypt, 390 .base.cra_name = "cbc(blowfish)",
392 }, 391 .base.cra_driver_name = "cbc-blowfish-asm",
392 .base.cra_priority = 300,
393 .base.cra_blocksize = BF_BLOCK_SIZE,
394 .base.cra_ctxsize = sizeof(struct bf_ctx),
395 .base.cra_module = THIS_MODULE,
396 .min_keysize = BF_MIN_KEY_SIZE,
397 .max_keysize = BF_MAX_KEY_SIZE,
398 .ivsize = BF_BLOCK_SIZE,
399 .setkey = blowfish_setkey_skcipher,
400 .encrypt = cbc_encrypt,
401 .decrypt = cbc_decrypt,
402 }, {
403 .base.cra_name = "ctr(blowfish)",
404 .base.cra_driver_name = "ctr-blowfish-asm",
405 .base.cra_priority = 300,
406 .base.cra_blocksize = 1,
407 .base.cra_ctxsize = sizeof(struct bf_ctx),
408 .base.cra_module = THIS_MODULE,
409 .min_keysize = BF_MIN_KEY_SIZE,
410 .max_keysize = BF_MAX_KEY_SIZE,
411 .ivsize = BF_BLOCK_SIZE,
412 .chunksize = BF_BLOCK_SIZE,
413 .setkey = blowfish_setkey_skcipher,
414 .encrypt = ctr_crypt,
415 .decrypt = ctr_crypt,
393 }, 416 },
394}, { 417};
395 .cra_name = "cbc(blowfish)",
396 .cra_driver_name = "cbc-blowfish-asm",
397 .cra_priority = 300,
398 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
399 .cra_blocksize = BF_BLOCK_SIZE,
400 .cra_ctxsize = sizeof(struct bf_ctx),
401 .cra_alignmask = 0,
402 .cra_type = &crypto_blkcipher_type,
403 .cra_module = THIS_MODULE,
404 .cra_u = {
405 .blkcipher = {
406 .min_keysize = BF_MIN_KEY_SIZE,
407 .max_keysize = BF_MAX_KEY_SIZE,
408 .ivsize = BF_BLOCK_SIZE,
409 .setkey = blowfish_setkey,
410 .encrypt = cbc_encrypt,
411 .decrypt = cbc_decrypt,
412 },
413 },
414}, {
415 .cra_name = "ctr(blowfish)",
416 .cra_driver_name = "ctr-blowfish-asm",
417 .cra_priority = 300,
418 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
419 .cra_blocksize = 1,
420 .cra_ctxsize = sizeof(struct bf_ctx),
421 .cra_alignmask = 0,
422 .cra_type = &crypto_blkcipher_type,
423 .cra_module = THIS_MODULE,
424 .cra_u = {
425 .blkcipher = {
426 .min_keysize = BF_MIN_KEY_SIZE,
427 .max_keysize = BF_MAX_KEY_SIZE,
428 .ivsize = BF_BLOCK_SIZE,
429 .setkey = blowfish_setkey,
430 .encrypt = ctr_crypt,
431 .decrypt = ctr_crypt,
432 },
433 },
434} };
435 418
436static bool is_blacklisted_cpu(void) 419static bool is_blacklisted_cpu(void)
437{ 420{
@@ -456,6 +439,8 @@ MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
456 439
457static int __init init(void) 440static int __init init(void)
458{ 441{
442 int err;
443
459 if (!force && is_blacklisted_cpu()) { 444 if (!force && is_blacklisted_cpu()) {
460 printk(KERN_INFO 445 printk(KERN_INFO
461 "blowfish-x86_64: performance on this CPU " 446 "blowfish-x86_64: performance on this CPU "
@@ -464,12 +449,23 @@ static int __init init(void)
464 return -ENODEV; 449 return -ENODEV;
465 } 450 }
466 451
467 return crypto_register_algs(bf_algs, ARRAY_SIZE(bf_algs)); 452 err = crypto_register_alg(&bf_cipher_alg);
453 if (err)
454 return err;
455
456 err = crypto_register_skciphers(bf_skcipher_algs,
457 ARRAY_SIZE(bf_skcipher_algs));
458 if (err)
459 crypto_unregister_alg(&bf_cipher_alg);
460
461 return err;
468} 462}
469 463
470static void __exit fini(void) 464static void __exit fini(void)
471{ 465{
472 crypto_unregister_algs(bf_algs, ARRAY_SIZE(bf_algs)); 466 crypto_unregister_alg(&bf_cipher_alg);
467 crypto_unregister_skciphers(bf_skcipher_algs,
468 ARRAY_SIZE(bf_skcipher_algs));
473} 469}
474 470
475module_init(init); 471module_init(init);
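The conversion above drops the legacy blkcipher interface for the ECB/CBC/CTR modes and re-registers them as skciphers; the algorithm names ("ecb(blowfish)", "cbc(blowfish)", "ctr(blowfish)") are unchanged, so existing users keep working. A hedged usage sketch through the skcipher API, mirroring the AEAD example earlier — the wrapper name and the contiguous-buffer assumption are mine:

#include <crypto/blowfish.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int cbc_blowfish_encrypt_sketch(const u8 *key, unsigned int keylen,
                                       u8 *buf, unsigned int len,
                                       u8 iv[BF_BLOCK_SIZE])
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        int err;

        tfm = crypto_alloc_skcipher("cbc(blowfish)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_skcipher_setkey(tfm, key, keylen);
        if (err)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        /* len must be a multiple of BF_BLOCK_SIZE for CBC */
        sg_init_one(&sg, buf, len);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, &sg, &sg, len, iv);
        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return err;
}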
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
index 60907c139c4e..d4992e458f92 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
@@ -10,18 +10,15 @@
10 * 10 *
11 */ 11 */
12 12
13#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/crypto.h>
16#include <linux/err.h>
17#include <crypto/ablk_helper.h>
18#include <crypto/algapi.h>
19#include <crypto/ctr.h>
20#include <crypto/lrw.h>
21#include <crypto/xts.h>
22#include <asm/fpu/api.h>
23#include <asm/crypto/camellia.h> 13#include <asm/crypto/camellia.h>
24#include <asm/crypto/glue_helper.h> 14#include <asm/crypto/glue_helper.h>
15#include <crypto/algapi.h>
16#include <crypto/internal/simd.h>
17#include <crypto/xts.h>
18#include <linux/crypto.h>
19#include <linux/err.h>
20#include <linux/module.h>
21#include <linux/types.h>
25 22
26#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 23#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
27#define CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS 32 24#define CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS 32
@@ -150,413 +147,120 @@ static const struct common_glue_ctx camellia_dec_xts = {
150 } } 147 } }
151}; 148};
152 149
153static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 150static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
154 struct scatterlist *src, unsigned int nbytes) 151 unsigned int keylen)
155{ 152{
156 return glue_ecb_crypt_128bit(&camellia_enc, desc, dst, src, nbytes); 153 return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen,
154 &tfm->base.crt_flags);
157} 155}
158 156
159static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 157static int ecb_encrypt(struct skcipher_request *req)
160 struct scatterlist *src, unsigned int nbytes)
161{ 158{
162 return glue_ecb_crypt_128bit(&camellia_dec, desc, dst, src, nbytes); 159 return glue_ecb_req_128bit(&camellia_enc, req);
163} 160}
164 161
165static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 162static int ecb_decrypt(struct skcipher_request *req)
166 struct scatterlist *src, unsigned int nbytes)
167{
168 return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(camellia_enc_blk), desc,
169 dst, src, nbytes);
170}
171
172static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
173 struct scatterlist *src, unsigned int nbytes)
174{
175 return glue_cbc_decrypt_128bit(&camellia_dec_cbc, desc, dst, src,
176 nbytes);
177}
178
179static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
180 struct scatterlist *src, unsigned int nbytes)
181{
182 return glue_ctr_crypt_128bit(&camellia_ctr, desc, dst, src, nbytes);
183}
184
185static inline bool camellia_fpu_begin(bool fpu_enabled, unsigned int nbytes)
186{
187 return glue_fpu_begin(CAMELLIA_BLOCK_SIZE,
188 CAMELLIA_AESNI_PARALLEL_BLOCKS, NULL, fpu_enabled,
189 nbytes);
190}
191
192static inline void camellia_fpu_end(bool fpu_enabled)
193{
194 glue_fpu_end(fpu_enabled);
195}
196
197static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key,
198 unsigned int key_len)
199{
200 return __camellia_setkey(crypto_tfm_ctx(tfm), in_key, key_len,
201 &tfm->crt_flags);
202}
203
204struct crypt_priv {
205 struct camellia_ctx *ctx;
206 bool fpu_enabled;
207};
208
209static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
210{ 163{
211 const unsigned int bsize = CAMELLIA_BLOCK_SIZE; 164 return glue_ecb_req_128bit(&camellia_dec, req);
212 struct crypt_priv *ctx = priv;
213 int i;
214
215 ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);
216
217 if (nbytes >= CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS * bsize) {
218 camellia_ecb_enc_32way(ctx->ctx, srcdst, srcdst);
219 srcdst += bsize * CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS;
220 nbytes -= bsize * CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS;
221 }
222
223 if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
224 camellia_ecb_enc_16way(ctx->ctx, srcdst, srcdst);
225 srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
226 nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
227 }
228
229 while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
230 camellia_enc_blk_2way(ctx->ctx, srcdst, srcdst);
231 srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
232 nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
233 }
234
235 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
236 camellia_enc_blk(ctx->ctx, srcdst, srcdst);
237} 165}
238 166
239static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) 167static int cbc_encrypt(struct skcipher_request *req)
240{ 168{
241 const unsigned int bsize = CAMELLIA_BLOCK_SIZE; 169 return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
242 struct crypt_priv *ctx = priv; 170 req);
243 int i;
244
245 ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);
246
247 if (nbytes >= CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS * bsize) {
248 camellia_ecb_dec_32way(ctx->ctx, srcdst, srcdst);
249 srcdst += bsize * CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS;
250 nbytes -= bsize * CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS;
251 }
252
253 if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
254 camellia_ecb_dec_16way(ctx->ctx, srcdst, srcdst);
255 srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
256 nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
257 }
258
259 while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
260 camellia_dec_blk_2way(ctx->ctx, srcdst, srcdst);
261 srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
262 nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
263 }
264
265 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
266 camellia_dec_blk(ctx->ctx, srcdst, srcdst);
267} 171}
268 172
269static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 173static int cbc_decrypt(struct skcipher_request *req)
270 struct scatterlist *src, unsigned int nbytes)
271{ 174{
272 struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 175 return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req);
273 be128 buf[CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS];
274 struct crypt_priv crypt_ctx = {
275 .ctx = &ctx->camellia_ctx,
276 .fpu_enabled = false,
277 };
278 struct lrw_crypt_req req = {
279 .tbuf = buf,
280 .tbuflen = sizeof(buf),
281
282 .table_ctx = &ctx->lrw_table,
283 .crypt_ctx = &crypt_ctx,
284 .crypt_fn = encrypt_callback,
285 };
286 int ret;
287
288 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
289 ret = lrw_crypt(desc, dst, src, nbytes, &req);
290 camellia_fpu_end(crypt_ctx.fpu_enabled);
291
292 return ret;
293} 176}
294 177
295static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 178static int ctr_crypt(struct skcipher_request *req)
296 struct scatterlist *src, unsigned int nbytes)
297{ 179{
298 struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 180 return glue_ctr_req_128bit(&camellia_ctr, req);
299 be128 buf[CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS];
300 struct crypt_priv crypt_ctx = {
301 .ctx = &ctx->camellia_ctx,
302 .fpu_enabled = false,
303 };
304 struct lrw_crypt_req req = {
305 .tbuf = buf,
306 .tbuflen = sizeof(buf),
307
308 .table_ctx = &ctx->lrw_table,
309 .crypt_ctx = &crypt_ctx,
310 .crypt_fn = decrypt_callback,
311 };
312 int ret;
313
314 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
315 ret = lrw_crypt(desc, dst, src, nbytes, &req);
316 camellia_fpu_end(crypt_ctx.fpu_enabled);
317
318 return ret;
319} 181}
320 182
321static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 183static int xts_encrypt(struct skcipher_request *req)
322 struct scatterlist *src, unsigned int nbytes)
323{ 184{
324 struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 185 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
186 struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
325 187
326 return glue_xts_crypt_128bit(&camellia_enc_xts, desc, dst, src, nbytes, 188 return glue_xts_req_128bit(&camellia_enc_xts, req,
327 XTS_TWEAK_CAST(camellia_enc_blk), 189 XTS_TWEAK_CAST(camellia_enc_blk),
328 &ctx->tweak_ctx, &ctx->crypt_ctx); 190 &ctx->tweak_ctx, &ctx->crypt_ctx);
329} 191}
330 192
331static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 193static int xts_decrypt(struct skcipher_request *req)
332 struct scatterlist *src, unsigned int nbytes)
333{ 194{
334 struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 195 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
196 struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
335 197
336 return glue_xts_crypt_128bit(&camellia_dec_xts, desc, dst, src, nbytes, 198 return glue_xts_req_128bit(&camellia_dec_xts, req,
337 XTS_TWEAK_CAST(camellia_enc_blk), 199 XTS_TWEAK_CAST(camellia_enc_blk),
338 &ctx->tweak_ctx, &ctx->crypt_ctx); 200 &ctx->tweak_ctx, &ctx->crypt_ctx);
339} 201}
340 202
341static struct crypto_alg cmll_algs[10] = { { 203static struct skcipher_alg camellia_algs[] = {
342 .cra_name = "__ecb-camellia-aesni-avx2", 204 {
343 .cra_driver_name = "__driver-ecb-camellia-aesni-avx2", 205 .base.cra_name = "__ecb(camellia)",
344 .cra_priority = 0, 206 .base.cra_driver_name = "__ecb-camellia-aesni-avx2",
345 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | 207 .base.cra_priority = 500,
346 CRYPTO_ALG_INTERNAL, 208 .base.cra_flags = CRYPTO_ALG_INTERNAL,
347 .cra_blocksize = CAMELLIA_BLOCK_SIZE, 209 .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
348 .cra_ctxsize = sizeof(struct camellia_ctx), 210 .base.cra_ctxsize = sizeof(struct camellia_ctx),
349 .cra_alignmask = 0, 211 .base.cra_module = THIS_MODULE,
350 .cra_type = &crypto_blkcipher_type, 212 .min_keysize = CAMELLIA_MIN_KEY_SIZE,
351 .cra_module = THIS_MODULE, 213 .max_keysize = CAMELLIA_MAX_KEY_SIZE,
352 .cra_u = { 214 .setkey = camellia_setkey,
353 .blkcipher = { 215 .encrypt = ecb_encrypt,
354 .min_keysize = CAMELLIA_MIN_KEY_SIZE, 216 .decrypt = ecb_decrypt,
355 .max_keysize = CAMELLIA_MAX_KEY_SIZE, 217 }, {
356 .setkey = camellia_setkey, 218 .base.cra_name = "__cbc(camellia)",
357 .encrypt = ecb_encrypt, 219 .base.cra_driver_name = "__cbc-camellia-aesni-avx2",
358 .decrypt = ecb_decrypt, 220 .base.cra_priority = 500,
359 }, 221 .base.cra_flags = CRYPTO_ALG_INTERNAL,
360 }, 222 .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
361}, { 223 .base.cra_ctxsize = sizeof(struct camellia_ctx),
362 .cra_name = "__cbc-camellia-aesni-avx2", 224 .base.cra_module = THIS_MODULE,
363 .cra_driver_name = "__driver-cbc-camellia-aesni-avx2", 225 .min_keysize = CAMELLIA_MIN_KEY_SIZE,
364 .cra_priority = 0, 226 .max_keysize = CAMELLIA_MAX_KEY_SIZE,
365 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | 227 .ivsize = CAMELLIA_BLOCK_SIZE,
366 CRYPTO_ALG_INTERNAL, 228 .setkey = camellia_setkey,
367 .cra_blocksize = CAMELLIA_BLOCK_SIZE, 229 .encrypt = cbc_encrypt,
368 .cra_ctxsize = sizeof(struct camellia_ctx), 230 .decrypt = cbc_decrypt,
369 .cra_alignmask = 0, 231 }, {
370 .cra_type = &crypto_blkcipher_type, 232 .base.cra_name = "__ctr(camellia)",
371 .cra_module = THIS_MODULE, 233 .base.cra_driver_name = "__ctr-camellia-aesni-avx2",
372 .cra_u = { 234 .base.cra_priority = 500,
373 .blkcipher = { 235 .base.cra_flags = CRYPTO_ALG_INTERNAL,
374 .min_keysize = CAMELLIA_MIN_KEY_SIZE, 236 .base.cra_blocksize = 1,
375 .max_keysize = CAMELLIA_MAX_KEY_SIZE, 237 .base.cra_ctxsize = sizeof(struct camellia_ctx),
376 .setkey = camellia_setkey, 238 .base.cra_module = THIS_MODULE,
377 .encrypt = cbc_encrypt, 239 .min_keysize = CAMELLIA_MIN_KEY_SIZE,
378 .decrypt = cbc_decrypt, 240 .max_keysize = CAMELLIA_MAX_KEY_SIZE,
379 }, 241 .ivsize = CAMELLIA_BLOCK_SIZE,
380 }, 242 .chunksize = CAMELLIA_BLOCK_SIZE,
381}, { 243 .setkey = camellia_setkey,
382 .cra_name = "__ctr-camellia-aesni-avx2", 244 .encrypt = ctr_crypt,
383 .cra_driver_name = "__driver-ctr-camellia-aesni-avx2", 245 .decrypt = ctr_crypt,
384 .cra_priority = 0, 246 }, {
385 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | 247 .base.cra_name = "__xts(camellia)",
386 CRYPTO_ALG_INTERNAL, 248 .base.cra_driver_name = "__xts-camellia-aesni-avx2",
387 .cra_blocksize = 1, 249 .base.cra_priority = 500,
388 .cra_ctxsize = sizeof(struct camellia_ctx), 250 .base.cra_flags = CRYPTO_ALG_INTERNAL,
389 .cra_alignmask = 0, 251 .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
390 .cra_type = &crypto_blkcipher_type, 252 .base.cra_ctxsize = sizeof(struct camellia_xts_ctx),
391 .cra_module = THIS_MODULE, 253 .base.cra_module = THIS_MODULE,
392 .cra_u = { 254 .min_keysize = 2 * CAMELLIA_MIN_KEY_SIZE,
393 .blkcipher = { 255 .max_keysize = 2 * CAMELLIA_MAX_KEY_SIZE,
394 .min_keysize = CAMELLIA_MIN_KEY_SIZE, 256 .ivsize = CAMELLIA_BLOCK_SIZE,
395 .max_keysize = CAMELLIA_MAX_KEY_SIZE, 257 .setkey = xts_camellia_setkey,
396 .ivsize = CAMELLIA_BLOCK_SIZE, 258 .encrypt = xts_encrypt,
397 .setkey = camellia_setkey, 259 .decrypt = xts_decrypt,
398 .encrypt = ctr_crypt,
399 .decrypt = ctr_crypt,
400 },
401 },
402}, {
403 .cra_name = "__lrw-camellia-aesni-avx2",
404 .cra_driver_name = "__driver-lrw-camellia-aesni-avx2",
405 .cra_priority = 0,
406 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
407 CRYPTO_ALG_INTERNAL,
408 .cra_blocksize = CAMELLIA_BLOCK_SIZE,
409 .cra_ctxsize = sizeof(struct camellia_lrw_ctx),
410 .cra_alignmask = 0,
411 .cra_type = &crypto_blkcipher_type,
412 .cra_module = THIS_MODULE,
413 .cra_exit = lrw_camellia_exit_tfm,
414 .cra_u = {
415 .blkcipher = {
416 .min_keysize = CAMELLIA_MIN_KEY_SIZE +
417 CAMELLIA_BLOCK_SIZE,
418 .max_keysize = CAMELLIA_MAX_KEY_SIZE +
419 CAMELLIA_BLOCK_SIZE,
420 .ivsize = CAMELLIA_BLOCK_SIZE,
421 .setkey = lrw_camellia_setkey,
422 .encrypt = lrw_encrypt,
423 .decrypt = lrw_decrypt,
424 },
425 },
426}, {
427 .cra_name = "__xts-camellia-aesni-avx2",
428 .cra_driver_name = "__driver-xts-camellia-aesni-avx2",
429 .cra_priority = 0,
430 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
431 CRYPTO_ALG_INTERNAL,
432 .cra_blocksize = CAMELLIA_BLOCK_SIZE,
433 .cra_ctxsize = sizeof(struct camellia_xts_ctx),
434 .cra_alignmask = 0,
435 .cra_type = &crypto_blkcipher_type,
436 .cra_module = THIS_MODULE,
437 .cra_u = {
438 .blkcipher = {
439 .min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
440 .max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
441 .ivsize = CAMELLIA_BLOCK_SIZE,
442 .setkey = xts_camellia_setkey,
443 .encrypt = xts_encrypt,
444 .decrypt = xts_decrypt,
445 },
446 },
447}, {
448 .cra_name = "ecb(camellia)",
449 .cra_driver_name = "ecb-camellia-aesni-avx2",
450 .cra_priority = 500,
451 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
452 .cra_blocksize = CAMELLIA_BLOCK_SIZE,
453 .cra_ctxsize = sizeof(struct async_helper_ctx),
454 .cra_alignmask = 0,
455 .cra_type = &crypto_ablkcipher_type,
456 .cra_module = THIS_MODULE,
457 .cra_init = ablk_init,
458 .cra_exit = ablk_exit,
459 .cra_u = {
460 .ablkcipher = {
461 .min_keysize = CAMELLIA_MIN_KEY_SIZE,
462 .max_keysize = CAMELLIA_MAX_KEY_SIZE,
463 .setkey = ablk_set_key,
464 .encrypt = ablk_encrypt,
465 .decrypt = ablk_decrypt,
466 },
467 },
468}, {
469 .cra_name = "cbc(camellia)",
470 .cra_driver_name = "cbc-camellia-aesni-avx2",
471 .cra_priority = 500,
472 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
473 .cra_blocksize = CAMELLIA_BLOCK_SIZE,
474 .cra_ctxsize = sizeof(struct async_helper_ctx),
475 .cra_alignmask = 0,
476 .cra_type = &crypto_ablkcipher_type,
477 .cra_module = THIS_MODULE,
478 .cra_init = ablk_init,
479 .cra_exit = ablk_exit,
480 .cra_u = {
481 .ablkcipher = {
482 .min_keysize = CAMELLIA_MIN_KEY_SIZE,
483 .max_keysize = CAMELLIA_MAX_KEY_SIZE,
484 .ivsize = CAMELLIA_BLOCK_SIZE,
485 .setkey = ablk_set_key,
486 .encrypt = __ablk_encrypt,
487 .decrypt = ablk_decrypt,
488 },
489 },
490}, {
491 .cra_name = "ctr(camellia)",
492 .cra_driver_name = "ctr-camellia-aesni-avx2",
493 .cra_priority = 500,
494 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
495 .cra_blocksize = 1,
496 .cra_ctxsize = sizeof(struct async_helper_ctx),
497 .cra_alignmask = 0,
498 .cra_type = &crypto_ablkcipher_type,
499 .cra_module = THIS_MODULE,
500 .cra_init = ablk_init,
501 .cra_exit = ablk_exit,
502 .cra_u = {
503 .ablkcipher = {
504 .min_keysize = CAMELLIA_MIN_KEY_SIZE,
505 .max_keysize = CAMELLIA_MAX_KEY_SIZE,
506 .ivsize = CAMELLIA_BLOCK_SIZE,
507 .setkey = ablk_set_key,
508 .encrypt = ablk_encrypt,
509 .decrypt = ablk_encrypt,
510 .geniv = "chainiv",
511 },
512 },
513}, {
514 .cra_name = "lrw(camellia)",
515 .cra_driver_name = "lrw-camellia-aesni-avx2",
516 .cra_priority = 500,
517 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
518 .cra_blocksize = CAMELLIA_BLOCK_SIZE,
519 .cra_ctxsize = sizeof(struct async_helper_ctx),
520 .cra_alignmask = 0,
521 .cra_type = &crypto_ablkcipher_type,
522 .cra_module = THIS_MODULE,
523 .cra_init = ablk_init,
524 .cra_exit = ablk_exit,
525 .cra_u = {
526 .ablkcipher = {
527 .min_keysize = CAMELLIA_MIN_KEY_SIZE +
528 CAMELLIA_BLOCK_SIZE,
529 .max_keysize = CAMELLIA_MAX_KEY_SIZE +
530 CAMELLIA_BLOCK_SIZE,
531 .ivsize = CAMELLIA_BLOCK_SIZE,
532 .setkey = ablk_set_key,
533 .encrypt = ablk_encrypt,
534 .decrypt = ablk_decrypt,
535 },
536 },
537}, {
538 .cra_name = "xts(camellia)",
539 .cra_driver_name = "xts-camellia-aesni-avx2",
540 .cra_priority = 500,
541 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
542 .cra_blocksize = CAMELLIA_BLOCK_SIZE,
543 .cra_ctxsize = sizeof(struct async_helper_ctx),
544 .cra_alignmask = 0,
545 .cra_type = &crypto_ablkcipher_type,
546 .cra_module = THIS_MODULE,
547 .cra_init = ablk_init,
548 .cra_exit = ablk_exit,
549 .cra_u = {
550 .ablkcipher = {
551 .min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
552 .max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
553 .ivsize = CAMELLIA_BLOCK_SIZE,
554 .setkey = ablk_set_key,
555 .encrypt = ablk_encrypt,
556 .decrypt = ablk_decrypt,
557 },
558 }, 260 },
559} }; 261};
262
263static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)];
560 264
561static int __init camellia_aesni_init(void) 265static int __init camellia_aesni_init(void)
562{ 266{
@@ -576,12 +280,15 @@ static int __init camellia_aesni_init(void)
576 return -ENODEV; 280 return -ENODEV;
577 } 281 }
578 282
579 return crypto_register_algs(cmll_algs, ARRAY_SIZE(cmll_algs)); 283 return simd_register_skciphers_compat(camellia_algs,
284 ARRAY_SIZE(camellia_algs),
285 camellia_simd_algs);
580} 286}
581 287
582static void __exit camellia_aesni_fini(void) 288static void __exit camellia_aesni_fini(void)
583{ 289{
584 crypto_unregister_algs(cmll_algs, ARRAY_SIZE(cmll_algs)); 290 simd_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs),
291 camellia_simd_algs);
585} 292}
586 293
587module_init(camellia_aesni_init); 294module_init(camellia_aesni_init);
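
The removed encrypt_callback()/decrypt_callback() in the hunk above batch their work greedily by descending SIMD width: one 32-way AVX2 pass when enough data is queued, then a 16-way pass, then the 2-way assembler path, and finally single blocks. Those callbacks go away together with the lrw_crypt()-based requests, since the skcipher glue helpers now drive the walk themselves. The standalone sketch below illustrates only that batching shape; the toy_enc_*way() helpers and the XOR "cipher" are illustrative stand-ins, not kernel routines.

/*
 * Toy sketch of the greedy batching pattern used by the removed
 * encrypt_callback(): consume the buffer with the widest available
 * batch first, then fall back to narrower ones.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16

/* Stand-in "cipher": XOR each byte of n blocks with a constant. */
static void toy_enc_nway(uint8_t *buf, size_t nblocks)
{
    for (size_t i = 0; i < nblocks * BLOCK_SIZE; i++)
        buf[i] ^= 0x5a;
}

static void toy_enc_32way(uint8_t *buf) { toy_enc_nway(buf, 32); }
static void toy_enc_16way(uint8_t *buf) { toy_enc_nway(buf, 16); }
static void toy_enc_2way(uint8_t *buf)  { toy_enc_nway(buf, 2); }
static void toy_enc_1way(uint8_t *buf)  { toy_enc_nway(buf, 1); }

static void toy_ecb_encrypt(uint8_t *buf, size_t nbytes)
{
    /* Widest batch first; the narrower loops mop up the remainder. */
    if (nbytes >= 32 * BLOCK_SIZE) {
        toy_enc_32way(buf);
        buf += 32 * BLOCK_SIZE;
        nbytes -= 32 * BLOCK_SIZE;
    }
    if (nbytes >= 16 * BLOCK_SIZE) {
        toy_enc_16way(buf);
        buf += 16 * BLOCK_SIZE;
        nbytes -= 16 * BLOCK_SIZE;
    }
    while (nbytes >= 2 * BLOCK_SIZE) {
        toy_enc_2way(buf);
        buf += 2 * BLOCK_SIZE;
        nbytes -= 2 * BLOCK_SIZE;
    }
    while (nbytes >= BLOCK_SIZE) {
        toy_enc_1way(buf);
        buf += BLOCK_SIZE;
        nbytes -= BLOCK_SIZE;
    }
}

int main(void)
{
    uint8_t buf[53 * BLOCK_SIZE];

    memset(buf, 0, sizeof(buf));
    toy_ecb_encrypt(buf, sizeof(buf));
    printf("first byte after toy encryption: 0x%02x\n", buf[0]);
    return 0;
}

Keeping the wide paths as plain ifs mirrors the removed callbacks, which were only ever handed bounded chunks; because the narrower loops consume any remainder, the sketch stays correct for arbitrary sizes.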
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index d96429da88eb..d09f6521466a 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -10,18 +10,15 @@
10 * 10 *
11 */ 11 */
12 12
13#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/crypto.h>
16#include <linux/err.h>
17#include <crypto/ablk_helper.h>
18#include <crypto/algapi.h>
19#include <crypto/ctr.h>
20#include <crypto/lrw.h>
21#include <crypto/xts.h>
22#include <asm/fpu/api.h>
23#include <asm/crypto/camellia.h> 13#include <asm/crypto/camellia.h>
24#include <asm/crypto/glue_helper.h> 14#include <asm/crypto/glue_helper.h>
15#include <crypto/algapi.h>
16#include <crypto/internal/simd.h>
17#include <crypto/xts.h>
18#include <linux/crypto.h>
19#include <linux/err.h>
20#include <linux/module.h>
21#include <linux/types.h>
25 22
26#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 23#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
27 24
@@ -154,401 +151,142 @@ static const struct common_glue_ctx camellia_dec_xts = {
154 } } 151 } }
155}; 152};
156 153
157static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 154static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
158 struct scatterlist *src, unsigned int nbytes) 155 unsigned int keylen)
159{
160 return glue_ecb_crypt_128bit(&camellia_enc, desc, dst, src, nbytes);
161}
162
163static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
164 struct scatterlist *src, unsigned int nbytes)
165{ 156{
166 return glue_ecb_crypt_128bit(&camellia_dec, desc, dst, src, nbytes); 157 return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen,
158 &tfm->base.crt_flags);
167} 159}
168 160
169static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 161static int ecb_encrypt(struct skcipher_request *req)
170 struct scatterlist *src, unsigned int nbytes)
171{ 162{
172 return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(camellia_enc_blk), desc, 163 return glue_ecb_req_128bit(&camellia_enc, req);
173 dst, src, nbytes);
174} 164}
175 165
176static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 166static int ecb_decrypt(struct skcipher_request *req)
177 struct scatterlist *src, unsigned int nbytes)
178{ 167{
179 return glue_cbc_decrypt_128bit(&camellia_dec_cbc, desc, dst, src, 168 return glue_ecb_req_128bit(&camellia_dec, req);
180 nbytes);
181} 169}
182 170
183static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, 171static int cbc_encrypt(struct skcipher_request *req)
184 struct scatterlist *src, unsigned int nbytes)
185{ 172{
186 return glue_ctr_crypt_128bit(&camellia_ctr, desc, dst, src, nbytes); 173 return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
174 req);
187} 175}
188 176
189static inline bool camellia_fpu_begin(bool fpu_enabled, unsigned int nbytes) 177static int cbc_decrypt(struct skcipher_request *req)
190{ 178{
191 return glue_fpu_begin(CAMELLIA_BLOCK_SIZE, 179 return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req);
192 CAMELLIA_AESNI_PARALLEL_BLOCKS, NULL, fpu_enabled,
193 nbytes);
194} 180}
195 181
196static inline void camellia_fpu_end(bool fpu_enabled) 182static int ctr_crypt(struct skcipher_request *req)
197{ 183{
198 glue_fpu_end(fpu_enabled); 184 return glue_ctr_req_128bit(&camellia_ctr, req);
199} 185}
200 186
201static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key, 187int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
202 unsigned int key_len) 188 unsigned int keylen)
203{ 189{
204 return __camellia_setkey(crypto_tfm_ctx(tfm), in_key, key_len, 190 struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
205 &tfm->crt_flags); 191 u32 *flags = &tfm->base.crt_flags;
192 int err;
193
194 err = xts_verify_key(tfm, key, keylen);
195 if (err)
196 return err;
197
198 /* first half of xts-key is for crypt */
199 err = __camellia_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
200 if (err)
201 return err;
202
203 /* second half of xts-key is for tweak */
204 return __camellia_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
205 flags);
206} 206}
207EXPORT_SYMBOL_GPL(xts_camellia_setkey);
207 208
208struct crypt_priv { 209static int xts_encrypt(struct skcipher_request *req)
209 struct camellia_ctx *ctx;
210 bool fpu_enabled;
211};
212
213static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
214{ 210{
215 const unsigned int bsize = CAMELLIA_BLOCK_SIZE; 211 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
216 struct crypt_priv *ctx = priv; 212 struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
217 int i;
218
219 ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);
220
221 if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
222 camellia_ecb_enc_16way(ctx->ctx, srcdst, srcdst);
223 srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
224 nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
225 }
226
227 while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
228 camellia_enc_blk_2way(ctx->ctx, srcdst, srcdst);
229 srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
230 nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
231 }
232 213
233 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize) 214 return glue_xts_req_128bit(&camellia_enc_xts, req,
234 camellia_enc_blk(ctx->ctx, srcdst, srcdst); 215 XTS_TWEAK_CAST(camellia_enc_blk),
216 &ctx->tweak_ctx, &ctx->crypt_ctx);
235} 217}
236 218
237static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) 219static int xts_decrypt(struct skcipher_request *req)
238{ 220{
239 const unsigned int bsize = CAMELLIA_BLOCK_SIZE; 221 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
240 struct crypt_priv *ctx = priv; 222 struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
241 int i;
242
243 ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);
244
245 if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
246 camellia_ecb_dec_16way(ctx->ctx, srcdst, srcdst);
247 srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
248 nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
249 }
250 223
251 while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) { 224 return glue_xts_req_128bit(&camellia_dec_xts, req,
252 camellia_dec_blk_2way(ctx->ctx, srcdst, srcdst); 225 XTS_TWEAK_CAST(camellia_enc_blk),
253 srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS; 226 &ctx->tweak_ctx, &ctx->crypt_ctx);
254 nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
255 }
256
257 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
258 camellia_dec_blk(ctx->ctx, srcdst, srcdst);
259} 227}
260 228
261static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 229static struct skcipher_alg camellia_algs[] = {
262 struct scatterlist *src, unsigned int nbytes) 230 {
263{ 231 .base.cra_name = "__ecb(camellia)",
264 struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 232 .base.cra_driver_name = "__ecb-camellia-aesni",
265 be128 buf[CAMELLIA_AESNI_PARALLEL_BLOCKS]; 233 .base.cra_priority = 400,
266 struct crypt_priv crypt_ctx = { 234 .base.cra_flags = CRYPTO_ALG_INTERNAL,
267 .ctx = &ctx->camellia_ctx, 235 .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
268 .fpu_enabled = false, 236 .base.cra_ctxsize = sizeof(struct camellia_ctx),
269 }; 237 .base.cra_module = THIS_MODULE,
270 struct lrw_crypt_req req = { 238 .min_keysize = CAMELLIA_MIN_KEY_SIZE,
271 .tbuf = buf, 239 .max_keysize = CAMELLIA_MAX_KEY_SIZE,
272 .tbuflen = sizeof(buf), 240 .setkey = camellia_setkey,
273 241 .encrypt = ecb_encrypt,
274 .table_ctx = &ctx->lrw_table, 242 .decrypt = ecb_decrypt,
275 .crypt_ctx = &crypt_ctx, 243 }, {
276 .crypt_fn = encrypt_callback, 244 .base.cra_name = "__cbc(camellia)",
277 }; 245 .base.cra_driver_name = "__cbc-camellia-aesni",
278 int ret; 246 .base.cra_priority = 400,
279 247 .base.cra_flags = CRYPTO_ALG_INTERNAL,
280 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; 248 .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
281 ret = lrw_crypt(desc, dst, src, nbytes, &req); 249 .base.cra_ctxsize = sizeof(struct camellia_ctx),
282 camellia_fpu_end(crypt_ctx.fpu_enabled); 250 .base.cra_module = THIS_MODULE,
283 251 .min_keysize = CAMELLIA_MIN_KEY_SIZE,
284 return ret; 252 .max_keysize = CAMELLIA_MAX_KEY_SIZE,
285} 253 .ivsize = CAMELLIA_BLOCK_SIZE,
286 254 .setkey = camellia_setkey,
287static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 255 .encrypt = cbc_encrypt,
288 struct scatterlist *src, unsigned int nbytes) 256 .decrypt = cbc_decrypt,
289{ 257 }, {
290 struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 258 .base.cra_name = "__ctr(camellia)",
291 be128 buf[CAMELLIA_AESNI_PARALLEL_BLOCKS]; 259 .base.cra_driver_name = "__ctr-camellia-aesni",
292 struct crypt_priv crypt_ctx = { 260 .base.cra_priority = 400,
293 .ctx = &ctx->camellia_ctx, 261 .base.cra_flags = CRYPTO_ALG_INTERNAL,
294 .fpu_enabled = false, 262 .base.cra_blocksize = 1,
295 }; 263 .base.cra_ctxsize = sizeof(struct camellia_ctx),
296 struct lrw_crypt_req req = { 264 .base.cra_module = THIS_MODULE,
297 .tbuf = buf, 265 .min_keysize = CAMELLIA_MIN_KEY_SIZE,
298 .tbuflen = sizeof(buf), 266 .max_keysize = CAMELLIA_MAX_KEY_SIZE,
299 267 .ivsize = CAMELLIA_BLOCK_SIZE,
300 .table_ctx = &ctx->lrw_table, 268 .chunksize = CAMELLIA_BLOCK_SIZE,
301 .crypt_ctx = &crypt_ctx, 269 .setkey = camellia_setkey,
302 .crypt_fn = decrypt_callback, 270 .encrypt = ctr_crypt,
303 }; 271 .decrypt = ctr_crypt,
304 int ret; 272 }, {
305 273 .base.cra_name = "__xts(camellia)",
306 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; 274 .base.cra_driver_name = "__xts-camellia-aesni",
307 ret = lrw_crypt(desc, dst, src, nbytes, &req); 275 .base.cra_priority = 400,
308 camellia_fpu_end(crypt_ctx.fpu_enabled); 276 .base.cra_flags = CRYPTO_ALG_INTERNAL,
309 277 .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
310 return ret; 278 .base.cra_ctxsize = sizeof(struct camellia_xts_ctx),
311} 279 .base.cra_module = THIS_MODULE,
312 280 .min_keysize = 2 * CAMELLIA_MIN_KEY_SIZE,
313static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 281 .max_keysize = 2 * CAMELLIA_MAX_KEY_SIZE,
314 struct scatterlist *src, unsigned int nbytes) 282 .ivsize = CAMELLIA_BLOCK_SIZE,
315{ 283 .setkey = xts_camellia_setkey,
316 struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 284 .encrypt = xts_encrypt,
317 285 .decrypt = xts_decrypt,
318 return glue_xts_crypt_128bit(&camellia_enc_xts, desc, dst, src, nbytes,
319 XTS_TWEAK_CAST(camellia_enc_blk),
320 &ctx->tweak_ctx, &ctx->crypt_ctx);
321}
322
323static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
324 struct scatterlist *src, unsigned int nbytes)
325{
326 struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
327
328 return glue_xts_crypt_128bit(&camellia_dec_xts, desc, dst, src, nbytes,
329 XTS_TWEAK_CAST(camellia_enc_blk),
330 &ctx->tweak_ctx, &ctx->crypt_ctx);
331}
332
333static struct crypto_alg cmll_algs[10] = { {
334 .cra_name = "__ecb-camellia-aesni",
335 .cra_driver_name = "__driver-ecb-camellia-aesni",
336 .cra_priority = 0,
337 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
338 CRYPTO_ALG_INTERNAL,
339 .cra_blocksize = CAMELLIA_BLOCK_SIZE,
340 .cra_ctxsize = sizeof(struct camellia_ctx),
341 .cra_alignmask = 0,
342 .cra_type = &crypto_blkcipher_type,
343 .cra_module = THIS_MODULE,
344 .cra_u = {
345 .blkcipher = {
346 .min_keysize = CAMELLIA_MIN_KEY_SIZE,
347 .max_keysize = CAMELLIA_MAX_KEY_SIZE,
348 .setkey = camellia_setkey,
349 .encrypt = ecb_encrypt,
350 .decrypt = ecb_decrypt,
351 },
352 },
353}, {
354 .cra_name = "__cbc-camellia-aesni",
355 .cra_driver_name = "__driver-cbc-camellia-aesni",
356 .cra_priority = 0,
357 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
358 CRYPTO_ALG_INTERNAL,
359 .cra_blocksize = CAMELLIA_BLOCK_SIZE,
360 .cra_ctxsize = sizeof(struct camellia_ctx),
361 .cra_alignmask = 0,
362 .cra_type = &crypto_blkcipher_type,
363 .cra_module = THIS_MODULE,
364 .cra_u = {
365 .blkcipher = {
366 .min_keysize = CAMELLIA_MIN_KEY_SIZE,
367 .max_keysize = CAMELLIA_MAX_KEY_SIZE,
368 .setkey = camellia_setkey,
369 .encrypt = cbc_encrypt,
370 .decrypt = cbc_decrypt,
371 },
372 },
373}, {
374 .cra_name = "__ctr-camellia-aesni",
375 .cra_driver_name = "__driver-ctr-camellia-aesni",
376 .cra_priority = 0,
377 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
378 CRYPTO_ALG_INTERNAL,
379 .cra_blocksize = 1,
380 .cra_ctxsize = sizeof(struct camellia_ctx),
381 .cra_alignmask = 0,
382 .cra_type = &crypto_blkcipher_type,
383 .cra_module = THIS_MODULE,
384 .cra_u = {
385 .blkcipher = {
386 .min_keysize = CAMELLIA_MIN_KEY_SIZE,
387 .max_keysize = CAMELLIA_MAX_KEY_SIZE,
388 .ivsize = CAMELLIA_BLOCK_SIZE,
389 .setkey = camellia_setkey,
390 .encrypt = ctr_crypt,
391 .decrypt = ctr_crypt,
392 },
393 },
394}, {
395 .cra_name = "__lrw-camellia-aesni",
396 .cra_driver_name = "__driver-lrw-camellia-aesni",
397 .cra_priority = 0,
398 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
399 CRYPTO_ALG_INTERNAL,
400 .cra_blocksize = CAMELLIA_BLOCK_SIZE,
401 .cra_ctxsize = sizeof(struct camellia_lrw_ctx),
402 .cra_alignmask = 0,
403 .cra_type = &crypto_blkcipher_type,
404 .cra_module = THIS_MODULE,
405 .cra_exit = lrw_camellia_exit_tfm,
406 .cra_u = {
407 .blkcipher = {
408 .min_keysize = CAMELLIA_MIN_KEY_SIZE +
409 CAMELLIA_BLOCK_SIZE,
410 .max_keysize = CAMELLIA_MAX_KEY_SIZE +
411 CAMELLIA_BLOCK_SIZE,
412 .ivsize = CAMELLIA_BLOCK_SIZE,
413 .setkey = lrw_camellia_setkey,
414 .encrypt = lrw_encrypt,
415 .decrypt = lrw_decrypt,
416 },
417 },
418}, {
419 .cra_name = "__xts-camellia-aesni",
420 .cra_driver_name = "__driver-xts-camellia-aesni",
421 .cra_priority = 0,
422 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
423 CRYPTO_ALG_INTERNAL,
424 .cra_blocksize = CAMELLIA_BLOCK_SIZE,
425 .cra_ctxsize = sizeof(struct camellia_xts_ctx),
426 .cra_alignmask = 0,
427 .cra_type = &crypto_blkcipher_type,
428 .cra_module = THIS_MODULE,
429 .cra_u = {
430 .blkcipher = {
431 .min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
432 .max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
433 .ivsize = CAMELLIA_BLOCK_SIZE,
434 .setkey = xts_camellia_setkey,
435 .encrypt = xts_encrypt,
436 .decrypt = xts_decrypt,
437 },
438 },
439}, {
440 .cra_name = "ecb(camellia)",
441 .cra_driver_name = "ecb-camellia-aesni",
442 .cra_priority = 400,
443 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
444 .cra_blocksize = CAMELLIA_BLOCK_SIZE,
445 .cra_ctxsize = sizeof(struct async_helper_ctx),
446 .cra_alignmask = 0,
447 .cra_type = &crypto_ablkcipher_type,
448 .cra_module = THIS_MODULE,
449 .cra_init = ablk_init,
450 .cra_exit = ablk_exit,
451 .cra_u = {
452 .ablkcipher = {
453 .min_keysize = CAMELLIA_MIN_KEY_SIZE,
454 .max_keysize = CAMELLIA_MAX_KEY_SIZE,
455 .setkey = ablk_set_key,
456 .encrypt = ablk_encrypt,
457 .decrypt = ablk_decrypt,
458 },
459 },
460}, {
461 .cra_name = "cbc(camellia)",
462 .cra_driver_name = "cbc-camellia-aesni",
463 .cra_priority = 400,
464 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
465 .cra_blocksize = CAMELLIA_BLOCK_SIZE,
466 .cra_ctxsize = sizeof(struct async_helper_ctx),
467 .cra_alignmask = 0,
468 .cra_type = &crypto_ablkcipher_type,
469 .cra_module = THIS_MODULE,
470 .cra_init = ablk_init,
471 .cra_exit = ablk_exit,
472 .cra_u = {
473 .ablkcipher = {
474 .min_keysize = CAMELLIA_MIN_KEY_SIZE,
475 .max_keysize = CAMELLIA_MAX_KEY_SIZE,
476 .ivsize = CAMELLIA_BLOCK_SIZE,
477 .setkey = ablk_set_key,
478 .encrypt = __ablk_encrypt,
479 .decrypt = ablk_decrypt,
480 },
481 },
482}, {
483 .cra_name = "ctr(camellia)",
484 .cra_driver_name = "ctr-camellia-aesni",
485 .cra_priority = 400,
486 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
487 .cra_blocksize = 1,
488 .cra_ctxsize = sizeof(struct async_helper_ctx),
489 .cra_alignmask = 0,
490 .cra_type = &crypto_ablkcipher_type,
491 .cra_module = THIS_MODULE,
492 .cra_init = ablk_init,
493 .cra_exit = ablk_exit,
494 .cra_u = {
495 .ablkcipher = {
496 .min_keysize = CAMELLIA_MIN_KEY_SIZE,
497 .max_keysize = CAMELLIA_MAX_KEY_SIZE,
498 .ivsize = CAMELLIA_BLOCK_SIZE,
499 .setkey = ablk_set_key,
500 .encrypt = ablk_encrypt,
501 .decrypt = ablk_encrypt,
502 .geniv = "chainiv",
503 },
504 },
505}, {
506 .cra_name = "lrw(camellia)",
507 .cra_driver_name = "lrw-camellia-aesni",
508 .cra_priority = 400,
509 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
510 .cra_blocksize = CAMELLIA_BLOCK_SIZE,
511 .cra_ctxsize = sizeof(struct async_helper_ctx),
512 .cra_alignmask = 0,
513 .cra_type = &crypto_ablkcipher_type,
514 .cra_module = THIS_MODULE,
515 .cra_init = ablk_init,
516 .cra_exit = ablk_exit,
517 .cra_u = {
518 .ablkcipher = {
519 .min_keysize = CAMELLIA_MIN_KEY_SIZE +
520 CAMELLIA_BLOCK_SIZE,
521 .max_keysize = CAMELLIA_MAX_KEY_SIZE +
522 CAMELLIA_BLOCK_SIZE,
523 .ivsize = CAMELLIA_BLOCK_SIZE,
524 .setkey = ablk_set_key,
525 .encrypt = ablk_encrypt,
526 .decrypt = ablk_decrypt,
527 },
528 },
529}, {
530 .cra_name = "xts(camellia)",
531 .cra_driver_name = "xts-camellia-aesni",
532 .cra_priority = 400,
533 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
534 .cra_blocksize = CAMELLIA_BLOCK_SIZE,
535 .cra_ctxsize = sizeof(struct async_helper_ctx),
536 .cra_alignmask = 0,
537 .cra_type = &crypto_ablkcipher_type,
538 .cra_module = THIS_MODULE,
539 .cra_init = ablk_init,
540 .cra_exit = ablk_exit,
541 .cra_u = {
542 .ablkcipher = {
543 .min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
544 .max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
545 .ivsize = CAMELLIA_BLOCK_SIZE,
546 .setkey = ablk_set_key,
547 .encrypt = ablk_encrypt,
548 .decrypt = ablk_decrypt,
549 },
550 }, 286 },
551} }; 287};
288
289static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)];
552 290
553static int __init camellia_aesni_init(void) 291static int __init camellia_aesni_init(void)
554{ 292{
@@ -567,12 +305,15 @@ static int __init camellia_aesni_init(void)
567 return -ENODEV; 305 return -ENODEV;
568 } 306 }
569 307
570 return crypto_register_algs(cmll_algs, ARRAY_SIZE(cmll_algs)); 308 return simd_register_skciphers_compat(camellia_algs,
309 ARRAY_SIZE(camellia_algs),
310 camellia_simd_algs);
571} 311}
572 312
573static void __exit camellia_aesni_fini(void) 313static void __exit camellia_aesni_fini(void)
574{ 314{
575 crypto_unregister_algs(cmll_algs, ARRAY_SIZE(cmll_algs)); 315 simd_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs),
316 camellia_simd_algs);
576} 317}
577 318
578module_init(camellia_aesni_init); 319module_init(camellia_aesni_init);
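
The new xts_camellia_setkey() above validates the key with xts_verify_key() and then splits it: the first half programs the data-encryption context (crypt_ctx) and the second half the tweak context (tweak_ctx). Below is a minimal sketch of that split with stand-in types and helpers (struct toy_xts_ctx, toy_setkey()); it only checks that the length is even, whereas the kernel helper applies further policy checks.

/*
 * Minimal sketch of the XTS setkey split: first half keys the data
 * cipher, second half keys the tweak cipher.  All names here are
 * illustrative stand-ins, not kernel structures.
 */
#include <stdio.h>
#include <string.h>

#define TOY_KEY_BYTES 16

struct toy_ctx {
    unsigned char key[TOY_KEY_BYTES];
};

struct toy_xts_ctx {
    struct toy_ctx crypt_ctx;   /* keys the data blocks */
    struct toy_ctx tweak_ctx;   /* keys the tweak */
};

static int toy_setkey(struct toy_ctx *ctx, const unsigned char *key,
                      unsigned int keylen)
{
    if (keylen != TOY_KEY_BYTES)
        return -1;
    memcpy(ctx->key, key, keylen);
    return 0;
}

static int toy_xts_setkey(struct toy_xts_ctx *ctx, const unsigned char *key,
                          unsigned int keylen)
{
    int err;

    /* the kernel's xts_verify_key() also rejects odd key lengths */
    if (keylen & 1)
        return -1;

    /* first half of the xts key is for crypt */
    err = toy_setkey(&ctx->crypt_ctx, key, keylen / 2);
    if (err)
        return err;

    /* second half of the xts key is for tweak */
    return toy_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
}

int main(void)
{
    struct toy_xts_ctx ctx;
    unsigned char key[2 * TOY_KEY_BYTES];

    for (unsigned int i = 0; i < sizeof(key); i++)
        key[i] = (unsigned char)i;

    if (toy_xts_setkey(&ctx, key, sizeof(key)) == 0)
        printf("crypt key[0]=0x%02x, tweak key[0]=0x%02x\n",
               ctx.crypt_ctx.key[0], ctx.tweak_ctx.key[0]);
    return 0;
}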
diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
index af4840ab2a3d..dcd5e0f71b00 100644
--- a/arch/x86/crypto/camellia_glue.c
+++ b/arch/x86/crypto/camellia_glue.c
@@ -23,15 +23,12 @@
23 * 23 *
24 */ 24 */
25 25
26#include <asm/processor.h>
27#include <asm/unaligned.h> 26#include <asm/unaligned.h>
28#include <linux/crypto.h> 27#include <linux/crypto.h>
29#include <linux/init.h> 28#include <linux/init.h>
30#include <linux/module.h> 29#include <linux/module.h>
31#include <linux/types.h> 30#include <linux/types.h>
32#include <crypto/algapi.h> 31#include <crypto/algapi.h>
33#include <crypto/lrw.h>
34#include <crypto/xts.h>
35#include <asm/crypto/camellia.h> 32#include <asm/crypto/camellia.h>
36#include <asm/crypto/glue_helper.h> 33#include <asm/crypto/glue_helper.h>
37 34
@@ -1272,13 +1269,19 @@ int __camellia_setkey(struct camellia_ctx *cctx, const unsigned char *key,
1272} 1269}
1273EXPORT_SYMBOL_GPL(__camellia_setkey); 1270EXPORT_SYMBOL_GPL(__camellia_setkey);
1274 1271
1275static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key, 1272static int camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
1276 unsigned int key_len) 1273 unsigned int key_len)
1277{ 1274{
1278 return __camellia_setkey(crypto_tfm_ctx(tfm), in_key, key_len, 1275 return __camellia_setkey(crypto_tfm_ctx(tfm), key, key_len,
1279 &tfm->crt_flags); 1276 &tfm->crt_flags);
1280} 1277}
1281 1278
1279static int camellia_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
1280 unsigned int key_len)
1281{
1282 return camellia_setkey(&tfm->base, key, key_len);
1283}
1284
1282void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src) 1285void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src)
1283{ 1286{
1284 u128 iv = *src; 1287 u128 iv = *src;
@@ -1373,188 +1376,33 @@ static const struct common_glue_ctx camellia_dec_cbc = {
1373 } } 1376 } }
1374}; 1377};
1375 1378
1376static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 1379static int ecb_encrypt(struct skcipher_request *req)
1377 struct scatterlist *src, unsigned int nbytes)
1378{
1379 return glue_ecb_crypt_128bit(&camellia_enc, desc, dst, src, nbytes);
1380}
1381
1382static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
1383 struct scatterlist *src, unsigned int nbytes)
1384{
1385 return glue_ecb_crypt_128bit(&camellia_dec, desc, dst, src, nbytes);
1386}
1387
1388static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
1389 struct scatterlist *src, unsigned int nbytes)
1390{
1391 return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(camellia_enc_blk), desc,
1392 dst, src, nbytes);
1393}
1394
1395static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
1396 struct scatterlist *src, unsigned int nbytes)
1397{
1398 return glue_cbc_decrypt_128bit(&camellia_dec_cbc, desc, dst, src,
1399 nbytes);
1400}
1401
1402static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
1403 struct scatterlist *src, unsigned int nbytes)
1404{
1405 return glue_ctr_crypt_128bit(&camellia_ctr, desc, dst, src, nbytes);
1406}
1407
1408static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
1409{
1410 const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
1411 struct camellia_ctx *ctx = priv;
1412 int i;
1413
1414 while (nbytes >= 2 * bsize) {
1415 camellia_enc_blk_2way(ctx, srcdst, srcdst);
1416 srcdst += bsize * 2;
1417 nbytes -= bsize * 2;
1418 }
1419
1420 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
1421 camellia_enc_blk(ctx, srcdst, srcdst);
1422}
1423
1424static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
1425{
1426 const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
1427 struct camellia_ctx *ctx = priv;
1428 int i;
1429
1430 while (nbytes >= 2 * bsize) {
1431 camellia_dec_blk_2way(ctx, srcdst, srcdst);
1432 srcdst += bsize * 2;
1433 nbytes -= bsize * 2;
1434 }
1435
1436 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
1437 camellia_dec_blk(ctx, srcdst, srcdst);
1438}
1439
1440int lrw_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
1441 unsigned int keylen)
1442{
1443 struct camellia_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
1444 int err;
1445
1446 err = __camellia_setkey(&ctx->camellia_ctx, key,
1447 keylen - CAMELLIA_BLOCK_SIZE,
1448 &tfm->crt_flags);
1449 if (err)
1450 return err;
1451
1452 return lrw_init_table(&ctx->lrw_table,
1453 key + keylen - CAMELLIA_BLOCK_SIZE);
1454}
1455EXPORT_SYMBOL_GPL(lrw_camellia_setkey);
1456
1457static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
1458 struct scatterlist *src, unsigned int nbytes)
1459{
1460 struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
1461 be128 buf[2 * 4];
1462 struct lrw_crypt_req req = {
1463 .tbuf = buf,
1464 .tbuflen = sizeof(buf),
1465
1466 .table_ctx = &ctx->lrw_table,
1467 .crypt_ctx = &ctx->camellia_ctx,
1468 .crypt_fn = encrypt_callback,
1469 };
1470
1471 return lrw_crypt(desc, dst, src, nbytes, &req);
1472}
1473
1474static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
1475 struct scatterlist *src, unsigned int nbytes)
1476{ 1380{
1477 struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 1381 return glue_ecb_req_128bit(&camellia_enc, req);
1478 be128 buf[2 * 4];
1479 struct lrw_crypt_req req = {
1480 .tbuf = buf,
1481 .tbuflen = sizeof(buf),
1482
1483 .table_ctx = &ctx->lrw_table,
1484 .crypt_ctx = &ctx->camellia_ctx,
1485 .crypt_fn = decrypt_callback,
1486 };
1487
1488 return lrw_crypt(desc, dst, src, nbytes, &req);
1489} 1382}
1490 1383
1491void lrw_camellia_exit_tfm(struct crypto_tfm *tfm) 1384static int ecb_decrypt(struct skcipher_request *req)
1492{ 1385{
1493 struct camellia_lrw_ctx *ctx = crypto_tfm_ctx(tfm); 1386 return glue_ecb_req_128bit(&camellia_dec, req);
1494
1495 lrw_free_table(&ctx->lrw_table);
1496} 1387}
1497EXPORT_SYMBOL_GPL(lrw_camellia_exit_tfm);
1498 1388
1499int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key, 1389static int cbc_encrypt(struct skcipher_request *req)
1500 unsigned int keylen)
1501{ 1390{
1502 struct camellia_xts_ctx *ctx = crypto_tfm_ctx(tfm); 1391 return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
1503 u32 *flags = &tfm->crt_flags; 1392 req);
1504 int err;
1505
1506 err = xts_check_key(tfm, key, keylen);
1507 if (err)
1508 return err;
1509
1510 /* first half of xts-key is for crypt */
1511 err = __camellia_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
1512 if (err)
1513 return err;
1514
1515 /* second half of xts-key is for tweak */
1516 return __camellia_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
1517 flags);
1518} 1393}
1519EXPORT_SYMBOL_GPL(xts_camellia_setkey);
1520 1394
1521static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 1395static int cbc_decrypt(struct skcipher_request *req)
1522 struct scatterlist *src, unsigned int nbytes)
1523{ 1396{
1524 struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 1397 return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req);
1525 le128 buf[2 * 4];
1526 struct xts_crypt_req req = {
1527 .tbuf = buf,
1528 .tbuflen = sizeof(buf),
1529
1530 .tweak_ctx = &ctx->tweak_ctx,
1531 .tweak_fn = XTS_TWEAK_CAST(camellia_enc_blk),
1532 .crypt_ctx = &ctx->crypt_ctx,
1533 .crypt_fn = encrypt_callback,
1534 };
1535
1536 return xts_crypt(desc, dst, src, nbytes, &req);
1537} 1398}
1538 1399
1539static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 1400static int ctr_crypt(struct skcipher_request *req)
1540 struct scatterlist *src, unsigned int nbytes)
1541{ 1401{
1542 struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 1402 return glue_ctr_req_128bit(&camellia_ctr, req);
1543 le128 buf[2 * 4];
1544 struct xts_crypt_req req = {
1545 .tbuf = buf,
1546 .tbuflen = sizeof(buf),
1547
1548 .tweak_ctx = &ctx->tweak_ctx,
1549 .tweak_fn = XTS_TWEAK_CAST(camellia_enc_blk),
1550 .crypt_ctx = &ctx->crypt_ctx,
1551 .crypt_fn = decrypt_callback,
1552 };
1553
1554 return xts_crypt(desc, dst, src, nbytes, &req);
1555} 1403}
1556 1404
1557static struct crypto_alg camellia_algs[6] = { { 1405static struct crypto_alg camellia_cipher_alg = {
1558 .cra_name = "camellia", 1406 .cra_name = "camellia",
1559 .cra_driver_name = "camellia-asm", 1407 .cra_driver_name = "camellia-asm",
1560 .cra_priority = 200, 1408 .cra_priority = 200,
@@ -1572,109 +1420,50 @@ static struct crypto_alg camellia_algs[6] = { {
1572 .cia_decrypt = camellia_decrypt 1420 .cia_decrypt = camellia_decrypt
1573 } 1421 }
1574 } 1422 }
1575}, { 1423};
1576 .cra_name = "ecb(camellia)", 1424
1577 .cra_driver_name = "ecb-camellia-asm", 1425static struct skcipher_alg camellia_skcipher_algs[] = {
1578 .cra_priority = 300, 1426 {
1579 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 1427 .base.cra_name = "ecb(camellia)",
1580 .cra_blocksize = CAMELLIA_BLOCK_SIZE, 1428 .base.cra_driver_name = "ecb-camellia-asm",
1581 .cra_ctxsize = sizeof(struct camellia_ctx), 1429 .base.cra_priority = 300,
1582 .cra_alignmask = 0, 1430 .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
1583 .cra_type = &crypto_blkcipher_type, 1431 .base.cra_ctxsize = sizeof(struct camellia_ctx),
1584 .cra_module = THIS_MODULE, 1432 .base.cra_module = THIS_MODULE,
1585 .cra_u = { 1433 .min_keysize = CAMELLIA_MIN_KEY_SIZE,
1586 .blkcipher = { 1434 .max_keysize = CAMELLIA_MAX_KEY_SIZE,
1587 .min_keysize = CAMELLIA_MIN_KEY_SIZE, 1435 .setkey = camellia_setkey_skcipher,
1588 .max_keysize = CAMELLIA_MAX_KEY_SIZE, 1436 .encrypt = ecb_encrypt,
1589 .setkey = camellia_setkey, 1437 .decrypt = ecb_decrypt,
1590 .encrypt = ecb_encrypt, 1438 }, {
1591 .decrypt = ecb_decrypt, 1439 .base.cra_name = "cbc(camellia)",
1592 }, 1440 .base.cra_driver_name = "cbc-camellia-asm",
1593 }, 1441 .base.cra_priority = 300,
1594}, { 1442 .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
1595 .cra_name = "cbc(camellia)", 1443 .base.cra_ctxsize = sizeof(struct camellia_ctx),
1596 .cra_driver_name = "cbc-camellia-asm", 1444 .base.cra_module = THIS_MODULE,
1597 .cra_priority = 300, 1445 .min_keysize = CAMELLIA_MIN_KEY_SIZE,
1598 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 1446 .max_keysize = CAMELLIA_MAX_KEY_SIZE,
1599 .cra_blocksize = CAMELLIA_BLOCK_SIZE, 1447 .ivsize = CAMELLIA_BLOCK_SIZE,
1600 .cra_ctxsize = sizeof(struct camellia_ctx), 1448 .setkey = camellia_setkey_skcipher,
1601 .cra_alignmask = 0, 1449 .encrypt = cbc_encrypt,
1602 .cra_type = &crypto_blkcipher_type, 1450 .decrypt = cbc_decrypt,
1603 .cra_module = THIS_MODULE, 1451 }, {
1604 .cra_u = { 1452 .base.cra_name = "ctr(camellia)",
1605 .blkcipher = { 1453 .base.cra_driver_name = "ctr-camellia-asm",
1606 .min_keysize = CAMELLIA_MIN_KEY_SIZE, 1454 .base.cra_priority = 300,
1607 .max_keysize = CAMELLIA_MAX_KEY_SIZE, 1455 .base.cra_blocksize = 1,
1608 .ivsize = CAMELLIA_BLOCK_SIZE, 1456 .base.cra_ctxsize = sizeof(struct camellia_ctx),
1609 .setkey = camellia_setkey, 1457 .base.cra_module = THIS_MODULE,
1610 .encrypt = cbc_encrypt, 1458 .min_keysize = CAMELLIA_MIN_KEY_SIZE,
1611 .decrypt = cbc_decrypt, 1459 .max_keysize = CAMELLIA_MAX_KEY_SIZE,
1612 }, 1460 .ivsize = CAMELLIA_BLOCK_SIZE,
1613 }, 1461 .chunksize = CAMELLIA_BLOCK_SIZE,
1614}, { 1462 .setkey = camellia_setkey_skcipher,
1615 .cra_name = "ctr(camellia)", 1463 .encrypt = ctr_crypt,
1616 .cra_driver_name = "ctr-camellia-asm", 1464 .decrypt = ctr_crypt,
1617 .cra_priority = 300, 1465 }
1618 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 1466};
1619 .cra_blocksize = 1,
1620 .cra_ctxsize = sizeof(struct camellia_ctx),
1621 .cra_alignmask = 0,
1622 .cra_type = &crypto_blkcipher_type,
1623 .cra_module = THIS_MODULE,
1624 .cra_u = {
1625 .blkcipher = {
1626 .min_keysize = CAMELLIA_MIN_KEY_SIZE,
1627 .max_keysize = CAMELLIA_MAX_KEY_SIZE,
1628 .ivsize = CAMELLIA_BLOCK_SIZE,
1629 .setkey = camellia_setkey,
1630 .encrypt = ctr_crypt,
1631 .decrypt = ctr_crypt,
1632 },
1633 },
1634}, {
1635 .cra_name = "lrw(camellia)",
1636 .cra_driver_name = "lrw-camellia-asm",
1637 .cra_priority = 300,
1638 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1639 .cra_blocksize = CAMELLIA_BLOCK_SIZE,
1640 .cra_ctxsize = sizeof(struct camellia_lrw_ctx),
1641 .cra_alignmask = 0,
1642 .cra_type = &crypto_blkcipher_type,
1643 .cra_module = THIS_MODULE,
1644 .cra_exit = lrw_camellia_exit_tfm,
1645 .cra_u = {
1646 .blkcipher = {
1647 .min_keysize = CAMELLIA_MIN_KEY_SIZE +
1648 CAMELLIA_BLOCK_SIZE,
1649 .max_keysize = CAMELLIA_MAX_KEY_SIZE +
1650 CAMELLIA_BLOCK_SIZE,
1651 .ivsize = CAMELLIA_BLOCK_SIZE,
1652 .setkey = lrw_camellia_setkey,
1653 .encrypt = lrw_encrypt,
1654 .decrypt = lrw_decrypt,
1655 },
1656 },
1657}, {
1658 .cra_name = "xts(camellia)",
1659 .cra_driver_name = "xts-camellia-asm",
1660 .cra_priority = 300,
1661 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1662 .cra_blocksize = CAMELLIA_BLOCK_SIZE,
1663 .cra_ctxsize = sizeof(struct camellia_xts_ctx),
1664 .cra_alignmask = 0,
1665 .cra_type = &crypto_blkcipher_type,
1666 .cra_module = THIS_MODULE,
1667 .cra_u = {
1668 .blkcipher = {
1669 .min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
1670 .max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
1671 .ivsize = CAMELLIA_BLOCK_SIZE,
1672 .setkey = xts_camellia_setkey,
1673 .encrypt = xts_encrypt,
1674 .decrypt = xts_decrypt,
1675 },
1676 },
1677} };
1678 1467
1679static bool is_blacklisted_cpu(void) 1468static bool is_blacklisted_cpu(void)
1680{ 1469{
@@ -1700,6 +1489,8 @@ MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
1700 1489
1701static int __init init(void) 1490static int __init init(void)
1702{ 1491{
1492 int err;
1493
1703 if (!force && is_blacklisted_cpu()) { 1494 if (!force && is_blacklisted_cpu()) {
1704 printk(KERN_INFO 1495 printk(KERN_INFO
1705 "camellia-x86_64: performance on this CPU " 1496 "camellia-x86_64: performance on this CPU "
@@ -1708,12 +1499,23 @@ static int __init init(void)
1708 return -ENODEV; 1499 return -ENODEV;
1709 } 1500 }
1710 1501
1711 return crypto_register_algs(camellia_algs, ARRAY_SIZE(camellia_algs)); 1502 err = crypto_register_alg(&camellia_cipher_alg);
1503 if (err)
1504 return err;
1505
1506 err = crypto_register_skciphers(camellia_skcipher_algs,
1507 ARRAY_SIZE(camellia_skcipher_algs));
1508 if (err)
1509 crypto_unregister_alg(&camellia_cipher_alg);
1510
1511 return err;
1712} 1512}
1713 1513
1714static void __exit fini(void) 1514static void __exit fini(void)
1715{ 1515{
1716 crypto_unregister_algs(camellia_algs, ARRAY_SIZE(camellia_algs)); 1516 crypto_unregister_alg(&camellia_cipher_alg);
1517 crypto_unregister_skciphers(camellia_skcipher_algs,
1518 ARRAY_SIZE(camellia_skcipher_algs));
1717} 1519}
1718 1520
1719module_init(init); 1521module_init(init);
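
With the single-block "camellia" cipher kept as a crypto_alg and the ECB/CBC/CTR modes moved to skcipher_alg entries, init() above now registers in two steps and unwinds the first step if the second fails. The sketch below shows just that register-then-roll-back pattern; register_a()/register_b() are stand-ins for crypto_register_alg() and crypto_register_skciphers().

/*
 * Two-stage registration with rollback: bring up A, then B, and tear
 * A back down if B fails.  The exit path mirrors init and removes both.
 */
#include <stdio.h>

static int  register_a(void)   { puts("A registered");   return 0; }
static void unregister_a(void) { puts("A unregistered"); }
static int  register_b(void)   { puts("B failed");       return -1; }
static void unregister_b(void) { puts("B unregistered"); }

static int toy_init(void)
{
    int err;

    err = register_a();
    if (err)
        return err;

    err = register_b();
    if (err)
        unregister_a();     /* roll back the first stage on failure */

    return err;
}

static void toy_exit(void)
{
    /* mirror of toy_init(): tear both stages down */
    unregister_a();
    unregister_b();
}

int main(void)
{
    if (toy_init())
        printf("init failed, nothing left registered\n");
    else
        toy_exit();
    return 0;
}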
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index dbea6020ffe7..41034745d6a2 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -21,18 +21,14 @@
21 * 21 *
22 */ 22 */
23 23
24#include <linux/module.h> 24#include <asm/crypto/glue_helper.h>
25#include <linux/hardirq.h>
26#include <linux/types.h>
27#include <linux/crypto.h>
28#include <linux/err.h>
29#include <crypto/ablk_helper.h>
30#include <crypto/algapi.h> 25#include <crypto/algapi.h>
31#include <crypto/cast5.h> 26#include <crypto/cast5.h>
32#include <crypto/cryptd.h> 27#include <crypto/internal/simd.h>
33#include <crypto/ctr.h> 28#include <linux/crypto.h>
34#include <asm/fpu/api.h> 29#include <linux/err.h>
35#include <asm/crypto/glue_helper.h> 30#include <linux/module.h>
31#include <linux/types.h>
36 32
37#define CAST5_PARALLEL_BLOCKS 16 33#define CAST5_PARALLEL_BLOCKS 16
38 34
@@ -45,10 +41,17 @@ asmlinkage void cast5_cbc_dec_16way(struct cast5_ctx *ctx, u8 *dst,
45asmlinkage void cast5_ctr_16way(struct cast5_ctx *ctx, u8 *dst, const u8 *src, 41asmlinkage void cast5_ctr_16way(struct cast5_ctx *ctx, u8 *dst, const u8 *src,
46 __be64 *iv); 42 __be64 *iv);
47 43
48static inline bool cast5_fpu_begin(bool fpu_enabled, unsigned int nbytes) 44static int cast5_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
45 unsigned int keylen)
46{
47 return cast5_setkey(&tfm->base, key, keylen);
48}
49
50static inline bool cast5_fpu_begin(bool fpu_enabled, struct skcipher_walk *walk,
51 unsigned int nbytes)
49{ 52{
50 return glue_fpu_begin(CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS, 53 return glue_fpu_begin(CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS,
51 NULL, fpu_enabled, nbytes); 54 walk, fpu_enabled, nbytes);
52} 55}
53 56
54static inline void cast5_fpu_end(bool fpu_enabled) 57static inline void cast5_fpu_end(bool fpu_enabled)
@@ -56,29 +59,28 @@ static inline void cast5_fpu_end(bool fpu_enabled)
56 return glue_fpu_end(fpu_enabled); 59 return glue_fpu_end(fpu_enabled);
57} 60}
58 61
59static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, 62static int ecb_crypt(struct skcipher_request *req, bool enc)
60 bool enc)
61{ 63{
62 bool fpu_enabled = false; 64 bool fpu_enabled = false;
63 struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 65 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
66 struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
67 struct skcipher_walk walk;
64 const unsigned int bsize = CAST5_BLOCK_SIZE; 68 const unsigned int bsize = CAST5_BLOCK_SIZE;
65 unsigned int nbytes; 69 unsigned int nbytes;
66 void (*fn)(struct cast5_ctx *ctx, u8 *dst, const u8 *src); 70 void (*fn)(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
67 int err; 71 int err;
68 72
69 fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way; 73 err = skcipher_walk_virt(&walk, req, false);
70
71 err = blkcipher_walk_virt(desc, walk);
72 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
73 74
74 while ((nbytes = walk->nbytes)) { 75 while ((nbytes = walk.nbytes)) {
75 u8 *wsrc = walk->src.virt.addr; 76 u8 *wsrc = walk.src.virt.addr;
76 u8 *wdst = walk->dst.virt.addr; 77 u8 *wdst = walk.dst.virt.addr;
77 78
78 fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); 79 fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes);
79 80
80 /* Process multi-block batch */ 81 /* Process multi-block batch */
81 if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) { 82 if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
83 fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way;
82 do { 84 do {
83 fn(ctx, wdst, wsrc); 85 fn(ctx, wdst, wsrc);
84 86
@@ -103,76 +105,58 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
103 } while (nbytes >= bsize); 105 } while (nbytes >= bsize);
104 106
105done: 107done:
106 err = blkcipher_walk_done(desc, walk, nbytes); 108 err = skcipher_walk_done(&walk, nbytes);
107 } 109 }
108 110
109 cast5_fpu_end(fpu_enabled); 111 cast5_fpu_end(fpu_enabled);
110 return err; 112 return err;
111} 113}
112 114
113static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 115static int ecb_encrypt(struct skcipher_request *req)
114 struct scatterlist *src, unsigned int nbytes)
115{ 116{
116 struct blkcipher_walk walk; 117 return ecb_crypt(req, true);
117
118 blkcipher_walk_init(&walk, dst, src, nbytes);
119 return ecb_crypt(desc, &walk, true);
120} 118}
121 119
122static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 120static int ecb_decrypt(struct skcipher_request *req)
123 struct scatterlist *src, unsigned int nbytes)
124{ 121{
125 struct blkcipher_walk walk; 122 return ecb_crypt(req, false);
126
127 blkcipher_walk_init(&walk, dst, src, nbytes);
128 return ecb_crypt(desc, &walk, false);
129} 123}
130 124
131static unsigned int __cbc_encrypt(struct blkcipher_desc *desc, 125static int cbc_encrypt(struct skcipher_request *req)
132 struct blkcipher_walk *walk)
133{ 126{
134 struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
135 const unsigned int bsize = CAST5_BLOCK_SIZE; 127 const unsigned int bsize = CAST5_BLOCK_SIZE;
136 unsigned int nbytes = walk->nbytes; 128 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
137 u64 *src = (u64 *)walk->src.virt.addr; 129 struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
138 u64 *dst = (u64 *)walk->dst.virt.addr; 130 struct skcipher_walk walk;
139 u64 *iv = (u64 *)walk->iv; 131 unsigned int nbytes;
140
141 do {
142 *dst = *src ^ *iv;
143 __cast5_encrypt(ctx, (u8 *)dst, (u8 *)dst);
144 iv = dst;
145
146 src += 1;
147 dst += 1;
148 nbytes -= bsize;
149 } while (nbytes >= bsize);
150
151 *(u64 *)walk->iv = *iv;
152 return nbytes;
153}
154
155static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
156 struct scatterlist *src, unsigned int nbytes)
157{
158 struct blkcipher_walk walk;
159 int err; 132 int err;
160 133
161 blkcipher_walk_init(&walk, dst, src, nbytes); 134 err = skcipher_walk_virt(&walk, req, false);
162 err = blkcipher_walk_virt(desc, &walk);
163 135
164 while ((nbytes = walk.nbytes)) { 136 while ((nbytes = walk.nbytes)) {
165 nbytes = __cbc_encrypt(desc, &walk); 137 u64 *src = (u64 *)walk.src.virt.addr;
166 err = blkcipher_walk_done(desc, &walk, nbytes); 138 u64 *dst = (u64 *)walk.dst.virt.addr;
139 u64 *iv = (u64 *)walk.iv;
140
141 do {
142 *dst = *src ^ *iv;
143 __cast5_encrypt(ctx, (u8 *)dst, (u8 *)dst);
144 iv = dst;
145 src++;
146 dst++;
147 nbytes -= bsize;
148 } while (nbytes >= bsize);
149
150 *(u64 *)walk.iv = *iv;
151 err = skcipher_walk_done(&walk, nbytes);
167 } 152 }
168 153
169 return err; 154 return err;
170} 155}
171 156
172static unsigned int __cbc_decrypt(struct blkcipher_desc *desc, 157static unsigned int __cbc_decrypt(struct cast5_ctx *ctx,
173 struct blkcipher_walk *walk) 158 struct skcipher_walk *walk)
174{ 159{
175 struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
176 const unsigned int bsize = CAST5_BLOCK_SIZE; 160 const unsigned int bsize = CAST5_BLOCK_SIZE;
177 unsigned int nbytes = walk->nbytes; 161 unsigned int nbytes = walk->nbytes;
178 u64 *src = (u64 *)walk->src.virt.addr; 162 u64 *src = (u64 *)walk->src.virt.addr;
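
The rewritten cbc_encrypt() above folds the old __cbc_encrypt() helper into the skcipher walk loop: each block is XORed with the running IV, encrypted in place, and then becomes the IV for the next block, with the last ciphertext block written back to walk.iv. Here is a self-contained sketch of that chaining, using a toy (invertible but insecure) block function where the driver uses __cast5_encrypt().

/*
 * CBC chaining over a contiguous run of 64-bit blocks:
 *   dst = src ^ iv;  encrypt(dst);  iv = dst;
 * and the final ciphertext block is handed back as the next IV.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in block "cipher": XOR with the key, then an odd multiply. */
static void toy_encrypt_block(uint64_t key, uint64_t *block)
{
    *block = (*block ^ key) * 0x9e3779b97f4a7c15ULL;
}

static void toy_cbc_encrypt(uint64_t key, uint64_t *blocks, size_t nblocks,
                            uint64_t *iv)
{
    uint64_t chain = *iv;

    for (size_t i = 0; i < nblocks; i++) {
        blocks[i] ^= chain;                 /* dst = src ^ iv   */
        toy_encrypt_block(key, &blocks[i]); /* encrypt in place */
        chain = blocks[i];                  /* iv = dst         */
    }

    *iv = chain;    /* last ciphertext block becomes the next IV */
}

int main(void)
{
    uint64_t buf[4] = { 1, 2, 3, 4 };
    uint64_t iv = 0x0123456789abcdefULL;

    toy_cbc_encrypt(0xdeadbeefcafef00dULL, buf, 4, &iv);
    printf("iv after encryption: %016llx\n", (unsigned long long)iv);
    return 0;
}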
@@ -224,31 +208,29 @@ done:
224 return nbytes; 208 return nbytes;
225} 209}
226 210
227static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 211static int cbc_decrypt(struct skcipher_request *req)
228 struct scatterlist *src, unsigned int nbytes)
229{ 212{
213 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
214 struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
230 bool fpu_enabled = false; 215 bool fpu_enabled = false;
231 struct blkcipher_walk walk; 216 struct skcipher_walk walk;
217 unsigned int nbytes;
232 int err; 218 int err;
233 219
234 blkcipher_walk_init(&walk, dst, src, nbytes); 220 err = skcipher_walk_virt(&walk, req, false);
235 err = blkcipher_walk_virt(desc, &walk);
236 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
237 221
238 while ((nbytes = walk.nbytes)) { 222 while ((nbytes = walk.nbytes)) {
239 fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); 223 fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes);
240 nbytes = __cbc_decrypt(desc, &walk); 224 nbytes = __cbc_decrypt(ctx, &walk);
241 err = blkcipher_walk_done(desc, &walk, nbytes); 225 err = skcipher_walk_done(&walk, nbytes);
242 } 226 }
243 227
244 cast5_fpu_end(fpu_enabled); 228 cast5_fpu_end(fpu_enabled);
245 return err; 229 return err;
246} 230}
247 231
248static void ctr_crypt_final(struct blkcipher_desc *desc, 232static void ctr_crypt_final(struct skcipher_walk *walk, struct cast5_ctx *ctx)
249 struct blkcipher_walk *walk)
250{ 233{
251 struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
252 u8 *ctrblk = walk->iv; 234 u8 *ctrblk = walk->iv;
253 u8 keystream[CAST5_BLOCK_SIZE]; 235 u8 keystream[CAST5_BLOCK_SIZE];
254 u8 *src = walk->src.virt.addr; 236 u8 *src = walk->src.virt.addr;
@@ -261,10 +243,9 @@ static void ctr_crypt_final(struct blkcipher_desc *desc,
261 crypto_inc(ctrblk, CAST5_BLOCK_SIZE); 243 crypto_inc(ctrblk, CAST5_BLOCK_SIZE);
262} 244}
263 245
264static unsigned int __ctr_crypt(struct blkcipher_desc *desc, 246static unsigned int __ctr_crypt(struct skcipher_walk *walk,
265 struct blkcipher_walk *walk) 247 struct cast5_ctx *ctx)
266{ 248{
267 struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
268 const unsigned int bsize = CAST5_BLOCK_SIZE; 249 const unsigned int bsize = CAST5_BLOCK_SIZE;
269 unsigned int nbytes = walk->nbytes; 250 unsigned int nbytes = walk->nbytes;
270 u64 *src = (u64 *)walk->src.virt.addr; 251 u64 *src = (u64 *)walk->src.virt.addr;
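
ctr_crypt_final() above handles a trailing partial block by encrypting the counter block into a keystream buffer, XORing only the remaining bytes, and then advancing the counter with crypto_inc(). A standalone sketch of that tail handling follows; toy_encrypt_block() is a stand-in block cipher and the big-endian increment only approximates crypto_inc().

/*
 * CTR tail: keystream = E(counter); XOR just the leftover bytes;
 * bump the counter for the next request.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BSIZE 8

static void toy_encrypt_block(const uint8_t *in, uint8_t *out)
{
    /* stand-in "cipher": byte-wise rotate and XOR, demo only */
    for (int i = 0; i < BSIZE; i++)
        out[i] = (uint8_t)(((in[i] << 3) | (in[i] >> 5)) ^ 0xa5);
}

static void ctr_increment(uint8_t *ctr)
{
    /* big-endian increment of the counter block */
    for (int i = BSIZE - 1; i >= 0; i--)
        if (++ctr[i] != 0)
            break;
}

static void toy_ctr_final(uint8_t *ctrblk, uint8_t *dst, const uint8_t *src,
                          size_t nbytes)
{
    uint8_t keystream[BSIZE];

    toy_encrypt_block(ctrblk, keystream);   /* keystream = E(counter) */
    for (size_t i = 0; i < nbytes; i++)     /* XOR only the tail bytes */
        dst[i] = src[i] ^ keystream[i];
    ctr_increment(ctrblk);
}

int main(void)
{
    uint8_t ctr[BSIZE] = { 0 }, out[3];
    const uint8_t tail[3] = { 'e', 'n', 'd' };

    toy_ctr_final(ctr, out, tail, sizeof(tail));
    printf("last counter byte is now %u\n", ctr[BSIZE - 1]);
    return 0;
}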
@@ -307,162 +288,80 @@ done:
307 return nbytes; 288 return nbytes;
308} 289}
309 290
310static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, 291static int ctr_crypt(struct skcipher_request *req)
311 struct scatterlist *src, unsigned int nbytes)
312{ 292{
293 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
294 struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
313 bool fpu_enabled = false; 295 bool fpu_enabled = false;
314 struct blkcipher_walk walk; 296 struct skcipher_walk walk;
297 unsigned int nbytes;
315 int err; 298 int err;
316 299
317 blkcipher_walk_init(&walk, dst, src, nbytes); 300 err = skcipher_walk_virt(&walk, req, false);
318 err = blkcipher_walk_virt_block(desc, &walk, CAST5_BLOCK_SIZE);
319 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
320 301
321 while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) { 302 while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
322 fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); 303 fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes);
323 nbytes = __ctr_crypt(desc, &walk); 304 nbytes = __ctr_crypt(&walk, ctx);
324 err = blkcipher_walk_done(desc, &walk, nbytes); 305 err = skcipher_walk_done(&walk, nbytes);
325 } 306 }
326 307
327 cast5_fpu_end(fpu_enabled); 308 cast5_fpu_end(fpu_enabled);
328 309
329 if (walk.nbytes) { 310 if (walk.nbytes) {
330 ctr_crypt_final(desc, &walk); 311 ctr_crypt_final(&walk, ctx);
331 err = blkcipher_walk_done(desc, &walk, 0); 312 err = skcipher_walk_done(&walk, 0);
332 } 313 }
333 314
334 return err; 315 return err;
335} 316}
336 317
318static struct skcipher_alg cast5_algs[] = {
319 {
320 .base.cra_name = "__ecb(cast5)",
321 .base.cra_driver_name = "__ecb-cast5-avx",
322 .base.cra_priority = 200,
323 .base.cra_flags = CRYPTO_ALG_INTERNAL,
324 .base.cra_blocksize = CAST5_BLOCK_SIZE,
325 .base.cra_ctxsize = sizeof(struct cast5_ctx),
326 .base.cra_module = THIS_MODULE,
327 .min_keysize = CAST5_MIN_KEY_SIZE,
328 .max_keysize = CAST5_MAX_KEY_SIZE,
329 .setkey = cast5_setkey_skcipher,
330 .encrypt = ecb_encrypt,
331 .decrypt = ecb_decrypt,
332 }, {
333 .base.cra_name = "__cbc(cast5)",
334 .base.cra_driver_name = "__cbc-cast5-avx",
335 .base.cra_priority = 200,
336 .base.cra_flags = CRYPTO_ALG_INTERNAL,
337 .base.cra_blocksize = CAST5_BLOCK_SIZE,
338 .base.cra_ctxsize = sizeof(struct cast5_ctx),
339 .base.cra_module = THIS_MODULE,
340 .min_keysize = CAST5_MIN_KEY_SIZE,
341 .max_keysize = CAST5_MAX_KEY_SIZE,
342 .ivsize = CAST5_BLOCK_SIZE,
343 .setkey = cast5_setkey_skcipher,
344 .encrypt = cbc_encrypt,
345 .decrypt = cbc_decrypt,
346 }, {
347 .base.cra_name = "__ctr(cast5)",
348 .base.cra_driver_name = "__ctr-cast5-avx",
349 .base.cra_priority = 200,
350 .base.cra_flags = CRYPTO_ALG_INTERNAL,
351 .base.cra_blocksize = 1,
352 .base.cra_ctxsize = sizeof(struct cast5_ctx),
353 .base.cra_module = THIS_MODULE,
354 .min_keysize = CAST5_MIN_KEY_SIZE,
355 .max_keysize = CAST5_MAX_KEY_SIZE,
356 .ivsize = CAST5_BLOCK_SIZE,
357 .chunksize = CAST5_BLOCK_SIZE,
358 .setkey = cast5_setkey_skcipher,
359 .encrypt = ctr_crypt,
360 .decrypt = ctr_crypt,
361 }
362};
337 363
338static struct crypto_alg cast5_algs[6] = { { 364static struct simd_skcipher_alg *cast5_simd_algs[ARRAY_SIZE(cast5_algs)];
339 .cra_name = "__ecb-cast5-avx",
340 .cra_driver_name = "__driver-ecb-cast5-avx",
341 .cra_priority = 0,
342 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
343 CRYPTO_ALG_INTERNAL,
344 .cra_blocksize = CAST5_BLOCK_SIZE,
345 .cra_ctxsize = sizeof(struct cast5_ctx),
346 .cra_alignmask = 0,
347 .cra_type = &crypto_blkcipher_type,
348 .cra_module = THIS_MODULE,
349 .cra_u = {
350 .blkcipher = {
351 .min_keysize = CAST5_MIN_KEY_SIZE,
352 .max_keysize = CAST5_MAX_KEY_SIZE,
353 .setkey = cast5_setkey,
354 .encrypt = ecb_encrypt,
355 .decrypt = ecb_decrypt,
356 },
357 },
358}, {
359 .cra_name = "__cbc-cast5-avx",
360 .cra_driver_name = "__driver-cbc-cast5-avx",
361 .cra_priority = 0,
362 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
363 CRYPTO_ALG_INTERNAL,
364 .cra_blocksize = CAST5_BLOCK_SIZE,
365 .cra_ctxsize = sizeof(struct cast5_ctx),
366 .cra_alignmask = 0,
367 .cra_type = &crypto_blkcipher_type,
368 .cra_module = THIS_MODULE,
369 .cra_u = {
370 .blkcipher = {
371 .min_keysize = CAST5_MIN_KEY_SIZE,
372 .max_keysize = CAST5_MAX_KEY_SIZE,
373 .setkey = cast5_setkey,
374 .encrypt = cbc_encrypt,
375 .decrypt = cbc_decrypt,
376 },
377 },
378}, {
379 .cra_name = "__ctr-cast5-avx",
380 .cra_driver_name = "__driver-ctr-cast5-avx",
381 .cra_priority = 0,
382 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
383 CRYPTO_ALG_INTERNAL,
384 .cra_blocksize = 1,
385 .cra_ctxsize = sizeof(struct cast5_ctx),
386 .cra_alignmask = 0,
387 .cra_type = &crypto_blkcipher_type,
388 .cra_module = THIS_MODULE,
389 .cra_u = {
390 .blkcipher = {
391 .min_keysize = CAST5_MIN_KEY_SIZE,
392 .max_keysize = CAST5_MAX_KEY_SIZE,
393 .ivsize = CAST5_BLOCK_SIZE,
394 .setkey = cast5_setkey,
395 .encrypt = ctr_crypt,
396 .decrypt = ctr_crypt,
397 },
398 },
399}, {
400 .cra_name = "ecb(cast5)",
401 .cra_driver_name = "ecb-cast5-avx",
402 .cra_priority = 200,
403 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
404 .cra_blocksize = CAST5_BLOCK_SIZE,
405 .cra_ctxsize = sizeof(struct async_helper_ctx),
406 .cra_alignmask = 0,
407 .cra_type = &crypto_ablkcipher_type,
408 .cra_module = THIS_MODULE,
409 .cra_init = ablk_init,
410 .cra_exit = ablk_exit,
411 .cra_u = {
412 .ablkcipher = {
413 .min_keysize = CAST5_MIN_KEY_SIZE,
414 .max_keysize = CAST5_MAX_KEY_SIZE,
415 .setkey = ablk_set_key,
416 .encrypt = ablk_encrypt,
417 .decrypt = ablk_decrypt,
418 },
419 },
420}, {
421 .cra_name = "cbc(cast5)",
422 .cra_driver_name = "cbc-cast5-avx",
423 .cra_priority = 200,
424 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
425 .cra_blocksize = CAST5_BLOCK_SIZE,
426 .cra_ctxsize = sizeof(struct async_helper_ctx),
427 .cra_alignmask = 0,
428 .cra_type = &crypto_ablkcipher_type,
429 .cra_module = THIS_MODULE,
430 .cra_init = ablk_init,
431 .cra_exit = ablk_exit,
432 .cra_u = {
433 .ablkcipher = {
434 .min_keysize = CAST5_MIN_KEY_SIZE,
435 .max_keysize = CAST5_MAX_KEY_SIZE,
436 .ivsize = CAST5_BLOCK_SIZE,
437 .setkey = ablk_set_key,
438 .encrypt = __ablk_encrypt,
439 .decrypt = ablk_decrypt,
440 },
441 },
442}, {
443 .cra_name = "ctr(cast5)",
444 .cra_driver_name = "ctr-cast5-avx",
445 .cra_priority = 200,
446 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
447 .cra_blocksize = 1,
448 .cra_ctxsize = sizeof(struct async_helper_ctx),
449 .cra_alignmask = 0,
450 .cra_type = &crypto_ablkcipher_type,
451 .cra_module = THIS_MODULE,
452 .cra_init = ablk_init,
453 .cra_exit = ablk_exit,
454 .cra_u = {
455 .ablkcipher = {
456 .min_keysize = CAST5_MIN_KEY_SIZE,
457 .max_keysize = CAST5_MAX_KEY_SIZE,
458 .ivsize = CAST5_BLOCK_SIZE,
459 .setkey = ablk_set_key,
460 .encrypt = ablk_encrypt,
461 .decrypt = ablk_encrypt,
462 .geniv = "chainiv",
463 },
464 },
465} };
466 365
467static int __init cast5_init(void) 366static int __init cast5_init(void)
468{ 367{
@@ -474,12 +373,15 @@ static int __init cast5_init(void)
474 return -ENODEV; 373 return -ENODEV;
475 } 374 }
476 375
477 return crypto_register_algs(cast5_algs, ARRAY_SIZE(cast5_algs)); 376 return simd_register_skciphers_compat(cast5_algs,
377 ARRAY_SIZE(cast5_algs),
378 cast5_simd_algs);
478} 379}
479 380
480static void __exit cast5_exit(void) 381static void __exit cast5_exit(void)
481{ 382{
482 crypto_unregister_algs(cast5_algs, ARRAY_SIZE(cast5_algs)); 383 simd_unregister_skciphers(cast5_algs, ARRAY_SIZE(cast5_algs),
384 cast5_simd_algs);
483} 385}
484 386
485module_init(cast5_init); 387module_init(cast5_init);
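
The cast5 conversion above now registers only CRYPTO_ALG_INTERNAL skciphers and hands them to the simd helper, which creates the async wrappers that ordinary "ecb(cast5)"/"cbc(cast5)"/"ctr(cast5)" lookups resolve to. A minimal sketch of that registration shape follows; the alg table contents are elided and every "example_*" name is a placeholder, not part of the real driver.

#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

/* Internal, FPU-using algorithms ("__ecb(example)", ...); the field
 * initialisers are elided in this sketch. */
static struct skcipher_alg example_algs[3];

/* One simd wrapper handle per internal algorithm. */
static struct simd_skcipher_alg *example_simd_algs[ARRAY_SIZE(example_algs)];

static int __init example_init(void)
{
	/* Registers example_algs and, for each entry, a simd wrapper that
	 * falls back to cryptd when SIMD cannot be used in this context. */
	return simd_register_skciphers_compat(example_algs,
					      ARRAY_SIZE(example_algs),
					      example_simd_algs);
}

static void __exit example_exit(void)
{
	simd_unregister_skciphers(example_algs, ARRAY_SIZE(example_algs),
				  example_simd_algs);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
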
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index 50e684768c55..9fb66b5e94b2 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -24,19 +24,13 @@
24 */ 24 */
25 25
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/hardirq.h>
28#include <linux/types.h> 27#include <linux/types.h>
29#include <linux/crypto.h> 28#include <linux/crypto.h>
30#include <linux/err.h> 29#include <linux/err.h>
31#include <crypto/ablk_helper.h>
32#include <crypto/algapi.h> 30#include <crypto/algapi.h>
33#include <crypto/cast6.h> 31#include <crypto/cast6.h>
34#include <crypto/cryptd.h> 32#include <crypto/internal/simd.h>
35#include <crypto/b128ops.h>
36#include <crypto/ctr.h>
37#include <crypto/lrw.h>
38#include <crypto/xts.h> 33#include <crypto/xts.h>
39#include <asm/fpu/api.h>
40#include <asm/crypto/glue_helper.h> 34#include <asm/crypto/glue_helper.h>
41 35
42#define CAST6_PARALLEL_BLOCKS 8 36#define CAST6_PARALLEL_BLOCKS 8
@@ -56,6 +50,12 @@ asmlinkage void cast6_xts_enc_8way(struct cast6_ctx *ctx, u8 *dst,
56asmlinkage void cast6_xts_dec_8way(struct cast6_ctx *ctx, u8 *dst, 50asmlinkage void cast6_xts_dec_8way(struct cast6_ctx *ctx, u8 *dst,
57 const u8 *src, le128 *iv); 51 const u8 *src, le128 *iv);
58 52
53static int cast6_setkey_skcipher(struct crypto_skcipher *tfm,
54 const u8 *key, unsigned int keylen)
55{
56 return cast6_setkey(&tfm->base, key, keylen);
57}
58
59static void cast6_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) 59static void cast6_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
60{ 60{
61 glue_xts_crypt_128bit_one(ctx, dst, src, iv, 61 glue_xts_crypt_128bit_one(ctx, dst, src, iv,
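
The cast6_setkey_skcipher() wrapper added in the hunk above is the usual bridge while the driver keeps its crypto_tfm-based setkey helper: the skcipher entry point simply unwraps tfm->base. A sketch of the pattern with hypothetical example_* names:

#include <crypto/internal/skcipher.h>
#include <linux/crypto.h>

/* Legacy helper, still written against struct crypto_tfm; the body here is
 * only a placeholder. */
static int example_setkey(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen)
{
	return 0;
}

static int example_setkey_skcipher(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen)
{
	/* struct crypto_skcipher embeds a struct crypto_tfm named "base",
	 * so the legacy helper can be reused unchanged. */
	return example_setkey(&tfm->base, key, keylen);
}
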
@@ -157,164 +157,30 @@ static const struct common_glue_ctx cast6_dec_xts = {
157 } } 157 } }
158}; 158};
159 159
160static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 160static int ecb_encrypt(struct skcipher_request *req)
161 struct scatterlist *src, unsigned int nbytes)
162{
163 return glue_ecb_crypt_128bit(&cast6_enc, desc, dst, src, nbytes);
164}
165
166static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
167 struct scatterlist *src, unsigned int nbytes)
168{
169 return glue_ecb_crypt_128bit(&cast6_dec, desc, dst, src, nbytes);
170}
171
172static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
173 struct scatterlist *src, unsigned int nbytes)
174{
175 return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__cast6_encrypt), desc,
176 dst, src, nbytes);
177}
178
179static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
180 struct scatterlist *src, unsigned int nbytes)
181{
182 return glue_cbc_decrypt_128bit(&cast6_dec_cbc, desc, dst, src,
183 nbytes);
184}
185
186static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
187 struct scatterlist *src, unsigned int nbytes)
188{ 161{
189 return glue_ctr_crypt_128bit(&cast6_ctr, desc, dst, src, nbytes); 162 return glue_ecb_req_128bit(&cast6_enc, req);
190} 163}
191 164
192static inline bool cast6_fpu_begin(bool fpu_enabled, unsigned int nbytes) 165static int ecb_decrypt(struct skcipher_request *req)
193{ 166{
194 return glue_fpu_begin(CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS, 167 return glue_ecb_req_128bit(&cast6_dec, req);
195 NULL, fpu_enabled, nbytes);
196} 168}
197 169
198static inline void cast6_fpu_end(bool fpu_enabled) 170static int cbc_encrypt(struct skcipher_request *req)
199{ 171{
200 glue_fpu_end(fpu_enabled); 172 return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__cast6_encrypt),
173 req);
201} 174}
202 175
203struct crypt_priv { 176static int cbc_decrypt(struct skcipher_request *req)
204 struct cast6_ctx *ctx;
205 bool fpu_enabled;
206};
207
208static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
209{ 177{
210 const unsigned int bsize = CAST6_BLOCK_SIZE; 178 return glue_cbc_decrypt_req_128bit(&cast6_dec_cbc, req);
211 struct crypt_priv *ctx = priv;
212 int i;
213
214 ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
215
216 if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
217 cast6_ecb_enc_8way(ctx->ctx, srcdst, srcdst);
218 return;
219 }
220
221 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
222 __cast6_encrypt(ctx->ctx, srcdst, srcdst);
223} 179}
224 180
225static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) 181static int ctr_crypt(struct skcipher_request *req)
226{ 182{
227 const unsigned int bsize = CAST6_BLOCK_SIZE; 183 return glue_ctr_req_128bit(&cast6_ctr, req);
228 struct crypt_priv *ctx = priv;
229 int i;
230
231 ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
232
233 if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
234 cast6_ecb_dec_8way(ctx->ctx, srcdst, srcdst);
235 return;
236 }
237
238 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
239 __cast6_decrypt(ctx->ctx, srcdst, srcdst);
240}
241
242struct cast6_lrw_ctx {
243 struct lrw_table_ctx lrw_table;
244 struct cast6_ctx cast6_ctx;
245};
246
247static int lrw_cast6_setkey(struct crypto_tfm *tfm, const u8 *key,
248 unsigned int keylen)
249{
250 struct cast6_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
251 int err;
252
253 err = __cast6_setkey(&ctx->cast6_ctx, key, keylen - CAST6_BLOCK_SIZE,
254 &tfm->crt_flags);
255 if (err)
256 return err;
257
258 return lrw_init_table(&ctx->lrw_table, key + keylen - CAST6_BLOCK_SIZE);
259}
260
261static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
262 struct scatterlist *src, unsigned int nbytes)
263{
264 struct cast6_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
265 be128 buf[CAST6_PARALLEL_BLOCKS];
266 struct crypt_priv crypt_ctx = {
267 .ctx = &ctx->cast6_ctx,
268 .fpu_enabled = false,
269 };
270 struct lrw_crypt_req req = {
271 .tbuf = buf,
272 .tbuflen = sizeof(buf),
273
274 .table_ctx = &ctx->lrw_table,
275 .crypt_ctx = &crypt_ctx,
276 .crypt_fn = encrypt_callback,
277 };
278 int ret;
279
280 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
281 ret = lrw_crypt(desc, dst, src, nbytes, &req);
282 cast6_fpu_end(crypt_ctx.fpu_enabled);
283
284 return ret;
285}
286
287static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
288 struct scatterlist *src, unsigned int nbytes)
289{
290 struct cast6_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
291 be128 buf[CAST6_PARALLEL_BLOCKS];
292 struct crypt_priv crypt_ctx = {
293 .ctx = &ctx->cast6_ctx,
294 .fpu_enabled = false,
295 };
296 struct lrw_crypt_req req = {
297 .tbuf = buf,
298 .tbuflen = sizeof(buf),
299
300 .table_ctx = &ctx->lrw_table,
301 .crypt_ctx = &crypt_ctx,
302 .crypt_fn = decrypt_callback,
303 };
304 int ret;
305
306 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
307 ret = lrw_crypt(desc, dst, src, nbytes, &req);
308 cast6_fpu_end(crypt_ctx.fpu_enabled);
309
310 return ret;
311}
312
313static void lrw_exit_tfm(struct crypto_tfm *tfm)
314{
315 struct cast6_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
316
317 lrw_free_table(&ctx->lrw_table);
318} 184}
319 185
320struct cast6_xts_ctx { 186struct cast6_xts_ctx {
@@ -322,14 +188,14 @@ struct cast6_xts_ctx {
322 struct cast6_ctx crypt_ctx; 188 struct cast6_ctx crypt_ctx;
323}; 189};
324 190
325static int xts_cast6_setkey(struct crypto_tfm *tfm, const u8 *key, 191static int xts_cast6_setkey(struct crypto_skcipher *tfm, const u8 *key,
326 unsigned int keylen) 192 unsigned int keylen)
327{ 193{
328 struct cast6_xts_ctx *ctx = crypto_tfm_ctx(tfm); 194 struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
329 u32 *flags = &tfm->crt_flags; 195 u32 *flags = &tfm->base.crt_flags;
330 int err; 196 int err;
331 197
332 err = xts_check_key(tfm, key, keylen); 198 err = xts_verify_key(tfm, key, keylen);
333 if (err) 199 if (err)
334 return err; 200 return err;
335 201
@@ -343,245 +209,87 @@ static int xts_cast6_setkey(struct crypto_tfm *tfm, const u8 *key,
343 flags); 209 flags);
344} 210}
345 211
346static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 212static int xts_encrypt(struct skcipher_request *req)
347 struct scatterlist *src, unsigned int nbytes)
348{ 213{
349 struct cast6_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 214 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
215 struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
350 216
351 return glue_xts_crypt_128bit(&cast6_enc_xts, desc, dst, src, nbytes, 217 return glue_xts_req_128bit(&cast6_enc_xts, req,
352 XTS_TWEAK_CAST(__cast6_encrypt), 218 XTS_TWEAK_CAST(__cast6_encrypt),
353 &ctx->tweak_ctx, &ctx->crypt_ctx); 219 &ctx->tweak_ctx, &ctx->crypt_ctx);
354} 220}
355 221
356static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 222static int xts_decrypt(struct skcipher_request *req)
357 struct scatterlist *src, unsigned int nbytes)
358{ 223{
359 struct cast6_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 224 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
225 struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
360 226
361 return glue_xts_crypt_128bit(&cast6_dec_xts, desc, dst, src, nbytes, 227 return glue_xts_req_128bit(&cast6_dec_xts, req,
362 XTS_TWEAK_CAST(__cast6_encrypt), 228 XTS_TWEAK_CAST(__cast6_encrypt),
363 &ctx->tweak_ctx, &ctx->crypt_ctx); 229 &ctx->tweak_ctx, &ctx->crypt_ctx);
364} 230}
365 231
366static struct crypto_alg cast6_algs[10] = { { 232static struct skcipher_alg cast6_algs[] = {
367 .cra_name = "__ecb-cast6-avx", 233 {
368 .cra_driver_name = "__driver-ecb-cast6-avx", 234 .base.cra_name = "__ecb(cast6)",
369 .cra_priority = 0, 235 .base.cra_driver_name = "__ecb-cast6-avx",
370 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | 236 .base.cra_priority = 200,
371 CRYPTO_ALG_INTERNAL, 237 .base.cra_flags = CRYPTO_ALG_INTERNAL,
372 .cra_blocksize = CAST6_BLOCK_SIZE, 238 .base.cra_blocksize = CAST6_BLOCK_SIZE,
373 .cra_ctxsize = sizeof(struct cast6_ctx), 239 .base.cra_ctxsize = sizeof(struct cast6_ctx),
374 .cra_alignmask = 0, 240 .base.cra_module = THIS_MODULE,
375 .cra_type = &crypto_blkcipher_type, 241 .min_keysize = CAST6_MIN_KEY_SIZE,
376 .cra_module = THIS_MODULE, 242 .max_keysize = CAST6_MAX_KEY_SIZE,
377 .cra_u = { 243 .setkey = cast6_setkey_skcipher,
378 .blkcipher = { 244 .encrypt = ecb_encrypt,
379 .min_keysize = CAST6_MIN_KEY_SIZE, 245 .decrypt = ecb_decrypt,
380 .max_keysize = CAST6_MAX_KEY_SIZE, 246 }, {
381 .setkey = cast6_setkey, 247 .base.cra_name = "__cbc(cast6)",
382 .encrypt = ecb_encrypt, 248 .base.cra_driver_name = "__cbc-cast6-avx",
383 .decrypt = ecb_decrypt, 249 .base.cra_priority = 200,
384 }, 250 .base.cra_flags = CRYPTO_ALG_INTERNAL,
385 }, 251 .base.cra_blocksize = CAST6_BLOCK_SIZE,
386}, { 252 .base.cra_ctxsize = sizeof(struct cast6_ctx),
387 .cra_name = "__cbc-cast6-avx", 253 .base.cra_module = THIS_MODULE,
388 .cra_driver_name = "__driver-cbc-cast6-avx", 254 .min_keysize = CAST6_MIN_KEY_SIZE,
389 .cra_priority = 0, 255 .max_keysize = CAST6_MAX_KEY_SIZE,
390 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | 256 .ivsize = CAST6_BLOCK_SIZE,
391 CRYPTO_ALG_INTERNAL, 257 .setkey = cast6_setkey_skcipher,
392 .cra_blocksize = CAST6_BLOCK_SIZE, 258 .encrypt = cbc_encrypt,
393 .cra_ctxsize = sizeof(struct cast6_ctx), 259 .decrypt = cbc_decrypt,
394 .cra_alignmask = 0, 260 }, {
395 .cra_type = &crypto_blkcipher_type, 261 .base.cra_name = "__ctr(cast6)",
396 .cra_module = THIS_MODULE, 262 .base.cra_driver_name = "__ctr-cast6-avx",
397 .cra_u = { 263 .base.cra_priority = 200,
398 .blkcipher = { 264 .base.cra_flags = CRYPTO_ALG_INTERNAL,
399 .min_keysize = CAST6_MIN_KEY_SIZE, 265 .base.cra_blocksize = 1,
400 .max_keysize = CAST6_MAX_KEY_SIZE, 266 .base.cra_ctxsize = sizeof(struct cast6_ctx),
401 .setkey = cast6_setkey, 267 .base.cra_module = THIS_MODULE,
402 .encrypt = cbc_encrypt, 268 .min_keysize = CAST6_MIN_KEY_SIZE,
403 .decrypt = cbc_decrypt, 269 .max_keysize = CAST6_MAX_KEY_SIZE,
404 }, 270 .ivsize = CAST6_BLOCK_SIZE,
405 }, 271 .chunksize = CAST6_BLOCK_SIZE,
406}, { 272 .setkey = cast6_setkey_skcipher,
407 .cra_name = "__ctr-cast6-avx", 273 .encrypt = ctr_crypt,
408 .cra_driver_name = "__driver-ctr-cast6-avx", 274 .decrypt = ctr_crypt,
409 .cra_priority = 0, 275 }, {
410 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | 276 .base.cra_name = "__xts(cast6)",
411 CRYPTO_ALG_INTERNAL, 277 .base.cra_driver_name = "__xts-cast6-avx",
412 .cra_blocksize = 1, 278 .base.cra_priority = 200,
413 .cra_ctxsize = sizeof(struct cast6_ctx), 279 .base.cra_flags = CRYPTO_ALG_INTERNAL,
414 .cra_alignmask = 0, 280 .base.cra_blocksize = CAST6_BLOCK_SIZE,
415 .cra_type = &crypto_blkcipher_type, 281 .base.cra_ctxsize = sizeof(struct cast6_xts_ctx),
416 .cra_module = THIS_MODULE, 282 .base.cra_module = THIS_MODULE,
417 .cra_u = { 283 .min_keysize = 2 * CAST6_MIN_KEY_SIZE,
418 .blkcipher = { 284 .max_keysize = 2 * CAST6_MAX_KEY_SIZE,
419 .min_keysize = CAST6_MIN_KEY_SIZE, 285 .ivsize = CAST6_BLOCK_SIZE,
420 .max_keysize = CAST6_MAX_KEY_SIZE, 286 .setkey = xts_cast6_setkey,
421 .ivsize = CAST6_BLOCK_SIZE, 287 .encrypt = xts_encrypt,
422 .setkey = cast6_setkey, 288 .decrypt = xts_decrypt,
423 .encrypt = ctr_crypt,
424 .decrypt = ctr_crypt,
425 },
426 },
427}, {
428 .cra_name = "__lrw-cast6-avx",
429 .cra_driver_name = "__driver-lrw-cast6-avx",
430 .cra_priority = 0,
431 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
432 CRYPTO_ALG_INTERNAL,
433 .cra_blocksize = CAST6_BLOCK_SIZE,
434 .cra_ctxsize = sizeof(struct cast6_lrw_ctx),
435 .cra_alignmask = 0,
436 .cra_type = &crypto_blkcipher_type,
437 .cra_module = THIS_MODULE,
438 .cra_exit = lrw_exit_tfm,
439 .cra_u = {
440 .blkcipher = {
441 .min_keysize = CAST6_MIN_KEY_SIZE +
442 CAST6_BLOCK_SIZE,
443 .max_keysize = CAST6_MAX_KEY_SIZE +
444 CAST6_BLOCK_SIZE,
445 .ivsize = CAST6_BLOCK_SIZE,
446 .setkey = lrw_cast6_setkey,
447 .encrypt = lrw_encrypt,
448 .decrypt = lrw_decrypt,
449 },
450 },
451}, {
452 .cra_name = "__xts-cast6-avx",
453 .cra_driver_name = "__driver-xts-cast6-avx",
454 .cra_priority = 0,
455 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
456 CRYPTO_ALG_INTERNAL,
457 .cra_blocksize = CAST6_BLOCK_SIZE,
458 .cra_ctxsize = sizeof(struct cast6_xts_ctx),
459 .cra_alignmask = 0,
460 .cra_type = &crypto_blkcipher_type,
461 .cra_module = THIS_MODULE,
462 .cra_u = {
463 .blkcipher = {
464 .min_keysize = CAST6_MIN_KEY_SIZE * 2,
465 .max_keysize = CAST6_MAX_KEY_SIZE * 2,
466 .ivsize = CAST6_BLOCK_SIZE,
467 .setkey = xts_cast6_setkey,
468 .encrypt = xts_encrypt,
469 .decrypt = xts_decrypt,
470 },
471 },
472}, {
473 .cra_name = "ecb(cast6)",
474 .cra_driver_name = "ecb-cast6-avx",
475 .cra_priority = 200,
476 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
477 .cra_blocksize = CAST6_BLOCK_SIZE,
478 .cra_ctxsize = sizeof(struct async_helper_ctx),
479 .cra_alignmask = 0,
480 .cra_type = &crypto_ablkcipher_type,
481 .cra_module = THIS_MODULE,
482 .cra_init = ablk_init,
483 .cra_exit = ablk_exit,
484 .cra_u = {
485 .ablkcipher = {
486 .min_keysize = CAST6_MIN_KEY_SIZE,
487 .max_keysize = CAST6_MAX_KEY_SIZE,
488 .setkey = ablk_set_key,
489 .encrypt = ablk_encrypt,
490 .decrypt = ablk_decrypt,
491 },
492 },
493}, {
494 .cra_name = "cbc(cast6)",
495 .cra_driver_name = "cbc-cast6-avx",
496 .cra_priority = 200,
497 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
498 .cra_blocksize = CAST6_BLOCK_SIZE,
499 .cra_ctxsize = sizeof(struct async_helper_ctx),
500 .cra_alignmask = 0,
501 .cra_type = &crypto_ablkcipher_type,
502 .cra_module = THIS_MODULE,
503 .cra_init = ablk_init,
504 .cra_exit = ablk_exit,
505 .cra_u = {
506 .ablkcipher = {
507 .min_keysize = CAST6_MIN_KEY_SIZE,
508 .max_keysize = CAST6_MAX_KEY_SIZE,
509 .ivsize = CAST6_BLOCK_SIZE,
510 .setkey = ablk_set_key,
511 .encrypt = __ablk_encrypt,
512 .decrypt = ablk_decrypt,
513 },
514 },
515}, {
516 .cra_name = "ctr(cast6)",
517 .cra_driver_name = "ctr-cast6-avx",
518 .cra_priority = 200,
519 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
520 .cra_blocksize = 1,
521 .cra_ctxsize = sizeof(struct async_helper_ctx),
522 .cra_alignmask = 0,
523 .cra_type = &crypto_ablkcipher_type,
524 .cra_module = THIS_MODULE,
525 .cra_init = ablk_init,
526 .cra_exit = ablk_exit,
527 .cra_u = {
528 .ablkcipher = {
529 .min_keysize = CAST6_MIN_KEY_SIZE,
530 .max_keysize = CAST6_MAX_KEY_SIZE,
531 .ivsize = CAST6_BLOCK_SIZE,
532 .setkey = ablk_set_key,
533 .encrypt = ablk_encrypt,
534 .decrypt = ablk_encrypt,
535 .geniv = "chainiv",
536 },
537 },
538}, {
539 .cra_name = "lrw(cast6)",
540 .cra_driver_name = "lrw-cast6-avx",
541 .cra_priority = 200,
542 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
543 .cra_blocksize = CAST6_BLOCK_SIZE,
544 .cra_ctxsize = sizeof(struct async_helper_ctx),
545 .cra_alignmask = 0,
546 .cra_type = &crypto_ablkcipher_type,
547 .cra_module = THIS_MODULE,
548 .cra_init = ablk_init,
549 .cra_exit = ablk_exit,
550 .cra_u = {
551 .ablkcipher = {
552 .min_keysize = CAST6_MIN_KEY_SIZE +
553 CAST6_BLOCK_SIZE,
554 .max_keysize = CAST6_MAX_KEY_SIZE +
555 CAST6_BLOCK_SIZE,
556 .ivsize = CAST6_BLOCK_SIZE,
557 .setkey = ablk_set_key,
558 .encrypt = ablk_encrypt,
559 .decrypt = ablk_decrypt,
560 },
561 },
562}, {
563 .cra_name = "xts(cast6)",
564 .cra_driver_name = "xts-cast6-avx",
565 .cra_priority = 200,
566 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
567 .cra_blocksize = CAST6_BLOCK_SIZE,
568 .cra_ctxsize = sizeof(struct async_helper_ctx),
569 .cra_alignmask = 0,
570 .cra_type = &crypto_ablkcipher_type,
571 .cra_module = THIS_MODULE,
572 .cra_init = ablk_init,
573 .cra_exit = ablk_exit,
574 .cra_u = {
575 .ablkcipher = {
576 .min_keysize = CAST6_MIN_KEY_SIZE * 2,
577 .max_keysize = CAST6_MAX_KEY_SIZE * 2,
578 .ivsize = CAST6_BLOCK_SIZE,
579 .setkey = ablk_set_key,
580 .encrypt = ablk_encrypt,
581 .decrypt = ablk_decrypt,
582 },
583 }, 289 },
584} }; 290};
291
292static struct simd_skcipher_alg *cast6_simd_algs[ARRAY_SIZE(cast6_algs)];
585 293
586static int __init cast6_init(void) 294static int __init cast6_init(void)
587{ 295{
@@ -593,12 +301,15 @@ static int __init cast6_init(void)
593 return -ENODEV; 301 return -ENODEV;
594 } 302 }
595 303
596 return crypto_register_algs(cast6_algs, ARRAY_SIZE(cast6_algs)); 304 return simd_register_skciphers_compat(cast6_algs,
305 ARRAY_SIZE(cast6_algs),
306 cast6_simd_algs);
597} 307}
598 308
599static void __exit cast6_exit(void) 309static void __exit cast6_exit(void)
600{ 310{
601 crypto_unregister_algs(cast6_algs, ARRAY_SIZE(cast6_algs)); 311 simd_unregister_skciphers(cast6_algs, ARRAY_SIZE(cast6_algs),
312 cast6_simd_algs);
602} 313}
603 314
604module_init(cast6_init); 315module_init(cast6_init);
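
The xts(cast6) entry above follows the usual XTS key convention: the skcipher key is twice the block cipher key, the first half keys the data-unit cipher and the second half keys the tweak cipher, with xts_verify_key() doing the shared sanity checks. A hedged sketch of such a setkey, with a hypothetical key schedule and context layout:

#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>

struct example_xts_ctx {
	u32 tweak_key[60];	/* expanded tweak-cipher key (placeholder size) */
	u32 crypt_key[60];	/* expanded data-cipher key (placeholder size) */
};

/* Placeholder for the real key schedule. */
static int example_expand_key(u32 *out, const u8 *key, unsigned int keylen)
{
	return 0;
}

static int example_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct example_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	/* The key must split into two equal halves; in FIPS mode the two
	 * halves must also differ. */
	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* First half: data-unit cipher key. */
	err = example_expand_key(ctx->crypt_key, key, keylen);
	if (err)
		return err;

	/* Second half: tweak cipher key. */
	return example_expand_key(ctx->tweak_key, key + keylen, keylen);
}
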
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c
index 30c0a37f4882..5c610d4ef9fc 100644
--- a/arch/x86/crypto/des3_ede_glue.c
+++ b/arch/x86/crypto/des3_ede_glue.c
@@ -20,13 +20,13 @@
20 * 20 *
21 */ 21 */
22 22
23#include <asm/processor.h> 23#include <crypto/algapi.h>
24#include <crypto/des.h> 24#include <crypto/des.h>
25#include <crypto/internal/skcipher.h>
25#include <linux/crypto.h> 26#include <linux/crypto.h>
26#include <linux/init.h> 27#include <linux/init.h>
27#include <linux/module.h> 28#include <linux/module.h>
28#include <linux/types.h> 29#include <linux/types.h>
29#include <crypto/algapi.h>
30 30
31struct des3_ede_x86_ctx { 31struct des3_ede_x86_ctx {
32 u32 enc_expkey[DES3_EDE_EXPKEY_WORDS]; 32 u32 enc_expkey[DES3_EDE_EXPKEY_WORDS];
@@ -83,18 +83,18 @@ static void des3_ede_x86_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
83 des3_ede_dec_blk(crypto_tfm_ctx(tfm), dst, src); 83 des3_ede_dec_blk(crypto_tfm_ctx(tfm), dst, src);
84} 84}
85 85
86static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, 86static int ecb_crypt(struct skcipher_request *req, const u32 *expkey)
87 const u32 *expkey)
88{ 87{
89 unsigned int bsize = DES3_EDE_BLOCK_SIZE; 88 const unsigned int bsize = DES3_EDE_BLOCK_SIZE;
89 struct skcipher_walk walk;
90 unsigned int nbytes; 90 unsigned int nbytes;
91 int err; 91 int err;
92 92
93 err = blkcipher_walk_virt(desc, walk); 93 err = skcipher_walk_virt(&walk, req, false);
94 94
95 while ((nbytes = walk->nbytes)) { 95 while ((nbytes = walk.nbytes)) {
96 u8 *wsrc = walk->src.virt.addr; 96 u8 *wsrc = walk.src.virt.addr;
97 u8 *wdst = walk->dst.virt.addr; 97 u8 *wdst = walk.dst.virt.addr;
98 98
99 /* Process four block batch */ 99 /* Process four block batch */
100 if (nbytes >= bsize * 3) { 100 if (nbytes >= bsize * 3) {
@@ -121,36 +121,31 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
121 } while (nbytes >= bsize); 121 } while (nbytes >= bsize);
122 122
123done: 123done:
124 err = blkcipher_walk_done(desc, walk, nbytes); 124 err = skcipher_walk_done(&walk, nbytes);
125 } 125 }
126 126
127 return err; 127 return err;
128} 128}
129 129
130static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 130static int ecb_encrypt(struct skcipher_request *req)
131 struct scatterlist *src, unsigned int nbytes)
132{ 131{
133 struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 132 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
134 struct blkcipher_walk walk; 133 struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
135 134
136 blkcipher_walk_init(&walk, dst, src, nbytes); 135 return ecb_crypt(req, ctx->enc_expkey);
137 return ecb_crypt(desc, &walk, ctx->enc_expkey);
138} 136}
139 137
140static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 138static int ecb_decrypt(struct skcipher_request *req)
141 struct scatterlist *src, unsigned int nbytes)
142{ 139{
143 struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 140 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
144 struct blkcipher_walk walk; 141 struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
145 142
146 blkcipher_walk_init(&walk, dst, src, nbytes); 143 return ecb_crypt(req, ctx->dec_expkey);
147 return ecb_crypt(desc, &walk, ctx->dec_expkey);
148} 144}
149 145
150static unsigned int __cbc_encrypt(struct blkcipher_desc *desc, 146static unsigned int __cbc_encrypt(struct des3_ede_x86_ctx *ctx,
151 struct blkcipher_walk *walk) 147 struct skcipher_walk *walk)
152{ 148{
153 struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
154 unsigned int bsize = DES3_EDE_BLOCK_SIZE; 149 unsigned int bsize = DES3_EDE_BLOCK_SIZE;
155 unsigned int nbytes = walk->nbytes; 150 unsigned int nbytes = walk->nbytes;
156 u64 *src = (u64 *)walk->src.virt.addr; 151 u64 *src = (u64 *)walk->src.virt.addr;
@@ -171,27 +166,27 @@ static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
171 return nbytes; 166 return nbytes;
172} 167}
173 168
174static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 169static int cbc_encrypt(struct skcipher_request *req)
175 struct scatterlist *src, unsigned int nbytes)
176{ 170{
177 struct blkcipher_walk walk; 171 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
172 struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
173 struct skcipher_walk walk;
174 unsigned int nbytes;
178 int err; 175 int err;
179 176
180 blkcipher_walk_init(&walk, dst, src, nbytes); 177 err = skcipher_walk_virt(&walk, req, false);
181 err = blkcipher_walk_virt(desc, &walk);
182 178
183 while ((nbytes = walk.nbytes)) { 179 while ((nbytes = walk.nbytes)) {
184 nbytes = __cbc_encrypt(desc, &walk); 180 nbytes = __cbc_encrypt(ctx, &walk);
185 err = blkcipher_walk_done(desc, &walk, nbytes); 181 err = skcipher_walk_done(&walk, nbytes);
186 } 182 }
187 183
188 return err; 184 return err;
189} 185}
190 186
191static unsigned int __cbc_decrypt(struct blkcipher_desc *desc, 187static unsigned int __cbc_decrypt(struct des3_ede_x86_ctx *ctx,
192 struct blkcipher_walk *walk) 188 struct skcipher_walk *walk)
193{ 189{
194 struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
195 unsigned int bsize = DES3_EDE_BLOCK_SIZE; 190 unsigned int bsize = DES3_EDE_BLOCK_SIZE;
196 unsigned int nbytes = walk->nbytes; 191 unsigned int nbytes = walk->nbytes;
197 u64 *src = (u64 *)walk->src.virt.addr; 192 u64 *src = (u64 *)walk->src.virt.addr;
@@ -250,25 +245,26 @@ done:
250 return nbytes; 245 return nbytes;
251} 246}
252 247
253static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 248static int cbc_decrypt(struct skcipher_request *req)
254 struct scatterlist *src, unsigned int nbytes)
255{ 249{
256 struct blkcipher_walk walk; 250 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
251 struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
252 struct skcipher_walk walk;
253 unsigned int nbytes;
257 int err; 254 int err;
258 255
259 blkcipher_walk_init(&walk, dst, src, nbytes); 256 err = skcipher_walk_virt(&walk, req, false);
260 err = blkcipher_walk_virt(desc, &walk);
261 257
262 while ((nbytes = walk.nbytes)) { 258 while ((nbytes = walk.nbytes)) {
263 nbytes = __cbc_decrypt(desc, &walk); 259 nbytes = __cbc_decrypt(ctx, &walk);
264 err = blkcipher_walk_done(desc, &walk, nbytes); 260 err = skcipher_walk_done(&walk, nbytes);
265 } 261 }
266 262
267 return err; 263 return err;
268} 264}
269 265
270static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx, 266static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx,
271 struct blkcipher_walk *walk) 267 struct skcipher_walk *walk)
272{ 268{
273 u8 *ctrblk = walk->iv; 269 u8 *ctrblk = walk->iv;
274 u8 keystream[DES3_EDE_BLOCK_SIZE]; 270 u8 keystream[DES3_EDE_BLOCK_SIZE];
@@ -282,10 +278,9 @@ static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx,
282 crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE); 278 crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE);
283} 279}
284 280
285static unsigned int __ctr_crypt(struct blkcipher_desc *desc, 281static unsigned int __ctr_crypt(struct des3_ede_x86_ctx *ctx,
286 struct blkcipher_walk *walk) 282 struct skcipher_walk *walk)
287{ 283{
288 struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
289 unsigned int bsize = DES3_EDE_BLOCK_SIZE; 284 unsigned int bsize = DES3_EDE_BLOCK_SIZE;
290 unsigned int nbytes = walk->nbytes; 285 unsigned int nbytes = walk->nbytes;
291 __be64 *src = (__be64 *)walk->src.virt.addr; 286 __be64 *src = (__be64 *)walk->src.virt.addr;
@@ -333,23 +328,24 @@ done:
333 return nbytes; 328 return nbytes;
334} 329}
335 330
336static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, 331static int ctr_crypt(struct skcipher_request *req)
337 struct scatterlist *src, unsigned int nbytes)
338{ 332{
339 struct blkcipher_walk walk; 333 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
334 struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
335 struct skcipher_walk walk;
336 unsigned int nbytes;
340 int err; 337 int err;
341 338
342 blkcipher_walk_init(&walk, dst, src, nbytes); 339 err = skcipher_walk_virt(&walk, req, false);
343 err = blkcipher_walk_virt_block(desc, &walk, DES3_EDE_BLOCK_SIZE);
344 340
345 while ((nbytes = walk.nbytes) >= DES3_EDE_BLOCK_SIZE) { 341 while ((nbytes = walk.nbytes) >= DES3_EDE_BLOCK_SIZE) {
346 nbytes = __ctr_crypt(desc, &walk); 342 nbytes = __ctr_crypt(ctx, &walk);
347 err = blkcipher_walk_done(desc, &walk, nbytes); 343 err = skcipher_walk_done(&walk, nbytes);
348 } 344 }
349 345
350 if (walk.nbytes) { 346 if (nbytes) {
351 ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk); 347 ctr_crypt_final(ctx, &walk);
352 err = blkcipher_walk_done(desc, &walk, 0); 348 err = skcipher_walk_done(&walk, 0);
353 } 349 }
354 350
355 return err; 351 return err;
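
ctr_crypt() above finishes a request whose length is not a multiple of the block size by encrypting the counter into a keystream buffer, XOR-ing only the remaining bytes, and then closing the walk with skcipher_walk_done(&walk, 0). A minimal sketch of that tail step; the block size and the block primitive are placeholders rather than the real DES3 code.

#include <crypto/algapi.h>		/* crypto_inc(), crypto_xor() */
#include <crypto/internal/skcipher.h>
#include <linux/string.h>

#define EXAMPLE_BLOCK_SIZE 8

/* Placeholder for the real single-block encryption. */
static void example_encrypt_block(void *ctx, u8 *dst, const u8 *src)
{
	memcpy(dst, src, EXAMPLE_BLOCK_SIZE);
}

static void example_ctr_final(void *ctx, struct skcipher_walk *walk)
{
	u8 *ctrblk = walk->iv;
	u8 keystream[EXAMPLE_BLOCK_SIZE];
	const u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;	/* < EXAMPLE_BLOCK_SIZE here */

	/* Keystream = E(counter); only its first nbytes bytes are used. */
	example_encrypt_block(ctx, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);

	/* Advance the counter so chained requests see the right IV. */
	crypto_inc(ctrblk, EXAMPLE_BLOCK_SIZE);
}
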
@@ -381,7 +377,14 @@ static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key,
381 return 0; 377 return 0;
382} 378}
383 379
384static struct crypto_alg des3_ede_algs[4] = { { 380static int des3_ede_x86_setkey_skcipher(struct crypto_skcipher *tfm,
381 const u8 *key,
382 unsigned int keylen)
383{
384 return des3_ede_x86_setkey(&tfm->base, key, keylen);
385}
386
387static struct crypto_alg des3_ede_cipher = {
385 .cra_name = "des3_ede", 388 .cra_name = "des3_ede",
386 .cra_driver_name = "des3_ede-asm", 389 .cra_driver_name = "des3_ede-asm",
387 .cra_priority = 200, 390 .cra_priority = 200,
@@ -399,66 +402,50 @@ static struct crypto_alg des3_ede_algs[4] = { {
399 .cia_decrypt = des3_ede_x86_decrypt, 402 .cia_decrypt = des3_ede_x86_decrypt,
400 } 403 }
401 } 404 }
402}, { 405};
403 .cra_name = "ecb(des3_ede)", 406
404 .cra_driver_name = "ecb-des3_ede-asm", 407static struct skcipher_alg des3_ede_skciphers[] = {
405 .cra_priority = 300, 408 {
406 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 409 .base.cra_name = "ecb(des3_ede)",
407 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 410 .base.cra_driver_name = "ecb-des3_ede-asm",
408 .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), 411 .base.cra_priority = 300,
409 .cra_alignmask = 0, 412 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
410 .cra_type = &crypto_blkcipher_type, 413 .base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
411 .cra_module = THIS_MODULE, 414 .base.cra_module = THIS_MODULE,
412 .cra_u = { 415 .min_keysize = DES3_EDE_KEY_SIZE,
413 .blkcipher = { 416 .max_keysize = DES3_EDE_KEY_SIZE,
414 .min_keysize = DES3_EDE_KEY_SIZE, 417 .setkey = des3_ede_x86_setkey_skcipher,
415 .max_keysize = DES3_EDE_KEY_SIZE, 418 .encrypt = ecb_encrypt,
416 .setkey = des3_ede_x86_setkey, 419 .decrypt = ecb_decrypt,
417 .encrypt = ecb_encrypt, 420 }, {
418 .decrypt = ecb_decrypt, 421 .base.cra_name = "cbc(des3_ede)",
419 }, 422 .base.cra_driver_name = "cbc-des3_ede-asm",
420 }, 423 .base.cra_priority = 300,
421}, { 424 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
422 .cra_name = "cbc(des3_ede)", 425 .base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
423 .cra_driver_name = "cbc-des3_ede-asm", 426 .base.cra_module = THIS_MODULE,
424 .cra_priority = 300, 427 .min_keysize = DES3_EDE_KEY_SIZE,
425 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 428 .max_keysize = DES3_EDE_KEY_SIZE,
426 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 429 .ivsize = DES3_EDE_BLOCK_SIZE,
427 .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), 430 .setkey = des3_ede_x86_setkey_skcipher,
428 .cra_alignmask = 0, 431 .encrypt = cbc_encrypt,
429 .cra_type = &crypto_blkcipher_type, 432 .decrypt = cbc_decrypt,
430 .cra_module = THIS_MODULE, 433 }, {
431 .cra_u = { 434 .base.cra_name = "ctr(des3_ede)",
432 .blkcipher = { 435 .base.cra_driver_name = "ctr-des3_ede-asm",
433 .min_keysize = DES3_EDE_KEY_SIZE, 436 .base.cra_priority = 300,
434 .max_keysize = DES3_EDE_KEY_SIZE, 437 .base.cra_blocksize = 1,
435 .ivsize = DES3_EDE_BLOCK_SIZE, 438 .base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
436 .setkey = des3_ede_x86_setkey, 439 .base.cra_module = THIS_MODULE,
437 .encrypt = cbc_encrypt, 440 .min_keysize = DES3_EDE_KEY_SIZE,
438 .decrypt = cbc_decrypt, 441 .max_keysize = DES3_EDE_KEY_SIZE,
439 }, 442 .ivsize = DES3_EDE_BLOCK_SIZE,
440 }, 443 .chunksize = DES3_EDE_BLOCK_SIZE,
441}, { 444 .setkey = des3_ede_x86_setkey_skcipher,
442 .cra_name = "ctr(des3_ede)", 445 .encrypt = ctr_crypt,
443 .cra_driver_name = "ctr-des3_ede-asm", 446 .decrypt = ctr_crypt,
444 .cra_priority = 300, 447 }
445 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 448};
446 .cra_blocksize = 1,
447 .cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
448 .cra_alignmask = 0,
449 .cra_type = &crypto_blkcipher_type,
450 .cra_module = THIS_MODULE,
451 .cra_u = {
452 .blkcipher = {
453 .min_keysize = DES3_EDE_KEY_SIZE,
454 .max_keysize = DES3_EDE_KEY_SIZE,
455 .ivsize = DES3_EDE_BLOCK_SIZE,
456 .setkey = des3_ede_x86_setkey,
457 .encrypt = ctr_crypt,
458 .decrypt = ctr_crypt,
459 },
460 },
461} };
462 449
463static bool is_blacklisted_cpu(void) 450static bool is_blacklisted_cpu(void)
464{ 451{
@@ -483,17 +470,30 @@ MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
483 470
484static int __init des3_ede_x86_init(void) 471static int __init des3_ede_x86_init(void)
485{ 472{
473 int err;
474
486 if (!force && is_blacklisted_cpu()) { 475 if (!force && is_blacklisted_cpu()) {
487 pr_info("des3_ede-x86_64: performance on this CPU would be suboptimal: disabling des3_ede-x86_64.\n"); 476 pr_info("des3_ede-x86_64: performance on this CPU would be suboptimal: disabling des3_ede-x86_64.\n");
488 return -ENODEV; 477 return -ENODEV;
489 } 478 }
490 479
491 return crypto_register_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs)); 480 err = crypto_register_alg(&des3_ede_cipher);
481 if (err)
482 return err;
483
484 err = crypto_register_skciphers(des3_ede_skciphers,
485 ARRAY_SIZE(des3_ede_skciphers));
486 if (err)
487 crypto_unregister_alg(&des3_ede_cipher);
488
489 return err;
492} 490}
493 491
494static void __exit des3_ede_x86_fini(void) 492static void __exit des3_ede_x86_fini(void)
495{ 493{
496 crypto_unregister_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs)); 494 crypto_unregister_alg(&des3_ede_cipher);
495 crypto_unregister_skciphers(des3_ede_skciphers,
496 ARRAY_SIZE(des3_ede_skciphers));
497} 497}
498 498
499module_init(des3_ede_x86_init); 499module_init(des3_ede_x86_init);
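
Because the converted driver now registers two kinds of objects, the bare "des3_ede" single-block cipher as a crypto_alg and the ecb/cbc/ctr modes as skciphers, the init path above has to unwind the first registration when the second one fails. The same shape in isolation, with illustrative names and the alg definitions elided:

#include <crypto/internal/skcipher.h>
#include <linux/crypto.h>
#include <linux/module.h>

static struct crypto_alg example_cipher;		/* single-block cipher */
static struct skcipher_alg example_skciphers[3];	/* ecb/cbc/ctr modes */

static int __init example_mod_init(void)
{
	int err;

	err = crypto_register_alg(&example_cipher);
	if (err)
		return err;

	err = crypto_register_skciphers(example_skciphers,
					ARRAY_SIZE(example_skciphers));
	if (err)
		/* Roll back the first registration so the module can load
		 * again (or unload) cleanly. */
		crypto_unregister_alg(&example_cipher);

	return err;
}

static void __exit example_mod_exit(void)
{
	crypto_unregister_alg(&example_cipher);
	crypto_unregister_skciphers(example_skciphers,
				    ARRAY_SIZE(example_skciphers));
}

module_init(example_mod_init);
module_exit(example_mod_exit);
MODULE_LICENSE("GPL");
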
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
index d61e57960fe0..a78ef99a9981 100644
--- a/arch/x86/crypto/glue_helper.c
+++ b/arch/x86/crypto/glue_helper.c
@@ -29,313 +29,212 @@
29#include <crypto/b128ops.h> 29#include <crypto/b128ops.h>
30#include <crypto/gf128mul.h> 30#include <crypto/gf128mul.h>
31#include <crypto/internal/skcipher.h> 31#include <crypto/internal/skcipher.h>
32#include <crypto/lrw.h>
33#include <crypto/xts.h> 32#include <crypto/xts.h>
34#include <asm/crypto/glue_helper.h> 33#include <asm/crypto/glue_helper.h>
35 34
36static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx, 35int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
37 struct blkcipher_desc *desc, 36 struct skcipher_request *req)
38 struct blkcipher_walk *walk)
39{ 37{
40 void *ctx = crypto_blkcipher_ctx(desc->tfm); 38 void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
41 const unsigned int bsize = 128 / 8; 39 const unsigned int bsize = 128 / 8;
42 unsigned int nbytes, i, func_bytes; 40 struct skcipher_walk walk;
43 bool fpu_enabled = false; 41 bool fpu_enabled = false;
42 unsigned int nbytes;
44 int err; 43 int err;
45 44
46 err = blkcipher_walk_virt(desc, walk); 45 err = skcipher_walk_virt(&walk, req, false);
47 46
48 while ((nbytes = walk->nbytes)) { 47 while ((nbytes = walk.nbytes)) {
49 u8 *wsrc = walk->src.virt.addr; 48 const u8 *src = walk.src.virt.addr;
50 u8 *wdst = walk->dst.virt.addr; 49 u8 *dst = walk.dst.virt.addr;
50 unsigned int func_bytes;
51 unsigned int i;
51 52
52 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, 53 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
53 desc, fpu_enabled, nbytes); 54 &walk, fpu_enabled, nbytes);
54
55 for (i = 0; i < gctx->num_funcs; i++) { 55 for (i = 0; i < gctx->num_funcs; i++) {
56 func_bytes = bsize * gctx->funcs[i].num_blocks; 56 func_bytes = bsize * gctx->funcs[i].num_blocks;
57 57
58 /* Process multi-block batch */ 58 if (nbytes < func_bytes)
59 if (nbytes >= func_bytes) { 59 continue;
60 do {
61 gctx->funcs[i].fn_u.ecb(ctx, wdst,
62 wsrc);
63 60
64 wsrc += func_bytes; 61 /* Process multi-block batch */
65 wdst += func_bytes; 62 do {
66 nbytes -= func_bytes; 63 gctx->funcs[i].fn_u.ecb(ctx, dst, src);
67 } while (nbytes >= func_bytes); 64 src += func_bytes;
65 dst += func_bytes;
66 nbytes -= func_bytes;
67 } while (nbytes >= func_bytes);
68 68
69 if (nbytes < bsize) 69 if (nbytes < bsize)
70 goto done; 70 break;
71 }
72 } 71 }
73 72 err = skcipher_walk_done(&walk, nbytes);
74done:
75 err = blkcipher_walk_done(desc, walk, nbytes);
76 } 73 }
77 74
78 glue_fpu_end(fpu_enabled); 75 glue_fpu_end(fpu_enabled);
79 return err; 76 return err;
80} 77}
78EXPORT_SYMBOL_GPL(glue_ecb_req_128bit);
81 79
82int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx, 80int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
83 struct blkcipher_desc *desc, struct scatterlist *dst, 81 struct skcipher_request *req)
84 struct scatterlist *src, unsigned int nbytes)
85{ 82{
86 struct blkcipher_walk walk; 83 void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
87
88 blkcipher_walk_init(&walk, dst, src, nbytes);
89 return __glue_ecb_crypt_128bit(gctx, desc, &walk);
90}
91EXPORT_SYMBOL_GPL(glue_ecb_crypt_128bit);
92
93static unsigned int __glue_cbc_encrypt_128bit(const common_glue_func_t fn,
94 struct blkcipher_desc *desc,
95 struct blkcipher_walk *walk)
96{
97 void *ctx = crypto_blkcipher_ctx(desc->tfm);
98 const unsigned int bsize = 128 / 8; 84 const unsigned int bsize = 128 / 8;
99 unsigned int nbytes = walk->nbytes; 85 struct skcipher_walk walk;
100 u128 *src = (u128 *)walk->src.virt.addr; 86 unsigned int nbytes;
101 u128 *dst = (u128 *)walk->dst.virt.addr;
102 u128 *iv = (u128 *)walk->iv;
103
104 do {
105 u128_xor(dst, src, iv);
106 fn(ctx, (u8 *)dst, (u8 *)dst);
107 iv = dst;
108
109 src += 1;
110 dst += 1;
111 nbytes -= bsize;
112 } while (nbytes >= bsize);
113
114 *(u128 *)walk->iv = *iv;
115 return nbytes;
116}
117
118int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
119 struct blkcipher_desc *desc,
120 struct scatterlist *dst,
121 struct scatterlist *src, unsigned int nbytes)
122{
123 struct blkcipher_walk walk;
124 int err; 87 int err;
125 88
126 blkcipher_walk_init(&walk, dst, src, nbytes); 89 err = skcipher_walk_virt(&walk, req, false);
127 err = blkcipher_walk_virt(desc, &walk);
128 90
129 while ((nbytes = walk.nbytes)) { 91 while ((nbytes = walk.nbytes)) {
130 nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk); 92 const u128 *src = (u128 *)walk.src.virt.addr;
131 err = blkcipher_walk_done(desc, &walk, nbytes); 93 u128 *dst = (u128 *)walk.dst.virt.addr;
94 u128 *iv = (u128 *)walk.iv;
95
96 do {
97 u128_xor(dst, src, iv);
98 fn(ctx, (u8 *)dst, (u8 *)dst);
99 iv = dst;
100 src++;
101 dst++;
102 nbytes -= bsize;
103 } while (nbytes >= bsize);
104
105 *(u128 *)walk.iv = *iv;
106 err = skcipher_walk_done(&walk, nbytes);
132 } 107 }
133
134 return err; 108 return err;
135} 109}
136EXPORT_SYMBOL_GPL(glue_cbc_encrypt_128bit); 110EXPORT_SYMBOL_GPL(glue_cbc_encrypt_req_128bit);
137 111
138static unsigned int 112int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
139__glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx, 113 struct skcipher_request *req)
140 struct blkcipher_desc *desc,
141 struct blkcipher_walk *walk)
142{ 114{
143 void *ctx = crypto_blkcipher_ctx(desc->tfm); 115 void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
144 const unsigned int bsize = 128 / 8; 116 const unsigned int bsize = 128 / 8;
145 unsigned int nbytes = walk->nbytes; 117 struct skcipher_walk walk;
146 u128 *src = (u128 *)walk->src.virt.addr; 118 bool fpu_enabled = false;
147 u128 *dst = (u128 *)walk->dst.virt.addr; 119 unsigned int nbytes;
148 u128 last_iv; 120 int err;
149 unsigned int num_blocks, func_bytes; 121
150 unsigned int i; 122 err = skcipher_walk_virt(&walk, req, false);
151 123
152 /* Start of the last block. */ 124 while ((nbytes = walk.nbytes)) {
153 src += nbytes / bsize - 1; 125 const u128 *src = walk.src.virt.addr;
154 dst += nbytes / bsize - 1; 126 u128 *dst = walk.dst.virt.addr;
127 unsigned int func_bytes, num_blocks;
128 unsigned int i;
129 u128 last_iv;
155 130
156 last_iv = *src; 131 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
132 &walk, fpu_enabled, nbytes);
133 /* Start of the last block. */
134 src += nbytes / bsize - 1;
135 dst += nbytes / bsize - 1;
157 136
158 for (i = 0; i < gctx->num_funcs; i++) { 137 last_iv = *src;
159 num_blocks = gctx->funcs[i].num_blocks;
160 func_bytes = bsize * num_blocks;
161 138
162 /* Process multi-block batch */ 139 for (i = 0; i < gctx->num_funcs; i++) {
163 if (nbytes >= func_bytes) { 140 num_blocks = gctx->funcs[i].num_blocks;
141 func_bytes = bsize * num_blocks;
142
143 if (nbytes < func_bytes)
144 continue;
145
146 /* Process multi-block batch */
164 do { 147 do {
165 nbytes -= func_bytes - bsize;
166 src -= num_blocks - 1; 148 src -= num_blocks - 1;
167 dst -= num_blocks - 1; 149 dst -= num_blocks - 1;
168 150
169 gctx->funcs[i].fn_u.cbc(ctx, dst, src); 151 gctx->funcs[i].fn_u.cbc(ctx, dst, src);
170 152
171 nbytes -= bsize; 153 nbytes -= func_bytes;
172 if (nbytes < bsize) 154 if (nbytes < bsize)
173 goto done; 155 goto done;
174 156
175 u128_xor(dst, dst, src - 1); 157 u128_xor(dst, dst, --src);
176 src -= 1; 158 dst--;
177 dst -= 1;
178 } while (nbytes >= func_bytes); 159 } while (nbytes >= func_bytes);
179 } 160 }
180 }
181
182done: 161done:
183 u128_xor(dst, dst, (u128 *)walk->iv); 162 u128_xor(dst, dst, (u128 *)walk.iv);
184 *(u128 *)walk->iv = last_iv; 163 *(u128 *)walk.iv = last_iv;
185 164 err = skcipher_walk_done(&walk, nbytes);
186 return nbytes;
187}
188
189int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
190 struct blkcipher_desc *desc,
191 struct scatterlist *dst,
192 struct scatterlist *src, unsigned int nbytes)
193{
194 const unsigned int bsize = 128 / 8;
195 bool fpu_enabled = false;
196 struct blkcipher_walk walk;
197 int err;
198
199 blkcipher_walk_init(&walk, dst, src, nbytes);
200 err = blkcipher_walk_virt(desc, &walk);
201
202 while ((nbytes = walk.nbytes)) {
203 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
204 desc, fpu_enabled, nbytes);
205 nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
206 err = blkcipher_walk_done(desc, &walk, nbytes);
207 } 165 }
208 166
209 glue_fpu_end(fpu_enabled); 167 glue_fpu_end(fpu_enabled);
210 return err; 168 return err;
211} 169}
212EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit); 170EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit);
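
glue_cbc_decrypt_req_128bit() above walks each segment from the last block toward the first. That ordering is what keeps in-place operation (dst == src) correct: every plaintext block is XOR-ed with the previous ciphertext block, which a forward in-place pass would already have overwritten. A single-block-at-a-time sketch of the same idea, with a placeholder block decryptor:

#include <linux/string.h>
#include <linux/types.h>

#define EXAMPLE_BLK 16

/* Placeholder for the real single-block decryption. */
static void example_decrypt_block(void *ctx, u8 *dst, const u8 *src)
{
	memcpy(dst, src, EXAMPLE_BLK);
}

/* Decrypt nblocks (>= 1) CBC blocks; works for dst == src. */
static void example_cbc_decrypt(void *ctx, u8 *dst, const u8 *src,
				unsigned int nblocks, u8 iv[EXAMPLE_BLK])
{
	u8 next_iv[EXAMPLE_BLK];
	unsigned int i, j;

	/* The last ciphertext block becomes the IV for the next segment. */
	memcpy(next_iv, src + (nblocks - 1) * EXAMPLE_BLK, EXAMPLE_BLK);

	/* Work backwards so untouched ciphertext is still available. */
	for (i = nblocks; i-- > 0; ) {
		example_decrypt_block(ctx, dst + i * EXAMPLE_BLK,
				      src + i * EXAMPLE_BLK);
		if (i == 0)
			break;
		/* P[i] = D(C[i]) ^ C[i-1]; block i-1 has not been written
		 * yet, so it still holds ciphertext even when dst == src. */
		for (j = 0; j < EXAMPLE_BLK; j++)
			dst[i * EXAMPLE_BLK + j] ^= src[(i - 1) * EXAMPLE_BLK + j];
	}

	/* The first block of the segment is chained to the incoming IV. */
	for (j = 0; j < EXAMPLE_BLK; j++)
		dst[j] ^= iv[j];

	memcpy(iv, next_iv, EXAMPLE_BLK);
}
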
213 171
214static void glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr, 172int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
215 struct blkcipher_desc *desc, 173 struct skcipher_request *req)
216 struct blkcipher_walk *walk)
217{ 174{
218 void *ctx = crypto_blkcipher_ctx(desc->tfm); 175 void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
219 u8 *src = (u8 *)walk->src.virt.addr; 176 const unsigned int bsize = 128 / 8;
220 u8 *dst = (u8 *)walk->dst.virt.addr; 177 struct skcipher_walk walk;
221 unsigned int nbytes = walk->nbytes; 178 bool fpu_enabled = false;
222 le128 ctrblk; 179 unsigned int nbytes;
223 u128 tmp; 180 int err;
224 181
225 be128_to_le128(&ctrblk, (be128 *)walk->iv); 182 err = skcipher_walk_virt(&walk, req, false);
226 183
227 memcpy(&tmp, src, nbytes); 184 while ((nbytes = walk.nbytes) >= bsize) {
228 fn_ctr(ctx, &tmp, &tmp, &ctrblk); 185 const u128 *src = walk.src.virt.addr;
229 memcpy(dst, &tmp, nbytes); 186 u128 *dst = walk.dst.virt.addr;
187 unsigned int func_bytes, num_blocks;
188 unsigned int i;
189 le128 ctrblk;
230 190
231 le128_to_be128((be128 *)walk->iv, &ctrblk); 191 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
232} 192 &walk, fpu_enabled, nbytes);
233 193
234static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx, 194 be128_to_le128(&ctrblk, (be128 *)walk.iv);
235 struct blkcipher_desc *desc,
236 struct blkcipher_walk *walk)
237{
238 const unsigned int bsize = 128 / 8;
239 void *ctx = crypto_blkcipher_ctx(desc->tfm);
240 unsigned int nbytes = walk->nbytes;
241 u128 *src = (u128 *)walk->src.virt.addr;
242 u128 *dst = (u128 *)walk->dst.virt.addr;
243 le128 ctrblk;
244 unsigned int num_blocks, func_bytes;
245 unsigned int i;
246 195
247 be128_to_le128(&ctrblk, (be128 *)walk->iv); 196 for (i = 0; i < gctx->num_funcs; i++) {
197 num_blocks = gctx->funcs[i].num_blocks;
198 func_bytes = bsize * num_blocks;
248 199
249 /* Process multi-block batch */ 200 if (nbytes < func_bytes)
250 for (i = 0; i < gctx->num_funcs; i++) { 201 continue;
251 num_blocks = gctx->funcs[i].num_blocks;
252 func_bytes = bsize * num_blocks;
253 202
254 if (nbytes >= func_bytes) { 203 /* Process multi-block batch */
255 do { 204 do {
256 gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk); 205 gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);
257
258 src += num_blocks; 206 src += num_blocks;
259 dst += num_blocks; 207 dst += num_blocks;
260 nbytes -= func_bytes; 208 nbytes -= func_bytes;
261 } while (nbytes >= func_bytes); 209 } while (nbytes >= func_bytes);
262 210
263 if (nbytes < bsize) 211 if (nbytes < bsize)
264 goto done; 212 break;
265 } 213 }
266 }
267
268done:
269 le128_to_be128((be128 *)walk->iv, &ctrblk);
270 return nbytes;
271}
272
273int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
274 struct blkcipher_desc *desc, struct scatterlist *dst,
275 struct scatterlist *src, unsigned int nbytes)
276{
277 const unsigned int bsize = 128 / 8;
278 bool fpu_enabled = false;
279 struct blkcipher_walk walk;
280 int err;
281
282 blkcipher_walk_init(&walk, dst, src, nbytes);
283 err = blkcipher_walk_virt_block(desc, &walk, bsize);
284 214
285 while ((nbytes = walk.nbytes) >= bsize) { 215 le128_to_be128((be128 *)walk.iv, &ctrblk);
286 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, 216 err = skcipher_walk_done(&walk, nbytes);
287 desc, fpu_enabled, nbytes);
288 nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
289 err = blkcipher_walk_done(desc, &walk, nbytes);
290 } 217 }
291 218
292 glue_fpu_end(fpu_enabled); 219 glue_fpu_end(fpu_enabled);
293 220
294 if (walk.nbytes) { 221 if (nbytes) {
295 glue_ctr_crypt_final_128bit( 222 le128 ctrblk;
296 gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk); 223 u128 tmp;
297 err = blkcipher_walk_done(desc, &walk, 0);
298 }
299
300 return err;
301}
302EXPORT_SYMBOL_GPL(glue_ctr_crypt_128bit);
303
304static unsigned int __glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
305 void *ctx,
306 struct blkcipher_desc *desc,
307 struct blkcipher_walk *walk)
308{
309 const unsigned int bsize = 128 / 8;
310 unsigned int nbytes = walk->nbytes;
311 u128 *src = (u128 *)walk->src.virt.addr;
312 u128 *dst = (u128 *)walk->dst.virt.addr;
313 unsigned int num_blocks, func_bytes;
314 unsigned int i;
315
316 /* Process multi-block batch */
317 for (i = 0; i < gctx->num_funcs; i++) {
318 num_blocks = gctx->funcs[i].num_blocks;
319 func_bytes = bsize * num_blocks;
320
321 if (nbytes >= func_bytes) {
322 do {
323 gctx->funcs[i].fn_u.xts(ctx, dst, src,
324 (le128 *)walk->iv);
325 224
326 src += num_blocks; 225 be128_to_le128(&ctrblk, (be128 *)walk.iv);
327 dst += num_blocks; 226 memcpy(&tmp, walk.src.virt.addr, nbytes);
328 nbytes -= func_bytes; 227 gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, &tmp, &tmp,
329 } while (nbytes >= func_bytes); 228 &ctrblk);
229 memcpy(walk.dst.virt.addr, &tmp, nbytes);
230 le128_to_be128((be128 *)walk.iv, &ctrblk);
330 231
331 if (nbytes < bsize) 232 err = skcipher_walk_done(&walk, 0);
332 goto done;
333 }
334 } 233 }
335 234
336done: 235 return err;
337 return nbytes;
338} 236}
237EXPORT_SYMBOL_GPL(glue_ctr_req_128bit);
339 238
340static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx, 239static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
341 void *ctx, 240 void *ctx,
@@ -372,46 +271,6 @@ done:
372 return nbytes; 271 return nbytes;
373} 272}
374 273
375/* for implementations implementing faster XTS IV generator */
376int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
377 struct blkcipher_desc *desc, struct scatterlist *dst,
378 struct scatterlist *src, unsigned int nbytes,
379 void (*tweak_fn)(void *ctx, u8 *dst, const u8 *src),
380 void *tweak_ctx, void *crypt_ctx)
381{
382 const unsigned int bsize = 128 / 8;
383 bool fpu_enabled = false;
384 struct blkcipher_walk walk;
385 int err;
386
387 blkcipher_walk_init(&walk, dst, src, nbytes);
388
389 err = blkcipher_walk_virt(desc, &walk);
390 nbytes = walk.nbytes;
391 if (!nbytes)
392 return err;
393
394 /* set minimum length to bsize, for tweak_fn */
395 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
396 desc, fpu_enabled,
397 nbytes < bsize ? bsize : nbytes);
398
399 /* calculate first value of T */
400 tweak_fn(tweak_ctx, walk.iv, walk.iv);
401
402 while (nbytes) {
403 nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);
404
405 err = blkcipher_walk_done(desc, &walk, nbytes);
406 nbytes = walk.nbytes;
407 }
408
409 glue_fpu_end(fpu_enabled);
410
411 return err;
412}
413EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
414
415int glue_xts_req_128bit(const struct common_glue_ctx *gctx, 274int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
416 struct skcipher_request *req, 275 struct skcipher_request *req,
417 common_glue_func_t tweak_fn, void *tweak_ctx, 276 common_glue_func_t tweak_fn, void *tweak_ctx,
@@ -429,9 +288,9 @@ int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
429 return err; 288 return err;
430 289
431 /* set minimum length to bsize, for tweak_fn */ 290 /* set minimum length to bsize, for tweak_fn */
432 fpu_enabled = glue_skwalk_fpu_begin(bsize, gctx->fpu_blocks_limit, 291 fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
433 &walk, fpu_enabled, 292 &walk, fpu_enabled,
434 nbytes < bsize ? bsize : nbytes); 293 nbytes < bsize ? bsize : nbytes);
435 294
436 /* calculate first value of T */ 295 /* calculate first value of T */
437 tweak_fn(tweak_ctx, walk.iv, walk.iv); 296 tweak_fn(tweak_ctx, walk.iv, walk.iv);
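
All of the request-based helpers in this file keep the FPU section lazy: glue_fpu_begin() claims the SIMD unit only once the current walk segment is large enough (fpu_blocks_limit blocks) to amortize the state save/restore, keeps it claimed across later segments, and glue_fpu_end() releases it a single time after the walk. A reduced sketch of that bookkeeping; the real helper has additional checks that are omitted here.

#include <asm/fpu/api.h>	/* kernel_fpu_begin()/kernel_fpu_end() */
#include <linux/types.h>

static bool example_fpu_begin(bool fpu_enabled, unsigned int fpu_bytes,
			      unsigned int nbytes)
{
	if (fpu_enabled)		/* already claimed on an earlier segment */
		return true;
	if (nbytes < fpu_bytes)		/* too small, use the scalar fallback */
		return false;

	kernel_fpu_begin();
	return true;
}

static void example_fpu_end(bool fpu_enabled)
{
	if (fpu_enabled)
		kernel_fpu_end();
}
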
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index 870f6d812a2d..03347b16ac9d 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -14,15 +14,12 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <crypto/ablk_helper.h>
18#include <crypto/algapi.h> 17#include <crypto/algapi.h>
19#include <crypto/ctr.h> 18#include <crypto/internal/simd.h>
20#include <crypto/lrw.h>
21#include <crypto/xts.h>
22#include <crypto/serpent.h> 19#include <crypto/serpent.h>
23#include <asm/fpu/api.h> 20#include <crypto/xts.h>
24#include <asm/crypto/serpent-avx.h>
25#include <asm/crypto/glue_helper.h> 21#include <asm/crypto/glue_helper.h>
22#include <asm/crypto/serpent-avx.h>
26 23
27#define SERPENT_AVX2_PARALLEL_BLOCKS 16 24#define SERPENT_AVX2_PARALLEL_BLOCKS 16
28 25
@@ -40,6 +37,12 @@ asmlinkage void serpent_xts_enc_16way(struct serpent_ctx *ctx, u8 *dst,
40asmlinkage void serpent_xts_dec_16way(struct serpent_ctx *ctx, u8 *dst, 37asmlinkage void serpent_xts_dec_16way(struct serpent_ctx *ctx, u8 *dst,
41 const u8 *src, le128 *iv); 38 const u8 *src, le128 *iv);
42 39
40static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
41 const u8 *key, unsigned int keylen)
42{
43 return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
44}
45
43static const struct common_glue_ctx serpent_enc = { 46static const struct common_glue_ctx serpent_enc = {
44 .num_funcs = 3, 47 .num_funcs = 3,
45 .fpu_blocks_limit = 8, 48 .fpu_blocks_limit = 8,
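
Tables such as serpent_enc above (and their cast6 counterparts earlier) list the SIMD routines from the widest batch down to a single-block fallback, and the glue loops let each entry consume as many whole batches as fit before falling through to the next. A standalone sketch of that widest-batch-first dispatch, using an illustrative table layout rather than the real struct common_glue_ctx:

#include <linux/types.h>

typedef void (*example_ecb_fn_t)(void *ctx, u8 *dst, const u8 *src);

struct example_batch_entry {
	unsigned int num_blocks;	/* blocks handled per call */
	example_ecb_fn_t fn;
};

/* Returns the number of leftover bytes smaller than one block. */
static unsigned int example_ecb_dispatch(void *ctx,
					 const struct example_batch_entry *tbl,
					 unsigned int num_entries,
					 u8 *dst, const u8 *src,
					 unsigned int nbytes,
					 unsigned int bsize)
{
	unsigned int i;

	/* Entries are ordered from the largest SIMD batch to num_blocks == 1,
	 * so each pass drains what the previous, wider one could not. */
	for (i = 0; i < num_entries; i++) {
		unsigned int func_bytes = bsize * tbl[i].num_blocks;

		while (nbytes >= func_bytes) {
			tbl[i].fn(ctx, dst, src);
			src += func_bytes;
			dst += func_bytes;
			nbytes -= func_bytes;
		}
	}

	return nbytes;
}
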
@@ -136,403 +139,113 @@ static const struct common_glue_ctx serpent_dec_xts = {
136 } } 139 } }
137}; 140};
138 141
139static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 142static int ecb_encrypt(struct skcipher_request *req)
140 struct scatterlist *src, unsigned int nbytes)
141{ 143{
142 return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes); 144 return glue_ecb_req_128bit(&serpent_enc, req);
143} 145}
144 146
145static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 147static int ecb_decrypt(struct skcipher_request *req)
146 struct scatterlist *src, unsigned int nbytes)
147{ 148{
148 return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes); 149 return glue_ecb_req_128bit(&serpent_dec, req);
149} 150}
150 151
151static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 152static int cbc_encrypt(struct skcipher_request *req)
152 struct scatterlist *src, unsigned int nbytes)
153{ 153{
154 return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc, 154 return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
155 dst, src, nbytes); 155 req);
156} 156}
157 157
158static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 158static int cbc_decrypt(struct skcipher_request *req)
159 struct scatterlist *src, unsigned int nbytes)
160{ 159{
161 return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src, 160 return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
162 nbytes);
163} 161}
164 162
165static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, 163static int ctr_crypt(struct skcipher_request *req)
166 struct scatterlist *src, unsigned int nbytes)
167{ 164{
168 return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes); 165 return glue_ctr_req_128bit(&serpent_ctr, req);
169} 166}
170 167
171static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes) 168static int xts_encrypt(struct skcipher_request *req)
172{ 169{
173 /* since reusing AVX functions, starts using FPU at 8 parallel blocks */ 170 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
174 return glue_fpu_begin(SERPENT_BLOCK_SIZE, 8, NULL, fpu_enabled, nbytes); 171 struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
175}
176 172
177static inline void serpent_fpu_end(bool fpu_enabled) 173 return glue_xts_req_128bit(&serpent_enc_xts, req,
178{ 174 XTS_TWEAK_CAST(__serpent_encrypt),
179 glue_fpu_end(fpu_enabled); 175 &ctx->tweak_ctx, &ctx->crypt_ctx);
180} 176}
181 177
182struct crypt_priv { 178static int xts_decrypt(struct skcipher_request *req)
183 struct serpent_ctx *ctx;
184 bool fpu_enabled;
185};
186
187static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
188{ 179{
189 const unsigned int bsize = SERPENT_BLOCK_SIZE; 180 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
190 struct crypt_priv *ctx = priv; 181 struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
191 int i;
192
193 ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
194
195 if (nbytes >= SERPENT_AVX2_PARALLEL_BLOCKS * bsize) {
196 serpent_ecb_enc_16way(ctx->ctx, srcdst, srcdst);
197 srcdst += bsize * SERPENT_AVX2_PARALLEL_BLOCKS;
198 nbytes -= bsize * SERPENT_AVX2_PARALLEL_BLOCKS;
199 }
200 182
201 while (nbytes >= SERPENT_PARALLEL_BLOCKS * bsize) { 183 return glue_xts_req_128bit(&serpent_dec_xts, req,
202 serpent_ecb_enc_8way_avx(ctx->ctx, srcdst, srcdst); 184 XTS_TWEAK_CAST(__serpent_encrypt),
203 srcdst += bsize * SERPENT_PARALLEL_BLOCKS; 185 &ctx->tweak_ctx, &ctx->crypt_ctx);
204 nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
205 }
206
207 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
208 __serpent_encrypt(ctx->ctx, srcdst, srcdst);
209} 186}
210 187
211static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) 188static struct skcipher_alg serpent_algs[] = {
212{ 189 {
213 const unsigned int bsize = SERPENT_BLOCK_SIZE; 190 .base.cra_name = "__ecb(serpent)",
214 struct crypt_priv *ctx = priv; 191 .base.cra_driver_name = "__ecb-serpent-avx2",
215 int i; 192 .base.cra_priority = 600,
216 193 .base.cra_flags = CRYPTO_ALG_INTERNAL,
217 ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes); 194 .base.cra_blocksize = SERPENT_BLOCK_SIZE,
218 195 .base.cra_ctxsize = sizeof(struct serpent_ctx),
219 if (nbytes >= SERPENT_AVX2_PARALLEL_BLOCKS * bsize) { 196 .base.cra_module = THIS_MODULE,
220 serpent_ecb_dec_16way(ctx->ctx, srcdst, srcdst); 197 .min_keysize = SERPENT_MIN_KEY_SIZE,
221 srcdst += bsize * SERPENT_AVX2_PARALLEL_BLOCKS; 198 .max_keysize = SERPENT_MAX_KEY_SIZE,
222 nbytes -= bsize * SERPENT_AVX2_PARALLEL_BLOCKS; 199 .setkey = serpent_setkey_skcipher,
223 } 200 .encrypt = ecb_encrypt,
224 201 .decrypt = ecb_decrypt,
225 while (nbytes >= SERPENT_PARALLEL_BLOCKS * bsize) { 202 }, {
226 serpent_ecb_dec_8way_avx(ctx->ctx, srcdst, srcdst); 203 .base.cra_name = "__cbc(serpent)",
227 srcdst += bsize * SERPENT_PARALLEL_BLOCKS; 204 .base.cra_driver_name = "__cbc-serpent-avx2",
228 nbytes -= bsize * SERPENT_PARALLEL_BLOCKS; 205 .base.cra_priority = 600,
229 } 206 .base.cra_flags = CRYPTO_ALG_INTERNAL,
230 207 .base.cra_blocksize = SERPENT_BLOCK_SIZE,
231 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize) 208 .base.cra_ctxsize = sizeof(struct serpent_ctx),
232 __serpent_decrypt(ctx->ctx, srcdst, srcdst); 209 .base.cra_module = THIS_MODULE,
233} 210 .min_keysize = SERPENT_MIN_KEY_SIZE,
234 211 .max_keysize = SERPENT_MAX_KEY_SIZE,
235static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 212 .ivsize = SERPENT_BLOCK_SIZE,
236 struct scatterlist *src, unsigned int nbytes) 213 .setkey = serpent_setkey_skcipher,
237{ 214 .encrypt = cbc_encrypt,
238 struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 215 .decrypt = cbc_decrypt,
239 be128 buf[SERPENT_AVX2_PARALLEL_BLOCKS]; 216 }, {
240 struct crypt_priv crypt_ctx = { 217 .base.cra_name = "__ctr(serpent)",
241 .ctx = &ctx->serpent_ctx, 218 .base.cra_driver_name = "__ctr-serpent-avx2",
242 .fpu_enabled = false, 219 .base.cra_priority = 600,
243 }; 220 .base.cra_flags = CRYPTO_ALG_INTERNAL,
244 struct lrw_crypt_req req = { 221 .base.cra_blocksize = 1,
245 .tbuf = buf, 222 .base.cra_ctxsize = sizeof(struct serpent_ctx),
246 .tbuflen = sizeof(buf), 223 .base.cra_module = THIS_MODULE,
247 224 .min_keysize = SERPENT_MIN_KEY_SIZE,
248 .table_ctx = &ctx->lrw_table, 225 .max_keysize = SERPENT_MAX_KEY_SIZE,
249 .crypt_ctx = &crypt_ctx, 226 .ivsize = SERPENT_BLOCK_SIZE,
250 .crypt_fn = encrypt_callback, 227 .chunksize = SERPENT_BLOCK_SIZE,
251 }; 228 .setkey = serpent_setkey_skcipher,
252 int ret; 229 .encrypt = ctr_crypt,
253 230 .decrypt = ctr_crypt,
254 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; 231 }, {
255 ret = lrw_crypt(desc, dst, src, nbytes, &req); 232 .base.cra_name = "__xts(serpent)",
256 serpent_fpu_end(crypt_ctx.fpu_enabled); 233 .base.cra_driver_name = "__xts-serpent-avx2",
257 234 .base.cra_priority = 600,
258 return ret; 235 .base.cra_flags = CRYPTO_ALG_INTERNAL,
259} 236 .base.cra_blocksize = SERPENT_BLOCK_SIZE,
260 237 .base.cra_ctxsize = sizeof(struct serpent_xts_ctx),
261static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 238 .base.cra_module = THIS_MODULE,
262 struct scatterlist *src, unsigned int nbytes) 239 .min_keysize = 2 * SERPENT_MIN_KEY_SIZE,
263{ 240 .max_keysize = 2 * SERPENT_MAX_KEY_SIZE,
264 struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 241 .ivsize = SERPENT_BLOCK_SIZE,
265 be128 buf[SERPENT_AVX2_PARALLEL_BLOCKS]; 242 .setkey = xts_serpent_setkey,
266 struct crypt_priv crypt_ctx = { 243 .encrypt = xts_encrypt,
267 .ctx = &ctx->serpent_ctx, 244 .decrypt = xts_decrypt,
268 .fpu_enabled = false,
269 };
270 struct lrw_crypt_req req = {
271 .tbuf = buf,
272 .tbuflen = sizeof(buf),
273
274 .table_ctx = &ctx->lrw_table,
275 .crypt_ctx = &crypt_ctx,
276 .crypt_fn = decrypt_callback,
277 };
278 int ret;
279
280 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
281 ret = lrw_crypt(desc, dst, src, nbytes, &req);
282 serpent_fpu_end(crypt_ctx.fpu_enabled);
283
284 return ret;
285}
286
287static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
288 struct scatterlist *src, unsigned int nbytes)
289{
290 struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
291
292 return glue_xts_crypt_128bit(&serpent_enc_xts, desc, dst, src, nbytes,
293 XTS_TWEAK_CAST(__serpent_encrypt),
294 &ctx->tweak_ctx, &ctx->crypt_ctx);
295}
296
297static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
298 struct scatterlist *src, unsigned int nbytes)
299{
300 struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
301
302 return glue_xts_crypt_128bit(&serpent_dec_xts, desc, dst, src, nbytes,
303 XTS_TWEAK_CAST(__serpent_encrypt),
304 &ctx->tweak_ctx, &ctx->crypt_ctx);
305}
306
307static struct crypto_alg srp_algs[10] = { {
308 .cra_name = "__ecb-serpent-avx2",
309 .cra_driver_name = "__driver-ecb-serpent-avx2",
310 .cra_priority = 0,
311 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
312 CRYPTO_ALG_INTERNAL,
313 .cra_blocksize = SERPENT_BLOCK_SIZE,
314 .cra_ctxsize = sizeof(struct serpent_ctx),
315 .cra_alignmask = 0,
316 .cra_type = &crypto_blkcipher_type,
317 .cra_module = THIS_MODULE,
318 .cra_list = LIST_HEAD_INIT(srp_algs[0].cra_list),
319 .cra_u = {
320 .blkcipher = {
321 .min_keysize = SERPENT_MIN_KEY_SIZE,
322 .max_keysize = SERPENT_MAX_KEY_SIZE,
323 .setkey = serpent_setkey,
324 .encrypt = ecb_encrypt,
325 .decrypt = ecb_decrypt,
326 },
327 },
328}, {
329 .cra_name = "__cbc-serpent-avx2",
330 .cra_driver_name = "__driver-cbc-serpent-avx2",
331 .cra_priority = 0,
332 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
333 CRYPTO_ALG_INTERNAL,
334 .cra_blocksize = SERPENT_BLOCK_SIZE,
335 .cra_ctxsize = sizeof(struct serpent_ctx),
336 .cra_alignmask = 0,
337 .cra_type = &crypto_blkcipher_type,
338 .cra_module = THIS_MODULE,
339 .cra_list = LIST_HEAD_INIT(srp_algs[1].cra_list),
340 .cra_u = {
341 .blkcipher = {
342 .min_keysize = SERPENT_MIN_KEY_SIZE,
343 .max_keysize = SERPENT_MAX_KEY_SIZE,
344 .setkey = serpent_setkey,
345 .encrypt = cbc_encrypt,
346 .decrypt = cbc_decrypt,
347 },
348 },
349}, {
350 .cra_name = "__ctr-serpent-avx2",
351 .cra_driver_name = "__driver-ctr-serpent-avx2",
352 .cra_priority = 0,
353 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
354 CRYPTO_ALG_INTERNAL,
355 .cra_blocksize = 1,
356 .cra_ctxsize = sizeof(struct serpent_ctx),
357 .cra_alignmask = 0,
358 .cra_type = &crypto_blkcipher_type,
359 .cra_module = THIS_MODULE,
360 .cra_list = LIST_HEAD_INIT(srp_algs[2].cra_list),
361 .cra_u = {
362 .blkcipher = {
363 .min_keysize = SERPENT_MIN_KEY_SIZE,
364 .max_keysize = SERPENT_MAX_KEY_SIZE,
365 .ivsize = SERPENT_BLOCK_SIZE,
366 .setkey = serpent_setkey,
367 .encrypt = ctr_crypt,
368 .decrypt = ctr_crypt,
369 },
370 },
371}, {
372 .cra_name = "__lrw-serpent-avx2",
373 .cra_driver_name = "__driver-lrw-serpent-avx2",
374 .cra_priority = 0,
375 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
376 CRYPTO_ALG_INTERNAL,
377 .cra_blocksize = SERPENT_BLOCK_SIZE,
378 .cra_ctxsize = sizeof(struct serpent_lrw_ctx),
379 .cra_alignmask = 0,
380 .cra_type = &crypto_blkcipher_type,
381 .cra_module = THIS_MODULE,
382 .cra_list = LIST_HEAD_INIT(srp_algs[3].cra_list),
383 .cra_exit = lrw_serpent_exit_tfm,
384 .cra_u = {
385 .blkcipher = {
386 .min_keysize = SERPENT_MIN_KEY_SIZE +
387 SERPENT_BLOCK_SIZE,
388 .max_keysize = SERPENT_MAX_KEY_SIZE +
389 SERPENT_BLOCK_SIZE,
390 .ivsize = SERPENT_BLOCK_SIZE,
391 .setkey = lrw_serpent_setkey,
392 .encrypt = lrw_encrypt,
393 .decrypt = lrw_decrypt,
394 },
395 },
396}, {
397 .cra_name = "__xts-serpent-avx2",
398 .cra_driver_name = "__driver-xts-serpent-avx2",
399 .cra_priority = 0,
400 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
401 CRYPTO_ALG_INTERNAL,
402 .cra_blocksize = SERPENT_BLOCK_SIZE,
403 .cra_ctxsize = sizeof(struct serpent_xts_ctx),
404 .cra_alignmask = 0,
405 .cra_type = &crypto_blkcipher_type,
406 .cra_module = THIS_MODULE,
407 .cra_list = LIST_HEAD_INIT(srp_algs[4].cra_list),
408 .cra_u = {
409 .blkcipher = {
410 .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
411 .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
412 .ivsize = SERPENT_BLOCK_SIZE,
413 .setkey = xts_serpent_setkey,
414 .encrypt = xts_encrypt,
415 .decrypt = xts_decrypt,
416 },
417 },
418}, {
419 .cra_name = "ecb(serpent)",
420 .cra_driver_name = "ecb-serpent-avx2",
421 .cra_priority = 600,
422 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
423 .cra_blocksize = SERPENT_BLOCK_SIZE,
424 .cra_ctxsize = sizeof(struct async_helper_ctx),
425 .cra_alignmask = 0,
426 .cra_type = &crypto_ablkcipher_type,
427 .cra_module = THIS_MODULE,
428 .cra_list = LIST_HEAD_INIT(srp_algs[5].cra_list),
429 .cra_init = ablk_init,
430 .cra_exit = ablk_exit,
431 .cra_u = {
432 .ablkcipher = {
433 .min_keysize = SERPENT_MIN_KEY_SIZE,
434 .max_keysize = SERPENT_MAX_KEY_SIZE,
435 .setkey = ablk_set_key,
436 .encrypt = ablk_encrypt,
437 .decrypt = ablk_decrypt,
438 },
439 },
440}, {
441 .cra_name = "cbc(serpent)",
442 .cra_driver_name = "cbc-serpent-avx2",
443 .cra_priority = 600,
444 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
445 .cra_blocksize = SERPENT_BLOCK_SIZE,
446 .cra_ctxsize = sizeof(struct async_helper_ctx),
447 .cra_alignmask = 0,
448 .cra_type = &crypto_ablkcipher_type,
449 .cra_module = THIS_MODULE,
450 .cra_list = LIST_HEAD_INIT(srp_algs[6].cra_list),
451 .cra_init = ablk_init,
452 .cra_exit = ablk_exit,
453 .cra_u = {
454 .ablkcipher = {
455 .min_keysize = SERPENT_MIN_KEY_SIZE,
456 .max_keysize = SERPENT_MAX_KEY_SIZE,
457 .ivsize = SERPENT_BLOCK_SIZE,
458 .setkey = ablk_set_key,
459 .encrypt = __ablk_encrypt,
460 .decrypt = ablk_decrypt,
461 },
462 },
463}, {
464 .cra_name = "ctr(serpent)",
465 .cra_driver_name = "ctr-serpent-avx2",
466 .cra_priority = 600,
467 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
468 .cra_blocksize = 1,
469 .cra_ctxsize = sizeof(struct async_helper_ctx),
470 .cra_alignmask = 0,
471 .cra_type = &crypto_ablkcipher_type,
472 .cra_module = THIS_MODULE,
473 .cra_list = LIST_HEAD_INIT(srp_algs[7].cra_list),
474 .cra_init = ablk_init,
475 .cra_exit = ablk_exit,
476 .cra_u = {
477 .ablkcipher = {
478 .min_keysize = SERPENT_MIN_KEY_SIZE,
479 .max_keysize = SERPENT_MAX_KEY_SIZE,
480 .ivsize = SERPENT_BLOCK_SIZE,
481 .setkey = ablk_set_key,
482 .encrypt = ablk_encrypt,
483 .decrypt = ablk_encrypt,
484 .geniv = "chainiv",
485 },
486 },
487}, {
488 .cra_name = "lrw(serpent)",
489 .cra_driver_name = "lrw-serpent-avx2",
490 .cra_priority = 600,
491 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
492 .cra_blocksize = SERPENT_BLOCK_SIZE,
493 .cra_ctxsize = sizeof(struct async_helper_ctx),
494 .cra_alignmask = 0,
495 .cra_type = &crypto_ablkcipher_type,
496 .cra_module = THIS_MODULE,
497 .cra_list = LIST_HEAD_INIT(srp_algs[8].cra_list),
498 .cra_init = ablk_init,
499 .cra_exit = ablk_exit,
500 .cra_u = {
501 .ablkcipher = {
502 .min_keysize = SERPENT_MIN_KEY_SIZE +
503 SERPENT_BLOCK_SIZE,
504 .max_keysize = SERPENT_MAX_KEY_SIZE +
505 SERPENT_BLOCK_SIZE,
506 .ivsize = SERPENT_BLOCK_SIZE,
507 .setkey = ablk_set_key,
508 .encrypt = ablk_encrypt,
509 .decrypt = ablk_decrypt,
510 },
511 },
512}, {
513 .cra_name = "xts(serpent)",
514 .cra_driver_name = "xts-serpent-avx2",
515 .cra_priority = 600,
516 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
517 .cra_blocksize = SERPENT_BLOCK_SIZE,
518 .cra_ctxsize = sizeof(struct async_helper_ctx),
519 .cra_alignmask = 0,
520 .cra_type = &crypto_ablkcipher_type,
521 .cra_module = THIS_MODULE,
522 .cra_list = LIST_HEAD_INIT(srp_algs[9].cra_list),
523 .cra_init = ablk_init,
524 .cra_exit = ablk_exit,
525 .cra_u = {
526 .ablkcipher = {
527 .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
528 .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
529 .ivsize = SERPENT_BLOCK_SIZE,
530 .setkey = ablk_set_key,
531 .encrypt = ablk_encrypt,
532 .decrypt = ablk_decrypt,
533 },
534 }, 245 },
535} }; 246};
247
248static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
536 249
537static int __init init(void) 250static int __init init(void)
538{ 251{
@@ -548,12 +261,15 @@ static int __init init(void)
548 return -ENODEV; 261 return -ENODEV;
549 } 262 }
550 263
551 return crypto_register_algs(srp_algs, ARRAY_SIZE(srp_algs)); 264 return simd_register_skciphers_compat(serpent_algs,
265 ARRAY_SIZE(serpent_algs),
266 serpent_simd_algs);
552} 267}
553 268
554static void __exit fini(void) 269static void __exit fini(void)
555{ 270{
556 crypto_unregister_algs(srp_algs, ARRAY_SIZE(srp_algs)); 271 simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
272 serpent_simd_algs);
557} 273}
558 274
559module_init(init); 275module_init(init);
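Note on the serpent-avx2 conversion above: the legacy blkcipher/ablkcipher handlers (and the lrw_crypt plumbing) are gone, and callers now reach the accelerated code through the wrapped skcipher that the simd helper registers on top of the CRYPTO_ALG_INTERNAL entries. The sketch below is not part of the patch; it only illustrates, via the generic kernel skcipher request API, how a caller would drive the resulting "xts(serpent)" transform. The helper name serpent_xts_encrypt_once and its parameters are made up for illustration; crypto_alloc_skcipher(), crypto_wait_req() and the other request helpers are assumed to be the standard API available in this tree.

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/skcipher.h>

/* Hypothetical one-shot helper: src/dst/iv/key are prepared by the caller. */
static int serpent_xts_encrypt_once(struct scatterlist *src,
                                    struct scatterlist *dst,
                                    unsigned int nbytes,
                                    const u8 *key, unsigned int keylen,
                                    u8 *iv)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        DECLARE_CRYPTO_WAIT(wait);
        int err;

        tfm = crypto_alloc_skcipher("xts(serpent)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* XTS wants a double-length key: crypt half plus tweak half. */
        err = crypto_skcipher_setkey(tfm, key, keylen);
        if (err)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                      CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, src, dst, nbytes, iv);
        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return err;
}
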
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index 6f778d3daa22..458567ecf76c 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -24,21 +24,15 @@
24 */ 24 */
25 25
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/hardirq.h>
28#include <linux/types.h> 27#include <linux/types.h>
29#include <linux/crypto.h> 28#include <linux/crypto.h>
30#include <linux/err.h> 29#include <linux/err.h>
31#include <crypto/ablk_helper.h>
32#include <crypto/algapi.h> 30#include <crypto/algapi.h>
31#include <crypto/internal/simd.h>
33#include <crypto/serpent.h> 32#include <crypto/serpent.h>
34#include <crypto/cryptd.h>
35#include <crypto/b128ops.h>
36#include <crypto/ctr.h>
37#include <crypto/lrw.h>
38#include <crypto/xts.h> 33#include <crypto/xts.h>
39#include <asm/fpu/api.h>
40#include <asm/crypto/serpent-avx.h>
41#include <asm/crypto/glue_helper.h> 34#include <asm/crypto/glue_helper.h>
35#include <asm/crypto/serpent-avx.h>
42 36
43/* 8-way parallel cipher functions */ 37/* 8-way parallel cipher functions */
44asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, 38asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
@@ -91,6 +85,31 @@ void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
91} 85}
92EXPORT_SYMBOL_GPL(serpent_xts_dec); 86EXPORT_SYMBOL_GPL(serpent_xts_dec);
93 87
88static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
89 const u8 *key, unsigned int keylen)
90{
91 return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
92}
93
94int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key,
95 unsigned int keylen)
96{
97 struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
98 int err;
99
100 err = xts_verify_key(tfm, key, keylen);
101 if (err)
102 return err;
103
104 /* first half of xts-key is for crypt */
105 err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
106 if (err)
107 return err;
108
109 /* second half of xts-key is for tweak */
110 return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
111}
112EXPORT_SYMBOL_GPL(xts_serpent_setkey);
94 113
95static const struct common_glue_ctx serpent_enc = { 114static const struct common_glue_ctx serpent_enc = {
96 .num_funcs = 2, 115 .num_funcs = 2,
@@ -170,423 +189,113 @@ static const struct common_glue_ctx serpent_dec_xts = {
170 } } 189 } }
171}; 190};
172 191
173static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 192static int ecb_encrypt(struct skcipher_request *req)
174 struct scatterlist *src, unsigned int nbytes)
175{ 193{
176 return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes); 194 return glue_ecb_req_128bit(&serpent_enc, req);
177} 195}
178 196
179static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 197static int ecb_decrypt(struct skcipher_request *req)
180 struct scatterlist *src, unsigned int nbytes)
181{ 198{
182 return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes); 199 return glue_ecb_req_128bit(&serpent_dec, req);
183} 200}
184 201
185static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 202static int cbc_encrypt(struct skcipher_request *req)
186 struct scatterlist *src, unsigned int nbytes)
187{ 203{
188 return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc, 204 return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
189 dst, src, nbytes); 205 req);
190} 206}
191 207
192static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 208static int cbc_decrypt(struct skcipher_request *req)
193 struct scatterlist *src, unsigned int nbytes)
194{ 209{
195 return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src, 210 return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
196 nbytes);
197} 211}
198 212
199static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, 213static int ctr_crypt(struct skcipher_request *req)
200 struct scatterlist *src, unsigned int nbytes)
201{ 214{
202 return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes); 215 return glue_ctr_req_128bit(&serpent_ctr, req);
203} 216}
204 217
205static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes) 218static int xts_encrypt(struct skcipher_request *req)
206{ 219{
207 return glue_fpu_begin(SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS, 220 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
208 NULL, fpu_enabled, nbytes); 221 struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
209}
210 222
211static inline void serpent_fpu_end(bool fpu_enabled) 223 return glue_xts_req_128bit(&serpent_enc_xts, req,
212{ 224 XTS_TWEAK_CAST(__serpent_encrypt),
213 glue_fpu_end(fpu_enabled); 225 &ctx->tweak_ctx, &ctx->crypt_ctx);
214} 226}
215 227
216struct crypt_priv { 228static int xts_decrypt(struct skcipher_request *req)
217 struct serpent_ctx *ctx;
218 bool fpu_enabled;
219};
220
221static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
222{
223 const unsigned int bsize = SERPENT_BLOCK_SIZE;
224 struct crypt_priv *ctx = priv;
225 int i;
226
227 ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
228
229 if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
230 serpent_ecb_enc_8way_avx(ctx->ctx, srcdst, srcdst);
231 return;
232 }
233
234 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
235 __serpent_encrypt(ctx->ctx, srcdst, srcdst);
236}
237
238static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
239{ 229{
240 const unsigned int bsize = SERPENT_BLOCK_SIZE; 230 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
241 struct crypt_priv *ctx = priv; 231 struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
242 int i;
243
244 ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
245 232
246 if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) { 233 return glue_xts_req_128bit(&serpent_dec_xts, req,
247 serpent_ecb_dec_8way_avx(ctx->ctx, srcdst, srcdst); 234 XTS_TWEAK_CAST(__serpent_encrypt),
248 return; 235 &ctx->tweak_ctx, &ctx->crypt_ctx);
249 }
250
251 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
252 __serpent_decrypt(ctx->ctx, srcdst, srcdst);
253}
254
255int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
256 unsigned int keylen)
257{
258 struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
259 int err;
260
261 err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
262 SERPENT_BLOCK_SIZE);
263 if (err)
264 return err;
265
266 return lrw_init_table(&ctx->lrw_table, key + keylen -
267 SERPENT_BLOCK_SIZE);
268}
269EXPORT_SYMBOL_GPL(lrw_serpent_setkey);
270
271static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
272 struct scatterlist *src, unsigned int nbytes)
273{
274 struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
275 be128 buf[SERPENT_PARALLEL_BLOCKS];
276 struct crypt_priv crypt_ctx = {
277 .ctx = &ctx->serpent_ctx,
278 .fpu_enabled = false,
279 };
280 struct lrw_crypt_req req = {
281 .tbuf = buf,
282 .tbuflen = sizeof(buf),
283
284 .table_ctx = &ctx->lrw_table,
285 .crypt_ctx = &crypt_ctx,
286 .crypt_fn = encrypt_callback,
287 };
288 int ret;
289
290 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
291 ret = lrw_crypt(desc, dst, src, nbytes, &req);
292 serpent_fpu_end(crypt_ctx.fpu_enabled);
293
294 return ret;
295} 236}
296 237
297static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 238static struct skcipher_alg serpent_algs[] = {
298 struct scatterlist *src, unsigned int nbytes) 239 {
299{ 240 .base.cra_name = "__ecb(serpent)",
300 struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 241 .base.cra_driver_name = "__ecb-serpent-avx",
301 be128 buf[SERPENT_PARALLEL_BLOCKS]; 242 .base.cra_priority = 500,
302 struct crypt_priv crypt_ctx = { 243 .base.cra_flags = CRYPTO_ALG_INTERNAL,
303 .ctx = &ctx->serpent_ctx, 244 .base.cra_blocksize = SERPENT_BLOCK_SIZE,
304 .fpu_enabled = false, 245 .base.cra_ctxsize = sizeof(struct serpent_ctx),
305 }; 246 .base.cra_module = THIS_MODULE,
306 struct lrw_crypt_req req = { 247 .min_keysize = SERPENT_MIN_KEY_SIZE,
307 .tbuf = buf, 248 .max_keysize = SERPENT_MAX_KEY_SIZE,
308 .tbuflen = sizeof(buf), 249 .setkey = serpent_setkey_skcipher,
309 250 .encrypt = ecb_encrypt,
310 .table_ctx = &ctx->lrw_table, 251 .decrypt = ecb_decrypt,
311 .crypt_ctx = &crypt_ctx, 252 }, {
312 .crypt_fn = decrypt_callback, 253 .base.cra_name = "__cbc(serpent)",
313 }; 254 .base.cra_driver_name = "__cbc-serpent-avx",
314 int ret; 255 .base.cra_priority = 500,
315 256 .base.cra_flags = CRYPTO_ALG_INTERNAL,
316 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; 257 .base.cra_blocksize = SERPENT_BLOCK_SIZE,
317 ret = lrw_crypt(desc, dst, src, nbytes, &req); 258 .base.cra_ctxsize = sizeof(struct serpent_ctx),
318 serpent_fpu_end(crypt_ctx.fpu_enabled); 259 .base.cra_module = THIS_MODULE,
319 260 .min_keysize = SERPENT_MIN_KEY_SIZE,
320 return ret; 261 .max_keysize = SERPENT_MAX_KEY_SIZE,
321} 262 .ivsize = SERPENT_BLOCK_SIZE,
322 263 .setkey = serpent_setkey_skcipher,
323void lrw_serpent_exit_tfm(struct crypto_tfm *tfm) 264 .encrypt = cbc_encrypt,
324{ 265 .decrypt = cbc_decrypt,
325 struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm); 266 }, {
326 267 .base.cra_name = "__ctr(serpent)",
327 lrw_free_table(&ctx->lrw_table); 268 .base.cra_driver_name = "__ctr-serpent-avx",
328} 269 .base.cra_priority = 500,
329EXPORT_SYMBOL_GPL(lrw_serpent_exit_tfm); 270 .base.cra_flags = CRYPTO_ALG_INTERNAL,
330 271 .base.cra_blocksize = 1,
331int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key, 272 .base.cra_ctxsize = sizeof(struct serpent_ctx),
332 unsigned int keylen) 273 .base.cra_module = THIS_MODULE,
333{ 274 .min_keysize = SERPENT_MIN_KEY_SIZE,
334 struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm); 275 .max_keysize = SERPENT_MAX_KEY_SIZE,
335 int err; 276 .ivsize = SERPENT_BLOCK_SIZE,
336 277 .chunksize = SERPENT_BLOCK_SIZE,
337 err = xts_check_key(tfm, key, keylen); 278 .setkey = serpent_setkey_skcipher,
338 if (err) 279 .encrypt = ctr_crypt,
339 return err; 280 .decrypt = ctr_crypt,
340 281 }, {
341 /* first half of xts-key is for crypt */ 282 .base.cra_name = "__xts(serpent)",
342 err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2); 283 .base.cra_driver_name = "__xts-serpent-avx",
343 if (err) 284 .base.cra_priority = 500,
344 return err; 285 .base.cra_flags = CRYPTO_ALG_INTERNAL,
345 286 .base.cra_blocksize = SERPENT_BLOCK_SIZE,
346 /* second half of xts-key is for tweak */ 287 .base.cra_ctxsize = sizeof(struct serpent_xts_ctx),
347 return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2); 288 .base.cra_module = THIS_MODULE,
348} 289 .min_keysize = 2 * SERPENT_MIN_KEY_SIZE,
349EXPORT_SYMBOL_GPL(xts_serpent_setkey); 290 .max_keysize = 2 * SERPENT_MAX_KEY_SIZE,
350 291 .ivsize = SERPENT_BLOCK_SIZE,
351static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 292 .setkey = xts_serpent_setkey,
352 struct scatterlist *src, unsigned int nbytes) 293 .encrypt = xts_encrypt,
353{ 294 .decrypt = xts_decrypt,
354 struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
355
356 return glue_xts_crypt_128bit(&serpent_enc_xts, desc, dst, src, nbytes,
357 XTS_TWEAK_CAST(__serpent_encrypt),
358 &ctx->tweak_ctx, &ctx->crypt_ctx);
359}
360
361static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
362 struct scatterlist *src, unsigned int nbytes)
363{
364 struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
365
366 return glue_xts_crypt_128bit(&serpent_dec_xts, desc, dst, src, nbytes,
367 XTS_TWEAK_CAST(__serpent_encrypt),
368 &ctx->tweak_ctx, &ctx->crypt_ctx);
369}
370
371static struct crypto_alg serpent_algs[10] = { {
372 .cra_name = "__ecb-serpent-avx",
373 .cra_driver_name = "__driver-ecb-serpent-avx",
374 .cra_priority = 0,
375 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
376 CRYPTO_ALG_INTERNAL,
377 .cra_blocksize = SERPENT_BLOCK_SIZE,
378 .cra_ctxsize = sizeof(struct serpent_ctx),
379 .cra_alignmask = 0,
380 .cra_type = &crypto_blkcipher_type,
381 .cra_module = THIS_MODULE,
382 .cra_u = {
383 .blkcipher = {
384 .min_keysize = SERPENT_MIN_KEY_SIZE,
385 .max_keysize = SERPENT_MAX_KEY_SIZE,
386 .setkey = serpent_setkey,
387 .encrypt = ecb_encrypt,
388 .decrypt = ecb_decrypt,
389 },
390 },
391}, {
392 .cra_name = "__cbc-serpent-avx",
393 .cra_driver_name = "__driver-cbc-serpent-avx",
394 .cra_priority = 0,
395 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
396 CRYPTO_ALG_INTERNAL,
397 .cra_blocksize = SERPENT_BLOCK_SIZE,
398 .cra_ctxsize = sizeof(struct serpent_ctx),
399 .cra_alignmask = 0,
400 .cra_type = &crypto_blkcipher_type,
401 .cra_module = THIS_MODULE,
402 .cra_u = {
403 .blkcipher = {
404 .min_keysize = SERPENT_MIN_KEY_SIZE,
405 .max_keysize = SERPENT_MAX_KEY_SIZE,
406 .setkey = serpent_setkey,
407 .encrypt = cbc_encrypt,
408 .decrypt = cbc_decrypt,
409 },
410 },
411}, {
412 .cra_name = "__ctr-serpent-avx",
413 .cra_driver_name = "__driver-ctr-serpent-avx",
414 .cra_priority = 0,
415 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
416 CRYPTO_ALG_INTERNAL,
417 .cra_blocksize = 1,
418 .cra_ctxsize = sizeof(struct serpent_ctx),
419 .cra_alignmask = 0,
420 .cra_type = &crypto_blkcipher_type,
421 .cra_module = THIS_MODULE,
422 .cra_u = {
423 .blkcipher = {
424 .min_keysize = SERPENT_MIN_KEY_SIZE,
425 .max_keysize = SERPENT_MAX_KEY_SIZE,
426 .ivsize = SERPENT_BLOCK_SIZE,
427 .setkey = serpent_setkey,
428 .encrypt = ctr_crypt,
429 .decrypt = ctr_crypt,
430 },
431 },
432}, {
433 .cra_name = "__lrw-serpent-avx",
434 .cra_driver_name = "__driver-lrw-serpent-avx",
435 .cra_priority = 0,
436 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
437 CRYPTO_ALG_INTERNAL,
438 .cra_blocksize = SERPENT_BLOCK_SIZE,
439 .cra_ctxsize = sizeof(struct serpent_lrw_ctx),
440 .cra_alignmask = 0,
441 .cra_type = &crypto_blkcipher_type,
442 .cra_module = THIS_MODULE,
443 .cra_exit = lrw_serpent_exit_tfm,
444 .cra_u = {
445 .blkcipher = {
446 .min_keysize = SERPENT_MIN_KEY_SIZE +
447 SERPENT_BLOCK_SIZE,
448 .max_keysize = SERPENT_MAX_KEY_SIZE +
449 SERPENT_BLOCK_SIZE,
450 .ivsize = SERPENT_BLOCK_SIZE,
451 .setkey = lrw_serpent_setkey,
452 .encrypt = lrw_encrypt,
453 .decrypt = lrw_decrypt,
454 },
455 },
456}, {
457 .cra_name = "__xts-serpent-avx",
458 .cra_driver_name = "__driver-xts-serpent-avx",
459 .cra_priority = 0,
460 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
461 CRYPTO_ALG_INTERNAL,
462 .cra_blocksize = SERPENT_BLOCK_SIZE,
463 .cra_ctxsize = sizeof(struct serpent_xts_ctx),
464 .cra_alignmask = 0,
465 .cra_type = &crypto_blkcipher_type,
466 .cra_module = THIS_MODULE,
467 .cra_u = {
468 .blkcipher = {
469 .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
470 .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
471 .ivsize = SERPENT_BLOCK_SIZE,
472 .setkey = xts_serpent_setkey,
473 .encrypt = xts_encrypt,
474 .decrypt = xts_decrypt,
475 },
476 },
477}, {
478 .cra_name = "ecb(serpent)",
479 .cra_driver_name = "ecb-serpent-avx",
480 .cra_priority = 500,
481 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
482 .cra_blocksize = SERPENT_BLOCK_SIZE,
483 .cra_ctxsize = sizeof(struct async_helper_ctx),
484 .cra_alignmask = 0,
485 .cra_type = &crypto_ablkcipher_type,
486 .cra_module = THIS_MODULE,
487 .cra_init = ablk_init,
488 .cra_exit = ablk_exit,
489 .cra_u = {
490 .ablkcipher = {
491 .min_keysize = SERPENT_MIN_KEY_SIZE,
492 .max_keysize = SERPENT_MAX_KEY_SIZE,
493 .setkey = ablk_set_key,
494 .encrypt = ablk_encrypt,
495 .decrypt = ablk_decrypt,
496 },
497 },
498}, {
499 .cra_name = "cbc(serpent)",
500 .cra_driver_name = "cbc-serpent-avx",
501 .cra_priority = 500,
502 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
503 .cra_blocksize = SERPENT_BLOCK_SIZE,
504 .cra_ctxsize = sizeof(struct async_helper_ctx),
505 .cra_alignmask = 0,
506 .cra_type = &crypto_ablkcipher_type,
507 .cra_module = THIS_MODULE,
508 .cra_init = ablk_init,
509 .cra_exit = ablk_exit,
510 .cra_u = {
511 .ablkcipher = {
512 .min_keysize = SERPENT_MIN_KEY_SIZE,
513 .max_keysize = SERPENT_MAX_KEY_SIZE,
514 .ivsize = SERPENT_BLOCK_SIZE,
515 .setkey = ablk_set_key,
516 .encrypt = __ablk_encrypt,
517 .decrypt = ablk_decrypt,
518 },
519 },
520}, {
521 .cra_name = "ctr(serpent)",
522 .cra_driver_name = "ctr-serpent-avx",
523 .cra_priority = 500,
524 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
525 .cra_blocksize = 1,
526 .cra_ctxsize = sizeof(struct async_helper_ctx),
527 .cra_alignmask = 0,
528 .cra_type = &crypto_ablkcipher_type,
529 .cra_module = THIS_MODULE,
530 .cra_init = ablk_init,
531 .cra_exit = ablk_exit,
532 .cra_u = {
533 .ablkcipher = {
534 .min_keysize = SERPENT_MIN_KEY_SIZE,
535 .max_keysize = SERPENT_MAX_KEY_SIZE,
536 .ivsize = SERPENT_BLOCK_SIZE,
537 .setkey = ablk_set_key,
538 .encrypt = ablk_encrypt,
539 .decrypt = ablk_encrypt,
540 .geniv = "chainiv",
541 },
542 },
543}, {
544 .cra_name = "lrw(serpent)",
545 .cra_driver_name = "lrw-serpent-avx",
546 .cra_priority = 500,
547 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
548 .cra_blocksize = SERPENT_BLOCK_SIZE,
549 .cra_ctxsize = sizeof(struct async_helper_ctx),
550 .cra_alignmask = 0,
551 .cra_type = &crypto_ablkcipher_type,
552 .cra_module = THIS_MODULE,
553 .cra_init = ablk_init,
554 .cra_exit = ablk_exit,
555 .cra_u = {
556 .ablkcipher = {
557 .min_keysize = SERPENT_MIN_KEY_SIZE +
558 SERPENT_BLOCK_SIZE,
559 .max_keysize = SERPENT_MAX_KEY_SIZE +
560 SERPENT_BLOCK_SIZE,
561 .ivsize = SERPENT_BLOCK_SIZE,
562 .setkey = ablk_set_key,
563 .encrypt = ablk_encrypt,
564 .decrypt = ablk_decrypt,
565 },
566 },
567}, {
568 .cra_name = "xts(serpent)",
569 .cra_driver_name = "xts-serpent-avx",
570 .cra_priority = 500,
571 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
572 .cra_blocksize = SERPENT_BLOCK_SIZE,
573 .cra_ctxsize = sizeof(struct async_helper_ctx),
574 .cra_alignmask = 0,
575 .cra_type = &crypto_ablkcipher_type,
576 .cra_module = THIS_MODULE,
577 .cra_init = ablk_init,
578 .cra_exit = ablk_exit,
579 .cra_u = {
580 .ablkcipher = {
581 .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
582 .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
583 .ivsize = SERPENT_BLOCK_SIZE,
584 .setkey = ablk_set_key,
585 .encrypt = ablk_encrypt,
586 .decrypt = ablk_decrypt,
587 },
588 }, 295 },
589} }; 296};
297
298static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
590 299
591static int __init serpent_init(void) 300static int __init serpent_init(void)
592{ 301{
@@ -598,12 +307,15 @@ static int __init serpent_init(void)
598 return -ENODEV; 307 return -ENODEV;
599 } 308 }
600 309
601 return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs)); 310 return simd_register_skciphers_compat(serpent_algs,
311 ARRAY_SIZE(serpent_algs),
312 serpent_simd_algs);
602} 313}
603 314
604static void __exit serpent_exit(void) 315static void __exit serpent_exit(void)
605{ 316{
606 crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs)); 317 simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
318 serpent_simd_algs);
607} 319}
608 320
609module_init(serpent_init); 321module_init(serpent_init);
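The serpent-avx glue also takes ownership of the exported xts_serpent_setkey(), now written against crypto_skcipher and xts_verify_key() instead of crypto_tfm and xts_check_key(). Restated as a standalone sketch (xts_split_setkey is a hypothetical name; the logic simply mirrors the hunk above): the supplied key must be twice the serpent key length, the low half programs the data-encryption schedule and the high half programs the tweak schedule.

#include <crypto/serpent.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>

static int xts_split_setkey(struct crypto_skcipher *tfm,
                            struct serpent_ctx *crypt_ctx,
                            struct serpent_ctx *tweak_ctx,
                            const u8 *key, unsigned int keylen)
{
        int err;

        /* Rejects odd key lengths and, in FIPS mode, identical halves. */
        err = xts_verify_key(tfm, key, keylen);
        if (err)
                return err;

        /* First half of the xts key drives the data-encryption schedule. */
        err = __serpent_setkey(crypt_ctx, key, keylen / 2);
        if (err)
                return err;

        /* Second half drives the tweak schedule. */
        return __serpent_setkey(tweak_ctx, key + keylen / 2, keylen / 2);
}
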
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index ac0e831943f5..3dafe137596a 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -30,21 +30,22 @@
30 */ 30 */
31 31
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/hardirq.h>
34#include <linux/types.h> 33#include <linux/types.h>
35#include <linux/crypto.h> 34#include <linux/crypto.h>
36#include <linux/err.h> 35#include <linux/err.h>
37#include <crypto/ablk_helper.h>
38#include <crypto/algapi.h> 36#include <crypto/algapi.h>
39#include <crypto/serpent.h>
40#include <crypto/cryptd.h>
41#include <crypto/b128ops.h> 37#include <crypto/b128ops.h>
42#include <crypto/ctr.h> 38#include <crypto/internal/simd.h>
43#include <crypto/lrw.h> 39#include <crypto/serpent.h>
44#include <crypto/xts.h>
45#include <asm/crypto/serpent-sse2.h> 40#include <asm/crypto/serpent-sse2.h>
46#include <asm/crypto/glue_helper.h> 41#include <asm/crypto/glue_helper.h>
47 42
43static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
44 const u8 *key, unsigned int keylen)
45{
46 return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
47}
48
48static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src) 49static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
49{ 50{
50 u128 ivs[SERPENT_PARALLEL_BLOCKS - 1]; 51 u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
@@ -139,464 +140,79 @@ static const struct common_glue_ctx serpent_dec_cbc = {
139 } } 140 } }
140}; 141};
141 142
142static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 143static int ecb_encrypt(struct skcipher_request *req)
143 struct scatterlist *src, unsigned int nbytes)
144{
145 return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes);
146}
147
148static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
149 struct scatterlist *src, unsigned int nbytes)
150{
151 return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes);
152}
153
154static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
155 struct scatterlist *src, unsigned int nbytes)
156{ 144{
157 return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc, 145 return glue_ecb_req_128bit(&serpent_enc, req);
158 dst, src, nbytes);
159} 146}
160 147
161static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 148static int ecb_decrypt(struct skcipher_request *req)
162 struct scatterlist *src, unsigned int nbytes)
163{ 149{
164 return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src, 150 return glue_ecb_req_128bit(&serpent_dec, req);
165 nbytes);
166} 151}
167 152
168static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, 153static int cbc_encrypt(struct skcipher_request *req)
169 struct scatterlist *src, unsigned int nbytes)
170{ 154{
171 return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes); 155 return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
156 req);
172} 157}
173 158
174static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes) 159static int cbc_decrypt(struct skcipher_request *req)
175{ 160{
176 return glue_fpu_begin(SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS, 161 return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
177 NULL, fpu_enabled, nbytes);
178} 162}
179 163
180static inline void serpent_fpu_end(bool fpu_enabled) 164static int ctr_crypt(struct skcipher_request *req)
181{ 165{
182 glue_fpu_end(fpu_enabled); 166 return glue_ctr_req_128bit(&serpent_ctr, req);
183} 167}
184 168
185struct crypt_priv { 169static struct skcipher_alg serpent_algs[] = {
186 struct serpent_ctx *ctx; 170 {
187 bool fpu_enabled; 171 .base.cra_name = "__ecb(serpent)",
188}; 172 .base.cra_driver_name = "__ecb-serpent-sse2",
189 173 .base.cra_priority = 400,
190static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) 174 .base.cra_flags = CRYPTO_ALG_INTERNAL,
191{ 175 .base.cra_blocksize = SERPENT_BLOCK_SIZE,
192 const unsigned int bsize = SERPENT_BLOCK_SIZE; 176 .base.cra_ctxsize = sizeof(struct serpent_ctx),
193 struct crypt_priv *ctx = priv; 177 .base.cra_module = THIS_MODULE,
194 int i; 178 .min_keysize = SERPENT_MIN_KEY_SIZE,
195 179 .max_keysize = SERPENT_MAX_KEY_SIZE,
196 ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes); 180 .setkey = serpent_setkey_skcipher,
197 181 .encrypt = ecb_encrypt,
198 if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) { 182 .decrypt = ecb_decrypt,
199 serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst); 183 }, {
200 return; 184 .base.cra_name = "__cbc(serpent)",
201 } 185 .base.cra_driver_name = "__cbc-serpent-sse2",
202 186 .base.cra_priority = 400,
203 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize) 187 .base.cra_flags = CRYPTO_ALG_INTERNAL,
204 __serpent_encrypt(ctx->ctx, srcdst, srcdst); 188 .base.cra_blocksize = SERPENT_BLOCK_SIZE,
205} 189 .base.cra_ctxsize = sizeof(struct serpent_ctx),
206 190 .base.cra_module = THIS_MODULE,
207static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) 191 .min_keysize = SERPENT_MIN_KEY_SIZE,
208{ 192 .max_keysize = SERPENT_MAX_KEY_SIZE,
209 const unsigned int bsize = SERPENT_BLOCK_SIZE; 193 .ivsize = SERPENT_BLOCK_SIZE,
210 struct crypt_priv *ctx = priv; 194 .setkey = serpent_setkey_skcipher,
211 int i; 195 .encrypt = cbc_encrypt,
212 196 .decrypt = cbc_decrypt,
213 ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes); 197 }, {
214 198 .base.cra_name = "__ctr(serpent)",
215 if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) { 199 .base.cra_driver_name = "__ctr-serpent-sse2",
216 serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst); 200 .base.cra_priority = 400,
217 return; 201 .base.cra_flags = CRYPTO_ALG_INTERNAL,
218 } 202 .base.cra_blocksize = 1,
219 203 .base.cra_ctxsize = sizeof(struct serpent_ctx),
220 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize) 204 .base.cra_module = THIS_MODULE,
221 __serpent_decrypt(ctx->ctx, srcdst, srcdst); 205 .min_keysize = SERPENT_MIN_KEY_SIZE,
222} 206 .max_keysize = SERPENT_MAX_KEY_SIZE,
223 207 .ivsize = SERPENT_BLOCK_SIZE,
224struct serpent_lrw_ctx { 208 .chunksize = SERPENT_BLOCK_SIZE,
225 struct lrw_table_ctx lrw_table; 209 .setkey = serpent_setkey_skcipher,
226 struct serpent_ctx serpent_ctx; 210 .encrypt = ctr_crypt,
227}; 211 .decrypt = ctr_crypt,
228 212 },
229static int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
230 unsigned int keylen)
231{
232 struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
233 int err;
234
235 err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
236 SERPENT_BLOCK_SIZE);
237 if (err)
238 return err;
239
240 return lrw_init_table(&ctx->lrw_table, key + keylen -
241 SERPENT_BLOCK_SIZE);
242}
243
244static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
245 struct scatterlist *src, unsigned int nbytes)
246{
247 struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
248 be128 buf[SERPENT_PARALLEL_BLOCKS];
249 struct crypt_priv crypt_ctx = {
250 .ctx = &ctx->serpent_ctx,
251 .fpu_enabled = false,
252 };
253 struct lrw_crypt_req req = {
254 .tbuf = buf,
255 .tbuflen = sizeof(buf),
256
257 .table_ctx = &ctx->lrw_table,
258 .crypt_ctx = &crypt_ctx,
259 .crypt_fn = encrypt_callback,
260 };
261 int ret;
262
263 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
264 ret = lrw_crypt(desc, dst, src, nbytes, &req);
265 serpent_fpu_end(crypt_ctx.fpu_enabled);
266
267 return ret;
268}
269
270static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
271 struct scatterlist *src, unsigned int nbytes)
272{
273 struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
274 be128 buf[SERPENT_PARALLEL_BLOCKS];
275 struct crypt_priv crypt_ctx = {
276 .ctx = &ctx->serpent_ctx,
277 .fpu_enabled = false,
278 };
279 struct lrw_crypt_req req = {
280 .tbuf = buf,
281 .tbuflen = sizeof(buf),
282
283 .table_ctx = &ctx->lrw_table,
284 .crypt_ctx = &crypt_ctx,
285 .crypt_fn = decrypt_callback,
286 };
287 int ret;
288
289 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
290 ret = lrw_crypt(desc, dst, src, nbytes, &req);
291 serpent_fpu_end(crypt_ctx.fpu_enabled);
292
293 return ret;
294}
295
296static void lrw_exit_tfm(struct crypto_tfm *tfm)
297{
298 struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
299
300 lrw_free_table(&ctx->lrw_table);
301}
302
303struct serpent_xts_ctx {
304 struct serpent_ctx tweak_ctx;
305 struct serpent_ctx crypt_ctx;
306}; 213};
307 214
308static int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key, 215static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
309 unsigned int keylen)
310{
311 struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
312 int err;
313
314 err = xts_check_key(tfm, key, keylen);
315 if (err)
316 return err;
317
318 /* first half of xts-key is for crypt */
319 err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
320 if (err)
321 return err;
322
323 /* second half of xts-key is for tweak */
324 return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
325}
326
327static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
328 struct scatterlist *src, unsigned int nbytes)
329{
330 struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
331 le128 buf[SERPENT_PARALLEL_BLOCKS];
332 struct crypt_priv crypt_ctx = {
333 .ctx = &ctx->crypt_ctx,
334 .fpu_enabled = false,
335 };
336 struct xts_crypt_req req = {
337 .tbuf = buf,
338 .tbuflen = sizeof(buf),
339
340 .tweak_ctx = &ctx->tweak_ctx,
341 .tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
342 .crypt_ctx = &crypt_ctx,
343 .crypt_fn = encrypt_callback,
344 };
345 int ret;
346
347 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
348 ret = xts_crypt(desc, dst, src, nbytes, &req);
349 serpent_fpu_end(crypt_ctx.fpu_enabled);
350
351 return ret;
352}
353
354static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
355 struct scatterlist *src, unsigned int nbytes)
356{
357 struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
358 le128 buf[SERPENT_PARALLEL_BLOCKS];
359 struct crypt_priv crypt_ctx = {
360 .ctx = &ctx->crypt_ctx,
361 .fpu_enabled = false,
362 };
363 struct xts_crypt_req req = {
364 .tbuf = buf,
365 .tbuflen = sizeof(buf),
366
367 .tweak_ctx = &ctx->tweak_ctx,
368 .tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
369 .crypt_ctx = &crypt_ctx,
370 .crypt_fn = decrypt_callback,
371 };
372 int ret;
373
374 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
375 ret = xts_crypt(desc, dst, src, nbytes, &req);
376 serpent_fpu_end(crypt_ctx.fpu_enabled);
377
378 return ret;
379}
380
381static struct crypto_alg serpent_algs[10] = { {
382 .cra_name = "__ecb-serpent-sse2",
383 .cra_driver_name = "__driver-ecb-serpent-sse2",
384 .cra_priority = 0,
385 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
386 CRYPTO_ALG_INTERNAL,
387 .cra_blocksize = SERPENT_BLOCK_SIZE,
388 .cra_ctxsize = sizeof(struct serpent_ctx),
389 .cra_alignmask = 0,
390 .cra_type = &crypto_blkcipher_type,
391 .cra_module = THIS_MODULE,
392 .cra_u = {
393 .blkcipher = {
394 .min_keysize = SERPENT_MIN_KEY_SIZE,
395 .max_keysize = SERPENT_MAX_KEY_SIZE,
396 .setkey = serpent_setkey,
397 .encrypt = ecb_encrypt,
398 .decrypt = ecb_decrypt,
399 },
400 },
401}, {
402 .cra_name = "__cbc-serpent-sse2",
403 .cra_driver_name = "__driver-cbc-serpent-sse2",
404 .cra_priority = 0,
405 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
406 CRYPTO_ALG_INTERNAL,
407 .cra_blocksize = SERPENT_BLOCK_SIZE,
408 .cra_ctxsize = sizeof(struct serpent_ctx),
409 .cra_alignmask = 0,
410 .cra_type = &crypto_blkcipher_type,
411 .cra_module = THIS_MODULE,
412 .cra_u = {
413 .blkcipher = {
414 .min_keysize = SERPENT_MIN_KEY_SIZE,
415 .max_keysize = SERPENT_MAX_KEY_SIZE,
416 .setkey = serpent_setkey,
417 .encrypt = cbc_encrypt,
418 .decrypt = cbc_decrypt,
419 },
420 },
421}, {
422 .cra_name = "__ctr-serpent-sse2",
423 .cra_driver_name = "__driver-ctr-serpent-sse2",
424 .cra_priority = 0,
425 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
426 CRYPTO_ALG_INTERNAL,
427 .cra_blocksize = 1,
428 .cra_ctxsize = sizeof(struct serpent_ctx),
429 .cra_alignmask = 0,
430 .cra_type = &crypto_blkcipher_type,
431 .cra_module = THIS_MODULE,
432 .cra_u = {
433 .blkcipher = {
434 .min_keysize = SERPENT_MIN_KEY_SIZE,
435 .max_keysize = SERPENT_MAX_KEY_SIZE,
436 .ivsize = SERPENT_BLOCK_SIZE,
437 .setkey = serpent_setkey,
438 .encrypt = ctr_crypt,
439 .decrypt = ctr_crypt,
440 },
441 },
442}, {
443 .cra_name = "__lrw-serpent-sse2",
444 .cra_driver_name = "__driver-lrw-serpent-sse2",
445 .cra_priority = 0,
446 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
447 CRYPTO_ALG_INTERNAL,
448 .cra_blocksize = SERPENT_BLOCK_SIZE,
449 .cra_ctxsize = sizeof(struct serpent_lrw_ctx),
450 .cra_alignmask = 0,
451 .cra_type = &crypto_blkcipher_type,
452 .cra_module = THIS_MODULE,
453 .cra_exit = lrw_exit_tfm,
454 .cra_u = {
455 .blkcipher = {
456 .min_keysize = SERPENT_MIN_KEY_SIZE +
457 SERPENT_BLOCK_SIZE,
458 .max_keysize = SERPENT_MAX_KEY_SIZE +
459 SERPENT_BLOCK_SIZE,
460 .ivsize = SERPENT_BLOCK_SIZE,
461 .setkey = lrw_serpent_setkey,
462 .encrypt = lrw_encrypt,
463 .decrypt = lrw_decrypt,
464 },
465 },
466}, {
467 .cra_name = "__xts-serpent-sse2",
468 .cra_driver_name = "__driver-xts-serpent-sse2",
469 .cra_priority = 0,
470 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
471 CRYPTO_ALG_INTERNAL,
472 .cra_blocksize = SERPENT_BLOCK_SIZE,
473 .cra_ctxsize = sizeof(struct serpent_xts_ctx),
474 .cra_alignmask = 0,
475 .cra_type = &crypto_blkcipher_type,
476 .cra_module = THIS_MODULE,
477 .cra_u = {
478 .blkcipher = {
479 .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
480 .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
481 .ivsize = SERPENT_BLOCK_SIZE,
482 .setkey = xts_serpent_setkey,
483 .encrypt = xts_encrypt,
484 .decrypt = xts_decrypt,
485 },
486 },
487}, {
488 .cra_name = "ecb(serpent)",
489 .cra_driver_name = "ecb-serpent-sse2",
490 .cra_priority = 400,
491 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
492 .cra_blocksize = SERPENT_BLOCK_SIZE,
493 .cra_ctxsize = sizeof(struct async_helper_ctx),
494 .cra_alignmask = 0,
495 .cra_type = &crypto_ablkcipher_type,
496 .cra_module = THIS_MODULE,
497 .cra_init = ablk_init,
498 .cra_exit = ablk_exit,
499 .cra_u = {
500 .ablkcipher = {
501 .min_keysize = SERPENT_MIN_KEY_SIZE,
502 .max_keysize = SERPENT_MAX_KEY_SIZE,
503 .setkey = ablk_set_key,
504 .encrypt = ablk_encrypt,
505 .decrypt = ablk_decrypt,
506 },
507 },
508}, {
509 .cra_name = "cbc(serpent)",
510 .cra_driver_name = "cbc-serpent-sse2",
511 .cra_priority = 400,
512 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
513 .cra_blocksize = SERPENT_BLOCK_SIZE,
514 .cra_ctxsize = sizeof(struct async_helper_ctx),
515 .cra_alignmask = 0,
516 .cra_type = &crypto_ablkcipher_type,
517 .cra_module = THIS_MODULE,
518 .cra_init = ablk_init,
519 .cra_exit = ablk_exit,
520 .cra_u = {
521 .ablkcipher = {
522 .min_keysize = SERPENT_MIN_KEY_SIZE,
523 .max_keysize = SERPENT_MAX_KEY_SIZE,
524 .ivsize = SERPENT_BLOCK_SIZE,
525 .setkey = ablk_set_key,
526 .encrypt = __ablk_encrypt,
527 .decrypt = ablk_decrypt,
528 },
529 },
530}, {
531 .cra_name = "ctr(serpent)",
532 .cra_driver_name = "ctr-serpent-sse2",
533 .cra_priority = 400,
534 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
535 .cra_blocksize = 1,
536 .cra_ctxsize = sizeof(struct async_helper_ctx),
537 .cra_alignmask = 0,
538 .cra_type = &crypto_ablkcipher_type,
539 .cra_module = THIS_MODULE,
540 .cra_init = ablk_init,
541 .cra_exit = ablk_exit,
542 .cra_u = {
543 .ablkcipher = {
544 .min_keysize = SERPENT_MIN_KEY_SIZE,
545 .max_keysize = SERPENT_MAX_KEY_SIZE,
546 .ivsize = SERPENT_BLOCK_SIZE,
547 .setkey = ablk_set_key,
548 .encrypt = ablk_encrypt,
549 .decrypt = ablk_encrypt,
550 .geniv = "chainiv",
551 },
552 },
553}, {
554 .cra_name = "lrw(serpent)",
555 .cra_driver_name = "lrw-serpent-sse2",
556 .cra_priority = 400,
557 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
558 .cra_blocksize = SERPENT_BLOCK_SIZE,
559 .cra_ctxsize = sizeof(struct async_helper_ctx),
560 .cra_alignmask = 0,
561 .cra_type = &crypto_ablkcipher_type,
562 .cra_module = THIS_MODULE,
563 .cra_init = ablk_init,
564 .cra_exit = ablk_exit,
565 .cra_u = {
566 .ablkcipher = {
567 .min_keysize = SERPENT_MIN_KEY_SIZE +
568 SERPENT_BLOCK_SIZE,
569 .max_keysize = SERPENT_MAX_KEY_SIZE +
570 SERPENT_BLOCK_SIZE,
571 .ivsize = SERPENT_BLOCK_SIZE,
572 .setkey = ablk_set_key,
573 .encrypt = ablk_encrypt,
574 .decrypt = ablk_decrypt,
575 },
576 },
577}, {
578 .cra_name = "xts(serpent)",
579 .cra_driver_name = "xts-serpent-sse2",
580 .cra_priority = 400,
581 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
582 .cra_blocksize = SERPENT_BLOCK_SIZE,
583 .cra_ctxsize = sizeof(struct async_helper_ctx),
584 .cra_alignmask = 0,
585 .cra_type = &crypto_ablkcipher_type,
586 .cra_module = THIS_MODULE,
587 .cra_init = ablk_init,
588 .cra_exit = ablk_exit,
589 .cra_u = {
590 .ablkcipher = {
591 .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
592 .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
593 .ivsize = SERPENT_BLOCK_SIZE,
594 .setkey = ablk_set_key,
595 .encrypt = ablk_encrypt,
596 .decrypt = ablk_decrypt,
597 },
598 },
599} };
600 216
601static int __init serpent_sse2_init(void) 217static int __init serpent_sse2_init(void)
602{ 218{
@@ -605,12 +221,15 @@ static int __init serpent_sse2_init(void)
605 return -ENODEV; 221 return -ENODEV;
606 } 222 }
607 223
608 return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs)); 224 return simd_register_skciphers_compat(serpent_algs,
225 ARRAY_SIZE(serpent_algs),
226 serpent_simd_algs);
609} 227}
610 228
611static void __exit serpent_sse2_exit(void) 229static void __exit serpent_sse2_exit(void)
612{ 230{
613 crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs)); 231 simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
232 serpent_simd_algs);
614} 233}
615 234
616module_init(serpent_sse2_init); 235module_init(serpent_sse2_init);
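All three serpent glue files end with the same registration shape: an array of internal skcipher_alg entries, a parallel array of simd_skcipher_alg pointers, and module init/exit that call simd_register_skciphers_compat() / simd_unregister_skciphers(). The stripped-down module below restates that shape with hypothetical demo_* names, placeholder priority/driver-name values, and a stubbed ECB handler; it is a sketch of the pattern, not one of the real drivers, and it omits the CPU feature checks the real modules perform before registering.

#include <linux/module.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/serpent.h>

static int demo_setkey(struct crypto_skcipher *tfm, const u8 *key,
                       unsigned int keylen)
{
        return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
}

static int demo_ecb_crypt(struct skcipher_request *req)
{
        /* A real glue module would call glue_ecb_req_128bit() here. */
        return -EOPNOTSUPP;
}

static struct skcipher_alg demo_algs[] = {
        {
                .base.cra_name          = "__ecb(serpent)",
                .base.cra_driver_name   = "__ecb-serpent-demo",
                .base.cra_priority      = 300,
                .base.cra_flags         = CRYPTO_ALG_INTERNAL,
                .base.cra_blocksize     = SERPENT_BLOCK_SIZE,
                .base.cra_ctxsize       = sizeof(struct serpent_ctx),
                .base.cra_module        = THIS_MODULE,
                .min_keysize            = SERPENT_MIN_KEY_SIZE,
                .max_keysize            = SERPENT_MAX_KEY_SIZE,
                .setkey                 = demo_setkey,
                .encrypt                = demo_ecb_crypt,
                .decrypt                = demo_ecb_crypt,
        },
};

/* Wrappers created by the simd helper, kept for unregistration. */
static struct simd_skcipher_alg *demo_simd_algs[ARRAY_SIZE(demo_algs)];

static int __init demo_init(void)
{
        return simd_register_skciphers_compat(demo_algs, ARRAY_SIZE(demo_algs),
                                              demo_simd_algs);
}

static void __exit demo_exit(void)
{
        simd_unregister_skciphers(demo_algs, ARRAY_SIZE(demo_algs),
                                  demo_simd_algs);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
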
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb.c b/arch/x86/crypto/sha1-mb/sha1_mb.c
index acf9fdf01671..e17655ffde79 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha1-mb/sha1_mb.c
@@ -106,13 +106,6 @@ static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)
106static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job) 106static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)
107 (struct sha1_mb_mgr *state); 107 (struct sha1_mb_mgr *state);
108 108
109static inline void sha1_init_digest(uint32_t *digest)
110{
111 static const uint32_t initial_digest[SHA1_DIGEST_LENGTH] = {SHA1_H0,
112 SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };
113 memcpy(digest, initial_digest, sizeof(initial_digest));
114}
115
116static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2], 109static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
117 uint64_t total_len) 110 uint64_t total_len)
118{ 111{
@@ -244,11 +237,8 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
244 uint32_t len, 237 uint32_t len,
245 int flags) 238 int flags)
246{ 239{
247 if (flags & (~HASH_ENTIRE)) { 240 if (flags & ~(HASH_UPDATE | HASH_LAST)) {
248 /* 241 /* User should not pass anything other than UPDATE or LAST */
249 * User should not pass anything other than FIRST, UPDATE, or
250 * LAST
251 */
252 ctx->error = HASH_CTX_ERROR_INVALID_FLAGS; 242 ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
253 return ctx; 243 return ctx;
254 } 244 }
@@ -259,24 +249,12 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
259 return ctx; 249 return ctx;
260 } 250 }
261 251
262 if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) { 252 if (ctx->status & HASH_CTX_STS_COMPLETE) {
263 /* Cannot update a finished job. */ 253 /* Cannot update a finished job. */
264 ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED; 254 ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
265 return ctx; 255 return ctx;
266 } 256 }
267 257
268
269 if (flags & HASH_FIRST) {
270 /* Init digest */
271 sha1_init_digest(ctx->job.result_digest);
272
273 /* Reset byte counter */
274 ctx->total_length = 0;
275
276 /* Clear extra blocks */
277 ctx->partial_block_buffer_length = 0;
278 }
279
280 /* 258 /*
281 * If we made it here, there were no errors during this call to 259 * If we made it here, there were no errors during this call to
282 * submit 260 * submit
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
index 13590ccf965c..9454bd16f9f8 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
@@ -57,11 +57,9 @@
57#include "sha1_mb_mgr.h" 57#include "sha1_mb_mgr.h"
58 58
59#define HASH_UPDATE 0x00 59#define HASH_UPDATE 0x00
60#define HASH_FIRST 0x01 60#define HASH_LAST 0x01
61#define HASH_LAST 0x02 61#define HASH_DONE 0x02
62#define HASH_ENTIRE 0x03 62#define HASH_FINAL 0x04
63#define HASH_DONE 0x04
64#define HASH_FINAL 0x08
65 63
66#define HASH_CTX_STS_IDLE 0x00 64#define HASH_CTX_STS_IDLE 0x00
67#define HASH_CTX_STS_PROCESSING 0x01 65#define HASH_CTX_STS_PROCESSING 0x01
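The sha1-mb hunks above (and the matching sha256-mb/sha512-mb hunks that follow) drop HASH_FIRST from the submit path and renumber the remaining flags, so a finished context can no longer be resubmitted at all, and digest initialisation is presumably handled by the ahash init path rather than being triggered by a flag. A minimal restatement of the new submit-time check, with the constants copied from sha1_mb_ctx.h (the helper name is hypothetical):

#include <linux/types.h>

#define HASH_UPDATE 0x00
#define HASH_LAST   0x01
#define HASH_DONE   0x02
#define HASH_FINAL  0x04

/*
 * flags & ~(HASH_UPDATE | HASH_LAST) reduces to flags & ~0x01: any bit
 * other than LAST makes submit fail with HASH_CTX_ERROR_INVALID_FLAGS.
 */
static inline bool sha_mb_submit_flags_ok(int flags)
{
        return !(flags & ~(HASH_UPDATE | HASH_LAST));
}
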
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c
index 7926a226b120..4c46ac1b6653 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb.c
+++ b/arch/x86/crypto/sha256-mb/sha256_mb.c
@@ -106,14 +106,6 @@ static asmlinkage struct job_sha256* (*sha256_job_mgr_flush)
106static asmlinkage struct job_sha256* (*sha256_job_mgr_get_comp_job) 106static asmlinkage struct job_sha256* (*sha256_job_mgr_get_comp_job)
107 (struct sha256_mb_mgr *state); 107 (struct sha256_mb_mgr *state);
108 108
109inline void sha256_init_digest(uint32_t *digest)
110{
111 static const uint32_t initial_digest[SHA256_DIGEST_LENGTH] = {
112 SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
113 SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7};
114 memcpy(digest, initial_digest, sizeof(initial_digest));
115}
116
117inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2], 109inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2],
118 uint64_t total_len) 110 uint64_t total_len)
119{ 111{
@@ -245,10 +237,8 @@ static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
245 uint32_t len, 237 uint32_t len,
246 int flags) 238 int flags)
247{ 239{
248 if (flags & (~HASH_ENTIRE)) { 240 if (flags & ~(HASH_UPDATE | HASH_LAST)) {
249 /* User should not pass anything other than FIRST, UPDATE 241 /* User should not pass anything other than UPDATE or LAST */
250 * or LAST
251 */
252 ctx->error = HASH_CTX_ERROR_INVALID_FLAGS; 242 ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
253 return ctx; 243 return ctx;
254 } 244 }
@@ -259,23 +249,12 @@ static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
259 return ctx; 249 return ctx;
260 } 250 }
261 251
262 if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) { 252 if (ctx->status & HASH_CTX_STS_COMPLETE) {
263 /* Cannot update a finished job. */ 253 /* Cannot update a finished job. */
264 ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED; 254 ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
265 return ctx; 255 return ctx;
266 } 256 }
267 257
268 if (flags & HASH_FIRST) {
269 /* Init digest */
270 sha256_init_digest(ctx->job.result_digest);
271
272 /* Reset byte counter */
273 ctx->total_length = 0;
274
275 /* Clear extra blocks */
276 ctx->partial_block_buffer_length = 0;
277 }
278
279 /* If we made it here, there was no error during this call to submit */ 258 /* If we made it here, there was no error during this call to submit */
280 ctx->error = HASH_CTX_ERROR_NONE; 259 ctx->error = HASH_CTX_ERROR_NONE;
281 260
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
index aabb30320af0..7c432543dc7f 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
@@ -57,11 +57,9 @@
57#include "sha256_mb_mgr.h" 57#include "sha256_mb_mgr.h"
58 58
59#define HASH_UPDATE 0x00 59#define HASH_UPDATE 0x00
60#define HASH_FIRST 0x01 60#define HASH_LAST 0x01
61#define HASH_LAST 0x02 61#define HASH_DONE 0x02
62#define HASH_ENTIRE 0x03 62#define HASH_FINAL 0x04
63#define HASH_DONE 0x04
64#define HASH_FINAL 0x08
65 63
66#define HASH_CTX_STS_IDLE 0x00 64#define HASH_CTX_STS_IDLE 0x00
67#define HASH_CTX_STS_PROCESSING 0x01 65#define HASH_CTX_STS_PROCESSING 0x01
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c
index 458409b7568d..39e2bbdc1836 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb.c
+++ b/arch/x86/crypto/sha512-mb/sha512_mb.c
@@ -107,15 +107,6 @@ static asmlinkage struct job_sha512* (*sha512_job_mgr_flush)
107static asmlinkage struct job_sha512* (*sha512_job_mgr_get_comp_job) 107static asmlinkage struct job_sha512* (*sha512_job_mgr_get_comp_job)
108 (struct sha512_mb_mgr *state); 108 (struct sha512_mb_mgr *state);
109 109
110inline void sha512_init_digest(uint64_t *digest)
111{
112 static const uint64_t initial_digest[SHA512_DIGEST_LENGTH] = {
113 SHA512_H0, SHA512_H1, SHA512_H2,
114 SHA512_H3, SHA512_H4, SHA512_H5,
115 SHA512_H6, SHA512_H7 };
116 memcpy(digest, initial_digest, sizeof(initial_digest));
117}
118
119inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2], 110inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2],
120 uint64_t total_len) 111 uint64_t total_len)
121{ 112{
@@ -263,11 +254,8 @@ static struct sha512_hash_ctx
263 254
264 mgr = cstate->mgr; 255 mgr = cstate->mgr;
265 spin_lock_irqsave(&cstate->work_lock, irqflags); 256 spin_lock_irqsave(&cstate->work_lock, irqflags);
266 if (flags & (~HASH_ENTIRE)) { 257 if (flags & ~(HASH_UPDATE | HASH_LAST)) {
267 /* 258 /* User should not pass anything other than UPDATE or LAST */
268 * User should not pass anything other than FIRST, UPDATE, or
269 * LAST
270 */
271 ctx->error = HASH_CTX_ERROR_INVALID_FLAGS; 259 ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
272 goto unlock; 260 goto unlock;
273 } 261 }
@@ -278,24 +266,12 @@ static struct sha512_hash_ctx
278 goto unlock; 266 goto unlock;
279 } 267 }
280 268
281 if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) { 269 if (ctx->status & HASH_CTX_STS_COMPLETE) {
282 /* Cannot update a finished job. */ 270 /* Cannot update a finished job. */
283 ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED; 271 ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
284 goto unlock; 272 goto unlock;
285 } 273 }
286 274
287
288 if (flags & HASH_FIRST) {
289 /* Init digest */
290 sha512_init_digest(ctx->job.result_digest);
291
292 /* Reset byte counter */
293 ctx->total_length = 0;
294
295 /* Clear extra blocks */
296 ctx->partial_block_buffer_length = 0;
297 }
298
299 /* 275 /*
300 * If we made it here, there were no errors during this call to 276 * If we made it here, there were no errors during this call to
301 * submit 277 * submit
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
index e4653f5eec3f..e5c465bd821e 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
@@ -57,11 +57,9 @@
57#include "sha512_mb_mgr.h" 57#include "sha512_mb_mgr.h"
58 58
59#define HASH_UPDATE 0x00 59#define HASH_UPDATE 0x00
60#define HASH_FIRST 0x01 60#define HASH_LAST 0x01
61#define HASH_LAST 0x02 61#define HASH_DONE 0x02
62#define HASH_ENTIRE 0x03 62#define HASH_FINAL 0x04
63#define HASH_DONE 0x04
64#define HASH_FINAL 0x08
65 63
66#define HASH_CTX_STS_IDLE 0x00 64#define HASH_CTX_STS_IDLE 0x00
67#define HASH_CTX_STS_PROCESSING 0x01 65#define HASH_CTX_STS_PROCESSING 0x01
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
index b7a3904b953c..66d989230d10 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
@@ -24,24 +24,15 @@
24 */ 24 */
25 25
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/hardirq.h>
28#include <linux/types.h> 27#include <linux/types.h>
29#include <linux/crypto.h> 28#include <linux/crypto.h>
30#include <linux/err.h> 29#include <linux/err.h>
31#include <crypto/ablk_helper.h>
32#include <crypto/algapi.h> 30#include <crypto/algapi.h>
31#include <crypto/internal/simd.h>
33#include <crypto/twofish.h> 32#include <crypto/twofish.h>
34#include <crypto/cryptd.h>
35#include <crypto/b128ops.h>
36#include <crypto/ctr.h>
37#include <crypto/lrw.h>
38#include <crypto/xts.h> 33#include <crypto/xts.h>
39#include <asm/fpu/api.h>
40#include <asm/crypto/twofish.h>
41#include <asm/crypto/glue_helper.h> 34#include <asm/crypto/glue_helper.h>
42#include <crypto/scatterwalk.h> 35#include <asm/crypto/twofish.h>
43#include <linux/workqueue.h>
44#include <linux/spinlock.h>
45 36
46#define TWOFISH_PARALLEL_BLOCKS 8 37#define TWOFISH_PARALLEL_BLOCKS 8
47 38
@@ -61,6 +52,12 @@ asmlinkage void twofish_xts_enc_8way(struct twofish_ctx *ctx, u8 *dst,
61asmlinkage void twofish_xts_dec_8way(struct twofish_ctx *ctx, u8 *dst, 52asmlinkage void twofish_xts_dec_8way(struct twofish_ctx *ctx, u8 *dst,
62 const u8 *src, le128 *iv); 53 const u8 *src, le128 *iv);
63 54
55static int twofish_setkey_skcipher(struct crypto_skcipher *tfm,
56 const u8 *key, unsigned int keylen)
57{
58 return twofish_setkey(&tfm->base, key, keylen);
59}
60
64static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, 61static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
65 const u8 *src) 62 const u8 *src)
66{ 63{
@@ -79,6 +76,31 @@ static void twofish_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
79 GLUE_FUNC_CAST(twofish_dec_blk)); 76 GLUE_FUNC_CAST(twofish_dec_blk));
80} 77}
81 78
79struct twofish_xts_ctx {
80 struct twofish_ctx tweak_ctx;
81 struct twofish_ctx crypt_ctx;
82};
83
84static int xts_twofish_setkey(struct crypto_skcipher *tfm, const u8 *key,
85 unsigned int keylen)
86{
87 struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
88 u32 *flags = &tfm->base.crt_flags;
89 int err;
90
91 err = xts_verify_key(tfm, key, keylen);
92 if (err)
93 return err;
94
95 /* first half of xts-key is for crypt */
96 err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
97 if (err)
98 return err;
99
100 /* second half of xts-key is for tweak */
101 return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
102 flags);
103}
82 104
83static const struct common_glue_ctx twofish_enc = { 105static const struct common_glue_ctx twofish_enc = {
84 .num_funcs = 3, 106 .num_funcs = 3,
@@ -170,389 +192,113 @@ static const struct common_glue_ctx twofish_dec_xts = {
170 } } 192 } }
171}; 193};
172 194
173static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 195static int ecb_encrypt(struct skcipher_request *req)
174 struct scatterlist *src, unsigned int nbytes)
175{
176 return glue_ecb_crypt_128bit(&twofish_enc, desc, dst, src, nbytes);
177}
178
179static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
180 struct scatterlist *src, unsigned int nbytes)
181{
182 return glue_ecb_crypt_128bit(&twofish_dec, desc, dst, src, nbytes);
183}
184
185static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
186 struct scatterlist *src, unsigned int nbytes)
187{ 196{
188 return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(twofish_enc_blk), desc, 197 return glue_ecb_req_128bit(&twofish_enc, req);
189 dst, src, nbytes);
190} 198}
191 199
192static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 200static int ecb_decrypt(struct skcipher_request *req)
193 struct scatterlist *src, unsigned int nbytes)
194{ 201{
195 return glue_cbc_decrypt_128bit(&twofish_dec_cbc, desc, dst, src, 202 return glue_ecb_req_128bit(&twofish_dec, req);
196 nbytes);
197} 203}
198 204
199static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, 205static int cbc_encrypt(struct skcipher_request *req)
200 struct scatterlist *src, unsigned int nbytes)
201{ 206{
202 return glue_ctr_crypt_128bit(&twofish_ctr, desc, dst, src, nbytes); 207 return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk),
208 req);
203} 209}
204 210
205static inline bool twofish_fpu_begin(bool fpu_enabled, unsigned int nbytes) 211static int cbc_decrypt(struct skcipher_request *req)
206{ 212{
207 return glue_fpu_begin(TF_BLOCK_SIZE, TWOFISH_PARALLEL_BLOCKS, NULL, 213 return glue_cbc_decrypt_req_128bit(&twofish_dec_cbc, req);
208 fpu_enabled, nbytes);
209} 214}
210 215
211static inline void twofish_fpu_end(bool fpu_enabled) 216static int ctr_crypt(struct skcipher_request *req)
212{ 217{
213 glue_fpu_end(fpu_enabled); 218 return glue_ctr_req_128bit(&twofish_ctr, req);
214} 219}
215 220
216struct crypt_priv { 221static int xts_encrypt(struct skcipher_request *req)
217 struct twofish_ctx *ctx;
218 bool fpu_enabled;
219};
220
221static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
222{ 222{
223 const unsigned int bsize = TF_BLOCK_SIZE; 223 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
224 struct crypt_priv *ctx = priv; 224 struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
225 int i;
226
227 ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes);
228
229 if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
230 twofish_ecb_enc_8way(ctx->ctx, srcdst, srcdst);
231 return;
232 }
233
234 for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
235 twofish_enc_blk_3way(ctx->ctx, srcdst, srcdst);
236 225
237 nbytes %= bsize * 3; 226 return glue_xts_req_128bit(&twofish_enc_xts, req,
238 227 XTS_TWEAK_CAST(twofish_enc_blk),
239 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize) 228 &ctx->tweak_ctx, &ctx->crypt_ctx);
240 twofish_enc_blk(ctx->ctx, srcdst, srcdst);
241} 229}
242 230
243static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) 231static int xts_decrypt(struct skcipher_request *req)
244{ 232{
245 const unsigned int bsize = TF_BLOCK_SIZE; 233 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
246 struct crypt_priv *ctx = priv; 234 struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
247 int i;
248
249 ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes);
250
251 if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
252 twofish_ecb_dec_8way(ctx->ctx, srcdst, srcdst);
253 return;
254 }
255 235
256 for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3) 236 return glue_xts_req_128bit(&twofish_dec_xts, req,
257 twofish_dec_blk_3way(ctx->ctx, srcdst, srcdst); 237 XTS_TWEAK_CAST(twofish_enc_blk),
258 238 &ctx->tweak_ctx, &ctx->crypt_ctx);
259 nbytes %= bsize * 3;
260
261 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
262 twofish_dec_blk(ctx->ctx, srcdst, srcdst);
263} 239}
264 240
265static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 241static struct skcipher_alg twofish_algs[] = {
266 struct scatterlist *src, unsigned int nbytes) 242 {
267{ 243 .base.cra_name = "__ecb(twofish)",
268 struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 244 .base.cra_driver_name = "__ecb-twofish-avx",
269 be128 buf[TWOFISH_PARALLEL_BLOCKS]; 245 .base.cra_priority = 400,
270 struct crypt_priv crypt_ctx = { 246 .base.cra_flags = CRYPTO_ALG_INTERNAL,
271 .ctx = &ctx->twofish_ctx, 247 .base.cra_blocksize = TF_BLOCK_SIZE,
272 .fpu_enabled = false, 248 .base.cra_ctxsize = sizeof(struct twofish_ctx),
273 }; 249 .base.cra_module = THIS_MODULE,
274 struct lrw_crypt_req req = { 250 .min_keysize = TF_MIN_KEY_SIZE,
275 .tbuf = buf, 251 .max_keysize = TF_MAX_KEY_SIZE,
276 .tbuflen = sizeof(buf), 252 .setkey = twofish_setkey_skcipher,
277 253 .encrypt = ecb_encrypt,
278 .table_ctx = &ctx->lrw_table, 254 .decrypt = ecb_decrypt,
279 .crypt_ctx = &crypt_ctx, 255 }, {
280 .crypt_fn = encrypt_callback, 256 .base.cra_name = "__cbc(twofish)",
281 }; 257 .base.cra_driver_name = "__cbc-twofish-avx",
282 int ret; 258 .base.cra_priority = 400,
283 259 .base.cra_flags = CRYPTO_ALG_INTERNAL,
284 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; 260 .base.cra_blocksize = TF_BLOCK_SIZE,
285 ret = lrw_crypt(desc, dst, src, nbytes, &req); 261 .base.cra_ctxsize = sizeof(struct twofish_ctx),
286 twofish_fpu_end(crypt_ctx.fpu_enabled); 262 .base.cra_module = THIS_MODULE,
287 263 .min_keysize = TF_MIN_KEY_SIZE,
288 return ret; 264 .max_keysize = TF_MAX_KEY_SIZE,
289} 265 .ivsize = TF_BLOCK_SIZE,
290 266 .setkey = twofish_setkey_skcipher,
291static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 267 .encrypt = cbc_encrypt,
292 struct scatterlist *src, unsigned int nbytes) 268 .decrypt = cbc_decrypt,
293{ 269 }, {
294 struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 270 .base.cra_name = "__ctr(twofish)",
295 be128 buf[TWOFISH_PARALLEL_BLOCKS]; 271 .base.cra_driver_name = "__ctr-twofish-avx",
296 struct crypt_priv crypt_ctx = { 272 .base.cra_priority = 400,
297 .ctx = &ctx->twofish_ctx, 273 .base.cra_flags = CRYPTO_ALG_INTERNAL,
298 .fpu_enabled = false, 274 .base.cra_blocksize = 1,
299 }; 275 .base.cra_ctxsize = sizeof(struct twofish_ctx),
300 struct lrw_crypt_req req = { 276 .base.cra_module = THIS_MODULE,
301 .tbuf = buf, 277 .min_keysize = TF_MIN_KEY_SIZE,
302 .tbuflen = sizeof(buf), 278 .max_keysize = TF_MAX_KEY_SIZE,
303 279 .ivsize = TF_BLOCK_SIZE,
304 .table_ctx = &ctx->lrw_table, 280 .chunksize = TF_BLOCK_SIZE,
305 .crypt_ctx = &crypt_ctx, 281 .setkey = twofish_setkey_skcipher,
306 .crypt_fn = decrypt_callback, 282 .encrypt = ctr_crypt,
307 }; 283 .decrypt = ctr_crypt,
308 int ret; 284 }, {
309 285 .base.cra_name = "__xts(twofish)",
310 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; 286 .base.cra_driver_name = "__xts-twofish-avx",
311 ret = lrw_crypt(desc, dst, src, nbytes, &req); 287 .base.cra_priority = 400,
312 twofish_fpu_end(crypt_ctx.fpu_enabled); 288 .base.cra_flags = CRYPTO_ALG_INTERNAL,
313 289 .base.cra_blocksize = TF_BLOCK_SIZE,
314 return ret; 290 .base.cra_ctxsize = sizeof(struct twofish_xts_ctx),
315} 291 .base.cra_module = THIS_MODULE,
316 292 .min_keysize = 2 * TF_MIN_KEY_SIZE,
317static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 293 .max_keysize = 2 * TF_MAX_KEY_SIZE,
318 struct scatterlist *src, unsigned int nbytes) 294 .ivsize = TF_BLOCK_SIZE,
319{ 295 .setkey = xts_twofish_setkey,
320 struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 296 .encrypt = xts_encrypt,
321 297 .decrypt = xts_decrypt,
322 return glue_xts_crypt_128bit(&twofish_enc_xts, desc, dst, src, nbytes,
323 XTS_TWEAK_CAST(twofish_enc_blk),
324 &ctx->tweak_ctx, &ctx->crypt_ctx);
325}
326
327static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
328 struct scatterlist *src, unsigned int nbytes)
329{
330 struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
331
332 return glue_xts_crypt_128bit(&twofish_dec_xts, desc, dst, src, nbytes,
333 XTS_TWEAK_CAST(twofish_enc_blk),
334 &ctx->tweak_ctx, &ctx->crypt_ctx);
335}
336
337static struct crypto_alg twofish_algs[10] = { {
338 .cra_name = "__ecb-twofish-avx",
339 .cra_driver_name = "__driver-ecb-twofish-avx",
340 .cra_priority = 0,
341 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
342 CRYPTO_ALG_INTERNAL,
343 .cra_blocksize = TF_BLOCK_SIZE,
344 .cra_ctxsize = sizeof(struct twofish_ctx),
345 .cra_alignmask = 0,
346 .cra_type = &crypto_blkcipher_type,
347 .cra_module = THIS_MODULE,
348 .cra_u = {
349 .blkcipher = {
350 .min_keysize = TF_MIN_KEY_SIZE,
351 .max_keysize = TF_MAX_KEY_SIZE,
352 .setkey = twofish_setkey,
353 .encrypt = ecb_encrypt,
354 .decrypt = ecb_decrypt,
355 },
356 },
357}, {
358 .cra_name = "__cbc-twofish-avx",
359 .cra_driver_name = "__driver-cbc-twofish-avx",
360 .cra_priority = 0,
361 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
362 CRYPTO_ALG_INTERNAL,
363 .cra_blocksize = TF_BLOCK_SIZE,
364 .cra_ctxsize = sizeof(struct twofish_ctx),
365 .cra_alignmask = 0,
366 .cra_type = &crypto_blkcipher_type,
367 .cra_module = THIS_MODULE,
368 .cra_u = {
369 .blkcipher = {
370 .min_keysize = TF_MIN_KEY_SIZE,
371 .max_keysize = TF_MAX_KEY_SIZE,
372 .setkey = twofish_setkey,
373 .encrypt = cbc_encrypt,
374 .decrypt = cbc_decrypt,
375 },
376 },
377}, {
378 .cra_name = "__ctr-twofish-avx",
379 .cra_driver_name = "__driver-ctr-twofish-avx",
380 .cra_priority = 0,
381 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
382 CRYPTO_ALG_INTERNAL,
383 .cra_blocksize = 1,
384 .cra_ctxsize = sizeof(struct twofish_ctx),
385 .cra_alignmask = 0,
386 .cra_type = &crypto_blkcipher_type,
387 .cra_module = THIS_MODULE,
388 .cra_u = {
389 .blkcipher = {
390 .min_keysize = TF_MIN_KEY_SIZE,
391 .max_keysize = TF_MAX_KEY_SIZE,
392 .ivsize = TF_BLOCK_SIZE,
393 .setkey = twofish_setkey,
394 .encrypt = ctr_crypt,
395 .decrypt = ctr_crypt,
396 },
397 },
398}, {
399 .cra_name = "__lrw-twofish-avx",
400 .cra_driver_name = "__driver-lrw-twofish-avx",
401 .cra_priority = 0,
402 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
403 CRYPTO_ALG_INTERNAL,
404 .cra_blocksize = TF_BLOCK_SIZE,
405 .cra_ctxsize = sizeof(struct twofish_lrw_ctx),
406 .cra_alignmask = 0,
407 .cra_type = &crypto_blkcipher_type,
408 .cra_module = THIS_MODULE,
409 .cra_exit = lrw_twofish_exit_tfm,
410 .cra_u = {
411 .blkcipher = {
412 .min_keysize = TF_MIN_KEY_SIZE +
413 TF_BLOCK_SIZE,
414 .max_keysize = TF_MAX_KEY_SIZE +
415 TF_BLOCK_SIZE,
416 .ivsize = TF_BLOCK_SIZE,
417 .setkey = lrw_twofish_setkey,
418 .encrypt = lrw_encrypt,
419 .decrypt = lrw_decrypt,
420 },
421 },
422}, {
423 .cra_name = "__xts-twofish-avx",
424 .cra_driver_name = "__driver-xts-twofish-avx",
425 .cra_priority = 0,
426 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
427 CRYPTO_ALG_INTERNAL,
428 .cra_blocksize = TF_BLOCK_SIZE,
429 .cra_ctxsize = sizeof(struct twofish_xts_ctx),
430 .cra_alignmask = 0,
431 .cra_type = &crypto_blkcipher_type,
432 .cra_module = THIS_MODULE,
433 .cra_u = {
434 .blkcipher = {
435 .min_keysize = TF_MIN_KEY_SIZE * 2,
436 .max_keysize = TF_MAX_KEY_SIZE * 2,
437 .ivsize = TF_BLOCK_SIZE,
438 .setkey = xts_twofish_setkey,
439 .encrypt = xts_encrypt,
440 .decrypt = xts_decrypt,
441 },
442 },
443}, {
444 .cra_name = "ecb(twofish)",
445 .cra_driver_name = "ecb-twofish-avx",
446 .cra_priority = 400,
447 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
448 .cra_blocksize = TF_BLOCK_SIZE,
449 .cra_ctxsize = sizeof(struct async_helper_ctx),
450 .cra_alignmask = 0,
451 .cra_type = &crypto_ablkcipher_type,
452 .cra_module = THIS_MODULE,
453 .cra_init = ablk_init,
454 .cra_exit = ablk_exit,
455 .cra_u = {
456 .ablkcipher = {
457 .min_keysize = TF_MIN_KEY_SIZE,
458 .max_keysize = TF_MAX_KEY_SIZE,
459 .setkey = ablk_set_key,
460 .encrypt = ablk_encrypt,
461 .decrypt = ablk_decrypt,
462 },
463 },
464}, {
465 .cra_name = "cbc(twofish)",
466 .cra_driver_name = "cbc-twofish-avx",
467 .cra_priority = 400,
468 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
469 .cra_blocksize = TF_BLOCK_SIZE,
470 .cra_ctxsize = sizeof(struct async_helper_ctx),
471 .cra_alignmask = 0,
472 .cra_type = &crypto_ablkcipher_type,
473 .cra_module = THIS_MODULE,
474 .cra_init = ablk_init,
475 .cra_exit = ablk_exit,
476 .cra_u = {
477 .ablkcipher = {
478 .min_keysize = TF_MIN_KEY_SIZE,
479 .max_keysize = TF_MAX_KEY_SIZE,
480 .ivsize = TF_BLOCK_SIZE,
481 .setkey = ablk_set_key,
482 .encrypt = __ablk_encrypt,
483 .decrypt = ablk_decrypt,
484 },
485 },
486}, {
487 .cra_name = "ctr(twofish)",
488 .cra_driver_name = "ctr-twofish-avx",
489 .cra_priority = 400,
490 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
491 .cra_blocksize = 1,
492 .cra_ctxsize = sizeof(struct async_helper_ctx),
493 .cra_alignmask = 0,
494 .cra_type = &crypto_ablkcipher_type,
495 .cra_module = THIS_MODULE,
496 .cra_init = ablk_init,
497 .cra_exit = ablk_exit,
498 .cra_u = {
499 .ablkcipher = {
500 .min_keysize = TF_MIN_KEY_SIZE,
501 .max_keysize = TF_MAX_KEY_SIZE,
502 .ivsize = TF_BLOCK_SIZE,
503 .setkey = ablk_set_key,
504 .encrypt = ablk_encrypt,
505 .decrypt = ablk_encrypt,
506 .geniv = "chainiv",
507 },
508 },
509}, {
510 .cra_name = "lrw(twofish)",
511 .cra_driver_name = "lrw-twofish-avx",
512 .cra_priority = 400,
513 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
514 .cra_blocksize = TF_BLOCK_SIZE,
515 .cra_ctxsize = sizeof(struct async_helper_ctx),
516 .cra_alignmask = 0,
517 .cra_type = &crypto_ablkcipher_type,
518 .cra_module = THIS_MODULE,
519 .cra_init = ablk_init,
520 .cra_exit = ablk_exit,
521 .cra_u = {
522 .ablkcipher = {
523 .min_keysize = TF_MIN_KEY_SIZE +
524 TF_BLOCK_SIZE,
525 .max_keysize = TF_MAX_KEY_SIZE +
526 TF_BLOCK_SIZE,
527 .ivsize = TF_BLOCK_SIZE,
528 .setkey = ablk_set_key,
529 .encrypt = ablk_encrypt,
530 .decrypt = ablk_decrypt,
531 },
532 },
533}, {
534 .cra_name = "xts(twofish)",
535 .cra_driver_name = "xts-twofish-avx",
536 .cra_priority = 400,
537 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
538 .cra_blocksize = TF_BLOCK_SIZE,
539 .cra_ctxsize = sizeof(struct async_helper_ctx),
540 .cra_alignmask = 0,
541 .cra_type = &crypto_ablkcipher_type,
542 .cra_module = THIS_MODULE,
543 .cra_init = ablk_init,
544 .cra_exit = ablk_exit,
545 .cra_u = {
546 .ablkcipher = {
547 .min_keysize = TF_MIN_KEY_SIZE * 2,
548 .max_keysize = TF_MAX_KEY_SIZE * 2,
549 .ivsize = TF_BLOCK_SIZE,
550 .setkey = ablk_set_key,
551 .encrypt = ablk_encrypt,
552 .decrypt = ablk_decrypt,
553 },
554 }, 298 },
555} }; 299};
300
301static struct simd_skcipher_alg *twofish_simd_algs[ARRAY_SIZE(twofish_algs)];
556 302
557static int __init twofish_init(void) 303static int __init twofish_init(void)
558{ 304{
@@ -563,12 +309,15 @@ static int __init twofish_init(void)
563 return -ENODEV; 309 return -ENODEV;
564 } 310 }
565 311
566 return crypto_register_algs(twofish_algs, ARRAY_SIZE(twofish_algs)); 312 return simd_register_skciphers_compat(twofish_algs,
313 ARRAY_SIZE(twofish_algs),
314 twofish_simd_algs);
567} 315}
568 316
569static void __exit twofish_exit(void) 317static void __exit twofish_exit(void)
570{ 318{
571 crypto_unregister_algs(twofish_algs, ARRAY_SIZE(twofish_algs)); 319 simd_unregister_skciphers(twofish_algs, ARRAY_SIZE(twofish_algs),
320 twofish_simd_algs);
572} 321}
573 322
574module_init(twofish_init); 323module_init(twofish_init);
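The AVX glue above now registers internal-only skcipher_alg entries and wraps them with simd_register_skciphers_compat(), which provides the async, cryptd-backed wrappers that were previously hand-rolled through ablk_helper. Consumers are unaffected: the ciphers are still requested by name through the skcipher API. A hedged usage sketch follows — example_xts_twofish() and the synchronous-wait handling are illustrative assumptions, not part of the patch:

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative consumer of the converted implementation; error paths trimmed. */
static int example_xts_twofish(const u8 *key, unsigned int keylen,
                               u8 *buf, unsigned int len, u8 *iv)
{
        DECLARE_CRYPTO_WAIT(wait);
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist sg;
        int err;

        /* Resolves to the highest-priority "xts(twofish)", e.g. the AVX one. */
        tfm = crypto_alloc_skcipher("xts(twofish)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_skcipher_setkey(tfm, key, keylen);
        if (err)
                goto out_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_tfm;
        }

        sg_init_one(&sg, buf, len);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                      CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, &sg, &sg, len, iv);

        /* crypto_wait_req() hides the -EINPROGRESS case of async tfms. */
        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
out_tfm:
        crypto_free_skcipher(tfm);
        return err;
}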
diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
index 243e90a4b5d9..571485502ec8 100644
--- a/arch/x86/crypto/twofish_glue_3way.c
+++ b/arch/x86/crypto/twofish_glue_3way.c
@@ -20,22 +20,26 @@
20 * 20 *
21 */ 21 */
22 22
23#include <asm/processor.h> 23#include <asm/crypto/glue_helper.h>
24#include <asm/crypto/twofish.h>
25#include <crypto/algapi.h>
26#include <crypto/b128ops.h>
27#include <crypto/internal/skcipher.h>
28#include <crypto/twofish.h>
24#include <linux/crypto.h> 29#include <linux/crypto.h>
25#include <linux/init.h> 30#include <linux/init.h>
26#include <linux/module.h> 31#include <linux/module.h>
27#include <linux/types.h> 32#include <linux/types.h>
28#include <crypto/algapi.h>
29#include <crypto/twofish.h>
30#include <crypto/b128ops.h>
31#include <asm/crypto/twofish.h>
32#include <asm/crypto/glue_helper.h>
33#include <crypto/lrw.h>
34#include <crypto/xts.h>
35 33
36EXPORT_SYMBOL_GPL(__twofish_enc_blk_3way); 34EXPORT_SYMBOL_GPL(__twofish_enc_blk_3way);
37EXPORT_SYMBOL_GPL(twofish_dec_blk_3way); 35EXPORT_SYMBOL_GPL(twofish_dec_blk_3way);
38 36
37static int twofish_setkey_skcipher(struct crypto_skcipher *tfm,
38 const u8 *key, unsigned int keylen)
39{
40 return twofish_setkey(&tfm->base, key, keylen);
41}
42
39static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, 43static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
40 const u8 *src) 44 const u8 *src)
41{ 45{
@@ -151,284 +155,74 @@ static const struct common_glue_ctx twofish_dec_cbc = {
151 } } 155 } }
152}; 156};
153 157
154static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 158static int ecb_encrypt(struct skcipher_request *req)
155 struct scatterlist *src, unsigned int nbytes)
156{
157 return glue_ecb_crypt_128bit(&twofish_enc, desc, dst, src, nbytes);
158}
159
160static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
161 struct scatterlist *src, unsigned int nbytes)
162{
163 return glue_ecb_crypt_128bit(&twofish_dec, desc, dst, src, nbytes);
164}
165
166static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
167 struct scatterlist *src, unsigned int nbytes)
168{
169 return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(twofish_enc_blk), desc,
170 dst, src, nbytes);
171}
172
173static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
174 struct scatterlist *src, unsigned int nbytes)
175{
176 return glue_cbc_decrypt_128bit(&twofish_dec_cbc, desc, dst, src,
177 nbytes);
178}
179
180static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
181 struct scatterlist *src, unsigned int nbytes)
182{
183 return glue_ctr_crypt_128bit(&twofish_ctr, desc, dst, src, nbytes);
184}
185
186static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
187{ 159{
188 const unsigned int bsize = TF_BLOCK_SIZE; 160 return glue_ecb_req_128bit(&twofish_enc, req);
189 struct twofish_ctx *ctx = priv;
190 int i;
191
192 if (nbytes == 3 * bsize) {
193 twofish_enc_blk_3way(ctx, srcdst, srcdst);
194 return;
195 }
196
197 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
198 twofish_enc_blk(ctx, srcdst, srcdst);
199}
200
201static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
202{
203 const unsigned int bsize = TF_BLOCK_SIZE;
204 struct twofish_ctx *ctx = priv;
205 int i;
206
207 if (nbytes == 3 * bsize) {
208 twofish_dec_blk_3way(ctx, srcdst, srcdst);
209 return;
210 }
211
212 for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
213 twofish_dec_blk(ctx, srcdst, srcdst);
214}
215
216int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
217 unsigned int keylen)
218{
219 struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
220 int err;
221
222 err = __twofish_setkey(&ctx->twofish_ctx, key, keylen - TF_BLOCK_SIZE,
223 &tfm->crt_flags);
224 if (err)
225 return err;
226
227 return lrw_init_table(&ctx->lrw_table, key + keylen - TF_BLOCK_SIZE);
228} 161}
229EXPORT_SYMBOL_GPL(lrw_twofish_setkey);
230 162
231static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 163static int ecb_decrypt(struct skcipher_request *req)
232 struct scatterlist *src, unsigned int nbytes)
233{ 164{
234 struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 165 return glue_ecb_req_128bit(&twofish_dec, req);
235 be128 buf[3];
236 struct lrw_crypt_req req = {
237 .tbuf = buf,
238 .tbuflen = sizeof(buf),
239
240 .table_ctx = &ctx->lrw_table,
241 .crypt_ctx = &ctx->twofish_ctx,
242 .crypt_fn = encrypt_callback,
243 };
244
245 return lrw_crypt(desc, dst, src, nbytes, &req);
246} 166}
247 167
248static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 168static int cbc_encrypt(struct skcipher_request *req)
249 struct scatterlist *src, unsigned int nbytes)
250{ 169{
251 struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 170 return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk),
252 be128 buf[3]; 171 req);
253 struct lrw_crypt_req req = {
254 .tbuf = buf,
255 .tbuflen = sizeof(buf),
256
257 .table_ctx = &ctx->lrw_table,
258 .crypt_ctx = &ctx->twofish_ctx,
259 .crypt_fn = decrypt_callback,
260 };
261
262 return lrw_crypt(desc, dst, src, nbytes, &req);
263} 172}
264 173
265void lrw_twofish_exit_tfm(struct crypto_tfm *tfm) 174static int cbc_decrypt(struct skcipher_request *req)
266{ 175{
267 struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm); 176 return glue_cbc_decrypt_req_128bit(&twofish_dec_cbc, req);
268
269 lrw_free_table(&ctx->lrw_table);
270}
271EXPORT_SYMBOL_GPL(lrw_twofish_exit_tfm);
272
273int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
274 unsigned int keylen)
275{
276 struct twofish_xts_ctx *ctx = crypto_tfm_ctx(tfm);
277 u32 *flags = &tfm->crt_flags;
278 int err;
279
280 err = xts_check_key(tfm, key, keylen);
281 if (err)
282 return err;
283
284 /* first half of xts-key is for crypt */
285 err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
286 if (err)
287 return err;
288
289 /* second half of xts-key is for tweak */
290 return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
291 flags);
292}
293EXPORT_SYMBOL_GPL(xts_twofish_setkey);
294
295static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
296 struct scatterlist *src, unsigned int nbytes)
297{
298 struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
299 le128 buf[3];
300 struct xts_crypt_req req = {
301 .tbuf = buf,
302 .tbuflen = sizeof(buf),
303
304 .tweak_ctx = &ctx->tweak_ctx,
305 .tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
306 .crypt_ctx = &ctx->crypt_ctx,
307 .crypt_fn = encrypt_callback,
308 };
309
310 return xts_crypt(desc, dst, src, nbytes, &req);
311} 177}
312 178
313static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 179static int ctr_crypt(struct skcipher_request *req)
314 struct scatterlist *src, unsigned int nbytes)
315{ 180{
316 struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 181 return glue_ctr_req_128bit(&twofish_ctr, req);
317 le128 buf[3];
318 struct xts_crypt_req req = {
319 .tbuf = buf,
320 .tbuflen = sizeof(buf),
321
322 .tweak_ctx = &ctx->tweak_ctx,
323 .tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
324 .crypt_ctx = &ctx->crypt_ctx,
325 .crypt_fn = decrypt_callback,
326 };
327
328 return xts_crypt(desc, dst, src, nbytes, &req);
329} 182}
330 183
331static struct crypto_alg tf_algs[5] = { { 184static struct skcipher_alg tf_skciphers[] = {
332 .cra_name = "ecb(twofish)", 185 {
333 .cra_driver_name = "ecb-twofish-3way", 186 .base.cra_name = "ecb(twofish)",
334 .cra_priority = 300, 187 .base.cra_driver_name = "ecb-twofish-3way",
335 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 188 .base.cra_priority = 300,
336 .cra_blocksize = TF_BLOCK_SIZE, 189 .base.cra_blocksize = TF_BLOCK_SIZE,
337 .cra_ctxsize = sizeof(struct twofish_ctx), 190 .base.cra_ctxsize = sizeof(struct twofish_ctx),
338 .cra_alignmask = 0, 191 .base.cra_module = THIS_MODULE,
339 .cra_type = &crypto_blkcipher_type, 192 .min_keysize = TF_MIN_KEY_SIZE,
340 .cra_module = THIS_MODULE, 193 .max_keysize = TF_MAX_KEY_SIZE,
341 .cra_u = { 194 .setkey = twofish_setkey_skcipher,
342 .blkcipher = { 195 .encrypt = ecb_encrypt,
343 .min_keysize = TF_MIN_KEY_SIZE, 196 .decrypt = ecb_decrypt,
344 .max_keysize = TF_MAX_KEY_SIZE, 197 }, {
345 .setkey = twofish_setkey, 198 .base.cra_name = "cbc(twofish)",
346 .encrypt = ecb_encrypt, 199 .base.cra_driver_name = "cbc-twofish-3way",
347 .decrypt = ecb_decrypt, 200 .base.cra_priority = 300,
348 }, 201 .base.cra_blocksize = TF_BLOCK_SIZE,
349 }, 202 .base.cra_ctxsize = sizeof(struct twofish_ctx),
350}, { 203 .base.cra_module = THIS_MODULE,
351 .cra_name = "cbc(twofish)", 204 .min_keysize = TF_MIN_KEY_SIZE,
352 .cra_driver_name = "cbc-twofish-3way", 205 .max_keysize = TF_MAX_KEY_SIZE,
353 .cra_priority = 300, 206 .ivsize = TF_BLOCK_SIZE,
354 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 207 .setkey = twofish_setkey_skcipher,
355 .cra_blocksize = TF_BLOCK_SIZE, 208 .encrypt = cbc_encrypt,
356 .cra_ctxsize = sizeof(struct twofish_ctx), 209 .decrypt = cbc_decrypt,
357 .cra_alignmask = 0, 210 }, {
358 .cra_type = &crypto_blkcipher_type, 211 .base.cra_name = "ctr(twofish)",
359 .cra_module = THIS_MODULE, 212 .base.cra_driver_name = "ctr-twofish-3way",
360 .cra_u = { 213 .base.cra_priority = 300,
361 .blkcipher = { 214 .base.cra_blocksize = 1,
362 .min_keysize = TF_MIN_KEY_SIZE, 215 .base.cra_ctxsize = sizeof(struct twofish_ctx),
363 .max_keysize = TF_MAX_KEY_SIZE, 216 .base.cra_module = THIS_MODULE,
364 .ivsize = TF_BLOCK_SIZE, 217 .min_keysize = TF_MIN_KEY_SIZE,
365 .setkey = twofish_setkey, 218 .max_keysize = TF_MAX_KEY_SIZE,
366 .encrypt = cbc_encrypt, 219 .ivsize = TF_BLOCK_SIZE,
367 .decrypt = cbc_decrypt, 220 .chunksize = TF_BLOCK_SIZE,
368 }, 221 .setkey = twofish_setkey_skcipher,
369 }, 222 .encrypt = ctr_crypt,
370}, { 223 .decrypt = ctr_crypt,
371 .cra_name = "ctr(twofish)",
372 .cra_driver_name = "ctr-twofish-3way",
373 .cra_priority = 300,
374 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
375 .cra_blocksize = 1,
376 .cra_ctxsize = sizeof(struct twofish_ctx),
377 .cra_alignmask = 0,
378 .cra_type = &crypto_blkcipher_type,
379 .cra_module = THIS_MODULE,
380 .cra_u = {
381 .blkcipher = {
382 .min_keysize = TF_MIN_KEY_SIZE,
383 .max_keysize = TF_MAX_KEY_SIZE,
384 .ivsize = TF_BLOCK_SIZE,
385 .setkey = twofish_setkey,
386 .encrypt = ctr_crypt,
387 .decrypt = ctr_crypt,
388 },
389 },
390}, {
391 .cra_name = "lrw(twofish)",
392 .cra_driver_name = "lrw-twofish-3way",
393 .cra_priority = 300,
394 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
395 .cra_blocksize = TF_BLOCK_SIZE,
396 .cra_ctxsize = sizeof(struct twofish_lrw_ctx),
397 .cra_alignmask = 0,
398 .cra_type = &crypto_blkcipher_type,
399 .cra_module = THIS_MODULE,
400 .cra_exit = lrw_twofish_exit_tfm,
401 .cra_u = {
402 .blkcipher = {
403 .min_keysize = TF_MIN_KEY_SIZE + TF_BLOCK_SIZE,
404 .max_keysize = TF_MAX_KEY_SIZE + TF_BLOCK_SIZE,
405 .ivsize = TF_BLOCK_SIZE,
406 .setkey = lrw_twofish_setkey,
407 .encrypt = lrw_encrypt,
408 .decrypt = lrw_decrypt,
409 },
410 },
411}, {
412 .cra_name = "xts(twofish)",
413 .cra_driver_name = "xts-twofish-3way",
414 .cra_priority = 300,
415 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
416 .cra_blocksize = TF_BLOCK_SIZE,
417 .cra_ctxsize = sizeof(struct twofish_xts_ctx),
418 .cra_alignmask = 0,
419 .cra_type = &crypto_blkcipher_type,
420 .cra_module = THIS_MODULE,
421 .cra_u = {
422 .blkcipher = {
423 .min_keysize = TF_MIN_KEY_SIZE * 2,
424 .max_keysize = TF_MAX_KEY_SIZE * 2,
425 .ivsize = TF_BLOCK_SIZE,
426 .setkey = xts_twofish_setkey,
427 .encrypt = xts_encrypt,
428 .decrypt = xts_decrypt,
429 },
430 }, 224 },
431} }; 225};
432 226
433static bool is_blacklisted_cpu(void) 227static bool is_blacklisted_cpu(void)
434{ 228{
@@ -478,12 +272,13 @@ static int __init init(void)
478 return -ENODEV; 272 return -ENODEV;
479 } 273 }
480 274
481 return crypto_register_algs(tf_algs, ARRAY_SIZE(tf_algs)); 275 return crypto_register_skciphers(tf_skciphers,
276 ARRAY_SIZE(tf_skciphers));
482} 277}
483 278
484static void __exit fini(void) 279static void __exit fini(void)
485{ 280{
486 crypto_unregister_algs(tf_algs, ARRAY_SIZE(tf_algs)); 281 crypto_unregister_skciphers(tf_skciphers, ARRAY_SIZE(tf_skciphers));
487} 282}
488 283
489module_init(init); 284module_init(init);
diff --git a/arch/x86/include/asm/crypto/camellia.h b/arch/x86/include/asm/crypto/camellia.h
index 10f8d590bcfe..a5d86fc0593f 100644
--- a/arch/x86/include/asm/crypto/camellia.h
+++ b/arch/x86/include/asm/crypto/camellia.h
@@ -2,8 +2,9 @@
2#ifndef ASM_X86_CAMELLIA_H 2#ifndef ASM_X86_CAMELLIA_H
3#define ASM_X86_CAMELLIA_H 3#define ASM_X86_CAMELLIA_H
4 4
5#include <linux/kernel.h> 5#include <crypto/b128ops.h>
6#include <linux/crypto.h> 6#include <linux/crypto.h>
7#include <linux/kernel.h>
7 8
8#define CAMELLIA_MIN_KEY_SIZE 16 9#define CAMELLIA_MIN_KEY_SIZE 16
9#define CAMELLIA_MAX_KEY_SIZE 32 10#define CAMELLIA_MAX_KEY_SIZE 32
@@ -11,16 +12,13 @@
11#define CAMELLIA_TABLE_BYTE_LEN 272 12#define CAMELLIA_TABLE_BYTE_LEN 272
12#define CAMELLIA_PARALLEL_BLOCKS 2 13#define CAMELLIA_PARALLEL_BLOCKS 2
13 14
15struct crypto_skcipher;
16
14struct camellia_ctx { 17struct camellia_ctx {
15 u64 key_table[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)]; 18 u64 key_table[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)];
16 u32 key_length; 19 u32 key_length;
17}; 20};
18 21
19struct camellia_lrw_ctx {
20 struct lrw_table_ctx lrw_table;
21 struct camellia_ctx camellia_ctx;
22};
23
24struct camellia_xts_ctx { 22struct camellia_xts_ctx {
25 struct camellia_ctx tweak_ctx; 23 struct camellia_ctx tweak_ctx;
26 struct camellia_ctx crypt_ctx; 24 struct camellia_ctx crypt_ctx;
@@ -30,11 +28,7 @@ extern int __camellia_setkey(struct camellia_ctx *cctx,
30 const unsigned char *key, 28 const unsigned char *key,
31 unsigned int key_len, u32 *flags); 29 unsigned int key_len, u32 *flags);
32 30
33extern int lrw_camellia_setkey(struct crypto_tfm *tfm, const u8 *key, 31extern int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
34 unsigned int keylen);
35extern void lrw_camellia_exit_tfm(struct crypto_tfm *tfm);
36
37extern int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
38 unsigned int keylen); 32 unsigned int keylen);
39 33
40/* regular block cipher functions */ 34/* regular block cipher functions */
diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h
index 553a03de55c3..d1818634ae7e 100644
--- a/arch/x86/include/asm/crypto/glue_helper.h
+++ b/arch/x86/include/asm/crypto/glue_helper.h
@@ -45,7 +45,7 @@ struct common_glue_ctx {
45}; 45};
46 46
47static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit, 47static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
48 struct blkcipher_desc *desc, 48 struct skcipher_walk *walk,
49 bool fpu_enabled, unsigned int nbytes) 49 bool fpu_enabled, unsigned int nbytes)
50{ 50{
51 if (likely(fpu_blocks_limit < 0)) 51 if (likely(fpu_blocks_limit < 0))
@@ -61,33 +61,6 @@ static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
61 if (nbytes < bsize * (unsigned int)fpu_blocks_limit) 61 if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
62 return false; 62 return false;
63 63
64 if (desc) {
65 /* prevent sleeping if FPU is in use */
66 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
67 }
68
69 kernel_fpu_begin();
70 return true;
71}
72
73static inline bool glue_skwalk_fpu_begin(unsigned int bsize,
74 int fpu_blocks_limit,
75 struct skcipher_walk *walk,
76 bool fpu_enabled, unsigned int nbytes)
77{
78 if (likely(fpu_blocks_limit < 0))
79 return false;
80
81 if (fpu_enabled)
82 return true;
83
84 /*
85 * Vector-registers are only used when chunk to be processed is large
86 * enough, so do not enable FPU until it is necessary.
87 */
88 if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
89 return false;
90
91 /* prevent sleeping if FPU is in use */ 64 /* prevent sleeping if FPU is in use */
92 skcipher_walk_atomise(walk); 65 skcipher_walk_atomise(walk);
93 66
@@ -126,41 +99,17 @@ static inline void le128_inc(le128 *i)
126 i->b = cpu_to_le64(b); 99 i->b = cpu_to_le64(b);
127} 100}
128 101
129extern int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx, 102extern int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
130 struct blkcipher_desc *desc, 103 struct skcipher_request *req);
131 struct scatterlist *dst, 104
132 struct scatterlist *src, unsigned int nbytes); 105extern int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
133 106 struct skcipher_request *req);
134extern int glue_cbc_encrypt_128bit(const common_glue_func_t fn, 107
135 struct blkcipher_desc *desc, 108extern int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
136 struct scatterlist *dst, 109 struct skcipher_request *req);
137 struct scatterlist *src, 110
138 unsigned int nbytes); 111extern int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
139 112 struct skcipher_request *req);
140extern int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
141 struct blkcipher_desc *desc,
142 struct scatterlist *dst,
143 struct scatterlist *src,
144 unsigned int nbytes);
145
146extern int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
147 struct blkcipher_desc *desc,
148 struct scatterlist *dst,
149 struct scatterlist *src, unsigned int nbytes);
150
151extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
152 struct blkcipher_desc *desc,
153 struct scatterlist *dst,
154 struct scatterlist *src, unsigned int nbytes,
155 common_glue_func_t tweak_fn, void *tweak_ctx,
156 void *crypt_ctx);
157
158extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
159 struct blkcipher_desc *desc,
160 struct scatterlist *dst,
161 struct scatterlist *src, unsigned int nbytes,
162 common_glue_func_t tweak_fn, void *tweak_ctx,
163 void *crypt_ctx);
164 113
165extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx, 114extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
166 struct skcipher_request *req, 115 struct skcipher_request *req,
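With the blkcipher entry points removed, glue_helper.h only exposes the skcipher_request-based API, and glue_fpu_begin() now takes the skcipher_walk so the walk can be atomised before the FPU is enabled. A rough outline of how these helpers are driven from a walk loop, under the assumption that the real loops in glue_helper.c follow this shape — example_req_walk() is hypothetical and the per-block processing is elided:

#include <crypto/internal/skcipher.h>
#include <asm/crypto/glue_helper.h>

/* Hypothetical outline of a request-based 128-bit glue loop. */
static int example_req_walk(struct skcipher_request *req,
                            const struct common_glue_ctx *gctx)
{
        const unsigned int bsize = 16;  /* 128-bit block size */
        struct skcipher_walk walk;
        bool fpu_enabled = false;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes)) {
                /* Enable the FPU only for chunks large enough to pay off. */
                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                             &walk, fpu_enabled, nbytes);

                /* ... process walk.src.virt.addr into walk.dst.virt.addr ... */

                err = skcipher_walk_done(&walk, 0);
        }

        glue_fpu_end(fpu_enabled);
        return err;
}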
diff --git a/arch/x86/include/asm/crypto/serpent-avx.h b/arch/x86/include/asm/crypto/serpent-avx.h
index c958b7bd0fcb..db7c9cc32234 100644
--- a/arch/x86/include/asm/crypto/serpent-avx.h
+++ b/arch/x86/include/asm/crypto/serpent-avx.h
@@ -2,15 +2,13 @@
2#ifndef ASM_X86_SERPENT_AVX_H 2#ifndef ASM_X86_SERPENT_AVX_H
3#define ASM_X86_SERPENT_AVX_H 3#define ASM_X86_SERPENT_AVX_H
4 4
5#include <linux/crypto.h> 5#include <crypto/b128ops.h>
6#include <crypto/serpent.h> 6#include <crypto/serpent.h>
7#include <linux/types.h>
7 8
8#define SERPENT_PARALLEL_BLOCKS 8 9struct crypto_skcipher;
9 10
10struct serpent_lrw_ctx { 11#define SERPENT_PARALLEL_BLOCKS 8
11 struct lrw_table_ctx lrw_table;
12 struct serpent_ctx serpent_ctx;
13};
14 12
15struct serpent_xts_ctx { 13struct serpent_xts_ctx {
16 struct serpent_ctx tweak_ctx; 14 struct serpent_ctx tweak_ctx;
@@ -38,12 +36,7 @@ extern void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
38extern void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv); 36extern void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv);
39extern void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv); 37extern void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv);
40 38
41extern int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key, 39extern int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key,
42 unsigned int keylen);
43
44extern void lrw_serpent_exit_tfm(struct crypto_tfm *tfm);
45
46extern int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
47 unsigned int keylen); 40 unsigned int keylen);
48 41
49#endif 42#endif
diff --git a/arch/x86/include/asm/crypto/twofish.h b/arch/x86/include/asm/crypto/twofish.h
index 65bb80adba3e..f618bf272b90 100644
--- a/arch/x86/include/asm/crypto/twofish.h
+++ b/arch/x86/include/asm/crypto/twofish.h
@@ -4,19 +4,8 @@
4 4
5#include <linux/crypto.h> 5#include <linux/crypto.h>
6#include <crypto/twofish.h> 6#include <crypto/twofish.h>
7#include <crypto/lrw.h>
8#include <crypto/b128ops.h> 7#include <crypto/b128ops.h>
9 8
10struct twofish_lrw_ctx {
11 struct lrw_table_ctx lrw_table;
12 struct twofish_ctx twofish_ctx;
13};
14
15struct twofish_xts_ctx {
16 struct twofish_ctx tweak_ctx;
17 struct twofish_ctx crypt_ctx;
18};
19
20/* regular block cipher functions from twofish_x86_64 module */ 9/* regular block cipher functions from twofish_x86_64 module */
21asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst, 10asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
22 const u8 *src); 11 const u8 *src);
@@ -36,12 +25,4 @@ extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src,
36extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src, 25extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
37 le128 *iv); 26 le128 *iv);
38 27
39extern int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
40 unsigned int keylen);
41
42extern void lrw_twofish_exit_tfm(struct crypto_tfm *tfm);
43
44extern int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
45 unsigned int keylen);
46
47#endif /* ASM_X86_TWOFISH_H */ 28#endif /* ASM_X86_TWOFISH_H */
diff --git a/crypto/Kconfig b/crypto/Kconfig
index b75264b09a46..c0dabed5122e 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -245,10 +245,6 @@ config CRYPTO_TEST
245 help 245 help
246 Quick & dirty crypto test module. 246 Quick & dirty crypto test module.
247 247
248config CRYPTO_ABLK_HELPER
249 tristate
250 select CRYPTO_CRYPTD
251
252config CRYPTO_SIMD 248config CRYPTO_SIMD
253 tristate 249 tristate
254 select CRYPTO_CRYPTD 250 select CRYPTO_CRYPTD
@@ -324,6 +320,14 @@ config CRYPTO_CBC
324 CBC: Cipher Block Chaining mode 320 CBC: Cipher Block Chaining mode
325 This block cipher algorithm is required for IPSec. 321 This block cipher algorithm is required for IPSec.
326 322
323config CRYPTO_CFB
324 tristate "CFB support"
325 select CRYPTO_BLKCIPHER
326 select CRYPTO_MANAGER
327 help
328 CFB: Cipher FeedBack mode
329 This block cipher algorithm is required for TPM2 Cryptography.
330
327config CRYPTO_CTR 331config CRYPTO_CTR
328 tristate "CTR support" 332 tristate "CTR support"
329 select CRYPTO_BLKCIPHER 333 select CRYPTO_BLKCIPHER
@@ -1114,7 +1118,7 @@ config CRYPTO_BLOWFISH_COMMON
1114config CRYPTO_BLOWFISH_X86_64 1118config CRYPTO_BLOWFISH_X86_64
1115 tristate "Blowfish cipher algorithm (x86_64)" 1119 tristate "Blowfish cipher algorithm (x86_64)"
1116 depends on X86 && 64BIT 1120 depends on X86 && 64BIT
1117 select CRYPTO_ALGAPI 1121 select CRYPTO_BLKCIPHER
1118 select CRYPTO_BLOWFISH_COMMON 1122 select CRYPTO_BLOWFISH_COMMON
1119 help 1123 help
1120 Blowfish cipher algorithm (x86_64), by Bruce Schneier. 1124 Blowfish cipher algorithm (x86_64), by Bruce Schneier.
@@ -1145,10 +1149,8 @@ config CRYPTO_CAMELLIA_X86_64
1145 tristate "Camellia cipher algorithm (x86_64)" 1149 tristate "Camellia cipher algorithm (x86_64)"
1146 depends on X86 && 64BIT 1150 depends on X86 && 64BIT
1147 depends on CRYPTO 1151 depends on CRYPTO
1148 select CRYPTO_ALGAPI 1152 select CRYPTO_BLKCIPHER
1149 select CRYPTO_GLUE_HELPER_X86 1153 select CRYPTO_GLUE_HELPER_X86
1150 select CRYPTO_LRW
1151 select CRYPTO_XTS
1152 help 1154 help
1153 Camellia cipher algorithm module (x86_64). 1155 Camellia cipher algorithm module (x86_64).
1154 1156
@@ -1164,12 +1166,10 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
1164 tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX)" 1166 tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX)"
1165 depends on X86 && 64BIT 1167 depends on X86 && 64BIT
1166 depends on CRYPTO 1168 depends on CRYPTO
1167 select CRYPTO_ALGAPI 1169 select CRYPTO_BLKCIPHER
1168 select CRYPTO_CRYPTD
1169 select CRYPTO_ABLK_HELPER
1170 select CRYPTO_GLUE_HELPER_X86
1171 select CRYPTO_CAMELLIA_X86_64 1170 select CRYPTO_CAMELLIA_X86_64
1172 select CRYPTO_LRW 1171 select CRYPTO_GLUE_HELPER_X86
1172 select CRYPTO_SIMD
1173 select CRYPTO_XTS 1173 select CRYPTO_XTS
1174 help 1174 help
1175 Camellia cipher algorithm module (x86_64/AES-NI/AVX). 1175 Camellia cipher algorithm module (x86_64/AES-NI/AVX).
@@ -1186,14 +1186,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
1186 tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX2)" 1186 tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX2)"
1187 depends on X86 && 64BIT 1187 depends on X86 && 64BIT
1188 depends on CRYPTO 1188 depends on CRYPTO
1189 select CRYPTO_ALGAPI
1190 select CRYPTO_CRYPTD
1191 select CRYPTO_ABLK_HELPER
1192 select CRYPTO_GLUE_HELPER_X86
1193 select CRYPTO_CAMELLIA_X86_64
1194 select CRYPTO_CAMELLIA_AESNI_AVX_X86_64 1189 select CRYPTO_CAMELLIA_AESNI_AVX_X86_64
1195 select CRYPTO_LRW
1196 select CRYPTO_XTS
1197 help 1190 help
1198 Camellia cipher algorithm module (x86_64/AES-NI/AVX2). 1191 Camellia cipher algorithm module (x86_64/AES-NI/AVX2).
1199 1192
@@ -1238,11 +1231,10 @@ config CRYPTO_CAST5
1238config CRYPTO_CAST5_AVX_X86_64 1231config CRYPTO_CAST5_AVX_X86_64
1239 tristate "CAST5 (CAST-128) cipher algorithm (x86_64/AVX)" 1232 tristate "CAST5 (CAST-128) cipher algorithm (x86_64/AVX)"
1240 depends on X86 && 64BIT 1233 depends on X86 && 64BIT
1241 select CRYPTO_ALGAPI 1234 select CRYPTO_BLKCIPHER
1242 select CRYPTO_CRYPTD
1243 select CRYPTO_ABLK_HELPER
1244 select CRYPTO_CAST_COMMON
1245 select CRYPTO_CAST5 1235 select CRYPTO_CAST5
1236 select CRYPTO_CAST_COMMON
1237 select CRYPTO_SIMD
1246 help 1238 help
1247 The CAST5 encryption algorithm (synonymous with CAST-128) is 1239 The CAST5 encryption algorithm (synonymous with CAST-128) is
1248 described in RFC2144. 1240 described in RFC2144.
@@ -1261,13 +1253,11 @@ config CRYPTO_CAST6
1261config CRYPTO_CAST6_AVX_X86_64 1253config CRYPTO_CAST6_AVX_X86_64
1262 tristate "CAST6 (CAST-256) cipher algorithm (x86_64/AVX)" 1254 tristate "CAST6 (CAST-256) cipher algorithm (x86_64/AVX)"
1263 depends on X86 && 64BIT 1255 depends on X86 && 64BIT
1264 select CRYPTO_ALGAPI 1256 select CRYPTO_BLKCIPHER
1265 select CRYPTO_CRYPTD
1266 select CRYPTO_ABLK_HELPER
1267 select CRYPTO_GLUE_HELPER_X86
1268 select CRYPTO_CAST_COMMON
1269 select CRYPTO_CAST6 1257 select CRYPTO_CAST6
1270 select CRYPTO_LRW 1258 select CRYPTO_CAST_COMMON
1259 select CRYPTO_GLUE_HELPER_X86
1260 select CRYPTO_SIMD
1271 select CRYPTO_XTS 1261 select CRYPTO_XTS
1272 help 1262 help
1273 The CAST6 encryption algorithm (synonymous with CAST-256) is 1263 The CAST6 encryption algorithm (synonymous with CAST-256) is
@@ -1294,7 +1284,7 @@ config CRYPTO_DES_SPARC64
1294config CRYPTO_DES3_EDE_X86_64 1284config CRYPTO_DES3_EDE_X86_64
1295 tristate "Triple DES EDE cipher algorithm (x86-64)" 1285 tristate "Triple DES EDE cipher algorithm (x86-64)"
1296 depends on X86 && 64BIT 1286 depends on X86 && 64BIT
1297 select CRYPTO_ALGAPI 1287 select CRYPTO_BLKCIPHER
1298 select CRYPTO_DES 1288 select CRYPTO_DES
1299 help 1289 help
1300 Triple DES EDE (FIPS 46-3) algorithm. 1290 Triple DES EDE (FIPS 46-3) algorithm.
@@ -1422,13 +1412,10 @@ config CRYPTO_SERPENT
1422config CRYPTO_SERPENT_SSE2_X86_64 1412config CRYPTO_SERPENT_SSE2_X86_64
1423 tristate "Serpent cipher algorithm (x86_64/SSE2)" 1413 tristate "Serpent cipher algorithm (x86_64/SSE2)"
1424 depends on X86 && 64BIT 1414 depends on X86 && 64BIT
1425 select CRYPTO_ALGAPI 1415 select CRYPTO_BLKCIPHER
1426 select CRYPTO_CRYPTD
1427 select CRYPTO_ABLK_HELPER
1428 select CRYPTO_GLUE_HELPER_X86 1416 select CRYPTO_GLUE_HELPER_X86
1429 select CRYPTO_SERPENT 1417 select CRYPTO_SERPENT
1430 select CRYPTO_LRW 1418 select CRYPTO_SIMD
1431 select CRYPTO_XTS
1432 help 1419 help
1433 Serpent cipher algorithm, by Anderson, Biham & Knudsen. 1420 Serpent cipher algorithm, by Anderson, Biham & Knudsen.
1434 1421
@@ -1444,13 +1431,10 @@ config CRYPTO_SERPENT_SSE2_X86_64
1444config CRYPTO_SERPENT_SSE2_586 1431config CRYPTO_SERPENT_SSE2_586
1445 tristate "Serpent cipher algorithm (i586/SSE2)" 1432 tristate "Serpent cipher algorithm (i586/SSE2)"
1446 depends on X86 && !64BIT 1433 depends on X86 && !64BIT
1447 select CRYPTO_ALGAPI 1434 select CRYPTO_BLKCIPHER
1448 select CRYPTO_CRYPTD
1449 select CRYPTO_ABLK_HELPER
1450 select CRYPTO_GLUE_HELPER_X86 1435 select CRYPTO_GLUE_HELPER_X86
1451 select CRYPTO_SERPENT 1436 select CRYPTO_SERPENT
1452 select CRYPTO_LRW 1437 select CRYPTO_SIMD
1453 select CRYPTO_XTS
1454 help 1438 help
1455 Serpent cipher algorithm, by Anderson, Biham & Knudsen. 1439 Serpent cipher algorithm, by Anderson, Biham & Knudsen.
1456 1440
@@ -1466,12 +1450,10 @@ config CRYPTO_SERPENT_SSE2_586
1466config CRYPTO_SERPENT_AVX_X86_64 1450config CRYPTO_SERPENT_AVX_X86_64
1467 tristate "Serpent cipher algorithm (x86_64/AVX)" 1451 tristate "Serpent cipher algorithm (x86_64/AVX)"
1468 depends on X86 && 64BIT 1452 depends on X86 && 64BIT
1469 select CRYPTO_ALGAPI 1453 select CRYPTO_BLKCIPHER
1470 select CRYPTO_CRYPTD
1471 select CRYPTO_ABLK_HELPER
1472 select CRYPTO_GLUE_HELPER_X86 1454 select CRYPTO_GLUE_HELPER_X86
1473 select CRYPTO_SERPENT 1455 select CRYPTO_SERPENT
1474 select CRYPTO_LRW 1456 select CRYPTO_SIMD
1475 select CRYPTO_XTS 1457 select CRYPTO_XTS
1476 help 1458 help
1477 Serpent cipher algorithm, by Anderson, Biham & Knudsen. 1459 Serpent cipher algorithm, by Anderson, Biham & Knudsen.
@@ -1488,14 +1470,7 @@ config CRYPTO_SERPENT_AVX_X86_64
1488config CRYPTO_SERPENT_AVX2_X86_64 1470config CRYPTO_SERPENT_AVX2_X86_64
1489 tristate "Serpent cipher algorithm (x86_64/AVX2)" 1471 tristate "Serpent cipher algorithm (x86_64/AVX2)"
1490 depends on X86 && 64BIT 1472 depends on X86 && 64BIT
1491 select CRYPTO_ALGAPI
1492 select CRYPTO_CRYPTD
1493 select CRYPTO_ABLK_HELPER
1494 select CRYPTO_GLUE_HELPER_X86
1495 select CRYPTO_SERPENT
1496 select CRYPTO_SERPENT_AVX_X86_64 1473 select CRYPTO_SERPENT_AVX_X86_64
1497 select CRYPTO_LRW
1498 select CRYPTO_XTS
1499 help 1474 help
1500 Serpent cipher algorithm, by Anderson, Biham & Knudsen. 1475 Serpent cipher algorithm, by Anderson, Biham & Knudsen.
1501 1476
@@ -1508,6 +1483,45 @@ config CRYPTO_SERPENT_AVX2_X86_64
1508 See also: 1483 See also:
1509 <http://www.cl.cam.ac.uk/~rja14/serpent.html> 1484 <http://www.cl.cam.ac.uk/~rja14/serpent.html>
1510 1485
1486config CRYPTO_SM4
1487 tristate "SM4 cipher algorithm"
1488 select CRYPTO_ALGAPI
1489 help
1490 SM4 cipher algorithms (OSCCA GB/T 32907-2016).
1491
1492 SM4 (GBT.32907-2016) is a cryptographic standard issued by the
1493 Organization of State Commercial Administration of China (OSCCA)
 1494 as an authorized cryptographic algorithm for use within China.
1495
1496 SMS4 was originally created for use in protecting wireless
1497 networks, and is mandated in the Chinese National Standard for
1498 Wireless LAN WAPI (Wired Authentication and Privacy Infrastructure)
1499 (GB.15629.11-2003).
1500
1501 The latest SM4 standard (GBT.32907-2016) was proposed by OSCCA and
1502 standardized through TC 260 of the Standardization Administration
1503 of the People's Republic of China (SAC).
1504
1505 The input, output, and key of SMS4 are each 128 bits.
1506
1507 See also: <https://eprint.iacr.org/2008/329.pdf>
1508
1509 If unsure, say N.
1510
1511config CRYPTO_SPECK
1512 tristate "Speck cipher algorithm"
1513 select CRYPTO_ALGAPI
1514 help
1515 Speck is a lightweight block cipher that is tuned for optimal
1516 performance in software (rather than hardware).
1517
1518 Speck may not be as secure as AES, and should only be used on systems
1519 where AES is not fast enough.
1520
1521 See also: <https://eprint.iacr.org/2013/404.pdf>
1522
1523 If unsure, say N.
1524
1511config CRYPTO_TEA 1525config CRYPTO_TEA
1512 tristate "TEA, XTEA and XETA cipher algorithms" 1526 tristate "TEA, XTEA and XETA cipher algorithms"
1513 select CRYPTO_ALGAPI 1527 select CRYPTO_ALGAPI
@@ -1581,12 +1595,10 @@ config CRYPTO_TWOFISH_X86_64
1581config CRYPTO_TWOFISH_X86_64_3WAY 1595config CRYPTO_TWOFISH_X86_64_3WAY
1582 tristate "Twofish cipher algorithm (x86_64, 3-way parallel)" 1596 tristate "Twofish cipher algorithm (x86_64, 3-way parallel)"
1583 depends on X86 && 64BIT 1597 depends on X86 && 64BIT
1584 select CRYPTO_ALGAPI 1598 select CRYPTO_BLKCIPHER
1585 select CRYPTO_TWOFISH_COMMON 1599 select CRYPTO_TWOFISH_COMMON
1586 select CRYPTO_TWOFISH_X86_64 1600 select CRYPTO_TWOFISH_X86_64
1587 select CRYPTO_GLUE_HELPER_X86 1601 select CRYPTO_GLUE_HELPER_X86
1588 select CRYPTO_LRW
1589 select CRYPTO_XTS
1590 help 1602 help
1591 Twofish cipher algorithm (x86_64, 3-way parallel). 1603 Twofish cipher algorithm (x86_64, 3-way parallel).
1592 1604
@@ -1604,15 +1616,12 @@ config CRYPTO_TWOFISH_X86_64_3WAY
1604config CRYPTO_TWOFISH_AVX_X86_64 1616config CRYPTO_TWOFISH_AVX_X86_64
1605 tristate "Twofish cipher algorithm (x86_64/AVX)" 1617 tristate "Twofish cipher algorithm (x86_64/AVX)"
1606 depends on X86 && 64BIT 1618 depends on X86 && 64BIT
1607 select CRYPTO_ALGAPI 1619 select CRYPTO_BLKCIPHER
1608 select CRYPTO_CRYPTD
1609 select CRYPTO_ABLK_HELPER
1610 select CRYPTO_GLUE_HELPER_X86 1620 select CRYPTO_GLUE_HELPER_X86
1621 select CRYPTO_SIMD
1611 select CRYPTO_TWOFISH_COMMON 1622 select CRYPTO_TWOFISH_COMMON
1612 select CRYPTO_TWOFISH_X86_64 1623 select CRYPTO_TWOFISH_X86_64
1613 select CRYPTO_TWOFISH_X86_64_3WAY 1624 select CRYPTO_TWOFISH_X86_64_3WAY
1614 select CRYPTO_LRW
1615 select CRYPTO_XTS
1616 help 1625 help
1617 Twofish cipher algorithm (x86_64/AVX). 1626 Twofish cipher algorithm (x86_64/AVX).
1618 1627
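Besides trimming the selections of the converted x86 glue modules (CRYPTO_BLKCIPHER plus CRYPTO_SIMD in place of CRYPTO_ALGAPI/CRYPTO_CRYPTD/CRYPTO_ABLK_HELPER, with the forced CRYPTO_LRW/CRYPTO_XTS selections gone), the Kconfig hunk adds the CFB mode and the SM4 and Speck ciphers. Both new ciphers are plain single-block cipher algorithms, so exercising SM4 from kernel code could look roughly like this — example_sm4_one_block() is an illustrative sketch assuming the generic driver registers under the name "sm4"; per the help text, SM4 uses 128-bit keys and blocks:

#include <linux/crypto.h>
#include <linux/err.h>

/* Illustrative single-block SM4 encryption via the generic cipher API. */
static int example_sm4_one_block(const u8 key[16], const u8 in[16], u8 out[16])
{
        struct crypto_cipher *tfm;
        int err;

        tfm = crypto_alloc_cipher("sm4", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_cipher_setkey(tfm, key, 16);
        if (!err)
                crypto_cipher_encrypt_one(tfm, out, in);

        crypto_free_cipher(tfm);
        return err;
}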
diff --git a/crypto/Makefile b/crypto/Makefile
index cdbc03b35510..4fc69fe94e6a 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -78,6 +78,7 @@ obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
78obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o 78obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
79obj-$(CONFIG_CRYPTO_ECB) += ecb.o 79obj-$(CONFIG_CRYPTO_ECB) += ecb.o
80obj-$(CONFIG_CRYPTO_CBC) += cbc.o 80obj-$(CONFIG_CRYPTO_CBC) += cbc.o
81obj-$(CONFIG_CRYPTO_CFB) += cfb.o
81obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o 82obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o
82obj-$(CONFIG_CRYPTO_CTS) += cts.o 83obj-$(CONFIG_CRYPTO_CTS) += cts.o
83obj-$(CONFIG_CRYPTO_LRW) += lrw.o 84obj-$(CONFIG_CRYPTO_LRW) += lrw.o
@@ -100,6 +101,7 @@ obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
100CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 101CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
101obj-$(CONFIG_CRYPTO_AES) += aes_generic.o 102obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
102CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356 103CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356
104obj-$(CONFIG_CRYPTO_SM4) += sm4_generic.o
103obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o 105obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o
104obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o 106obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
105obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o 107obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
@@ -110,6 +112,7 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
110obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o 112obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
111obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o 113obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
112obj-$(CONFIG_CRYPTO_SEED) += seed.o 114obj-$(CONFIG_CRYPTO_SEED) += seed.o
115obj-$(CONFIG_CRYPTO_SPECK) += speck.o
113obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o 116obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
114obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o 117obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o
115obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o 118obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
@@ -149,6 +152,5 @@ obj-$(CONFIG_XOR_BLOCKS) += xor.o
149obj-$(CONFIG_ASYNC_CORE) += async_tx/ 152obj-$(CONFIG_ASYNC_CORE) += async_tx/
150obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/ 153obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
151obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o 154obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o
152obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
153crypto_simd-y := simd.o 155crypto_simd-y := simd.o
154obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o 156obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o
diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c
deleted file mode 100644
index 09776bb1360e..000000000000
--- a/crypto/ablk_helper.c
+++ /dev/null
@@ -1,150 +0,0 @@
1/*
2 * Shared async block cipher helpers
3 *
4 * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
5 *
6 * Based on aesni-intel_glue.c by:
7 * Copyright (C) 2008, Intel Corp.
8 * Author: Huang Ying <ying.huang@intel.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program. If not, see <http://www.gnu.org/licenses/>.
22 *
23 */
24
25#include <linux/kernel.h>
26#include <linux/crypto.h>
27#include <linux/init.h>
28#include <linux/module.h>
29#include <crypto/algapi.h>
30#include <crypto/cryptd.h>
31#include <crypto/ablk_helper.h>
32#include <asm/simd.h>
33
34int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
35 unsigned int key_len)
36{
37 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
38 struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
39 int err;
40
41 crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
42 crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
43 & CRYPTO_TFM_REQ_MASK);
44 err = crypto_ablkcipher_setkey(child, key, key_len);
45 crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
46 & CRYPTO_TFM_RES_MASK);
47 return err;
48}
49EXPORT_SYMBOL_GPL(ablk_set_key);
50
51int __ablk_encrypt(struct ablkcipher_request *req)
52{
53 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
54 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
55 struct blkcipher_desc desc;
56
57 desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
58 desc.info = req->info;
59 desc.flags = 0;
60
61 return crypto_blkcipher_crt(desc.tfm)->encrypt(
62 &desc, req->dst, req->src, req->nbytes);
63}
64EXPORT_SYMBOL_GPL(__ablk_encrypt);
65
66int ablk_encrypt(struct ablkcipher_request *req)
67{
68 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
69 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
70
71 if (!may_use_simd() ||
72 (in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm))) {
73 struct ablkcipher_request *cryptd_req =
74 ablkcipher_request_ctx(req);
75
76 *cryptd_req = *req;
77 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
78
79 return crypto_ablkcipher_encrypt(cryptd_req);
80 } else {
81 return __ablk_encrypt(req);
82 }
83}
84EXPORT_SYMBOL_GPL(ablk_encrypt);
85
86int ablk_decrypt(struct ablkcipher_request *req)
87{
88 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
89 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
90
91 if (!may_use_simd() ||
92 (in_atomic() && cryptd_ablkcipher_queued(ctx->cryptd_tfm))) {
93 struct ablkcipher_request *cryptd_req =
94 ablkcipher_request_ctx(req);
95
96 *cryptd_req = *req;
97 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
98
99 return crypto_ablkcipher_decrypt(cryptd_req);
100 } else {
101 struct blkcipher_desc desc;
102
103 desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
104 desc.info = req->info;
105 desc.flags = 0;
106
107 return crypto_blkcipher_crt(desc.tfm)->decrypt(
108 &desc, req->dst, req->src, req->nbytes);
109 }
110}
111EXPORT_SYMBOL_GPL(ablk_decrypt);
112
113void ablk_exit(struct crypto_tfm *tfm)
114{
115 struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm);
116
117 cryptd_free_ablkcipher(ctx->cryptd_tfm);
118}
119EXPORT_SYMBOL_GPL(ablk_exit);
120
121int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name)
122{
123 struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm);
124 struct cryptd_ablkcipher *cryptd_tfm;
125
126 cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, CRYPTO_ALG_INTERNAL,
127 CRYPTO_ALG_INTERNAL);
128 if (IS_ERR(cryptd_tfm))
129 return PTR_ERR(cryptd_tfm);
130
131 ctx->cryptd_tfm = cryptd_tfm;
132 tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
133 crypto_ablkcipher_reqsize(&cryptd_tfm->base);
134
135 return 0;
136}
137EXPORT_SYMBOL_GPL(ablk_init_common);
138
139int ablk_init(struct crypto_tfm *tfm)
140{
141 char drv_name[CRYPTO_MAX_ALG_NAME];
142
143 snprintf(drv_name, sizeof(drv_name), "__driver-%s",
144 crypto_tfm_alg_driver_name(tfm));
145
146 return ablk_init_common(tfm, drv_name);
147}
148EXPORT_SYMBOL_GPL(ablk_init);
149
150MODULE_LICENSE("GPL");
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 266fc1d64f61..a64c143165b1 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -92,13 +92,14 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
92 92
93 if (nbytes && walk->offset & alignmask && !err) { 93 if (nbytes && walk->offset & alignmask && !err) {
94 walk->offset = ALIGN(walk->offset, alignmask + 1); 94 walk->offset = ALIGN(walk->offset, alignmask + 1);
95 walk->data += walk->offset;
96
97 nbytes = min(nbytes, 95 nbytes = min(nbytes,
98 ((unsigned int)(PAGE_SIZE)) - walk->offset); 96 ((unsigned int)(PAGE_SIZE)) - walk->offset);
99 walk->entrylen -= nbytes; 97 walk->entrylen -= nbytes;
100 98
101 return nbytes; 99 if (nbytes) {
100 walk->data += walk->offset;
101 return nbytes;
102 }
102 } 103 }
103 104
104 if (walk->flags & CRYPTO_ALG_ASYNC) 105 if (walk->flags & CRYPTO_ALG_ASYNC)
@@ -446,24 +447,12 @@ static int ahash_def_finup(struct ahash_request *req)
446 return ahash_def_finup_finish1(req, err); 447 return ahash_def_finup_finish1(req, err);
447} 448}
448 449
449static int ahash_no_export(struct ahash_request *req, void *out)
450{
451 return -ENOSYS;
452}
453
454static int ahash_no_import(struct ahash_request *req, const void *in)
455{
456 return -ENOSYS;
457}
458
459static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) 450static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
460{ 451{
461 struct crypto_ahash *hash = __crypto_ahash_cast(tfm); 452 struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
462 struct ahash_alg *alg = crypto_ahash_alg(hash); 453 struct ahash_alg *alg = crypto_ahash_alg(hash);
463 454
464 hash->setkey = ahash_nosetkey; 455 hash->setkey = ahash_nosetkey;
465 hash->export = ahash_no_export;
466 hash->import = ahash_no_import;
467 456
468 if (tfm->__crt_alg->cra_type != &crypto_ahash_type) 457 if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
469 return crypto_init_shash_ops_async(tfm); 458 return crypto_init_shash_ops_async(tfm);
@@ -473,16 +462,14 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
473 hash->final = alg->final; 462 hash->final = alg->final;
474 hash->finup = alg->finup ?: ahash_def_finup; 463 hash->finup = alg->finup ?: ahash_def_finup;
475 hash->digest = alg->digest; 464 hash->digest = alg->digest;
465 hash->export = alg->export;
466 hash->import = alg->import;
476 467
477 if (alg->setkey) { 468 if (alg->setkey) {
478 hash->setkey = alg->setkey; 469 hash->setkey = alg->setkey;
479 if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) 470 if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
480 crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY); 471 crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
481 } 472 }
482 if (alg->export)
483 hash->export = alg->export;
484 if (alg->import)
485 hash->import = alg->import;
486 473
487 return 0; 474 return 0;
488} 475}
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 395b082d03a9..2a0271b5f62a 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -543,9 +543,6 @@ int crypto_register_instance(struct crypto_template *tmpl,
543 inst->alg.cra_module = tmpl->module; 543 inst->alg.cra_module = tmpl->module;
544 inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE; 544 inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE;
545 545
546 if (unlikely(!crypto_mod_get(&inst->alg)))
547 return -EAGAIN;
548
549 down_write(&crypto_alg_sem); 546 down_write(&crypto_alg_sem);
550 547
551 larval = __crypto_register_alg(&inst->alg); 548 larval = __crypto_register_alg(&inst->alg);
@@ -563,14 +560,9 @@ unlock:
563 goto err; 560 goto err;
564 561
565 crypto_wait_for_test(larval); 562 crypto_wait_for_test(larval);
566
567 /* Remove instance if test failed */
568 if (!(inst->alg.cra_flags & CRYPTO_ALG_TESTED))
569 crypto_unregister_instance(inst);
570 err = 0; 563 err = 0;
571 564
572err: 565err:
573 crypto_mod_put(&inst->alg);
574 return err; 566 return err;
575} 567}
576EXPORT_SYMBOL_GPL(crypto_register_instance); 568EXPORT_SYMBOL_GPL(crypto_register_instance);
diff --git a/crypto/api.c b/crypto/api.c
index 70a894e52ff3..1d5290c67108 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -193,17 +193,24 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
193 return alg; 193 return alg;
194} 194}
195 195
196struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask) 196static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
197 u32 mask)
197{ 198{
198 struct crypto_alg *alg; 199 struct crypto_alg *alg;
200 u32 test = 0;
201
202 if (!((type | mask) & CRYPTO_ALG_TESTED))
203 test |= CRYPTO_ALG_TESTED;
199 204
200 down_read(&crypto_alg_sem); 205 down_read(&crypto_alg_sem);
201 alg = __crypto_alg_lookup(name, type, mask); 206 alg = __crypto_alg_lookup(name, type | test, mask | test);
207 if (!alg && test)
208 alg = __crypto_alg_lookup(name, type, mask) ?
209 ERR_PTR(-ELIBBAD) : NULL;
202 up_read(&crypto_alg_sem); 210 up_read(&crypto_alg_sem);
203 211
204 return alg; 212 return alg;
205} 213}
206EXPORT_SYMBOL_GPL(crypto_alg_lookup);
207 214
208static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, 215static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
209 u32 mask) 216 u32 mask)
@@ -227,10 +234,12 @@ static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
227 alg = crypto_alg_lookup(name, type, mask); 234 alg = crypto_alg_lookup(name, type, mask);
228 } 235 }
229 236
230 if (alg) 237 if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg))
231 return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg; 238 alg = crypto_larval_wait(alg);
239 else if (!alg)
240 alg = crypto_larval_add(name, type, mask);
232 241
233 return crypto_larval_add(name, type, mask); 242 return alg;
234} 243}
235 244
236int crypto_probing_notify(unsigned long val, void *v) 245int crypto_probing_notify(unsigned long val, void *v)
@@ -253,11 +262,6 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
253 struct crypto_alg *larval; 262 struct crypto_alg *larval;
254 int ok; 263 int ok;
255 264
256 if (!((type | mask) & CRYPTO_ALG_TESTED)) {
257 type |= CRYPTO_ALG_TESTED;
258 mask |= CRYPTO_ALG_TESTED;
259 }
260
261 /* 265 /*
262 * If the internal flag is set for a cipher, require a caller to 266 * If the internal flag is set for a cipher, require a caller to
263 * to invoke the cipher with the internal flag to use that cipher. 267 * to invoke the cipher with the internal flag to use that cipher.
@@ -485,20 +489,14 @@ struct crypto_alg *crypto_find_alg(const char *alg_name,
485 const struct crypto_type *frontend, 489 const struct crypto_type *frontend,
486 u32 type, u32 mask) 490 u32 type, u32 mask)
487{ 491{
488 struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask) =
489 crypto_alg_mod_lookup;
490
491 if (frontend) { 492 if (frontend) {
492 type &= frontend->maskclear; 493 type &= frontend->maskclear;
493 mask &= frontend->maskclear; 494 mask &= frontend->maskclear;
494 type |= frontend->type; 495 type |= frontend->type;
495 mask |= frontend->maskset; 496 mask |= frontend->maskset;
496
497 if (frontend->lookup)
498 lookup = frontend->lookup;
499 } 497 }
500 498
501 return lookup(alg_name, type, mask); 499 return crypto_alg_mod_lookup(alg_name, type, mask);
502} 500}
503EXPORT_SYMBOL_GPL(crypto_find_alg); 501EXPORT_SYMBOL_GPL(crypto_find_alg);
504 502
diff --git a/crypto/cfb.c b/crypto/cfb.c
new file mode 100644
index 000000000000..94ee39bed758
--- /dev/null
+++ b/crypto/cfb.c
@@ -0,0 +1,353 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * CFB: Cipher FeedBack mode
4 *
5 * Copyright (c) 2018 James.Bottomley@HansenPartnership.com
6 *
7 * CFB is a stream cipher mode which is layered on top of a block
8 * encryption scheme. It works very much like a one-time pad where
9 * the pad is generated initially from the encrypted IV and then
10 * subsequently from the encrypted previous block of ciphertext. The
11 * pad is XOR'd into the plaintext to get the final ciphertext.
12 *
13 * The scheme of CFB is best described by Wikipedia:
14 *
15 * https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#CFB
16 *
17 * Note that since the pad for both encryption and decryption is
18 * generated by an encryption operation, CFB never uses the block
19 * decryption function.
20 */
21
22#include <crypto/algapi.h>
23#include <crypto/internal/skcipher.h>
24#include <linux/err.h>
25#include <linux/init.h>
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/slab.h>
29#include <linux/string.h>
30#include <linux/types.h>
31
32struct crypto_cfb_ctx {
33 struct crypto_cipher *child;
34};
35
36static unsigned int crypto_cfb_bsize(struct crypto_skcipher *tfm)
37{
38 struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
39 struct crypto_cipher *child = ctx->child;
40
41 return crypto_cipher_blocksize(child);
42}
43
44static void crypto_cfb_encrypt_one(struct crypto_skcipher *tfm,
45 const u8 *src, u8 *dst)
46{
47 struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
48
49 crypto_cipher_encrypt_one(ctx->child, dst, src);
50}
51
52/* the final partial-block step is the same for encryption and decryption */
53static void crypto_cfb_final(struct skcipher_walk *walk,
54 struct crypto_skcipher *tfm)
55{
56 const unsigned int bsize = crypto_cfb_bsize(tfm);
57 const unsigned long alignmask = crypto_skcipher_alignmask(tfm);
58 u8 tmp[bsize + alignmask];
59 u8 *stream = PTR_ALIGN(tmp + 0, alignmask + 1);
60 u8 *src = walk->src.virt.addr;
61 u8 *dst = walk->dst.virt.addr;
62 u8 *iv = walk->iv;
63 unsigned int nbytes = walk->nbytes;
64
65 crypto_cfb_encrypt_one(tfm, iv, stream);
66 crypto_xor_cpy(dst, stream, src, nbytes);
67}
68
69static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
70 struct crypto_skcipher *tfm)
71{
72 const unsigned int bsize = crypto_cfb_bsize(tfm);
73 unsigned int nbytes = walk->nbytes;
74 u8 *src = walk->src.virt.addr;
75 u8 *dst = walk->dst.virt.addr;
76 u8 *iv = walk->iv;
77
78 do {
79 crypto_cfb_encrypt_one(tfm, iv, dst);
80 crypto_xor(dst, src, bsize);
81 memcpy(iv, dst, bsize);
82
83 src += bsize;
84 dst += bsize;
85 } while ((nbytes -= bsize) >= bsize);
86
87 return nbytes;
88}
89
90static int crypto_cfb_encrypt_inplace(struct skcipher_walk *walk,
91 struct crypto_skcipher *tfm)
92{
93 const unsigned int bsize = crypto_cfb_bsize(tfm);
94 unsigned int nbytes = walk->nbytes;
95 u8 *src = walk->src.virt.addr;
96 u8 *iv = walk->iv;
97 u8 tmp[bsize];
98
99 do {
100 crypto_cfb_encrypt_one(tfm, iv, tmp);
101 crypto_xor(src, tmp, bsize);
102 iv = src;
103
104 src += bsize;
105 } while ((nbytes -= bsize) >= bsize);
106
107 memcpy(walk->iv, iv, bsize);
108
109 return nbytes;
110}
111
112static int crypto_cfb_encrypt(struct skcipher_request *req)
113{
114 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
115 struct skcipher_walk walk;
116 unsigned int bsize = crypto_cfb_bsize(tfm);
117 int err;
118
119 err = skcipher_walk_virt(&walk, req, false);
120
121 while (walk.nbytes >= bsize) {
122 if (walk.src.virt.addr == walk.dst.virt.addr)
123 err = crypto_cfb_encrypt_inplace(&walk, tfm);
124 else
125 err = crypto_cfb_encrypt_segment(&walk, tfm);
126 err = skcipher_walk_done(&walk, err);
127 }
128
129 if (walk.nbytes) {
130 crypto_cfb_final(&walk, tfm);
131 err = skcipher_walk_done(&walk, 0);
132 }
133
134 return err;
135}
136
137static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk,
138 struct crypto_skcipher *tfm)
139{
140 const unsigned int bsize = crypto_cfb_bsize(tfm);
141 unsigned int nbytes = walk->nbytes;
142 u8 *src = walk->src.virt.addr;
143 u8 *dst = walk->dst.virt.addr;
144 u8 *iv = walk->iv;
145
146 do {
147 crypto_cfb_encrypt_one(tfm, iv, dst);
148 crypto_xor(dst, iv, bsize);
149 iv = src;
150
151 src += bsize;
152 dst += bsize;
153 } while ((nbytes -= bsize) >= bsize);
154
155 memcpy(walk->iv, iv, bsize);
156
157 return nbytes;
158}
159
160static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
161 struct crypto_skcipher *tfm)
162{
163 const unsigned int bsize = crypto_cfb_bsize(tfm);
164 unsigned int nbytes = walk->nbytes;
165 u8 *src = walk->src.virt.addr;
166 u8 *iv = walk->iv;
167 u8 tmp[bsize];
168
169 do {
170 crypto_cfb_encrypt_one(tfm, iv, tmp);
171 memcpy(iv, src, bsize);
172 crypto_xor(src, tmp, bsize);
173 src += bsize;
174 } while ((nbytes -= bsize) >= bsize);
175
176 memcpy(walk->iv, iv, bsize);
177
178 return nbytes;
179}
180
181static int crypto_cfb_decrypt_blocks(struct skcipher_walk *walk,
182 struct crypto_skcipher *tfm)
183{
184 if (walk->src.virt.addr == walk->dst.virt.addr)
185 return crypto_cfb_decrypt_inplace(walk, tfm);
186 else
187 return crypto_cfb_decrypt_segment(walk, tfm);
188}
189
190static int crypto_cfb_setkey(struct crypto_skcipher *parent, const u8 *key,
191 unsigned int keylen)
192{
193 struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(parent);
194 struct crypto_cipher *child = ctx->child;
195 int err;
196
197 crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
198 crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
199 CRYPTO_TFM_REQ_MASK);
200 err = crypto_cipher_setkey(child, key, keylen);
201 crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
202 CRYPTO_TFM_RES_MASK);
203 return err;
204}
205
206static int crypto_cfb_decrypt(struct skcipher_request *req)
207{
208 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
209 struct skcipher_walk walk;
210 const unsigned int bsize = crypto_cfb_bsize(tfm);
211 int err;
212
213 err = skcipher_walk_virt(&walk, req, false);
214
215 while (walk.nbytes >= bsize) {
216 err = crypto_cfb_decrypt_blocks(&walk, tfm);
217 err = skcipher_walk_done(&walk, err);
218 }
219
220 if (walk.nbytes) {
221 crypto_cfb_final(&walk, tfm);
222 err = skcipher_walk_done(&walk, 0);
223 }
224
225 return err;
226}
227
228static int crypto_cfb_init_tfm(struct crypto_skcipher *tfm)
229{
230 struct skcipher_instance *inst = skcipher_alg_instance(tfm);
231 struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
232 struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
233 struct crypto_cipher *cipher;
234
235 cipher = crypto_spawn_cipher(spawn);
236 if (IS_ERR(cipher))
237 return PTR_ERR(cipher);
238
239 ctx->child = cipher;
240 return 0;
241}
242
243static void crypto_cfb_exit_tfm(struct crypto_skcipher *tfm)
244{
245 struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);
246
247 crypto_free_cipher(ctx->child);
248}
249
250static void crypto_cfb_free(struct skcipher_instance *inst)
251{
252 crypto_drop_skcipher(skcipher_instance_ctx(inst));
253 kfree(inst);
254}
255
256static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
257{
258 struct skcipher_instance *inst;
259 struct crypto_attr_type *algt;
260 struct crypto_spawn *spawn;
261 struct crypto_alg *alg;
262 u32 mask;
263 int err;
264
265 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
266 if (err)
267 return err;
268
269 inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
270 if (!inst)
271 return -ENOMEM;
272
273 algt = crypto_get_attr_type(tb);
274 err = PTR_ERR(algt);
275 if (IS_ERR(algt))
276 goto err_free_inst;
277
278 mask = CRYPTO_ALG_TYPE_MASK |
279 crypto_requires_off(algt->type, algt->mask,
280 CRYPTO_ALG_NEED_FALLBACK);
281
282 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
283 err = PTR_ERR(alg);
284 if (IS_ERR(alg))
285 goto err_free_inst;
286
287 spawn = skcipher_instance_ctx(inst);
288 err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
289 CRYPTO_ALG_TYPE_MASK);
290 crypto_mod_put(alg);
291 if (err)
292 goto err_free_inst;
293
294 err = crypto_inst_setname(skcipher_crypto_instance(inst), "cfb", alg);
295 if (err)
296 goto err_drop_spawn;
297
298 inst->alg.base.cra_priority = alg->cra_priority;
299	/* we're a stream cipher independent of the crypto cra_blocksize */
300 inst->alg.base.cra_blocksize = 1;
301 inst->alg.base.cra_alignmask = alg->cra_alignmask;
302
303 inst->alg.ivsize = alg->cra_blocksize;
304 inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
305 inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
306
307 inst->alg.base.cra_ctxsize = sizeof(struct crypto_cfb_ctx);
308
309 inst->alg.init = crypto_cfb_init_tfm;
310 inst->alg.exit = crypto_cfb_exit_tfm;
311
312 inst->alg.setkey = crypto_cfb_setkey;
313 inst->alg.encrypt = crypto_cfb_encrypt;
314 inst->alg.decrypt = crypto_cfb_decrypt;
315
316 inst->free = crypto_cfb_free;
317
318 err = skcipher_register_instance(tmpl, inst);
319 if (err)
320 goto err_drop_spawn;
321
322out:
323 return err;
324
325err_drop_spawn:
326 crypto_drop_spawn(spawn);
327err_free_inst:
328 kfree(inst);
329 goto out;
330}
331
332static struct crypto_template crypto_cfb_tmpl = {
333 .name = "cfb",
334 .create = crypto_cfb_create,
335 .module = THIS_MODULE,
336};
337
338static int __init crypto_cfb_module_init(void)
339{
340 return crypto_register_template(&crypto_cfb_tmpl);
341}
342
343static void __exit crypto_cfb_module_exit(void)
344{
345 crypto_unregister_template(&crypto_cfb_tmpl);
346}
347
348module_init(crypto_cfb_module_init);
349module_exit(crypto_cfb_module_exit);
350
351MODULE_LICENSE("GPL");
352MODULE_DESCRIPTION("CFB block cipher algorithm");
353MODULE_ALIAS_CRYPTO("cfb");
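The header comment at the top of cfb.c describes the mode: the keystream block ("pad") is E(IV) for the first block and E(previous ciphertext block) afterwards, and decryption reuses the same encryption direction. Below is a conceptual, self-contained model of that chaining for whole blocks only; it is not the kernel implementation, block_encrypt() is a toy stand-in for the underlying cipher, and the real code additionally handles a trailing partial block via crypto_cfb_final():

    #include <stdint.h>
    #include <string.h>

    #define BS 16	/* block size of the underlying cipher */

    /* toy stand-in for the block cipher's encrypt direction -- NOT a real cipher */
    static void block_encrypt(const uint8_t key[BS], const uint8_t in[BS], uint8_t out[BS])
    {
    	for (int j = 0; j < BS; j++)
    		out[j] = (uint8_t)(in[j] ^ key[j] ^ (37 * j));
    }

    /* CFB encryption over whole blocks: C[i] = P[i] ^ E(C[i-1]), with C[-1] = IV */
    static void cfb_encrypt(const uint8_t key[BS], uint8_t iv[BS],
    			const uint8_t *pt, uint8_t *ct, size_t nblocks)
    {
    	uint8_t pad[BS];

    	for (size_t i = 0; i < nblocks; i++) {
    		block_encrypt(key, iv, pad);		/* pad = E(IV or previous C) */
    		for (int j = 0; j < BS; j++)
    			ct[i * BS + j] = pt[i * BS + j] ^ pad[j];
    		memcpy(iv, &ct[i * BS], BS);		/* chain the ciphertext block */
    	}
    }

    /* CFB decryption XORs the same pad back in: P[i] = C[i] ^ E(C[i-1]) */
    static void cfb_decrypt(const uint8_t key[BS], uint8_t iv[BS],
    			const uint8_t *ct, uint8_t *pt, size_t nblocks)
    {
    	uint8_t pad[BS];

    	for (size_t i = 0; i < nblocks; i++) {
    		block_encrypt(key, iv, pad);
    		memcpy(iv, &ct[i * BS], BS);	/* save C[i] first so in-place works */
    		for (int j = 0; j < BS; j++)
    			pt[i * BS + j] = ct[i * BS + j] ^ pad[j];
    	}
    }

Note that, exactly as the file's comment says, decryption never needs the block cipher's decrypt direction.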
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index 61e7c4e02fd2..992e8d8dcdd9 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -15,13 +15,50 @@
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <crypto/engine.h> 17#include <crypto/engine.h>
18#include <crypto/internal/hash.h>
19#include <uapi/linux/sched/types.h> 18#include <uapi/linux/sched/types.h>
20#include "internal.h" 19#include "internal.h"
21 20
22#define CRYPTO_ENGINE_MAX_QLEN 10 21#define CRYPTO_ENGINE_MAX_QLEN 10
23 22
24/** 23/**
24 * crypto_finalize_request - finalize one request if the request is done
25 * @engine: the hardware engine
26 * @req: the request need to be finalized
27 * @err: error number
28 */
29static void crypto_finalize_request(struct crypto_engine *engine,
30 struct crypto_async_request *req, int err)
31{
32 unsigned long flags;
33 bool finalize_cur_req = false;
34 int ret;
35 struct crypto_engine_ctx *enginectx;
36
37 spin_lock_irqsave(&engine->queue_lock, flags);
38 if (engine->cur_req == req)
39 finalize_cur_req = true;
40 spin_unlock_irqrestore(&engine->queue_lock, flags);
41
42 if (finalize_cur_req) {
43 enginectx = crypto_tfm_ctx(req->tfm);
44 if (engine->cur_req_prepared &&
45 enginectx->op.unprepare_request) {
46 ret = enginectx->op.unprepare_request(engine, req);
47 if (ret)
48 dev_err(engine->dev, "failed to unprepare request\n");
49 }
50 spin_lock_irqsave(&engine->queue_lock, flags);
51 engine->cur_req = NULL;
52 engine->cur_req_prepared = false;
53 spin_unlock_irqrestore(&engine->queue_lock, flags);
54 }
55
56 req->complete(req, err);
57
58 kthread_queue_work(engine->kworker, &engine->pump_requests);
59}
60
61/**
25 * crypto_pump_requests - dequeue one request from engine queue to process 62 * crypto_pump_requests - dequeue one request from engine queue to process
26 * @engine: the hardware engine 63 * @engine: the hardware engine
27 * @in_kthread: true if we are in the context of the request pump thread 64 * @in_kthread: true if we are in the context of the request pump thread
@@ -34,11 +71,10 @@ static void crypto_pump_requests(struct crypto_engine *engine,
34 bool in_kthread) 71 bool in_kthread)
35{ 72{
36 struct crypto_async_request *async_req, *backlog; 73 struct crypto_async_request *async_req, *backlog;
37 struct ahash_request *hreq;
38 struct ablkcipher_request *breq;
39 unsigned long flags; 74 unsigned long flags;
40 bool was_busy = false; 75 bool was_busy = false;
41 int ret, rtype; 76 int ret;
77 struct crypto_engine_ctx *enginectx;
42 78
43 spin_lock_irqsave(&engine->queue_lock, flags); 79 spin_lock_irqsave(&engine->queue_lock, flags);
44 80
@@ -94,7 +130,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
94 130
95 spin_unlock_irqrestore(&engine->queue_lock, flags); 131 spin_unlock_irqrestore(&engine->queue_lock, flags);
96 132
97 rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
98 /* Until here we get the request need to be encrypted successfully */ 133 /* Until here we get the request need to be encrypted successfully */
99 if (!was_busy && engine->prepare_crypt_hardware) { 134 if (!was_busy && engine->prepare_crypt_hardware) {
100 ret = engine->prepare_crypt_hardware(engine); 135 ret = engine->prepare_crypt_hardware(engine);
@@ -104,57 +139,31 @@ static void crypto_pump_requests(struct crypto_engine *engine,
104 } 139 }
105 } 140 }
106 141
107 switch (rtype) { 142 enginectx = crypto_tfm_ctx(async_req->tfm);
108 case CRYPTO_ALG_TYPE_AHASH: 143
109 hreq = ahash_request_cast(engine->cur_req); 144 if (enginectx->op.prepare_request) {
110 if (engine->prepare_hash_request) { 145 ret = enginectx->op.prepare_request(engine, async_req);
111 ret = engine->prepare_hash_request(engine, hreq);
112 if (ret) {
113 dev_err(engine->dev, "failed to prepare request: %d\n",
114 ret);
115 goto req_err;
116 }
117 engine->cur_req_prepared = true;
118 }
119 ret = engine->hash_one_request(engine, hreq);
120 if (ret) {
121 dev_err(engine->dev, "failed to hash one request from queue\n");
122 goto req_err;
123 }
124 return;
125 case CRYPTO_ALG_TYPE_ABLKCIPHER:
126 breq = ablkcipher_request_cast(engine->cur_req);
127 if (engine->prepare_cipher_request) {
128 ret = engine->prepare_cipher_request(engine, breq);
129 if (ret) {
130 dev_err(engine->dev, "failed to prepare request: %d\n",
131 ret);
132 goto req_err;
133 }
134 engine->cur_req_prepared = true;
135 }
136 ret = engine->cipher_one_request(engine, breq);
137 if (ret) { 146 if (ret) {
138 dev_err(engine->dev, "failed to cipher one request from queue\n"); 147 dev_err(engine->dev, "failed to prepare request: %d\n",
148 ret);
139 goto req_err; 149 goto req_err;
140 } 150 }
141 return; 151 engine->cur_req_prepared = true;
142 default: 152 }
143 dev_err(engine->dev, "failed to prepare request of unknown type\n"); 153 if (!enginectx->op.do_one_request) {
144 return; 154 dev_err(engine->dev, "failed to do request\n");
155 ret = -EINVAL;
156 goto req_err;
145 } 157 }
158 ret = enginectx->op.do_one_request(engine, async_req);
159 if (ret) {
160 dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
161 goto req_err;
162 }
163 return;
146 164
147req_err: 165req_err:
148 switch (rtype) { 166 crypto_finalize_request(engine, async_req, ret);
149 case CRYPTO_ALG_TYPE_AHASH:
150 hreq = ahash_request_cast(engine->cur_req);
151 crypto_finalize_hash_request(engine, hreq, ret);
152 break;
153 case CRYPTO_ALG_TYPE_ABLKCIPHER:
154 breq = ablkcipher_request_cast(engine->cur_req);
155 crypto_finalize_cipher_request(engine, breq, ret);
156 break;
157 }
158 return; 167 return;
159 168
160out: 169out:
@@ -170,13 +179,12 @@ static void crypto_pump_work(struct kthread_work *work)
170} 179}
171 180
172/** 181/**
173 * crypto_transfer_cipher_request - transfer the new request into the 182 * crypto_transfer_request - transfer the new request into the engine queue
174 * enginequeue
175 * @engine: the hardware engine 183 * @engine: the hardware engine
176 * @req: the request need to be listed into the engine queue 184 * @req: the request need to be listed into the engine queue
177 */ 185 */
178int crypto_transfer_cipher_request(struct crypto_engine *engine, 186static int crypto_transfer_request(struct crypto_engine *engine,
179 struct ablkcipher_request *req, 187 struct crypto_async_request *req,
180 bool need_pump) 188 bool need_pump)
181{ 189{
182 unsigned long flags; 190 unsigned long flags;
@@ -189,7 +197,7 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine,
189 return -ESHUTDOWN; 197 return -ESHUTDOWN;
190 } 198 }
191 199
192 ret = ablkcipher_enqueue_request(&engine->queue, req); 200 ret = crypto_enqueue_request(&engine->queue, req);
193 201
194 if (!engine->busy && need_pump) 202 if (!engine->busy && need_pump)
195 kthread_queue_work(engine->kworker, &engine->pump_requests); 203 kthread_queue_work(engine->kworker, &engine->pump_requests);
@@ -197,102 +205,131 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine,
197 spin_unlock_irqrestore(&engine->queue_lock, flags); 205 spin_unlock_irqrestore(&engine->queue_lock, flags);
198 return ret; 206 return ret;
199} 207}
200EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);
201 208
202/** 209/**
203 * crypto_transfer_cipher_request_to_engine - transfer one request to list 210 * crypto_transfer_request_to_engine - transfer one request to list
204 * into the engine queue 211 * into the engine queue
205 * @engine: the hardware engine 212 * @engine: the hardware engine
206 * @req: the request need to be listed into the engine queue 213 * @req: the request need to be listed into the engine queue
207 */ 214 */
208int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine, 215static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
209 struct ablkcipher_request *req) 216 struct crypto_async_request *req)
210{ 217{
211 return crypto_transfer_cipher_request(engine, req, true); 218 return crypto_transfer_request(engine, req, true);
212} 219}
213EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);
214 220
215/** 221/**
216 * crypto_transfer_hash_request - transfer the new request into the 222 * crypto_transfer_ablkcipher_request_to_engine - transfer one ablkcipher_request
217 * enginequeue 223 * to list into the engine queue
218 * @engine: the hardware engine 224 * @engine: the hardware engine
219 * @req: the request need to be listed into the engine queue 225 * @req: the request need to be listed into the engine queue
226 * TODO: Remove this function when skcipher conversion is finished
220 */ 227 */
221int crypto_transfer_hash_request(struct crypto_engine *engine, 228int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
222 struct ahash_request *req, bool need_pump) 229 struct ablkcipher_request *req)
223{ 230{
224 unsigned long flags; 231 return crypto_transfer_request_to_engine(engine, &req->base);
225 int ret; 232}
226 233EXPORT_SYMBOL_GPL(crypto_transfer_ablkcipher_request_to_engine);
227 spin_lock_irqsave(&engine->queue_lock, flags);
228
229 if (!engine->running) {
230 spin_unlock_irqrestore(&engine->queue_lock, flags);
231 return -ESHUTDOWN;
232 }
233
234 ret = ahash_enqueue_request(&engine->queue, req);
235 234
236 if (!engine->busy && need_pump) 235/**
237 kthread_queue_work(engine->kworker, &engine->pump_requests); 236 * crypto_transfer_aead_request_to_engine - transfer one aead_request
237 * to list into the engine queue
238 * @engine: the hardware engine
239 * @req: the request that needs to be listed in the engine queue
240 */
241int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
242 struct aead_request *req)
243{
244 return crypto_transfer_request_to_engine(engine, &req->base);
245}
246EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);
238 247
239 spin_unlock_irqrestore(&engine->queue_lock, flags); 248/**
240 return ret; 249 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
250 * to list into the engine queue
251 * @engine: the hardware engine
252 * @req: the request that needs to be listed in the engine queue
253 */
254int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
255 struct akcipher_request *req)
256{
257 return crypto_transfer_request_to_engine(engine, &req->base);
241} 258}
242EXPORT_SYMBOL_GPL(crypto_transfer_hash_request); 259EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);
243 260
244/** 261/**
245 * crypto_transfer_hash_request_to_engine - transfer one request to list 262 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
246 * into the engine queue 263 * to list into the engine queue
247 * @engine: the hardware engine 264 * @engine: the hardware engine
248 * @req: the request need to be listed into the engine queue 265 * @req: the request need to be listed into the engine queue
249 */ 266 */
250int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine, 267int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
251 struct ahash_request *req) 268 struct ahash_request *req)
252{ 269{
253 return crypto_transfer_hash_request(engine, req, true); 270 return crypto_transfer_request_to_engine(engine, &req->base);
254} 271}
255EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine); 272EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
256 273
257/** 274/**
258 * crypto_finalize_cipher_request - finalize one request if the request is done 275 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
276 * to list into the engine queue
277 * @engine: the hardware engine
278 * @req: the request that needs to be listed in the engine queue
279 */
280int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
281 struct skcipher_request *req)
282{
283 return crypto_transfer_request_to_engine(engine, &req->base);
284}
285EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
286
287/**
288 * crypto_finalize_ablkcipher_request - finalize one ablkcipher_request if
289 * the request is done
259 * @engine: the hardware engine 290 * @engine: the hardware engine
260 * @req: the request need to be finalized 291 * @req: the request need to be finalized
261 * @err: error number 292 * @err: error number
293 * TODO: Remove this function when skcipher conversion is finished
262 */ 294 */
263void crypto_finalize_cipher_request(struct crypto_engine *engine, 295void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
264 struct ablkcipher_request *req, int err) 296 struct ablkcipher_request *req, int err)
265{ 297{
266 unsigned long flags; 298 return crypto_finalize_request(engine, &req->base, err);
267 bool finalize_cur_req = false; 299}
268 int ret; 300EXPORT_SYMBOL_GPL(crypto_finalize_ablkcipher_request);
269
270 spin_lock_irqsave(&engine->queue_lock, flags);
271 if (engine->cur_req == &req->base)
272 finalize_cur_req = true;
273 spin_unlock_irqrestore(&engine->queue_lock, flags);
274
275 if (finalize_cur_req) {
276 if (engine->cur_req_prepared &&
277 engine->unprepare_cipher_request) {
278 ret = engine->unprepare_cipher_request(engine, req);
279 if (ret)
280 dev_err(engine->dev, "failed to unprepare request\n");
281 }
282 spin_lock_irqsave(&engine->queue_lock, flags);
283 engine->cur_req = NULL;
284 engine->cur_req_prepared = false;
285 spin_unlock_irqrestore(&engine->queue_lock, flags);
286 }
287 301
288 req->base.complete(&req->base, err); 302/**
303 * crypto_finalize_aead_request - finalize one aead_request if
304 * the request is done
305 * @engine: the hardware engine
306 * @req: the request that needs to be finalized
307 * @err: error number
308 */
309void crypto_finalize_aead_request(struct crypto_engine *engine,
310 struct aead_request *req, int err)
311{
312 return crypto_finalize_request(engine, &req->base, err);
313}
314EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);
289 315
290 kthread_queue_work(engine->kworker, &engine->pump_requests); 316/**
317 * crypto_finalize_akcipher_request - finalize one akcipher_request if
318 * the request is done
319 * @engine: the hardware engine
320 * @req: the request that needs to be finalized
321 * @err: error number
322 */
323void crypto_finalize_akcipher_request(struct crypto_engine *engine,
324 struct akcipher_request *req, int err)
325{
326 return crypto_finalize_request(engine, &req->base, err);
291} 327}
292EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request); 328EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);
293 329
294/** 330/**
295 * crypto_finalize_hash_request - finalize one request if the request is done 331 * crypto_finalize_hash_request - finalize one ahash_request if
332 * the request is done
296 * @engine: the hardware engine 333 * @engine: the hardware engine
297 * @req: the request need to be finalized 334 * @req: the request need to be finalized
298 * @err: error number 335 * @err: error number
@@ -300,35 +337,25 @@ EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
300void crypto_finalize_hash_request(struct crypto_engine *engine, 337void crypto_finalize_hash_request(struct crypto_engine *engine,
301 struct ahash_request *req, int err) 338 struct ahash_request *req, int err)
302{ 339{
303 unsigned long flags; 340 return crypto_finalize_request(engine, &req->base, err);
304 bool finalize_cur_req = false;
305 int ret;
306
307 spin_lock_irqsave(&engine->queue_lock, flags);
308 if (engine->cur_req == &req->base)
309 finalize_cur_req = true;
310 spin_unlock_irqrestore(&engine->queue_lock, flags);
311
312 if (finalize_cur_req) {
313 if (engine->cur_req_prepared &&
314 engine->unprepare_hash_request) {
315 ret = engine->unprepare_hash_request(engine, req);
316 if (ret)
317 dev_err(engine->dev, "failed to unprepare request\n");
318 }
319 spin_lock_irqsave(&engine->queue_lock, flags);
320 engine->cur_req = NULL;
321 engine->cur_req_prepared = false;
322 spin_unlock_irqrestore(&engine->queue_lock, flags);
323 }
324
325 req->base.complete(&req->base, err);
326
327 kthread_queue_work(engine->kworker, &engine->pump_requests);
328} 341}
329EXPORT_SYMBOL_GPL(crypto_finalize_hash_request); 342EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
330 343
331/** 344/**
345 * crypto_finalize_skcipher_request - finalize one skcipher_request if
346 * the request is done
347 * @engine: the hardware engine
348 * @req: the request that needs to be finalized
349 * @err: error number
350 */
351void crypto_finalize_skcipher_request(struct crypto_engine *engine,
352 struct skcipher_request *req, int err)
353{
354 return crypto_finalize_request(engine, &req->base, err);
355}
356EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
357
358/**
332 * crypto_engine_start - start the hardware engine 359 * crypto_engine_start - start the hardware engine
333 * @engine: the hardware engine need to be started 360 * @engine: the hardware engine need to be started
334 * 361 *
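The crypto_engine.c rework above stops switching on CRYPTO_ALG_TYPE_AHASH / CRYPTO_ALG_TYPE_ABLKCIPHER and instead calls per-transform callbacks found through crypto_tfm_ctx(). A rough sketch of how a driver might plug into the new interface: struct crypto_engine_ctx and the op.prepare_request / op.do_one_request fields are taken from the code above, everything prefixed my_ is hypothetical, and the exact callback prototypes are whatever crypto/engine.h declares after this change:

    #include <linux/kernel.h>
    #include <crypto/engine.h>
    #include <crypto/internal/skcipher.h>

    static struct crypto_engine *my_engine;	/* obtained elsewhere, e.g. from crypto_engine_alloc_init() */

    struct my_tfm_ctx {
    	struct crypto_engine_ctx enginectx;	/* must come first: the engine casts crypto_tfm_ctx() to it */
    	/* ... driver keys and state ... */
    };

    /* called by the engine's pump thread with the request's base pointer */
    static int my_do_one_request(struct crypto_engine *engine, void *areq)
    {
    	struct skcipher_request *req =
    		container_of(areq, struct skcipher_request, base);

    	/* ... kick the hardware; a real driver usually finalizes from its
    	 * completion interrupt instead of doing it synchronously here ... */
    	crypto_finalize_skcipher_request(engine, req, 0);
    	return 0;
    }

    static int my_init_tfm(struct crypto_skcipher *tfm)
    {
    	struct my_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

    	ctx->enginectx.op.prepare_request = NULL;
    	ctx->enginectx.op.unprepare_request = NULL;
    	ctx->enginectx.op.do_one_request = my_do_one_request;
    	return 0;
    }

    static int my_encrypt(struct skcipher_request *req)
    {
    	/* queue the request; the engine worker later invokes do_one_request */
    	return crypto_transfer_skcipher_request_to_engine(my_engine, req);
    }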
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 5c291eedaa70..0e89b5457cab 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -271,7 +271,7 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
271 return -ENOENT; 271 return -ENOENT;
272 272
273 err = -ENOMEM; 273 err = -ENOMEM;
274 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); 274 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
275 if (!skb) 275 if (!skb)
276 goto drop_alg; 276 goto drop_alg;
277 277
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 18f32f2a5e1c..9c066b5ac12d 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -1025,9 +1025,7 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
1025{ 1025{
1026 int ret = 0; 1026 int ret = 0;
1027 struct ecc_point *product, *pk; 1027 struct ecc_point *product, *pk;
1028 u64 priv[ndigits]; 1028 u64 *priv, *rand_z;
1029 u64 rand_z[ndigits];
1030 unsigned int nbytes;
1031 const struct ecc_curve *curve = ecc_get_curve(curve_id); 1029 const struct ecc_curve *curve = ecc_get_curve(curve_id);
1032 1030
1033 if (!private_key || !public_key || !curve) { 1031 if (!private_key || !public_key || !curve) {
@@ -1035,14 +1033,22 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
1035 goto out; 1033 goto out;
1036 } 1034 }
1037 1035
1038 nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT; 1036 priv = kmalloc_array(ndigits, sizeof(*priv), GFP_KERNEL);
1037 if (!priv) {
1038 ret = -ENOMEM;
1039 goto out;
1040 }
1039 1041
1040 get_random_bytes(rand_z, nbytes); 1042 rand_z = kmalloc_array(ndigits, sizeof(*rand_z), GFP_KERNEL);
1043 if (!rand_z) {
1044 ret = -ENOMEM;
1045 goto kfree_out;
1046 }
1041 1047
1042 pk = ecc_alloc_point(ndigits); 1048 pk = ecc_alloc_point(ndigits);
1043 if (!pk) { 1049 if (!pk) {
1044 ret = -ENOMEM; 1050 ret = -ENOMEM;
1045 goto out; 1051 goto kfree_out;
1046 } 1052 }
1047 1053
1048 product = ecc_alloc_point(ndigits); 1054 product = ecc_alloc_point(ndigits);
@@ -1051,6 +1057,8 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
1051 goto err_alloc_product; 1057 goto err_alloc_product;
1052 } 1058 }
1053 1059
1060 get_random_bytes(rand_z, ndigits << ECC_DIGITS_TO_BYTES_SHIFT);
1061
1054 ecc_swap_digits(public_key, pk->x, ndigits); 1062 ecc_swap_digits(public_key, pk->x, ndigits);
1055 ecc_swap_digits(&public_key[ndigits], pk->y, ndigits); 1063 ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
1056 ecc_swap_digits(private_key, priv, ndigits); 1064 ecc_swap_digits(private_key, priv, ndigits);
@@ -1065,6 +1073,9 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
1065 ecc_free_point(product); 1073 ecc_free_point(product);
1066err_alloc_product: 1074err_alloc_product:
1067 ecc_free_point(pk); 1075 ecc_free_point(pk);
1076kfree_out:
1077 kzfree(priv);
1078 kzfree(rand_z);
1068out: 1079out:
1069 return ret; 1080 return ret;
1070} 1081}
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index 3aca0933ec44..d2ec33f0e098 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -89,12 +89,19 @@ static int ecdh_compute_value(struct kpp_request *req)
89 if (!shared_secret) 89 if (!shared_secret)
90 goto free_pubkey; 90 goto free_pubkey;
91 91
92 copied = sg_copy_to_buffer(req->src, 1, public_key, 92 /* from here on it's invalid parameters */
93 public_key_sz); 93 ret = -EINVAL;
94 if (copied != public_key_sz) { 94
95 ret = -EINVAL; 95 /* must have exactly two points to be on the curve */
96 if (public_key_sz != req->src_len)
97 goto free_all;
98
99 copied = sg_copy_to_buffer(req->src,
100 sg_nents_for_len(req->src,
101 public_key_sz),
102 public_key, public_key_sz);
103 if (copied != public_key_sz)
96 goto free_all; 104 goto free_all;
97 }
98 105
99 ret = crypto_ecdh_shared_secret(ctx->curve_id, ctx->ndigits, 106 ret = crypto_ecdh_shared_secret(ctx->curve_id, ctx->ndigits,
100 ctx->private_key, public_key, 107 ctx->private_key, public_key,
@@ -111,7 +118,11 @@ static int ecdh_compute_value(struct kpp_request *req)
111 if (ret < 0) 118 if (ret < 0)
112 goto free_all; 119 goto free_all;
113 120
114 copied = sg_copy_from_buffer(req->dst, 1, buf, nbytes); 121 /* might want less than we've got */
122 nbytes = min_t(size_t, nbytes, req->dst_len);
123 copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
124 nbytes),
125 buf, nbytes);
115 if (copied != nbytes) 126 if (copied != nbytes)
116 ret = -EINVAL; 127 ret = -EINVAL;
117 128
diff --git a/crypto/internal.h b/crypto/internal.h
index 5ac27fba10e8..9a3f39939fba 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -67,7 +67,6 @@ static inline unsigned int crypto_compress_ctxsize(struct crypto_alg *alg)
67} 67}
68 68
69struct crypto_alg *crypto_mod_get(struct crypto_alg *alg); 69struct crypto_alg *crypto_mod_get(struct crypto_alg *alg);
70struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask);
71struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); 70struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
72 71
73int crypto_init_cipher_ops(struct crypto_tfm *tfm); 72int crypto_init_cipher_ops(struct crypto_tfm *tfm);
diff --git a/crypto/lrw.c b/crypto/lrw.c
index cbbd7c50ad19..954a7064a179 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -28,13 +28,31 @@
28 28
29#include <crypto/b128ops.h> 29#include <crypto/b128ops.h>
30#include <crypto/gf128mul.h> 30#include <crypto/gf128mul.h>
31#include <crypto/lrw.h>
32 31
33#define LRW_BUFFER_SIZE 128u 32#define LRW_BUFFER_SIZE 128u
34 33
34#define LRW_BLOCK_SIZE 16
35
35struct priv { 36struct priv {
36 struct crypto_skcipher *child; 37 struct crypto_skcipher *child;
37 struct lrw_table_ctx table; 38
39 /*
40 * optimizes multiplying a random (non-incrementing, as at the
41 * start of a new sector) value with key2; we could also have
42 * used 4k optimization tables or no optimization at all. In the
43 * latter case we would have to store key2 here
44 */
45 struct gf128mul_64k *table;
46
47 /*
48 * stores:
49 * key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
50 * key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
51 * key2*{ 0,0,...1,1,1,1,1 }, etc
52 * needed for optimized multiplication of incrementing values
53 * with key2
54 */
55 be128 mulinc[128];
38}; 56};
39 57
40struct rctx { 58struct rctx {
@@ -65,11 +83,25 @@ static inline void setbit128_bbe(void *b, int bit)
65 ), b); 83 ), b);
66} 84}
67 85
68int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak) 86static int setkey(struct crypto_skcipher *parent, const u8 *key,
87 unsigned int keylen)
69{ 88{
89 struct priv *ctx = crypto_skcipher_ctx(parent);
90 struct crypto_skcipher *child = ctx->child;
91 int err, bsize = LRW_BLOCK_SIZE;
92 const u8 *tweak = key + keylen - bsize;
70 be128 tmp = { 0 }; 93 be128 tmp = { 0 };
71 int i; 94 int i;
72 95
96 crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
97 crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
98 CRYPTO_TFM_REQ_MASK);
99 err = crypto_skcipher_setkey(child, key, keylen - bsize);
100 crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
101 CRYPTO_TFM_RES_MASK);
102 if (err)
103 return err;
104
73 if (ctx->table) 105 if (ctx->table)
74 gf128mul_free_64k(ctx->table); 106 gf128mul_free_64k(ctx->table);
75 107
@@ -87,34 +119,6 @@ int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
87 119
88 return 0; 120 return 0;
89} 121}
90EXPORT_SYMBOL_GPL(lrw_init_table);
91
92void lrw_free_table(struct lrw_table_ctx *ctx)
93{
94 if (ctx->table)
95 gf128mul_free_64k(ctx->table);
96}
97EXPORT_SYMBOL_GPL(lrw_free_table);
98
99static int setkey(struct crypto_skcipher *parent, const u8 *key,
100 unsigned int keylen)
101{
102 struct priv *ctx = crypto_skcipher_ctx(parent);
103 struct crypto_skcipher *child = ctx->child;
104 int err, bsize = LRW_BLOCK_SIZE;
105 const u8 *tweak = key + keylen - bsize;
106
107 crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
108 crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
109 CRYPTO_TFM_REQ_MASK);
110 err = crypto_skcipher_setkey(child, key, keylen - bsize);
111 crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
112 CRYPTO_TFM_RES_MASK);
113 if (err)
114 return err;
115
116 return lrw_init_table(&ctx->table, tweak);
117}
118 122
119static inline void inc(be128 *iv) 123static inline void inc(be128 *iv)
120{ 124{
@@ -238,7 +242,7 @@ static int pre_crypt(struct skcipher_request *req)
238 /* T <- I*Key2, using the optimization 242 /* T <- I*Key2, using the optimization
239 * discussed in the specification */ 243 * discussed in the specification */
240 be128_xor(&rctx->t, &rctx->t, 244 be128_xor(&rctx->t, &rctx->t,
241 &ctx->table.mulinc[get_index128(iv)]); 245 &ctx->mulinc[get_index128(iv)]);
242 inc(iv); 246 inc(iv);
243 } while ((avail -= bs) >= bs); 247 } while ((avail -= bs) >= bs);
244 248
@@ -301,7 +305,7 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
301 memcpy(&rctx->t, req->iv, sizeof(rctx->t)); 305 memcpy(&rctx->t, req->iv, sizeof(rctx->t));
302 306
303 /* T <- I*Key2 */ 307 /* T <- I*Key2 */
304 gf128mul_64k_bbe(&rctx->t, ctx->table.table); 308 gf128mul_64k_bbe(&rctx->t, ctx->table);
305 309
306 return 0; 310 return 0;
307} 311}
@@ -313,7 +317,7 @@ static void exit_crypt(struct skcipher_request *req)
313 rctx->left = 0; 317 rctx->left = 0;
314 318
315 if (rctx->ext) 319 if (rctx->ext)
316 kfree(rctx->ext); 320 kzfree(rctx->ext);
317} 321}
318 322
319static int do_encrypt(struct skcipher_request *req, int err) 323static int do_encrypt(struct skcipher_request *req, int err)
@@ -416,85 +420,6 @@ static int decrypt(struct skcipher_request *req)
416 return do_decrypt(req, init_crypt(req, decrypt_done)); 420 return do_decrypt(req, init_crypt(req, decrypt_done));
417} 421}
418 422
419int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
420 struct scatterlist *ssrc, unsigned int nbytes,
421 struct lrw_crypt_req *req)
422{
423 const unsigned int bsize = LRW_BLOCK_SIZE;
424 const unsigned int max_blks = req->tbuflen / bsize;
425 struct lrw_table_ctx *ctx = req->table_ctx;
426 struct blkcipher_walk walk;
427 unsigned int nblocks;
428 be128 *iv, *src, *dst, *t;
429 be128 *t_buf = req->tbuf;
430 int err, i;
431
432 BUG_ON(max_blks < 1);
433
434 blkcipher_walk_init(&walk, sdst, ssrc, nbytes);
435
436 err = blkcipher_walk_virt(desc, &walk);
437 nbytes = walk.nbytes;
438 if (!nbytes)
439 return err;
440
441 nblocks = min(walk.nbytes / bsize, max_blks);
442 src = (be128 *)walk.src.virt.addr;
443 dst = (be128 *)walk.dst.virt.addr;
444
445 /* calculate first value of T */
446 iv = (be128 *)walk.iv;
447 t_buf[0] = *iv;
448
449 /* T <- I*Key2 */
450 gf128mul_64k_bbe(&t_buf[0], ctx->table);
451
452 i = 0;
453 goto first;
454
455 for (;;) {
456 do {
457 for (i = 0; i < nblocks; i++) {
458 /* T <- I*Key2, using the optimization
459 * discussed in the specification */
460 be128_xor(&t_buf[i], t,
461 &ctx->mulinc[get_index128(iv)]);
462 inc(iv);
463first:
464 t = &t_buf[i];
465
466 /* PP <- T xor P */
467 be128_xor(dst + i, t, src + i);
468 }
469
470 /* CC <- E(Key2,PP) */
471 req->crypt_fn(req->crypt_ctx, (u8 *)dst,
472 nblocks * bsize);
473
474 /* C <- T xor CC */
475 for (i = 0; i < nblocks; i++)
476 be128_xor(dst + i, dst + i, &t_buf[i]);
477
478 src += nblocks;
479 dst += nblocks;
480 nbytes -= nblocks * bsize;
481 nblocks = min(nbytes / bsize, max_blks);
482 } while (nblocks > 0);
483
484 err = blkcipher_walk_done(desc, &walk, nbytes);
485 nbytes = walk.nbytes;
486 if (!nbytes)
487 break;
488
489 nblocks = min(nbytes / bsize, max_blks);
490 src = (be128 *)walk.src.virt.addr;
491 dst = (be128 *)walk.dst.virt.addr;
492 }
493
494 return err;
495}
496EXPORT_SYMBOL_GPL(lrw_crypt);
497
498static int init_tfm(struct crypto_skcipher *tfm) 423static int init_tfm(struct crypto_skcipher *tfm)
499{ 424{
500 struct skcipher_instance *inst = skcipher_alg_instance(tfm); 425 struct skcipher_instance *inst = skcipher_alg_instance(tfm);
@@ -518,7 +443,8 @@ static void exit_tfm(struct crypto_skcipher *tfm)
518{ 443{
519 struct priv *ctx = crypto_skcipher_ctx(tfm); 444 struct priv *ctx = crypto_skcipher_ctx(tfm);
520 445
521 lrw_free_table(&ctx->table); 446 if (ctx->table)
447 gf128mul_free_64k(ctx->table);
522 crypto_free_skcipher(ctx->child); 448 crypto_free_skcipher(ctx->child);
523} 449}
524 450
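A worked instance of the mulinc[] optimization documented in the new struct priv comments above (the index convention here is only illustrative; the real bit ordering is whatever get_index128() implements). Incrementing the tweak index I flips exactly a low-order run of bits:

    I         = ...0111b
    I + 1     = ...1000b
    I ^ (I+1) =    1111b        (four trailing ones)

Because multiplication by key2 in GF(2^128) distributes over xor, the per-block tweak update becomes

    T(I+1) = (I+1) * key2
           = (I * key2) ^ ((I ^ (I+1)) * key2)
           = T(I) ^ mulinc[3]

where mulinc[k] is precomputed as key2 times the constant with k+1 trailing ones. This is why pre_crypt() can advance the tweak with a single be128_xor() per block, and the full gf128mul_64k_bbe() multiplication is only needed once per request in init_crypt().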
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index fe5129d6ff4e..f14152147ce8 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -367,7 +367,7 @@ static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
367 goto out; 367 goto out;
368 368
369 rctx->out = req->result; 369 rctx->out = req->result;
370 err = ahash_mcryptd_update(&rctx->areq); 370 err = crypto_ahash_update(&rctx->areq);
371 if (err) { 371 if (err) {
372 req->base.complete = rctx->complete; 372 req->base.complete = rctx->complete;
373 goto out; 373 goto out;
@@ -394,7 +394,7 @@ static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
394 goto out; 394 goto out;
395 395
396 rctx->out = req->result; 396 rctx->out = req->result;
397 err = ahash_mcryptd_final(&rctx->areq); 397 err = crypto_ahash_final(&rctx->areq);
398 if (err) { 398 if (err) {
399 req->base.complete = rctx->complete; 399 req->base.complete = rctx->complete;
400 goto out; 400 goto out;
@@ -420,7 +420,7 @@ static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
420 if (unlikely(err == -EINPROGRESS)) 420 if (unlikely(err == -EINPROGRESS))
421 goto out; 421 goto out;
422 rctx->out = req->result; 422 rctx->out = req->result;
423 err = ahash_mcryptd_finup(&rctx->areq); 423 err = crypto_ahash_finup(&rctx->areq);
424 424
425 if (err) { 425 if (err) {
426 req->base.complete = rctx->complete; 426 req->base.complete = rctx->complete;
@@ -455,7 +455,7 @@ static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
455 rctx->complete, req_async); 455 rctx->complete, req_async);
456 456
457 rctx->out = req->result; 457 rctx->out = req->result;
458 err = ahash_mcryptd_digest(desc); 458 err = crypto_ahash_init(desc) ?: crypto_ahash_finup(desc);
459 459
460out: 460out:
461 local_bh_disable(); 461 local_bh_disable();
@@ -612,32 +612,6 @@ struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
612} 612}
613EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash); 613EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
614 614
615int ahash_mcryptd_digest(struct ahash_request *desc)
616{
617 return crypto_ahash_init(desc) ?: ahash_mcryptd_finup(desc);
618}
619
620int ahash_mcryptd_update(struct ahash_request *desc)
621{
622 /* alignment is to be done by multi-buffer crypto algorithm if needed */
623
624 return crypto_ahash_update(desc);
625}
626
627int ahash_mcryptd_finup(struct ahash_request *desc)
628{
629 /* alignment is to be done by multi-buffer crypto algorithm if needed */
630
631 return crypto_ahash_finup(desc);
632}
633
634int ahash_mcryptd_final(struct ahash_request *desc)
635{
636 /* alignment is to be done by multi-buffer crypto algorithm if needed */
637
638 return crypto_ahash_final(desc);
639}
640
641struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm) 615struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
642{ 616{
643 struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); 617 struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
diff --git a/crypto/md4.c b/crypto/md4.c
index 3515af425cc9..810fefb0a007 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -64,23 +64,6 @@ static inline u32 H(u32 x, u32 y, u32 z)
64#define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s)) 64#define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s))
65#define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s)) 65#define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s))
66 66
67/* XXX: this stuff can be optimized */
68static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
69{
70 while (words--) {
71 __le32_to_cpus(buf);
72 buf++;
73 }
74}
75
76static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
77{
78 while (words--) {
79 __cpu_to_le32s(buf);
80 buf++;
81 }
82}
83
84static void md4_transform(u32 *hash, u32 const *in) 67static void md4_transform(u32 *hash, u32 const *in)
85{ 68{
86 u32 a, b, c, d; 69 u32 a, b, c, d;
diff --git a/crypto/md5.c b/crypto/md5.c
index f7ae1a48225b..f776ef43d621 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -32,23 +32,6 @@ const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
32}; 32};
33EXPORT_SYMBOL_GPL(md5_zero_message_hash); 33EXPORT_SYMBOL_GPL(md5_zero_message_hash);
34 34
35/* XXX: this stuff can be optimized */
36static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
37{
38 while (words--) {
39 __le32_to_cpus(buf);
40 buf++;
41 }
42}
43
44static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
45{
46 while (words--) {
47 __cpu_to_le32s(buf);
48 buf++;
49 }
50}
51
52#define F1(x, y, z) (z ^ (x & (y ^ z))) 35#define F1(x, y, z) (z ^ (x & (y ^ z)))
53#define F2(x, y, z) F1(z, x, y) 36#define F2(x, y, z) F1(z, x, y)
54#define F3(x, y, z) (x ^ y ^ z) 37#define F3(x, y, z) (x ^ y ^ z)
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 2908f93c3e55..9893dbfc1af4 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -192,7 +192,7 @@ static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
192 if (likely(!pad_len)) 192 if (likely(!pad_len))
193 goto out; 193 goto out;
194 194
195 out_buf = kzalloc(ctx->key_size, GFP_ATOMIC); 195 out_buf = kzalloc(ctx->key_size, GFP_KERNEL);
196 err = -ENOMEM; 196 err = -ENOMEM;
197 if (!out_buf) 197 if (!out_buf)
198 goto out; 198 goto out;
diff --git a/crypto/simd.c b/crypto/simd.c
index 208226d7f908..ea7240be3001 100644
--- a/crypto/simd.c
+++ b/crypto/simd.c
@@ -221,4 +221,54 @@ void simd_skcipher_free(struct simd_skcipher_alg *salg)
221} 221}
222EXPORT_SYMBOL_GPL(simd_skcipher_free); 222EXPORT_SYMBOL_GPL(simd_skcipher_free);
223 223
224int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
225 struct simd_skcipher_alg **simd_algs)
226{
227 int err;
228 int i;
229 const char *algname;
230 const char *drvname;
231 const char *basename;
232 struct simd_skcipher_alg *simd;
233
234 err = crypto_register_skciphers(algs, count);
235 if (err)
236 return err;
237
238 for (i = 0; i < count; i++) {
239 WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
240 WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
241 algname = algs[i].base.cra_name + 2;
242 drvname = algs[i].base.cra_driver_name + 2;
243 basename = algs[i].base.cra_driver_name;
244 simd = simd_skcipher_create_compat(algname, drvname, basename);
245 err = PTR_ERR(simd);
246 if (IS_ERR(simd))
247 goto err_unregister;
248 simd_algs[i] = simd;
249 }
250 return 0;
251
252err_unregister:
253 simd_unregister_skciphers(algs, count, simd_algs);
254 return err;
255}
256EXPORT_SYMBOL_GPL(simd_register_skciphers_compat);
257
258void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
259 struct simd_skcipher_alg **simd_algs)
260{
261 int i;
262
263 crypto_unregister_skciphers(algs, count);
264
265 for (i = 0; i < count; i++) {
266 if (simd_algs[i]) {
267 simd_skcipher_free(simd_algs[i]);
268 simd_algs[i] = NULL;
269 }
270 }
271}
272EXPORT_SYMBOL_GPL(simd_unregister_skciphers);
273
224MODULE_LICENSE("GPL"); 274MODULE_LICENSE("GPL");
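
The two helpers exported above let an architecture glue module register an array of internal ("__"-prefixed) skcipher implementations and create a SIMD wrapper for each of them in one call. A minimal sketch of a hypothetical glue module using them follows; the example_* names are placeholders, and the underlying algorithm array is assumed to be defined elsewhere in the same module with CRYPTO_ALG_INTERNAL set:

#include <linux/module.h>
#include <crypto/skcipher.h>
#include <crypto/internal/simd.h>

#define EXAMPLE_NR_ALGS 2

/* Internal "__"-prefixed skcipher implementations, defined elsewhere. */
extern struct skcipher_alg example_algs[EXAMPLE_NR_ALGS];
static struct simd_skcipher_alg *example_simd_algs[EXAMPLE_NR_ALGS];

static int __init example_mod_init(void)
{
        /* Registers example_algs and creates a simd wrapper per entry. */
        return simd_register_skciphers_compat(example_algs, EXAMPLE_NR_ALGS,
                                              example_simd_algs);
}

static void __exit example_mod_exit(void)
{
        /* Drops both the wrappers and the underlying algorithms. */
        simd_unregister_skciphers(example_algs, EXAMPLE_NR_ALGS,
                                  example_simd_algs);
}

module_init(example_mod_init);
module_exit(example_mod_exit);
MODULE_LICENSE("GPL");
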
diff --git a/crypto/sm4_generic.c b/crypto/sm4_generic.c
new file mode 100644
index 000000000000..f537a2766c55
--- /dev/null
+++ b/crypto/sm4_generic.c
@@ -0,0 +1,244 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * SM4 Cipher Algorithm.
5 *
6 * Copyright (C) 2018 ARM Limited or its affiliates.
7 * All rights reserved.
8 */
9
10#include <crypto/sm4.h>
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/types.h>
14#include <linux/errno.h>
15#include <linux/crypto.h>
16#include <asm/byteorder.h>
17#include <asm/unaligned.h>
18
19static const u32 fk[4] = {
20 0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc
21};
22
23static const u8 sbox[256] = {
24 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7,
25 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05,
26 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3,
27 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
28 0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a,
29 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62,
30 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95,
31 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6,
32 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba,
33 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8,
34 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b,
35 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35,
36 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2,
37 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87,
38 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52,
39 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e,
40 0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5,
41 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1,
42 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55,
43 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3,
44 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60,
45 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f,
46 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f,
47 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51,
48 0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f,
49 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8,
50 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd,
51 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0,
52 0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e,
53 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84,
54 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20,
55 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48
56};
57
58static const u32 ck[] = {
59 0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269,
60 0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9,
61 0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
62 0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9,
63 0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229,
64 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
65 0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209,
66 0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279
67};
68
69static u32 sm4_t_non_lin_sub(u32 x)
70{
71 int i;
72 u8 *b = (u8 *)&x;
73
74 for (i = 0; i < 4; ++i)
75 b[i] = sbox[b[i]];
76
77 return x;
78}
79
80static u32 sm4_key_lin_sub(u32 x)
81{
82 return x ^ rol32(x, 13) ^ rol32(x, 23);
83
84}
85
86static u32 sm4_enc_lin_sub(u32 x)
87{
88 return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
89}
90
91static u32 sm4_key_sub(u32 x)
92{
93 return sm4_key_lin_sub(sm4_t_non_lin_sub(x));
94}
95
96static u32 sm4_enc_sub(u32 x)
97{
98 return sm4_enc_lin_sub(sm4_t_non_lin_sub(x));
99}
100
101static u32 sm4_round(const u32 *x, const u32 rk)
102{
103 return x[0] ^ sm4_enc_sub(x[1] ^ x[2] ^ x[3] ^ rk);
104}
105
106
107/**
108 * crypto_sm4_expand_key - Expands the SM4 key as described in GB/T 32907-2016
109 * @ctx: The location where the computed key will be stored.
110 * @in_key: The supplied key.
111 * @key_len: The length of the supplied key.
112 *
113 * Returns 0 on success. The function fails only if an invalid key size (or
114 * pointer) is supplied.
115 */
116int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key,
117 unsigned int key_len)
118{
119 u32 rk[4], t;
120 const u32 *key = (u32 *)in_key;
121 int i;
122
123 if (key_len != SM4_KEY_SIZE)
124 return -EINVAL;
125
126 for (i = 0; i < 4; ++i)
127 rk[i] = get_unaligned_be32(&key[i]) ^ fk[i];
128
129 for (i = 0; i < 32; ++i) {
130 t = rk[0] ^ sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i]);
131 ctx->rkey_enc[i] = t;
132 rk[0] = rk[1];
133 rk[1] = rk[2];
134 rk[2] = rk[3];
135 rk[3] = t;
136 }
137
138 for (i = 0; i < 32; ++i)
139 ctx->rkey_dec[i] = ctx->rkey_enc[31 - i];
140
141 return 0;
142}
143EXPORT_SYMBOL_GPL(crypto_sm4_expand_key);
144
145/**
146 * crypto_sm4_set_key - Set the SM4 key.
147 * @tfm: The %crypto_tfm that is used in the context.
148 * @in_key: The input key.
149 * @key_len: The size of the key.
150 *
151 * Returns 0 on success, on failure the %CRYPTO_TFM_RES_BAD_KEY_LEN flag in tfm
152 * is set. The function uses crypto_sm4_expand_key() to expand the key.
153 * &crypto_sm4_ctx _must_ be the private data embedded in @tfm which is
154 * retrieved with crypto_tfm_ctx().
155 */
156int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
157 unsigned int key_len)
158{
159 struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
160 u32 *flags = &tfm->crt_flags;
161 int ret;
162
163 ret = crypto_sm4_expand_key(ctx, in_key, key_len);
164 if (!ret)
165 return 0;
166
167 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
168 return -EINVAL;
169}
170EXPORT_SYMBOL_GPL(crypto_sm4_set_key);
171
172static void sm4_do_crypt(const u32 *rk, u32 *out, const u32 *in)
173{
174 u32 x[4], i, t;
175
176 for (i = 0; i < 4; ++i)
177 x[i] = get_unaligned_be32(&in[i]);
178
179 for (i = 0; i < 32; ++i) {
180 t = sm4_round(x, rk[i]);
181 x[0] = x[1];
182 x[1] = x[2];
183 x[2] = x[3];
184 x[3] = t;
185 }
186
187 for (i = 0; i < 4; ++i)
188 put_unaligned_be32(x[3 - i], &out[i]);
189}
190
191/* encrypt a block of text */
192
193static void sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
194{
195 const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
196
197 sm4_do_crypt(ctx->rkey_enc, (u32 *)out, (u32 *)in);
198}
199
200/* decrypt a block of text */
201
202static void sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
203{
204 const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
205
206 sm4_do_crypt(ctx->rkey_dec, (u32 *)out, (u32 *)in);
207}
208
209static struct crypto_alg sm4_alg = {
210 .cra_name = "sm4",
211 .cra_driver_name = "sm4-generic",
212 .cra_priority = 100,
213 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
214 .cra_blocksize = SM4_BLOCK_SIZE,
215 .cra_ctxsize = sizeof(struct crypto_sm4_ctx),
216 .cra_module = THIS_MODULE,
217 .cra_u = {
218 .cipher = {
219 .cia_min_keysize = SM4_KEY_SIZE,
220 .cia_max_keysize = SM4_KEY_SIZE,
221 .cia_setkey = crypto_sm4_set_key,
222 .cia_encrypt = sm4_encrypt,
223 .cia_decrypt = sm4_decrypt
224 }
225 }
226};
227
228static int __init sm4_init(void)
229{
230 return crypto_register_alg(&sm4_alg);
231}
232
233static void __exit sm4_fini(void)
234{
235 crypto_unregister_alg(&sm4_alg);
236}
237
238module_init(sm4_init);
239module_exit(sm4_fini);
240
241MODULE_DESCRIPTION("SM4 Cipher Algorithm");
242MODULE_LICENSE("GPL v2");
243MODULE_ALIAS_CRYPTO("sm4");
244MODULE_ALIAS_CRYPTO("sm4-generic");
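
The generic implementation registers a plain single-block cipher under the name "sm4", so it can be exercised with the bare cipher API. A minimal in-kernel sketch (the sm4_demo module name is illustrative and error handling is trimmed), using the GB/T 32907-2016 Appendix A Example 1 vector that also appears in the crypto/testmgr.h update below:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/string.h>

static int __init sm4_demo_init(void)
{
        static const u8 key[16] = {
                0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
                0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10,
        };
        static const u8 pt[16] = {
                0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
                0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10,
        };
        static const u8 expected[16] = {
                0x68, 0x1e, 0xdf, 0x34, 0xd2, 0x06, 0x96, 0x5e,
                0x86, 0xb3, 0xe9, 0x4f, 0x53, 0x6e, 0x42, 0x46,
        };
        struct crypto_cipher *tfm;
        u8 ct[16];
        int err;

        tfm = crypto_alloc_cipher("sm4", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_cipher_setkey(tfm, key, sizeof(key));
        if (!err) {
                /* Encrypt one 16-byte block and compare with Example 1. */
                crypto_cipher_encrypt_one(tfm, ct, pt);
                pr_info("sm4 demo: %s\n",
                        memcmp(ct, expected, sizeof(ct)) ? "mismatch" : "ok");
        }

        crypto_free_cipher(tfm);
        return err;
}

static void __exit sm4_demo_exit(void)
{
}

module_init(sm4_demo_init);
module_exit(sm4_demo_exit);
MODULE_LICENSE("GPL");
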
diff --git a/crypto/speck.c b/crypto/speck.c
new file mode 100644
index 000000000000..58aa9f7f91f7
--- /dev/null
+++ b/crypto/speck.c
@@ -0,0 +1,307 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Speck: a lightweight block cipher
4 *
5 * Copyright (c) 2018 Google, Inc
6 *
7 * Speck has 10 variants, including 5 block sizes. For now we only implement
8 * the variants Speck128/128, Speck128/192, Speck128/256, Speck64/96, and
9 * Speck64/128. Speck${B}/${K} denotes the variant with a block size of B bits
10 * and a key size of K bits. The Speck128 variants are believed to be the most
11 * secure variants, and they use the same block size and key sizes as AES. The
12 * Speck64 variants are less secure, but on 32-bit processors are usually
13 * faster. The remaining variants (Speck32, Speck48, and Speck96) are even less
14 * secure and/or not as well suited for implementation on either 32-bit or
15 * 64-bit processors, so are omitted.
16 *
17 * Reference: "The Simon and Speck Families of Lightweight Block Ciphers"
18 * https://eprint.iacr.org/2013/404.pdf
19 *
20 * In a correspondence, the Speck designers have also clarified that the words
21 * should be interpreted in little-endian format, and the words should be
22 * ordered such that the first word of each block is 'y' rather than 'x', and
23 * the first key word (rather than the last) becomes the first round key.
24 */
25
26#include <asm/unaligned.h>
27#include <crypto/speck.h>
28#include <linux/bitops.h>
29#include <linux/crypto.h>
30#include <linux/init.h>
31#include <linux/module.h>
32
33/* Speck128 */
34
35static __always_inline void speck128_round(u64 *x, u64 *y, u64 k)
36{
37 *x = ror64(*x, 8);
38 *x += *y;
39 *x ^= k;
40 *y = rol64(*y, 3);
41 *y ^= *x;
42}
43
44static __always_inline void speck128_unround(u64 *x, u64 *y, u64 k)
45{
46 *y ^= *x;
47 *y = ror64(*y, 3);
48 *x ^= k;
49 *x -= *y;
50 *x = rol64(*x, 8);
51}
52
53void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
54 u8 *out, const u8 *in)
55{
56 u64 y = get_unaligned_le64(in);
57 u64 x = get_unaligned_le64(in + 8);
58 int i;
59
60 for (i = 0; i < ctx->nrounds; i++)
61 speck128_round(&x, &y, ctx->round_keys[i]);
62
63 put_unaligned_le64(y, out);
64 put_unaligned_le64(x, out + 8);
65}
66EXPORT_SYMBOL_GPL(crypto_speck128_encrypt);
67
68static void speck128_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
69{
70 crypto_speck128_encrypt(crypto_tfm_ctx(tfm), out, in);
71}
72
73void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
74 u8 *out, const u8 *in)
75{
76 u64 y = get_unaligned_le64(in);
77 u64 x = get_unaligned_le64(in + 8);
78 int i;
79
80 for (i = ctx->nrounds - 1; i >= 0; i--)
81 speck128_unround(&x, &y, ctx->round_keys[i]);
82
83 put_unaligned_le64(y, out);
84 put_unaligned_le64(x, out + 8);
85}
86EXPORT_SYMBOL_GPL(crypto_speck128_decrypt);
87
88static void speck128_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
89{
90 crypto_speck128_decrypt(crypto_tfm_ctx(tfm), out, in);
91}
92
93int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
94 unsigned int keylen)
95{
96 u64 l[3];
97 u64 k;
98 int i;
99
100 switch (keylen) {
101 case SPECK128_128_KEY_SIZE:
102 k = get_unaligned_le64(key);
103 l[0] = get_unaligned_le64(key + 8);
104 ctx->nrounds = SPECK128_128_NROUNDS;
105 for (i = 0; i < ctx->nrounds; i++) {
106 ctx->round_keys[i] = k;
107 speck128_round(&l[0], &k, i);
108 }
109 break;
110 case SPECK128_192_KEY_SIZE:
111 k = get_unaligned_le64(key);
112 l[0] = get_unaligned_le64(key + 8);
113 l[1] = get_unaligned_le64(key + 16);
114 ctx->nrounds = SPECK128_192_NROUNDS;
115 for (i = 0; i < ctx->nrounds; i++) {
116 ctx->round_keys[i] = k;
117 speck128_round(&l[i % 2], &k, i);
118 }
119 break;
120 case SPECK128_256_KEY_SIZE:
121 k = get_unaligned_le64(key);
122 l[0] = get_unaligned_le64(key + 8);
123 l[1] = get_unaligned_le64(key + 16);
124 l[2] = get_unaligned_le64(key + 24);
125 ctx->nrounds = SPECK128_256_NROUNDS;
126 for (i = 0; i < ctx->nrounds; i++) {
127 ctx->round_keys[i] = k;
128 speck128_round(&l[i % 3], &k, i);
129 }
130 break;
131 default:
132 return -EINVAL;
133 }
134
135 return 0;
136}
137EXPORT_SYMBOL_GPL(crypto_speck128_setkey);
138
139static int speck128_setkey(struct crypto_tfm *tfm, const u8 *key,
140 unsigned int keylen)
141{
142 return crypto_speck128_setkey(crypto_tfm_ctx(tfm), key, keylen);
143}
144
145/* Speck64 */
146
147static __always_inline void speck64_round(u32 *x, u32 *y, u32 k)
148{
149 *x = ror32(*x, 8);
150 *x += *y;
151 *x ^= k;
152 *y = rol32(*y, 3);
153 *y ^= *x;
154}
155
156static __always_inline void speck64_unround(u32 *x, u32 *y, u32 k)
157{
158 *y ^= *x;
159 *y = ror32(*y, 3);
160 *x ^= k;
161 *x -= *y;
162 *x = rol32(*x, 8);
163}
164
165void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
166 u8 *out, const u8 *in)
167{
168 u32 y = get_unaligned_le32(in);
169 u32 x = get_unaligned_le32(in + 4);
170 int i;
171
172 for (i = 0; i < ctx->nrounds; i++)
173 speck64_round(&x, &y, ctx->round_keys[i]);
174
175 put_unaligned_le32(y, out);
176 put_unaligned_le32(x, out + 4);
177}
178EXPORT_SYMBOL_GPL(crypto_speck64_encrypt);
179
180static void speck64_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
181{
182 crypto_speck64_encrypt(crypto_tfm_ctx(tfm), out, in);
183}
184
185void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
186 u8 *out, const u8 *in)
187{
188 u32 y = get_unaligned_le32(in);
189 u32 x = get_unaligned_le32(in + 4);
190 int i;
191
192 for (i = ctx->nrounds - 1; i >= 0; i--)
193 speck64_unround(&x, &y, ctx->round_keys[i]);
194
195 put_unaligned_le32(y, out);
196 put_unaligned_le32(x, out + 4);
197}
198EXPORT_SYMBOL_GPL(crypto_speck64_decrypt);
199
200static void speck64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
201{
202 crypto_speck64_decrypt(crypto_tfm_ctx(tfm), out, in);
203}
204
205int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
206 unsigned int keylen)
207{
208 u32 l[3];
209 u32 k;
210 int i;
211
212 switch (keylen) {
213 case SPECK64_96_KEY_SIZE:
214 k = get_unaligned_le32(key);
215 l[0] = get_unaligned_le32(key + 4);
216 l[1] = get_unaligned_le32(key + 8);
217 ctx->nrounds = SPECK64_96_NROUNDS;
218 for (i = 0; i < ctx->nrounds; i++) {
219 ctx->round_keys[i] = k;
220 speck64_round(&l[i % 2], &k, i);
221 }
222 break;
223 case SPECK64_128_KEY_SIZE:
224 k = get_unaligned_le32(key);
225 l[0] = get_unaligned_le32(key + 4);
226 l[1] = get_unaligned_le32(key + 8);
227 l[2] = get_unaligned_le32(key + 12);
228 ctx->nrounds = SPECK64_128_NROUNDS;
229 for (i = 0; i < ctx->nrounds; i++) {
230 ctx->round_keys[i] = k;
231 speck64_round(&l[i % 3], &k, i);
232 }
233 break;
234 default:
235 return -EINVAL;
236 }
237
238 return 0;
239}
240EXPORT_SYMBOL_GPL(crypto_speck64_setkey);
241
242static int speck64_setkey(struct crypto_tfm *tfm, const u8 *key,
243 unsigned int keylen)
244{
245 return crypto_speck64_setkey(crypto_tfm_ctx(tfm), key, keylen);
246}
247
248/* Algorithm definitions */
249
250static struct crypto_alg speck_algs[] = {
251 {
252 .cra_name = "speck128",
253 .cra_driver_name = "speck128-generic",
254 .cra_priority = 100,
255 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
256 .cra_blocksize = SPECK128_BLOCK_SIZE,
257 .cra_ctxsize = sizeof(struct speck128_tfm_ctx),
258 .cra_module = THIS_MODULE,
259 .cra_u = {
260 .cipher = {
261 .cia_min_keysize = SPECK128_128_KEY_SIZE,
262 .cia_max_keysize = SPECK128_256_KEY_SIZE,
263 .cia_setkey = speck128_setkey,
264 .cia_encrypt = speck128_encrypt,
265 .cia_decrypt = speck128_decrypt
266 }
267 }
268 }, {
269 .cra_name = "speck64",
270 .cra_driver_name = "speck64-generic",
271 .cra_priority = 100,
272 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
273 .cra_blocksize = SPECK64_BLOCK_SIZE,
274 .cra_ctxsize = sizeof(struct speck64_tfm_ctx),
275 .cra_module = THIS_MODULE,
276 .cra_u = {
277 .cipher = {
278 .cia_min_keysize = SPECK64_96_KEY_SIZE,
279 .cia_max_keysize = SPECK64_128_KEY_SIZE,
280 .cia_setkey = speck64_setkey,
281 .cia_encrypt = speck64_encrypt,
282 .cia_decrypt = speck64_decrypt
283 }
284 }
285 }
286};
287
288static int __init speck_module_init(void)
289{
290 return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs));
291}
292
293static void __exit speck_module_exit(void)
294{
295 crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs));
296}
297
298module_init(speck_module_init);
299module_exit(speck_module_exit);
300
301MODULE_DESCRIPTION("Speck block cipher (generic)");
302MODULE_LICENSE("GPL");
303MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
304MODULE_ALIAS_CRYPTO("speck128");
305MODULE_ALIAS_CRYPTO("speck128-generic");
306MODULE_ALIAS_CRYPTO("speck64");
307MODULE_ALIAS_CRYPTO("speck64-generic");
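
The header comment of the file notes that the paper's printed vectors are byte-reversed relative to memory and that the first word of each block is y. To make that concrete, here is a standalone userspace re-derivation of Speck128/128 (a sketch, not kernel code; helpers such as get_le64 are local stand-ins for the kernel's unaligned accessors) that reproduces the Speck128/128 test vector added to crypto/testmgr.h below:

/*
 * Standalone userspace sketch of Speck128/128, following the same
 * (y, x) little-endian convention as crypto/speck.c above.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ROR64(v, n) (((v) >> (n)) | ((v) << (64 - (n))))
#define ROL64(v, n) (((v) << (n)) | ((v) >> (64 - (n))))

static uint64_t get_le64(const uint8_t *p)
{
        uint64_t v = 0;
        int i;

        for (i = 7; i >= 0; i--)
                v = (v << 8) | p[i];
        return v;
}

static void put_le64(uint64_t v, uint8_t *p)
{
        int i;

        for (i = 0; i < 8; i++)
                p[i] = v >> (8 * i);
}

/* One Speck128 round, identical to speck128_round() above. */
static void speck128_round(uint64_t *x, uint64_t *y, uint64_t k)
{
        *x = ROR64(*x, 8);
        *x += *y;
        *x ^= k;
        *y = ROL64(*y, 3);
        *y ^= *x;
}

int main(void)
{
        static const uint8_t key[16] = {
                0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
        };
        static const uint8_t pt[16] = {         /* " made it equival" */
                0x20, 0x6d, 0x61, 0x64, 0x65, 0x20, 0x69, 0x74,
                0x20, 0x65, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c,
        };
        static const uint8_t expect[16] = {
                0x18, 0x0d, 0x57, 0x5c, 0xdf, 0xfe, 0x60, 0x78,
                0x65, 0x32, 0x78, 0x79, 0x51, 0x98, 0x5d, 0xa6,
        };
        uint64_t round_keys[32];
        uint64_t k = get_le64(key);     /* first key word */
        uint64_t l = get_le64(key + 8);
        uint64_t y = get_le64(pt);      /* first word of the block is y */
        uint64_t x = get_le64(pt + 8);
        uint8_t ct[16];
        int i;

        /* Key schedule: Speck128/128 uses 32 rounds. */
        for (i = 0; i < 32; i++) {
                round_keys[i] = k;
                speck128_round(&l, &k, (uint64_t)i);
        }

        for (i = 0; i < 32; i++)
                speck128_round(&x, &y, round_keys[i]);

        put_le64(y, ct);
        put_le64(x, ct + 8);
        assert(!memcmp(ct, expect, sizeof(ct)));
        printf("Speck128/128 test vector matches\n");
        return 0;
}
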
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 14213a096fd2..51fe7c8744ae 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1983,6 +1983,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
1983 case 190: 1983 case 190:
1984 ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))"); 1984 ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
1985 break; 1985 break;
1986 case 191:
1987 ret += tcrypt_test("ecb(sm4)");
1988 break;
1986 case 200: 1989 case 200:
1987 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, 1990 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
1988 speed_template_16_24_32); 1991 speed_template_16_24_32);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index d5e23a142a04..af4a01c5037b 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -3001,6 +3001,33 @@ static const struct alg_test_desc alg_test_descs[] = {
3001 } 3001 }
3002 } 3002 }
3003 }, { 3003 }, {
3004 .alg = "ecb(sm4)",
3005 .test = alg_test_skcipher,
3006 .suite = {
3007 .cipher = {
3008 .enc = __VECS(sm4_enc_tv_template),
3009 .dec = __VECS(sm4_dec_tv_template)
3010 }
3011 }
3012 }, {
3013 .alg = "ecb(speck128)",
3014 .test = alg_test_skcipher,
3015 .suite = {
3016 .cipher = {
3017 .enc = __VECS(speck128_enc_tv_template),
3018 .dec = __VECS(speck128_dec_tv_template)
3019 }
3020 }
3021 }, {
3022 .alg = "ecb(speck64)",
3023 .test = alg_test_skcipher,
3024 .suite = {
3025 .cipher = {
3026 .enc = __VECS(speck64_enc_tv_template),
3027 .dec = __VECS(speck64_dec_tv_template)
3028 }
3029 }
3030 }, {
3004 .alg = "ecb(tea)", 3031 .alg = "ecb(tea)",
3005 .test = alg_test_skcipher, 3032 .test = alg_test_skcipher,
3006 .suite = { 3033 .suite = {
@@ -3558,6 +3585,24 @@ static const struct alg_test_desc alg_test_descs[] = {
3558 } 3585 }
3559 } 3586 }
3560 }, { 3587 }, {
3588 .alg = "xts(speck128)",
3589 .test = alg_test_skcipher,
3590 .suite = {
3591 .cipher = {
3592 .enc = __VECS(speck128_xts_enc_tv_template),
3593 .dec = __VECS(speck128_xts_dec_tv_template)
3594 }
3595 }
3596 }, {
3597 .alg = "xts(speck64)",
3598 .test = alg_test_skcipher,
3599 .suite = {
3600 .cipher = {
3601 .enc = __VECS(speck64_xts_enc_tv_template),
3602 .dec = __VECS(speck64_xts_dec_tv_template)
3603 }
3604 }
3605 }, {
3561 .alg = "xts(twofish)", 3606 .alg = "xts(twofish)",
3562 .test = alg_test_skcipher, 3607 .test = alg_test_skcipher,
3563 .suite = { 3608 .suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 6044f6906bd6..004c0a0f8004 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -548,7 +548,7 @@ static const struct akcipher_testvec rsa_tv_template[] = {
548static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = { 548static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = {
549 { 549 {
550 .key = 550 .key =
551 "\x30\x82\x03\x1f\x02\x01\x10\x02\x82\x01\x01\x00\xd7\x1e\x77\x82" 551 "\x30\x82\x03\x1f\x02\x01\x00\x02\x82\x01\x01\x00\xd7\x1e\x77\x82"
552 "\x8c\x92\x31\xe7\x69\x02\xa2\xd5\x5c\x78\xde\xa2\x0c\x8f\xfe\x28" 552 "\x8c\x92\x31\xe7\x69\x02\xa2\xd5\x5c\x78\xde\xa2\x0c\x8f\xfe\x28"
553 "\x59\x31\xdf\x40\x9c\x60\x61\x06\xb9\x2f\x62\x40\x80\x76\xcb\x67" 553 "\x59\x31\xdf\x40\x9c\x60\x61\x06\xb9\x2f\x62\x40\x80\x76\xcb\x67"
554 "\x4a\xb5\x59\x56\x69\x17\x07\xfa\xf9\x4c\xbd\x6c\x37\x7a\x46\x7d" 554 "\x4a\xb5\x59\x56\x69\x17\x07\xfa\xf9\x4c\xbd\x6c\x37\x7a\x46\x7d"
@@ -597,8 +597,8 @@ static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = {
597 "\xfe\xf8\x27\x1b\xd6\x55\x60\x5e\x48\xb7\x6d\x9a\xa8\x37\xf9\x7a" 597 "\xfe\xf8\x27\x1b\xd6\x55\x60\x5e\x48\xb7\x6d\x9a\xa8\x37\xf9\x7a"
598 "\xde\x1b\xcd\x5d\x1a\x30\xd4\xe9\x9e\x5b\x3c\x15\xf8\x9c\x1f\xda" 598 "\xde\x1b\xcd\x5d\x1a\x30\xd4\xe9\x9e\x5b\x3c\x15\xf8\x9c\x1f\xda"
599 "\xd1\x86\x48\x55\xce\x83\xee\x8e\x51\xc7\xde\x32\x12\x47\x7d\x46" 599 "\xd1\x86\x48\x55\xce\x83\xee\x8e\x51\xc7\xde\x32\x12\x47\x7d\x46"
600 "\xb8\x35\xdf\x41\x02\x01\x30\x02\x01\x30\x02\x01\x30\x02\x01\x30" 600 "\xb8\x35\xdf\x41\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00"
601 "\x02\x01\x30", 601 "\x02\x01\x00",
602 .key_len = 804, 602 .key_len = 804,
603 /* 603 /*
604 * m is SHA256 hash of following message: 604 * m is SHA256 hash of following message:
@@ -2044,6 +2044,265 @@ static const struct hash_testvec crct10dif_tv_template[] = {
2044 .digest = (u8 *)(u16 []){ 0x44c6 }, 2044 .digest = (u8 *)(u16 []){ 0x44c6 },
2045 .np = 4, 2045 .np = 4,
2046 .tap = { 1, 255, 57, 6 }, 2046 .tap = { 1, 255, 57, 6 },
2047 }, {
2048 .plaintext = "\x6e\x05\x79\x10\xa7\x1b\xb2\x49"
2049 "\xe0\x54\xeb\x82\x19\x8d\x24\xbb"
2050 "\x2f\xc6\x5d\xf4\x68\xff\x96\x0a"
2051 "\xa1\x38\xcf\x43\xda\x71\x08\x7c"
2052 "\x13\xaa\x1e\xb5\x4c\xe3\x57\xee"
2053 "\x85\x1c\x90\x27\xbe\x32\xc9\x60"
2054 "\xf7\x6b\x02\x99\x0d\xa4\x3b\xd2"
2055 "\x46\xdd\x74\x0b\x7f\x16\xad\x21"
2056 "\xb8\x4f\xe6\x5a\xf1\x88\x1f\x93"
2057 "\x2a\xc1\x35\xcc\x63\xfa\x6e\x05"
2058 "\x9c\x10\xa7\x3e\xd5\x49\xe0\x77"
2059 "\x0e\x82\x19\xb0\x24\xbb\x52\xe9"
2060 "\x5d\xf4\x8b\x22\x96\x2d\xc4\x38"
2061 "\xcf\x66\xfd\x71\x08\x9f\x13\xaa"
2062 "\x41\xd8\x4c\xe3\x7a\x11\x85\x1c"
2063 "\xb3\x27\xbe\x55\xec\x60\xf7\x8e"
2064 "\x02\x99\x30\xc7\x3b\xd2\x69\x00"
2065 "\x74\x0b\xa2\x16\xad\x44\xdb\x4f"
2066 "\xe6\x7d\x14\x88\x1f\xb6\x2a\xc1"
2067 "\x58\xef\x63\xfa\x91\x05\x9c\x33"
2068 "\xca\x3e\xd5\x6c\x03\x77\x0e\xa5"
2069 "\x19\xb0\x47\xde\x52\xe9\x80\x17"
2070 "\x8b\x22\xb9\x2d\xc4\x5b\xf2\x66"
2071 "\xfd\x94\x08\x9f\x36\xcd\x41\xd8"
2072 "\x6f\x06\x7a\x11\xa8\x1c\xb3\x4a"
2073 "\xe1\x55\xec\x83\x1a\x8e\x25\xbc"
2074 "\x30\xc7\x5e\xf5\x69\x00\x97\x0b"
2075 "\xa2\x39\xd0\x44\xdb\x72\x09\x7d"
2076 "\x14\xab\x1f\xb6\x4d\xe4\x58\xef"
2077 "\x86\x1d\x91\x28\xbf\x33\xca\x61"
2078 "\xf8\x6c\x03\x9a\x0e\xa5\x3c\xd3"
2079 "\x47\xde\x75\x0c\x80\x17\xae\x22"
2080 "\xb9\x50\xe7\x5b\xf2\x89\x20\x94"
2081 "\x2b\xc2\x36\xcd\x64\xfb\x6f\x06"
2082 "\x9d\x11\xa8\x3f\xd6\x4a\xe1\x78"
2083 "\x0f\x83\x1a\xb1\x25\xbc\x53\xea"
2084 "\x5e\xf5\x8c\x00\x97\x2e\xc5\x39"
2085 "\xd0\x67\xfe\x72\x09\xa0\x14\xab"
2086 "\x42\xd9\x4d\xe4\x7b\x12\x86\x1d"
2087 "\xb4\x28\xbf\x56\xed\x61\xf8\x8f"
2088 "\x03\x9a\x31\xc8\x3c\xd3\x6a\x01"
2089 "\x75\x0c\xa3\x17\xae\x45\xdc\x50"
2090 "\xe7\x7e\x15\x89\x20\xb7\x2b\xc2"
2091 "\x59\xf0\x64\xfb\x92\x06\x9d\x34"
2092 "\xcb\x3f\xd6\x6d\x04\x78\x0f\xa6"
2093 "\x1a\xb1\x48\xdf\x53\xea\x81\x18"
2094 "\x8c\x23\xba\x2e\xc5\x5c\xf3\x67"
2095 "\xfe\x95\x09\xa0\x37\xce\x42\xd9"
2096 "\x70\x07\x7b\x12\xa9\x1d\xb4\x4b"
2097 "\xe2\x56\xed\x84\x1b\x8f\x26\xbd"
2098 "\x31\xc8\x5f\xf6\x6a\x01\x98\x0c"
2099 "\xa3\x3a\xd1\x45\xdc\x73\x0a\x7e"
2100 "\x15\xac\x20\xb7\x4e\xe5\x59\xf0"
2101 "\x87\x1e\x92\x29\xc0\x34\xcb\x62"
2102 "\xf9\x6d\x04\x9b\x0f\xa6\x3d\xd4"
2103 "\x48\xdf\x76\x0d\x81\x18\xaf\x23"
2104 "\xba\x51\xe8\x5c\xf3\x8a\x21\x95"
2105 "\x2c\xc3\x37\xce\x65\xfc\x70\x07"
2106 "\x9e\x12\xa9\x40\xd7\x4b\xe2\x79"
2107 "\x10\x84\x1b\xb2\x26\xbd\x54\xeb"
2108 "\x5f\xf6\x8d\x01\x98\x2f\xc6\x3a"
2109 "\xd1\x68\xff\x73\x0a\xa1\x15\xac"
2110 "\x43\xda\x4e\xe5\x7c\x13\x87\x1e"
2111 "\xb5\x29\xc0\x57\xee\x62\xf9\x90"
2112 "\x04\x9b\x32\xc9\x3d\xd4\x6b\x02"
2113 "\x76\x0d\xa4\x18\xaf\x46\xdd\x51"
2114 "\xe8\x7f\x16\x8a\x21\xb8\x2c\xc3"
2115 "\x5a\xf1\x65\xfc\x93\x07\x9e\x35"
2116 "\xcc\x40\xd7\x6e\x05\x79\x10\xa7"
2117 "\x1b\xb2\x49\xe0\x54\xeb\x82\x19"
2118 "\x8d\x24\xbb\x2f\xc6\x5d\xf4\x68"
2119 "\xff\x96\x0a\xa1\x38\xcf\x43\xda"
2120 "\x71\x08\x7c\x13\xaa\x1e\xb5\x4c"
2121 "\xe3\x57\xee\x85\x1c\x90\x27\xbe"
2122 "\x32\xc9\x60\xf7\x6b\x02\x99\x0d"
2123 "\xa4\x3b\xd2\x46\xdd\x74\x0b\x7f"
2124 "\x16\xad\x21\xb8\x4f\xe6\x5a\xf1"
2125 "\x88\x1f\x93\x2a\xc1\x35\xcc\x63"
2126 "\xfa\x6e\x05\x9c\x10\xa7\x3e\xd5"
2127 "\x49\xe0\x77\x0e\x82\x19\xb0\x24"
2128 "\xbb\x52\xe9\x5d\xf4\x8b\x22\x96"
2129 "\x2d\xc4\x38\xcf\x66\xfd\x71\x08"
2130 "\x9f\x13\xaa\x41\xd8\x4c\xe3\x7a"
2131 "\x11\x85\x1c\xb3\x27\xbe\x55\xec"
2132 "\x60\xf7\x8e\x02\x99\x30\xc7\x3b"
2133 "\xd2\x69\x00\x74\x0b\xa2\x16\xad"
2134 "\x44\xdb\x4f\xe6\x7d\x14\x88\x1f"
2135 "\xb6\x2a\xc1\x58\xef\x63\xfa\x91"
2136 "\x05\x9c\x33\xca\x3e\xd5\x6c\x03"
2137 "\x77\x0e\xa5\x19\xb0\x47\xde\x52"
2138 "\xe9\x80\x17\x8b\x22\xb9\x2d\xc4"
2139 "\x5b\xf2\x66\xfd\x94\x08\x9f\x36"
2140 "\xcd\x41\xd8\x6f\x06\x7a\x11\xa8"
2141 "\x1c\xb3\x4a\xe1\x55\xec\x83\x1a"
2142 "\x8e\x25\xbc\x30\xc7\x5e\xf5\x69"
2143 "\x00\x97\x0b\xa2\x39\xd0\x44\xdb"
2144 "\x72\x09\x7d\x14\xab\x1f\xb6\x4d"
2145 "\xe4\x58\xef\x86\x1d\x91\x28\xbf"
2146 "\x33\xca\x61\xf8\x6c\x03\x9a\x0e"
2147 "\xa5\x3c\xd3\x47\xde\x75\x0c\x80"
2148 "\x17\xae\x22\xb9\x50\xe7\x5b\xf2"
2149 "\x89\x20\x94\x2b\xc2\x36\xcd\x64"
2150 "\xfb\x6f\x06\x9d\x11\xa8\x3f\xd6"
2151 "\x4a\xe1\x78\x0f\x83\x1a\xb1\x25"
2152 "\xbc\x53\xea\x5e\xf5\x8c\x00\x97"
2153 "\x2e\xc5\x39\xd0\x67\xfe\x72\x09"
2154 "\xa0\x14\xab\x42\xd9\x4d\xe4\x7b"
2155 "\x12\x86\x1d\xb4\x28\xbf\x56\xed"
2156 "\x61\xf8\x8f\x03\x9a\x31\xc8\x3c"
2157 "\xd3\x6a\x01\x75\x0c\xa3\x17\xae"
2158 "\x45\xdc\x50\xe7\x7e\x15\x89\x20"
2159 "\xb7\x2b\xc2\x59\xf0\x64\xfb\x92"
2160 "\x06\x9d\x34\xcb\x3f\xd6\x6d\x04"
2161 "\x78\x0f\xa6\x1a\xb1\x48\xdf\x53"
2162 "\xea\x81\x18\x8c\x23\xba\x2e\xc5"
2163 "\x5c\xf3\x67\xfe\x95\x09\xa0\x37"
2164 "\xce\x42\xd9\x70\x07\x7b\x12\xa9"
2165 "\x1d\xb4\x4b\xe2\x56\xed\x84\x1b"
2166 "\x8f\x26\xbd\x31\xc8\x5f\xf6\x6a"
2167 "\x01\x98\x0c\xa3\x3a\xd1\x45\xdc"
2168 "\x73\x0a\x7e\x15\xac\x20\xb7\x4e"
2169 "\xe5\x59\xf0\x87\x1e\x92\x29\xc0"
2170 "\x34\xcb\x62\xf9\x6d\x04\x9b\x0f"
2171 "\xa6\x3d\xd4\x48\xdf\x76\x0d\x81"
2172 "\x18\xaf\x23\xba\x51\xe8\x5c\xf3"
2173 "\x8a\x21\x95\x2c\xc3\x37\xce\x65"
2174 "\xfc\x70\x07\x9e\x12\xa9\x40\xd7"
2175 "\x4b\xe2\x79\x10\x84\x1b\xb2\x26"
2176 "\xbd\x54\xeb\x5f\xf6\x8d\x01\x98"
2177 "\x2f\xc6\x3a\xd1\x68\xff\x73\x0a"
2178 "\xa1\x15\xac\x43\xda\x4e\xe5\x7c"
2179 "\x13\x87\x1e\xb5\x29\xc0\x57\xee"
2180 "\x62\xf9\x90\x04\x9b\x32\xc9\x3d"
2181 "\xd4\x6b\x02\x76\x0d\xa4\x18\xaf"
2182 "\x46\xdd\x51\xe8\x7f\x16\x8a\x21"
2183 "\xb8\x2c\xc3\x5a\xf1\x65\xfc\x93"
2184 "\x07\x9e\x35\xcc\x40\xd7\x6e\x05"
2185 "\x79\x10\xa7\x1b\xb2\x49\xe0\x54"
2186 "\xeb\x82\x19\x8d\x24\xbb\x2f\xc6"
2187 "\x5d\xf4\x68\xff\x96\x0a\xa1\x38"
2188 "\xcf\x43\xda\x71\x08\x7c\x13\xaa"
2189 "\x1e\xb5\x4c\xe3\x57\xee\x85\x1c"
2190 "\x90\x27\xbe\x32\xc9\x60\xf7\x6b"
2191 "\x02\x99\x0d\xa4\x3b\xd2\x46\xdd"
2192 "\x74\x0b\x7f\x16\xad\x21\xb8\x4f"
2193 "\xe6\x5a\xf1\x88\x1f\x93\x2a\xc1"
2194 "\x35\xcc\x63\xfa\x6e\x05\x9c\x10"
2195 "\xa7\x3e\xd5\x49\xe0\x77\x0e\x82"
2196 "\x19\xb0\x24\xbb\x52\xe9\x5d\xf4"
2197 "\x8b\x22\x96\x2d\xc4\x38\xcf\x66"
2198 "\xfd\x71\x08\x9f\x13\xaa\x41\xd8"
2199 "\x4c\xe3\x7a\x11\x85\x1c\xb3\x27"
2200 "\xbe\x55\xec\x60\xf7\x8e\x02\x99"
2201 "\x30\xc7\x3b\xd2\x69\x00\x74\x0b"
2202 "\xa2\x16\xad\x44\xdb\x4f\xe6\x7d"
2203 "\x14\x88\x1f\xb6\x2a\xc1\x58\xef"
2204 "\x63\xfa\x91\x05\x9c\x33\xca\x3e"
2205 "\xd5\x6c\x03\x77\x0e\xa5\x19\xb0"
2206 "\x47\xde\x52\xe9\x80\x17\x8b\x22"
2207 "\xb9\x2d\xc4\x5b\xf2\x66\xfd\x94"
2208 "\x08\x9f\x36\xcd\x41\xd8\x6f\x06"
2209 "\x7a\x11\xa8\x1c\xb3\x4a\xe1\x55"
2210 "\xec\x83\x1a\x8e\x25\xbc\x30\xc7"
2211 "\x5e\xf5\x69\x00\x97\x0b\xa2\x39"
2212 "\xd0\x44\xdb\x72\x09\x7d\x14\xab"
2213 "\x1f\xb6\x4d\xe4\x58\xef\x86\x1d"
2214 "\x91\x28\xbf\x33\xca\x61\xf8\x6c"
2215 "\x03\x9a\x0e\xa5\x3c\xd3\x47\xde"
2216 "\x75\x0c\x80\x17\xae\x22\xb9\x50"
2217 "\xe7\x5b\xf2\x89\x20\x94\x2b\xc2"
2218 "\x36\xcd\x64\xfb\x6f\x06\x9d\x11"
2219 "\xa8\x3f\xd6\x4a\xe1\x78\x0f\x83"
2220 "\x1a\xb1\x25\xbc\x53\xea\x5e\xf5"
2221 "\x8c\x00\x97\x2e\xc5\x39\xd0\x67"
2222 "\xfe\x72\x09\xa0\x14\xab\x42\xd9"
2223 "\x4d\xe4\x7b\x12\x86\x1d\xb4\x28"
2224 "\xbf\x56\xed\x61\xf8\x8f\x03\x9a"
2225 "\x31\xc8\x3c\xd3\x6a\x01\x75\x0c"
2226 "\xa3\x17\xae\x45\xdc\x50\xe7\x7e"
2227 "\x15\x89\x20\xb7\x2b\xc2\x59\xf0"
2228 "\x64\xfb\x92\x06\x9d\x34\xcb\x3f"
2229 "\xd6\x6d\x04\x78\x0f\xa6\x1a\xb1"
2230 "\x48\xdf\x53\xea\x81\x18\x8c\x23"
2231 "\xba\x2e\xc5\x5c\xf3\x67\xfe\x95"
2232 "\x09\xa0\x37\xce\x42\xd9\x70\x07"
2233 "\x7b\x12\xa9\x1d\xb4\x4b\xe2\x56"
2234 "\xed\x84\x1b\x8f\x26\xbd\x31\xc8"
2235 "\x5f\xf6\x6a\x01\x98\x0c\xa3\x3a"
2236 "\xd1\x45\xdc\x73\x0a\x7e\x15\xac"
2237 "\x20\xb7\x4e\xe5\x59\xf0\x87\x1e"
2238 "\x92\x29\xc0\x34\xcb\x62\xf9\x6d"
2239 "\x04\x9b\x0f\xa6\x3d\xd4\x48\xdf"
2240 "\x76\x0d\x81\x18\xaf\x23\xba\x51"
2241 "\xe8\x5c\xf3\x8a\x21\x95\x2c\xc3"
2242 "\x37\xce\x65\xfc\x70\x07\x9e\x12"
2243 "\xa9\x40\xd7\x4b\xe2\x79\x10\x84"
2244 "\x1b\xb2\x26\xbd\x54\xeb\x5f\xf6"
2245 "\x8d\x01\x98\x2f\xc6\x3a\xd1\x68"
2246 "\xff\x73\x0a\xa1\x15\xac\x43\xda"
2247 "\x4e\xe5\x7c\x13\x87\x1e\xb5\x29"
2248 "\xc0\x57\xee\x62\xf9\x90\x04\x9b"
2249 "\x32\xc9\x3d\xd4\x6b\x02\x76\x0d"
2250 "\xa4\x18\xaf\x46\xdd\x51\xe8\x7f"
2251 "\x16\x8a\x21\xb8\x2c\xc3\x5a\xf1"
2252 "\x65\xfc\x93\x07\x9e\x35\xcc\x40"
2253 "\xd7\x6e\x05\x79\x10\xa7\x1b\xb2"
2254 "\x49\xe0\x54\xeb\x82\x19\x8d\x24"
2255 "\xbb\x2f\xc6\x5d\xf4\x68\xff\x96"
2256 "\x0a\xa1\x38\xcf\x43\xda\x71\x08"
2257 "\x7c\x13\xaa\x1e\xb5\x4c\xe3\x57"
2258 "\xee\x85\x1c\x90\x27\xbe\x32\xc9"
2259 "\x60\xf7\x6b\x02\x99\x0d\xa4\x3b"
2260 "\xd2\x46\xdd\x74\x0b\x7f\x16\xad"
2261 "\x21\xb8\x4f\xe6\x5a\xf1\x88\x1f"
2262 "\x93\x2a\xc1\x35\xcc\x63\xfa\x6e"
2263 "\x05\x9c\x10\xa7\x3e\xd5\x49\xe0"
2264 "\x77\x0e\x82\x19\xb0\x24\xbb\x52"
2265 "\xe9\x5d\xf4\x8b\x22\x96\x2d\xc4"
2266 "\x38\xcf\x66\xfd\x71\x08\x9f\x13"
2267 "\xaa\x41\xd8\x4c\xe3\x7a\x11\x85"
2268 "\x1c\xb3\x27\xbe\x55\xec\x60\xf7"
2269 "\x8e\x02\x99\x30\xc7\x3b\xd2\x69"
2270 "\x00\x74\x0b\xa2\x16\xad\x44\xdb"
2271 "\x4f\xe6\x7d\x14\x88\x1f\xb6\x2a"
2272 "\xc1\x58\xef\x63\xfa\x91\x05\x9c"
2273 "\x33\xca\x3e\xd5\x6c\x03\x77\x0e"
2274 "\xa5\x19\xb0\x47\xde\x52\xe9\x80"
2275 "\x17\x8b\x22\xb9\x2d\xc4\x5b\xf2"
2276 "\x66\xfd\x94\x08\x9f\x36\xcd\x41"
2277 "\xd8\x6f\x06\x7a\x11\xa8\x1c\xb3"
2278 "\x4a\xe1\x55\xec\x83\x1a\x8e\x25"
2279 "\xbc\x30\xc7\x5e\xf5\x69\x00\x97"
2280 "\x0b\xa2\x39\xd0\x44\xdb\x72\x09"
2281 "\x7d\x14\xab\x1f\xb6\x4d\xe4\x58"
2282 "\xef\x86\x1d\x91\x28\xbf\x33\xca"
2283 "\x61\xf8\x6c\x03\x9a\x0e\xa5\x3c"
2284 "\xd3\x47\xde\x75\x0c\x80\x17\xae"
2285 "\x22\xb9\x50\xe7\x5b\xf2\x89\x20"
2286 "\x94\x2b\xc2\x36\xcd\x64\xfb\x6f"
2287 "\x06\x9d\x11\xa8\x3f\xd6\x4a\xe1"
2288 "\x78\x0f\x83\x1a\xb1\x25\xbc\x53"
2289 "\xea\x5e\xf5\x8c\x00\x97\x2e\xc5"
2290 "\x39\xd0\x67\xfe\x72\x09\xa0\x14"
2291 "\xab\x42\xd9\x4d\xe4\x7b\x12\x86"
2292 "\x1d\xb4\x28\xbf\x56\xed\x61\xf8"
2293 "\x8f\x03\x9a\x31\xc8\x3c\xd3\x6a"
2294 "\x01\x75\x0c\xa3\x17\xae\x45\xdc"
2295 "\x50\xe7\x7e\x15\x89\x20\xb7\x2b"
2296 "\xc2\x59\xf0\x64\xfb\x92\x06\x9d"
2297 "\x34\xcb\x3f\xd6\x6d\x04\x78\x0f"
2298 "\xa6\x1a\xb1\x48\xdf\x53\xea\x81"
2299 "\x18\x8c\x23\xba\x2e\xc5\x5c\xf3"
2300 "\x67\xfe\x95\x09\xa0\x37\xce\x42"
2301 "\xd9\x70\x07\x7b\x12\xa9\x1d\xb4"
2302 "\x4b\xe2\x56\xed\x84\x1b\x8f\x26"
2303 "\xbd\x31\xc8\x5f\xf6\x6a\x01\x98",
2304 .psize = 2048,
2305 .digest = (u8 *)(u16 []){ 0x23ca },
2047 } 2306 }
2048}; 2307};
2049 2308
@@ -14323,6 +14582,1623 @@ static const struct cipher_testvec serpent_xts_dec_tv_template[] = {
14323 }, 14582 },
14324}; 14583};
14325 14584
14585/*
14586 * SM4 test vector taken from the draft RFC
14587 * https://tools.ietf.org/html/draft-crypto-sm4-00#ref-GBT.32907-2016
14588 */
14589
14590static const struct cipher_testvec sm4_enc_tv_template[] = {
14591 { /* SM4 Appendix A: Example Calculations. Example 1. */
14592 .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
14593 "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
14594 .klen = 16,
14595 .input = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
14596 "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
14597 .ilen = 16,
14598 .result = "\x68\x1E\xDF\x34\xD2\x06\x96\x5E"
14599 "\x86\xB3\xE9\x4F\x53\x6E\x42\x46",
14600 .rlen = 16,
14601 }, { /*
14602 * SM4 Appendix A: Example Calculations.
14603 * Last 10 iterations of Example 2.
14604 */
14605 .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
14606 "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
14607 .klen = 16,
14608 .input = "\x99\x4a\xc3\xe7\xc3\x57\x89\x6a"
14609 "\x81\xfc\xa8\xe\x38\x3e\xef\x80"
14610 "\xb1\x98\xf2\xde\x3f\x4b\xae\xd1"
14611 "\xf0\xf1\x30\x4c\x1\x27\x5a\x8f"
14612 "\x45\xe1\x39\xb7\xae\xff\x1f\x27"
14613 "\xad\x57\x15\xab\x31\x5d\xc\xef"
14614 "\x8c\xc8\x80\xbd\x11\x98\xf3\x7b"
14615 "\xa2\xdd\x14\x20\xf9\xe8\xbb\x82"
14616 "\xf7\x32\xca\x4b\xa8\xf7\xb3\x4d"
14617 "\x27\xd1\xcd\xe6\xb6\x65\x5a\x23"
14618 "\xc2\xf3\x54\x84\x53\xe3\xb9\x20"
14619 "\xa5\x37\x0\xbe\xe7\x7b\x48\xfb"
14620 "\x21\x3d\x9e\x48\x1d\x9e\xf5\xbf"
14621 "\x77\xd5\xb4\x4a\x53\x71\x94\x7a"
14622 "\x88\xa6\x6e\x6\x93\xca\x43\xa5"
14623 "\xc4\xf6\xcd\x53\x4b\x7b\x8e\xfe"
14624 "\xb4\x28\x7c\x42\x29\x32\x5d\x88"
14625 "\xed\xce\x0\x19\xe\x16\x2\x6e"
14626 "\x87\xff\x2c\xac\xe8\xe7\xe9\xbf"
14627 "\x31\x51\xec\x47\xc3\x51\x83\xc1",
14628 .ilen = 160,
14629 .result = "\xb1\x98\xf2\xde\x3f\x4b\xae\xd1"
14630 "\xf0\xf1\x30\x4c\x1\x27\x5a\x8f"
14631 "\x45\xe1\x39\xb7\xae\xff\x1f\x27"
14632 "\xad\x57\x15\xab\x31\x5d\xc\xef"
14633 "\x8c\xc8\x80\xbd\x11\x98\xf3\x7b"
14634 "\xa2\xdd\x14\x20\xf9\xe8\xbb\x82"
14635 "\xf7\x32\xca\x4b\xa8\xf7\xb3\x4d"
14636 "\x27\xd1\xcd\xe6\xb6\x65\x5a\x23"
14637 "\xc2\xf3\x54\x84\x53\xe3\xb9\x20"
14638 "\xa5\x37\x0\xbe\xe7\x7b\x48\xfb"
14639 "\x21\x3d\x9e\x48\x1d\x9e\xf5\xbf"
14640 "\x77\xd5\xb4\x4a\x53\x71\x94\x7a"
14641 "\x88\xa6\x6e\x6\x93\xca\x43\xa5"
14642 "\xc4\xf6\xcd\x53\x4b\x7b\x8e\xfe"
14643 "\xb4\x28\x7c\x42\x29\x32\x5d\x88"
14644 "\xed\xce\x0\x19\xe\x16\x2\x6e"
14645 "\x87\xff\x2c\xac\xe8\xe7\xe9\xbf"
14646 "\x31\x51\xec\x47\xc3\x51\x83\xc1"
14647 "\x59\x52\x98\xc7\xc6\xfd\x27\x1f"
14648 "\x4\x2\xf8\x4\xc3\x3d\x3f\x66",
14649 .rlen = 160
14650 }
14651};
14652
14653static const struct cipher_testvec sm4_dec_tv_template[] = {
14654 { /* SM4 Appendix A: Example Calculations. Example 1. */
14655 .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
14656 "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
14657 .klen = 16,
14658 .input = "\x68\x1E\xDF\x34\xD2\x06\x96\x5E"
14659 "\x86\xB3\xE9\x4F\x53\x6E\x42\x46",
14660 .ilen = 16,
14661 .result = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
14662 "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
14663 .rlen = 16,
14664 }, { /*
14665 * SM4 Appendix A: Example Calculations.
14666 * Last 10 iterations of Example 2.
14667 */
14668 .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
14669 "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
14670 .klen = 16,
14671 .input = "\xb1\x98\xf2\xde\x3f\x4b\xae\xd1"
14672 "\xf0\xf1\x30\x4c\x1\x27\x5a\x8f"
14673 "\x45\xe1\x39\xb7\xae\xff\x1f\x27"
14674 "\xad\x57\x15\xab\x31\x5d\xc\xef"
14675 "\x8c\xc8\x80\xbd\x11\x98\xf3\x7b"
14676 "\xa2\xdd\x14\x20\xf9\xe8\xbb\x82"
14677 "\xf7\x32\xca\x4b\xa8\xf7\xb3\x4d"
14678 "\x27\xd1\xcd\xe6\xb6\x65\x5a\x23"
14679 "\xc2\xf3\x54\x84\x53\xe3\xb9\x20"
14680 "\xa5\x37\x0\xbe\xe7\x7b\x48\xfb"
14681 "\x21\x3d\x9e\x48\x1d\x9e\xf5\xbf"
14682 "\x77\xd5\xb4\x4a\x53\x71\x94\x7a"
14683 "\x88\xa6\x6e\x6\x93\xca\x43\xa5"
14684 "\xc4\xf6\xcd\x53\x4b\x7b\x8e\xfe"
14685 "\xb4\x28\x7c\x42\x29\x32\x5d\x88"
14686 "\xed\xce\x0\x19\xe\x16\x2\x6e"
14687 "\x87\xff\x2c\xac\xe8\xe7\xe9\xbf"
14688 "\x31\x51\xec\x47\xc3\x51\x83\xc1"
14689 "\x59\x52\x98\xc7\xc6\xfd\x27\x1f"
14690 "\x4\x2\xf8\x4\xc3\x3d\x3f\x66",
14691 .ilen = 160,
14692 .result = "\x99\x4a\xc3\xe7\xc3\x57\x89\x6a"
14693 "\x81\xfc\xa8\xe\x38\x3e\xef\x80"
14694 "\xb1\x98\xf2\xde\x3f\x4b\xae\xd1"
14695 "\xf0\xf1\x30\x4c\x1\x27\x5a\x8f"
14696 "\x45\xe1\x39\xb7\xae\xff\x1f\x27"
14697 "\xad\x57\x15\xab\x31\x5d\xc\xef"
14698 "\x8c\xc8\x80\xbd\x11\x98\xf3\x7b"
14699 "\xa2\xdd\x14\x20\xf9\xe8\xbb\x82"
14700 "\xf7\x32\xca\x4b\xa8\xf7\xb3\x4d"
14701 "\x27\xd1\xcd\xe6\xb6\x65\x5a\x23"
14702 "\xc2\xf3\x54\x84\x53\xe3\xb9\x20"
14703 "\xa5\x37\x0\xbe\xe7\x7b\x48\xfb"
14704 "\x21\x3d\x9e\x48\x1d\x9e\xf5\xbf"
14705 "\x77\xd5\xb4\x4a\x53\x71\x94\x7a"
14706 "\x88\xa6\x6e\x6\x93\xca\x43\xa5"
14707 "\xc4\xf6\xcd\x53\x4b\x7b\x8e\xfe"
14708 "\xb4\x28\x7c\x42\x29\x32\x5d\x88"
14709 "\xed\xce\x0\x19\xe\x16\x2\x6e"
14710 "\x87\xff\x2c\xac\xe8\xe7\xe9\xbf"
14711 "\x31\x51\xec\x47\xc3\x51\x83\xc1",
14712 .rlen = 160
14713 }
14714};
14715
14716/*
14717 * Speck test vectors taken from the original paper:
14718 * "The Simon and Speck Families of Lightweight Block Ciphers"
14719 * https://eprint.iacr.org/2013/404.pdf
14720 *
14721 * Note that the paper does not make byte and word order clear. But it was
14722 * confirmed with the authors that the intended orders are little endian byte
14723 * order and (y, x) word order. Equivalently, the printed test vectors, when
14724 * looking at only the bytes (ignoring the whitespace that divides them into
14725 * words), are backwards: the left-most byte is actually the one with the
14726 * highest memory address, while the right-most byte is actually the one with
14727 * the lowest memory address.
14728 */
14729
14730static const struct cipher_testvec speck128_enc_tv_template[] = {
14731 { /* Speck128/128 */
14732 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
14733 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
14734 .klen = 16,
14735 .input = "\x20\x6d\x61\x64\x65\x20\x69\x74"
14736 "\x20\x65\x71\x75\x69\x76\x61\x6c",
14737 .ilen = 16,
14738 .result = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78"
14739 "\x65\x32\x78\x79\x51\x98\x5d\xa6",
14740 .rlen = 16,
14741 }, { /* Speck128/192 */
14742 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
14743 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
14744 "\x10\x11\x12\x13\x14\x15\x16\x17",
14745 .klen = 24,
14746 .input = "\x65\x6e\x74\x20\x74\x6f\x20\x43"
14747 "\x68\x69\x65\x66\x20\x48\x61\x72",
14748 .ilen = 16,
14749 .result = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9"
14750 "\x66\x55\x13\x13\x3a\xcf\xe4\x1b",
14751 .rlen = 16,
14752 }, { /* Speck128/256 */
14753 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
14754 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
14755 "\x10\x11\x12\x13\x14\x15\x16\x17"
14756 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
14757 .klen = 32,
14758 .input = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20"
14759 "\x49\x6e\x20\x74\x68\x6f\x73\x65",
14760 .ilen = 16,
14761 .result = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e"
14762 "\x3e\xf5\xc0\x05\x04\x01\x09\x41",
14763 .rlen = 16,
14764 },
14765};
14766
14767static const struct cipher_testvec speck128_dec_tv_template[] = {
14768 { /* Speck128/128 */
14769 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
14770 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
14771 .klen = 16,
14772 .input = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78"
14773 "\x65\x32\x78\x79\x51\x98\x5d\xa6",
14774 .ilen = 16,
14775 .result = "\x20\x6d\x61\x64\x65\x20\x69\x74"
14776 "\x20\x65\x71\x75\x69\x76\x61\x6c",
14777 .rlen = 16,
14778 }, { /* Speck128/192 */
14779 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
14780 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
14781 "\x10\x11\x12\x13\x14\x15\x16\x17",
14782 .klen = 24,
14783 .input = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9"
14784 "\x66\x55\x13\x13\x3a\xcf\xe4\x1b",
14785 .ilen = 16,
14786 .result = "\x65\x6e\x74\x20\x74\x6f\x20\x43"
14787 "\x68\x69\x65\x66\x20\x48\x61\x72",
14788 .rlen = 16,
14789 }, { /* Speck128/256 */
14790 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
14791 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
14792 "\x10\x11\x12\x13\x14\x15\x16\x17"
14793 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
14794 .klen = 32,
14795 .input = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e"
14796 "\x3e\xf5\xc0\x05\x04\x01\x09\x41",
14797 .ilen = 16,
14798 .result = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20"
14799 "\x49\x6e\x20\x74\x68\x6f\x73\x65",
14800 .rlen = 16,
14801 },
14802};
14803
14804/*
14805 * Speck128-XTS test vectors, taken from the AES-XTS test vectors with the
14806 * result recomputed with Speck128 as the cipher
14807 */
14808
14809static const struct cipher_testvec speck128_xts_enc_tv_template[] = {
14810 {
14811 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
14812 "\x00\x00\x00\x00\x00\x00\x00\x00"
14813 "\x00\x00\x00\x00\x00\x00\x00\x00"
14814 "\x00\x00\x00\x00\x00\x00\x00\x00",
14815 .klen = 32,
14816 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
14817 "\x00\x00\x00\x00\x00\x00\x00\x00",
14818 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
14819 "\x00\x00\x00\x00\x00\x00\x00\x00"
14820 "\x00\x00\x00\x00\x00\x00\x00\x00"
14821 "\x00\x00\x00\x00\x00\x00\x00\x00",
14822 .ilen = 32,
14823 .result = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62"
14824 "\x3b\x99\x4a\x64\x74\x77\xac\xed"
14825 "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42"
14826 "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54",
14827 .rlen = 32,
14828 }, {
14829 .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
14830 "\x11\x11\x11\x11\x11\x11\x11\x11"
14831 "\x22\x22\x22\x22\x22\x22\x22\x22"
14832 "\x22\x22\x22\x22\x22\x22\x22\x22",
14833 .klen = 32,
14834 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
14835 "\x00\x00\x00\x00\x00\x00\x00\x00",
14836 .input = "\x44\x44\x44\x44\x44\x44\x44\x44"
14837 "\x44\x44\x44\x44\x44\x44\x44\x44"
14838 "\x44\x44\x44\x44\x44\x44\x44\x44"
14839 "\x44\x44\x44\x44\x44\x44\x44\x44",
14840 .ilen = 32,
14841 .result = "\xfb\x53\x81\x75\x6f\x9f\x34\xad"
14842 "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a"
14843 "\xd4\x84\xa4\x53\xd5\x88\x73\x1b"
14844 "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6",
14845 .rlen = 32,
14846 }, {
14847 .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
14848 "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
14849 "\x22\x22\x22\x22\x22\x22\x22\x22"
14850 "\x22\x22\x22\x22\x22\x22\x22\x22",
14851 .klen = 32,
14852 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
14853 "\x00\x00\x00\x00\x00\x00\x00\x00",
14854 .input = "\x44\x44\x44\x44\x44\x44\x44\x44"
14855 "\x44\x44\x44\x44\x44\x44\x44\x44"
14856 "\x44\x44\x44\x44\x44\x44\x44\x44"
14857 "\x44\x44\x44\x44\x44\x44\x44\x44",
14858 .ilen = 32,
14859 .result = "\x21\x52\x84\x15\xd1\xf7\x21\x55"
14860 "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d"
14861 "\xda\x63\xb2\xf1\x82\xb0\x89\x59"
14862 "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92",
14863 .rlen = 32,
14864 }, {
14865 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
14866 "\x23\x53\x60\x28\x74\x71\x35\x26"
14867 "\x31\x41\x59\x26\x53\x58\x97\x93"
14868 "\x23\x84\x62\x64\x33\x83\x27\x95",
14869 .klen = 32,
14870 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
14871 "\x00\x00\x00\x00\x00\x00\x00\x00",
14872 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
14873 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
14874 "\x10\x11\x12\x13\x14\x15\x16\x17"
14875 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
14876 "\x20\x21\x22\x23\x24\x25\x26\x27"
14877 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
14878 "\x30\x31\x32\x33\x34\x35\x36\x37"
14879 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
14880 "\x40\x41\x42\x43\x44\x45\x46\x47"
14881 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
14882 "\x50\x51\x52\x53\x54\x55\x56\x57"
14883 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
14884 "\x60\x61\x62\x63\x64\x65\x66\x67"
14885 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
14886 "\x70\x71\x72\x73\x74\x75\x76\x77"
14887 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
14888 "\x80\x81\x82\x83\x84\x85\x86\x87"
14889 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
14890 "\x90\x91\x92\x93\x94\x95\x96\x97"
14891 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
14892 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
14893 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
14894 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
14895 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
14896 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
14897 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
14898 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
14899 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
14900 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
14901 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
14902 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
14903 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
14904 "\x00\x01\x02\x03\x04\x05\x06\x07"
14905 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
14906 "\x10\x11\x12\x13\x14\x15\x16\x17"
14907 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
14908 "\x20\x21\x22\x23\x24\x25\x26\x27"
14909 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
14910 "\x30\x31\x32\x33\x34\x35\x36\x37"
14911 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
14912 "\x40\x41\x42\x43\x44\x45\x46\x47"
14913 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
14914 "\x50\x51\x52\x53\x54\x55\x56\x57"
14915 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
14916 "\x60\x61\x62\x63\x64\x65\x66\x67"
14917 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
14918 "\x70\x71\x72\x73\x74\x75\x76\x77"
14919 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
14920 "\x80\x81\x82\x83\x84\x85\x86\x87"
14921 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
14922 "\x90\x91\x92\x93\x94\x95\x96\x97"
14923 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
14924 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
14925 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
14926 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
14927 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
14928 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
14929 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
14930 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
14931 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
14932 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
14933 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
14934 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
14935 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
14936 .ilen = 512,
14937 .result = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82"
14938 "\x53\xd0\xed\x2d\x30\xc1\x20\xef"
14939 "\x70\x67\x5e\xff\x09\x70\xbb\xc1"
14940 "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48"
14941 "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7"
14942 "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9"
14943 "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44"
14944 "\x19\xc5\x58\x84\x63\xb9\x12\x68"
14945 "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c"
14946 "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd"
14947 "\x74\x79\x2e\xb4\x44\xd7\x69\xc4"
14948 "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d"
14949 "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb"
14950 "\x6d\x13\x65\xa0\xf9\x31\x12\xe2"
14951 "\x26\xd1\xec\x2b\x0a\x8b\x59\x99"
14952 "\xa7\x49\xa0\x0e\x09\x33\x85\x50"
14953 "\xc3\x23\xca\x7a\xdd\x13\x45\x5f"
14954 "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f"
14955 "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6"
14956 "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f"
14957 "\x79\x91\x8d\x36\x13\x7b\xd0\x4a"
14958 "\x6c\x39\xfb\x53\xb8\x6f\x02\x51"
14959 "\xa5\x20\xac\x24\x1c\x73\x59\x73"
14960 "\x58\x61\x3a\x87\x58\xb3\x20\x56"
14961 "\x39\x06\x2b\x4d\xd3\x20\x2b\x89"
14962 "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd"
14963 "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91"
14964 "\x09\x35\x71\x50\x65\xac\x92\xe3"
14965 "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92"
14966 "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9"
14967 "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d"
14968 "\x77\x04\x80\xa9\xbf\x38\xb5\xbd"
14969 "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8"
14970 "\x2a\x26\xcc\x49\x14\x6d\x55\x01"
14971 "\x06\x94\xd8\xb2\x2d\x53\x83\x1b"
14972 "\x8f\xd4\xdd\x57\x12\x7e\x18\xba"
14973 "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d"
14974 "\x24\xa9\x60\xa4\x97\x85\x86\x2a"
14975 "\x01\x00\x09\xf1\xcb\x4a\x24\x1c"
14976 "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4"
14977 "\x97\x1c\x10\xc6\x4d\x66\x4f\x98"
14978 "\x87\x30\xac\xd5\xea\x73\x49\x10"
14979 "\x80\xea\xe5\x5f\x4d\x5f\x03\x33"
14980 "\x66\x02\x35\x3d\x60\x06\x36\x4f"
14981 "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8"
14982 "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28"
14983 "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93"
14984 "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30"
14985 "\xcc\x75\xcf\x16\x26\xa9\x26\x3b"
14986 "\xe7\x68\x2f\x15\x21\x5b\xe4\x00"
14987 "\xbd\x48\x50\xcd\x75\x70\xc4\x62"
14988 "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b"
14989 "\x51\x66\x02\x69\x04\x97\x36\xd4"
14990 "\x75\xae\x0b\xa3\x42\xf8\xca\x79"
14991 "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2"
14992 "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd"
14993 "\xea\x15\x5a\xa0\x85\x7e\x81\x0d"
14994 "\x03\xe7\x05\x39\xf5\x05\x26\xee"
14995 "\xec\xaa\x1f\x3d\xc9\x98\x76\x01"
14996 "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4"
14997 "\x50\x65\x50\x6d\x04\x1f\xdf\x5a"
14998 "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca"
14999 "\x47\x26\xef\x39\xb8\xb4\xf2\xd1"
15000 "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf",
15001 .rlen = 512,
15002 }, {
15003 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
15004 "\x23\x53\x60\x28\x74\x71\x35\x26"
15005 "\x62\x49\x77\x57\x24\x70\x93\x69"
15006 "\x99\x59\x57\x49\x66\x96\x76\x27"
15007 "\x31\x41\x59\x26\x53\x58\x97\x93"
15008 "\x23\x84\x62\x64\x33\x83\x27\x95"
15009 "\x02\x88\x41\x97\x16\x93\x99\x37"
15010 "\x51\x05\x82\x09\x74\x94\x45\x92",
15011 .klen = 64,
15012 .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
15013 "\x00\x00\x00\x00\x00\x00\x00\x00",
15014 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
15015 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
15016 "\x10\x11\x12\x13\x14\x15\x16\x17"
15017 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
15018 "\x20\x21\x22\x23\x24\x25\x26\x27"
15019 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
15020 "\x30\x31\x32\x33\x34\x35\x36\x37"
15021 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
15022 "\x40\x41\x42\x43\x44\x45\x46\x47"
15023 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
15024 "\x50\x51\x52\x53\x54\x55\x56\x57"
15025 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
15026 "\x60\x61\x62\x63\x64\x65\x66\x67"
15027 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
15028 "\x70\x71\x72\x73\x74\x75\x76\x77"
15029 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
15030 "\x80\x81\x82\x83\x84\x85\x86\x87"
15031 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
15032 "\x90\x91\x92\x93\x94\x95\x96\x97"
15033 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
15034 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
15035 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
15036 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
15037 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
15038 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
15039 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
15040 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
15041 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
15042 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
15043 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
15044 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
15045 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
15046 "\x00\x01\x02\x03\x04\x05\x06\x07"
15047 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
15048 "\x10\x11\x12\x13\x14\x15\x16\x17"
15049 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
15050 "\x20\x21\x22\x23\x24\x25\x26\x27"
15051 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
15052 "\x30\x31\x32\x33\x34\x35\x36\x37"
15053 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
15054 "\x40\x41\x42\x43\x44\x45\x46\x47"
15055 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
15056 "\x50\x51\x52\x53\x54\x55\x56\x57"
15057 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
15058 "\x60\x61\x62\x63\x64\x65\x66\x67"
15059 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
15060 "\x70\x71\x72\x73\x74\x75\x76\x77"
15061 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
15062 "\x80\x81\x82\x83\x84\x85\x86\x87"
15063 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
15064 "\x90\x91\x92\x93\x94\x95\x96\x97"
15065 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
15066 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
15067 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
15068 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
15069 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
15070 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
15071 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
15072 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
15073 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
15074 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
15075 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
15076 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
15077 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
15078 .ilen = 512,
15079 .result = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1"
15080 "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb"
15081 "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73"
15082 "\x92\x99\xde\xd3\x76\xed\xcd\x63"
15083 "\x64\x3a\x22\x57\xc1\x43\x49\xd4"
15084 "\x79\x36\x31\x19\x62\xae\x10\x7e"
15085 "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa"
15086 "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0"
15087 "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00"
15088 "\xfc\x81\x99\x8a\x14\x62\xf5\x7e"
15089 "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec"
15090 "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6"
15091 "\x62\x62\x37\xfe\x0a\x4c\x4a\x37"
15092 "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e"
15093 "\x85\x3c\x4f\x26\x64\x85\xbc\x68"
15094 "\xb0\xe0\x86\x5e\x26\x41\xce\x11"
15095 "\x50\xda\x97\x14\xe9\x9e\xc7\x6d"
15096 "\x3b\xdc\x43\xde\x2b\x27\x69\x7d"
15097 "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31"
15098 "\x14\x4d\xf0\x74\x37\xfd\x07\x25"
15099 "\x96\x55\xe5\xfc\x9e\x27\x2a\x74"
15100 "\x1b\x83\x4d\x15\x83\xac\x57\xa0"
15101 "\xac\xa5\xd0\x38\xef\x19\x56\x53"
15102 "\x25\x4b\xfc\xce\x04\x23\xe5\x6b"
15103 "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5"
15104 "\xed\x22\x34\x1c\x5d\xed\x17\x06"
15105 "\x36\xa3\xe6\x77\xb9\x97\x46\xb8"
15106 "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc"
15107 "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82"
15108 "\x35\x91\x3d\x1b\xe4\x97\x9f\x92"
15109 "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1"
15110 "\x8d\x39\xfc\x42\xfb\x38\x80\xb9"
15111 "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1"
15112 "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7"
15113 "\xa1\xbf\xf7\xda\x95\x93\x4b\x78"
15114 "\x19\xf5\x94\xf9\xd2\x00\x33\x37"
15115 "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee"
15116 "\x42\xb2\x9e\x2c\x5f\x48\x23\x26"
15117 "\x15\x25\x17\x03\x3d\xfe\x2c\xfc"
15118 "\xeb\xba\xda\xe0\x00\x05\xb6\xa6"
15119 "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf"
15120 "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a"
15121 "\x49\xa1\xc3\xfa\x10\x52\xb9\x14"
15122 "\xad\xb7\x73\xf8\x78\x12\xc8\x59"
15123 "\x17\x80\x4c\x57\x39\xf1\x6d\x80"
15124 "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21"
15125 "\xec\xce\xb7\xc8\x02\x8a\xed\x53"
15126 "\x2c\x25\x68\x2e\x1f\x85\x5e\x67"
15127 "\xd1\x07\x7a\x3a\x89\x08\xe0\x34"
15128 "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40"
15129 "\x31\x15\x72\xa0\xf0\x73\xd9\x3b"
15130 "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2"
15131 "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8"
15132 "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6"
15133 "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58"
15134 "\xcc\x1f\x48\x49\x65\x47\x75\xe9"
15135 "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07"
15136 "\xf2\xec\x76\xd8\x8f\x09\xf3\x16"
15137 "\xa1\x51\x89\x3b\xeb\x96\x42\xac"
15138 "\x65\xe0\x67\x63\x29\xdc\xb4\x7d"
15139 "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb"
15140 "\x66\x8d\x13\xca\xe0\x59\x2a\x00"
15141 "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5"
15142 "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c",
15143 .rlen = 512,
15144 .also_non_np = 1,
15145 .np = 3,
15146 .tap = { 512 - 20, 4, 16 },
15147 }
15148};
15149
15150static const struct cipher_testvec speck128_xts_dec_tv_template[] = {
15151 {
15152 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
15153 "\x00\x00\x00\x00\x00\x00\x00\x00"
15154 "\x00\x00\x00\x00\x00\x00\x00\x00"
15155 "\x00\x00\x00\x00\x00\x00\x00\x00",
15156 .klen = 32,
15157 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
15158 "\x00\x00\x00\x00\x00\x00\x00\x00",
15159 .input = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62"
15160 "\x3b\x99\x4a\x64\x74\x77\xac\xed"
15161 "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42"
15162 "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54",
15163 .ilen = 32,
15164 .result = "\x00\x00\x00\x00\x00\x00\x00\x00"
15165 "\x00\x00\x00\x00\x00\x00\x00\x00"
15166 "\x00\x00\x00\x00\x00\x00\x00\x00"
15167 "\x00\x00\x00\x00\x00\x00\x00\x00",
15168 .rlen = 32,
15169 }, {
15170 .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
15171 "\x11\x11\x11\x11\x11\x11\x11\x11"
15172 "\x22\x22\x22\x22\x22\x22\x22\x22"
15173 "\x22\x22\x22\x22\x22\x22\x22\x22",
15174 .klen = 32,
15175 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
15176 "\x00\x00\x00\x00\x00\x00\x00\x00",
15177 .input = "\xfb\x53\x81\x75\x6f\x9f\x34\xad"
15178 "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a"
15179 "\xd4\x84\xa4\x53\xd5\x88\x73\x1b"
15180 "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6",
15181 .ilen = 32,
15182 .result = "\x44\x44\x44\x44\x44\x44\x44\x44"
15183 "\x44\x44\x44\x44\x44\x44\x44\x44"
15184 "\x44\x44\x44\x44\x44\x44\x44\x44"
15185 "\x44\x44\x44\x44\x44\x44\x44\x44",
15186 .rlen = 32,
15187 }, {
15188 .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
15189 "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
15190 "\x22\x22\x22\x22\x22\x22\x22\x22"
15191 "\x22\x22\x22\x22\x22\x22\x22\x22",
15192 .klen = 32,
15193 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
15194 "\x00\x00\x00\x00\x00\x00\x00\x00",
15195 .input = "\x21\x52\x84\x15\xd1\xf7\x21\x55"
15196 "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d"
15197 "\xda\x63\xb2\xf1\x82\xb0\x89\x59"
15198 "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92",
15199 .ilen = 32,
15200 .result = "\x44\x44\x44\x44\x44\x44\x44\x44"
15201 "\x44\x44\x44\x44\x44\x44\x44\x44"
15202 "\x44\x44\x44\x44\x44\x44\x44\x44"
15203 "\x44\x44\x44\x44\x44\x44\x44\x44",
15204 .rlen = 32,
15205 }, {
15206 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
15207 "\x23\x53\x60\x28\x74\x71\x35\x26"
15208 "\x31\x41\x59\x26\x53\x58\x97\x93"
15209 "\x23\x84\x62\x64\x33\x83\x27\x95",
15210 .klen = 32,
15211 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
15212 "\x00\x00\x00\x00\x00\x00\x00\x00",
15213 .input = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82"
15214 "\x53\xd0\xed\x2d\x30\xc1\x20\xef"
15215 "\x70\x67\x5e\xff\x09\x70\xbb\xc1"
15216 "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48"
15217 "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7"
15218 "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9"
15219 "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44"
15220 "\x19\xc5\x58\x84\x63\xb9\x12\x68"
15221 "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c"
15222 "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd"
15223 "\x74\x79\x2e\xb4\x44\xd7\x69\xc4"
15224 "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d"
15225 "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb"
15226 "\x6d\x13\x65\xa0\xf9\x31\x12\xe2"
15227 "\x26\xd1\xec\x2b\x0a\x8b\x59\x99"
15228 "\xa7\x49\xa0\x0e\x09\x33\x85\x50"
15229 "\xc3\x23\xca\x7a\xdd\x13\x45\x5f"
15230 "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f"
15231 "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6"
15232 "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f"
15233 "\x79\x91\x8d\x36\x13\x7b\xd0\x4a"
15234 "\x6c\x39\xfb\x53\xb8\x6f\x02\x51"
15235 "\xa5\x20\xac\x24\x1c\x73\x59\x73"
15236 "\x58\x61\x3a\x87\x58\xb3\x20\x56"
15237 "\x39\x06\x2b\x4d\xd3\x20\x2b\x89"
15238 "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd"
15239 "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91"
15240 "\x09\x35\x71\x50\x65\xac\x92\xe3"
15241 "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92"
15242 "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9"
15243 "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d"
15244 "\x77\x04\x80\xa9\xbf\x38\xb5\xbd"
15245 "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8"
15246 "\x2a\x26\xcc\x49\x14\x6d\x55\x01"
15247 "\x06\x94\xd8\xb2\x2d\x53\x83\x1b"
15248 "\x8f\xd4\xdd\x57\x12\x7e\x18\xba"
15249 "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d"
15250 "\x24\xa9\x60\xa4\x97\x85\x86\x2a"
15251 "\x01\x00\x09\xf1\xcb\x4a\x24\x1c"
15252 "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4"
15253 "\x97\x1c\x10\xc6\x4d\x66\x4f\x98"
15254 "\x87\x30\xac\xd5\xea\x73\x49\x10"
15255 "\x80\xea\xe5\x5f\x4d\x5f\x03\x33"
15256 "\x66\x02\x35\x3d\x60\x06\x36\x4f"
15257 "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8"
15258 "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28"
15259 "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93"
15260 "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30"
15261 "\xcc\x75\xcf\x16\x26\xa9\x26\x3b"
15262 "\xe7\x68\x2f\x15\x21\x5b\xe4\x00"
15263 "\xbd\x48\x50\xcd\x75\x70\xc4\x62"
15264 "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b"
15265 "\x51\x66\x02\x69\x04\x97\x36\xd4"
15266 "\x75\xae\x0b\xa3\x42\xf8\xca\x79"
15267 "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2"
15268 "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd"
15269 "\xea\x15\x5a\xa0\x85\x7e\x81\x0d"
15270 "\x03\xe7\x05\x39\xf5\x05\x26\xee"
15271 "\xec\xaa\x1f\x3d\xc9\x98\x76\x01"
15272 "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4"
15273 "\x50\x65\x50\x6d\x04\x1f\xdf\x5a"
15274 "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca"
15275 "\x47\x26\xef\x39\xb8\xb4\xf2\xd1"
15276 "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf",
15277 .ilen = 512,
15278 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
15279 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
15280 "\x10\x11\x12\x13\x14\x15\x16\x17"
15281 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
15282 "\x20\x21\x22\x23\x24\x25\x26\x27"
15283 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
15284 "\x30\x31\x32\x33\x34\x35\x36\x37"
15285 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
15286 "\x40\x41\x42\x43\x44\x45\x46\x47"
15287 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
15288 "\x50\x51\x52\x53\x54\x55\x56\x57"
15289 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
15290 "\x60\x61\x62\x63\x64\x65\x66\x67"
15291 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
15292 "\x70\x71\x72\x73\x74\x75\x76\x77"
15293 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
15294 "\x80\x81\x82\x83\x84\x85\x86\x87"
15295 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
15296 "\x90\x91\x92\x93\x94\x95\x96\x97"
15297 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
15298 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
15299 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
15300 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
15301 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
15302 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
15303 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
15304 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
15305 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
15306 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
15307 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
15308 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
15309 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
15310 "\x00\x01\x02\x03\x04\x05\x06\x07"
15311 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
15312 "\x10\x11\x12\x13\x14\x15\x16\x17"
15313 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
15314 "\x20\x21\x22\x23\x24\x25\x26\x27"
15315 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
15316 "\x30\x31\x32\x33\x34\x35\x36\x37"
15317 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
15318 "\x40\x41\x42\x43\x44\x45\x46\x47"
15319 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
15320 "\x50\x51\x52\x53\x54\x55\x56\x57"
15321 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
15322 "\x60\x61\x62\x63\x64\x65\x66\x67"
15323 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
15324 "\x70\x71\x72\x73\x74\x75\x76\x77"
15325 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
15326 "\x80\x81\x82\x83\x84\x85\x86\x87"
15327 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
15328 "\x90\x91\x92\x93\x94\x95\x96\x97"
15329 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
15330 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
15331 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
15332 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
15333 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
15334 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
15335 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
15336 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
15337 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
15338 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
15339 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
15340 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
15341 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
15342 .rlen = 512,
15343 }, {
15344 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
15345 "\x23\x53\x60\x28\x74\x71\x35\x26"
15346 "\x62\x49\x77\x57\x24\x70\x93\x69"
15347 "\x99\x59\x57\x49\x66\x96\x76\x27"
15348 "\x31\x41\x59\x26\x53\x58\x97\x93"
15349 "\x23\x84\x62\x64\x33\x83\x27\x95"
15350 "\x02\x88\x41\x97\x16\x93\x99\x37"
15351 "\x51\x05\x82\x09\x74\x94\x45\x92",
15352 .klen = 64,
15353 .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
15354 "\x00\x00\x00\x00\x00\x00\x00\x00",
15355 .input = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1"
15356 "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb"
15357 "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73"
15358 "\x92\x99\xde\xd3\x76\xed\xcd\x63"
15359 "\x64\x3a\x22\x57\xc1\x43\x49\xd4"
15360 "\x79\x36\x31\x19\x62\xae\x10\x7e"
15361 "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa"
15362 "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0"
15363 "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00"
15364 "\xfc\x81\x99\x8a\x14\x62\xf5\x7e"
15365 "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec"
15366 "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6"
15367 "\x62\x62\x37\xfe\x0a\x4c\x4a\x37"
15368 "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e"
15369 "\x85\x3c\x4f\x26\x64\x85\xbc\x68"
15370 "\xb0\xe0\x86\x5e\x26\x41\xce\x11"
15371 "\x50\xda\x97\x14\xe9\x9e\xc7\x6d"
15372 "\x3b\xdc\x43\xde\x2b\x27\x69\x7d"
15373 "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31"
15374 "\x14\x4d\xf0\x74\x37\xfd\x07\x25"
15375 "\x96\x55\xe5\xfc\x9e\x27\x2a\x74"
15376 "\x1b\x83\x4d\x15\x83\xac\x57\xa0"
15377 "\xac\xa5\xd0\x38\xef\x19\x56\x53"
15378 "\x25\x4b\xfc\xce\x04\x23\xe5\x6b"
15379 "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5"
15380 "\xed\x22\x34\x1c\x5d\xed\x17\x06"
15381 "\x36\xa3\xe6\x77\xb9\x97\x46\xb8"
15382 "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc"
15383 "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82"
15384 "\x35\x91\x3d\x1b\xe4\x97\x9f\x92"
15385 "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1"
15386 "\x8d\x39\xfc\x42\xfb\x38\x80\xb9"
15387 "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1"
15388 "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7"
15389 "\xa1\xbf\xf7\xda\x95\x93\x4b\x78"
15390 "\x19\xf5\x94\xf9\xd2\x00\x33\x37"
15391 "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee"
15392 "\x42\xb2\x9e\x2c\x5f\x48\x23\x26"
15393 "\x15\x25\x17\x03\x3d\xfe\x2c\xfc"
15394 "\xeb\xba\xda\xe0\x00\x05\xb6\xa6"
15395 "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf"
15396 "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a"
15397 "\x49\xa1\xc3\xfa\x10\x52\xb9\x14"
15398 "\xad\xb7\x73\xf8\x78\x12\xc8\x59"
15399 "\x17\x80\x4c\x57\x39\xf1\x6d\x80"
15400 "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21"
15401 "\xec\xce\xb7\xc8\x02\x8a\xed\x53"
15402 "\x2c\x25\x68\x2e\x1f\x85\x5e\x67"
15403 "\xd1\x07\x7a\x3a\x89\x08\xe0\x34"
15404 "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40"
15405 "\x31\x15\x72\xa0\xf0\x73\xd9\x3b"
15406 "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2"
15407 "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8"
15408 "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6"
15409 "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58"
15410 "\xcc\x1f\x48\x49\x65\x47\x75\xe9"
15411 "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07"
15412 "\xf2\xec\x76\xd8\x8f\x09\xf3\x16"
15413 "\xa1\x51\x89\x3b\xeb\x96\x42\xac"
15414 "\x65\xe0\x67\x63\x29\xdc\xb4\x7d"
15415 "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb"
15416 "\x66\x8d\x13\xca\xe0\x59\x2a\x00"
15417 "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5"
15418 "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c",
15419 .ilen = 512,
15420 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
15421 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
15422 "\x10\x11\x12\x13\x14\x15\x16\x17"
15423 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
15424 "\x20\x21\x22\x23\x24\x25\x26\x27"
15425 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
15426 "\x30\x31\x32\x33\x34\x35\x36\x37"
15427 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
15428 "\x40\x41\x42\x43\x44\x45\x46\x47"
15429 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
15430 "\x50\x51\x52\x53\x54\x55\x56\x57"
15431 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
15432 "\x60\x61\x62\x63\x64\x65\x66\x67"
15433 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
15434 "\x70\x71\x72\x73\x74\x75\x76\x77"
15435 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
15436 "\x80\x81\x82\x83\x84\x85\x86\x87"
15437 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
15438 "\x90\x91\x92\x93\x94\x95\x96\x97"
15439 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
15440 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
15441 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
15442 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
15443 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
15444 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
15445 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
15446 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
15447 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
15448 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
15449 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
15450 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
15451 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
15452 "\x00\x01\x02\x03\x04\x05\x06\x07"
15453 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
15454 "\x10\x11\x12\x13\x14\x15\x16\x17"
15455 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
15456 "\x20\x21\x22\x23\x24\x25\x26\x27"
15457 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
15458 "\x30\x31\x32\x33\x34\x35\x36\x37"
15459 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
15460 "\x40\x41\x42\x43\x44\x45\x46\x47"
15461 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
15462 "\x50\x51\x52\x53\x54\x55\x56\x57"
15463 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
15464 "\x60\x61\x62\x63\x64\x65\x66\x67"
15465 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
15466 "\x70\x71\x72\x73\x74\x75\x76\x77"
15467 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
15468 "\x80\x81\x82\x83\x84\x85\x86\x87"
15469 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
15470 "\x90\x91\x92\x93\x94\x95\x96\x97"
15471 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
15472 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
15473 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
15474 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
15475 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
15476 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
15477 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
15478 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
15479 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
15480 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
15481 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
15482 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
15483 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
15484 .rlen = 512,
15485 .also_non_np = 1,
15486 .np = 3,
15487 .tap = { 512 - 20, 4, 16 },
15488 }
15489};
15490
15491static const struct cipher_testvec speck64_enc_tv_template[] = {
15492 { /* Speck64/96 */
15493 .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
15494 "\x10\x11\x12\x13",
15495 .klen = 12,
15496 .input = "\x65\x61\x6e\x73\x20\x46\x61\x74",
15497 .ilen = 8,
15498 .result = "\x6c\x94\x75\x41\xec\x52\x79\x9f",
15499 .rlen = 8,
15500 }, { /* Speck64/128 */
15501 .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
15502 "\x10\x11\x12\x13\x18\x19\x1a\x1b",
15503 .klen = 16,
15504 .input = "\x2d\x43\x75\x74\x74\x65\x72\x3b",
15505 .ilen = 8,
15506 .result = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c",
15507 .rlen = 8,
15508 },
15509};
15510
15511static const struct cipher_testvec speck64_dec_tv_template[] = {
15512 { /* Speck64/96 */
15513 .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
15514 "\x10\x11\x12\x13",
15515 .klen = 12,
15516 .input = "\x6c\x94\x75\x41\xec\x52\x79\x9f",
15517 .ilen = 8,
15518 .result = "\x65\x61\x6e\x73\x20\x46\x61\x74",
15519 .rlen = 8,
15520 }, { /* Speck64/128 */
15521 .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
15522 "\x10\x11\x12\x13\x18\x19\x1a\x1b",
15523 .klen = 16,
15524 .input = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c",
15525 .ilen = 8,
15526 .result = "\x2d\x43\x75\x74\x74\x65\x72\x3b",
15527 .rlen = 8,
15528 },
15529};
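The single-block Speck64 vectors above can be spot-checked outside the kernel with a short standalone program. The sketch below is an editorial illustration, not part of this patch: it follows the published Speck64 round function and key schedule (26 rounds for Speck64/96, 27 for Speck64/128) and assumes the byte layout used by these vectors is little-endian 32-bit words with the specification's second word stored first, an assumption inferred from the vectors themselves. Compiling and running it should print "ok" for both vectors if those assumptions hold.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t rol32(uint32_t v, int n) { return (v << n) | (v >> (32 - n)); }
static uint32_t ror32(uint32_t v, int n) { return (v >> n) | (v << (32 - n)); }

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

/* Encrypt one 8-byte block; klen is 12 (Speck64/96) or 16 (Speck64/128). */
static void speck64_encrypt(const uint8_t *key, int klen,
			    const uint8_t *in, uint8_t *out)
{
	int m = klen / 4;               /* key words: 3 or 4 */
	int rounds = 23 + m;            /* 26 or 27 rounds */
	uint32_t k = get_le32(key);     /* first round key */
	uint32_t l[3];                  /* key-schedule state, m - 1 words */
	uint32_t y = get_le32(in);      /* assumed layout: y word stored first */
	uint32_t x = get_le32(in + 4);
	int i;

	for (i = 0; i < m - 1; i++)
		l[i] = get_le32(key + 4 * (i + 1));

	for (i = 0; i < rounds; i++) {
		uint32_t t;

		/* round: x = ((x ror 8) + y) xor k;  y = (y rol 3) xor x */
		x = (ror32(x, 8) + y) ^ k;
		y = rol32(y, 3) ^ x;

		/* key schedule: derive the next round key */
		t = (k + ror32(l[i % (m - 1)], 8)) ^ (uint32_t)i;
		k = rol32(k, 3) ^ t;
		l[i % (m - 1)] = t;
	}

	put_le32(out, y);
	put_le32(out + 4, x);
}

int main(void)
{
	/* key, plaintext and expected ciphertext copied from the vectors above */
	static const uint8_t key96[12] = {
		0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
		0x10, 0x11, 0x12, 0x13
	};
	static const uint8_t pt96[8] = {
		0x65, 0x61, 0x6e, 0x73, 0x20, 0x46, 0x61, 0x74
	};
	static const uint8_t ct96[8] = {
		0x6c, 0x94, 0x75, 0x41, 0xec, 0x52, 0x79, 0x9f
	};
	static const uint8_t key128[16] = {
		0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
		0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1a, 0x1b
	};
	static const uint8_t pt128[8] = {
		0x2d, 0x43, 0x75, 0x74, 0x74, 0x65, 0x72, 0x3b
	};
	static const uint8_t ct128[8] = {
		0x8b, 0x02, 0x4e, 0x45, 0x48, 0xa5, 0x6f, 0x8c
	};
	uint8_t out[8];

	speck64_encrypt(key96, 12, pt96, out);
	printf("Speck64/96:  %s\n", memcmp(out, ct96, 8) ? "mismatch" : "ok");

	speck64_encrypt(key128, 16, pt128, out);
	printf("Speck64/128: %s\n", memcmp(out, ct128, 8) ? "mismatch" : "ok");

	return 0;
}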
15530
15531/*
15532 * Speck64-XTS test vectors, taken from the AES-XTS test vectors with the result
15533 * recomputed with Speck64 as the cipher, and key lengths adjusted
15534 */
15535
15536static const struct cipher_testvec speck64_xts_enc_tv_template[] = {
15537 {
15538 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
15539 "\x00\x00\x00\x00\x00\x00\x00\x00"
15540 "\x00\x00\x00\x00\x00\x00\x00\x00",
15541 .klen = 24,
15542 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
15543 "\x00\x00\x00\x00\x00\x00\x00\x00",
15544 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
15545 "\x00\x00\x00\x00\x00\x00\x00\x00"
15546 "\x00\x00\x00\x00\x00\x00\x00\x00"
15547 "\x00\x00\x00\x00\x00\x00\x00\x00",
15548 .ilen = 32,
15549 .result = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6"
15550 "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2"
15551 "\x80\xf5\x72\xe7\xcd\xf0\x99\x22"
15552 "\x35\xa7\x2f\x06\xef\xdc\x51\xaa",
15553 .rlen = 32,
15554 }, {
15555 .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
15556 "\x11\x11\x11\x11\x11\x11\x11\x11"
15557 "\x22\x22\x22\x22\x22\x22\x22\x22",
15558 .klen = 24,
15559 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
15560 "\x00\x00\x00\x00\x00\x00\x00\x00",
15561 .input = "\x44\x44\x44\x44\x44\x44\x44\x44"
15562 "\x44\x44\x44\x44\x44\x44\x44\x44"
15563 "\x44\x44\x44\x44\x44\x44\x44\x44"
15564 "\x44\x44\x44\x44\x44\x44\x44\x44",
15565 .ilen = 32,
15566 .result = "\x12\x56\x73\xcd\x15\x87\xa8\x59"
15567 "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f"
15568 "\xb3\x12\x69\x7e\x36\xeb\x52\xff"
15569 "\x62\xdd\xba\x90\xb3\xe1\xee\x99",
15570 .rlen = 32,
15571 }, {
15572 .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
15573 "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
15574 "\x22\x22\x22\x22\x22\x22\x22\x22",
15575 .klen = 24,
15576 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
15577 "\x00\x00\x00\x00\x00\x00\x00\x00",
15578 .input = "\x44\x44\x44\x44\x44\x44\x44\x44"
15579 "\x44\x44\x44\x44\x44\x44\x44\x44"
15580 "\x44\x44\x44\x44\x44\x44\x44\x44"
15581 "\x44\x44\x44\x44\x44\x44\x44\x44",
15582 .ilen = 32,
15583 .result = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c"
15584 "\x27\x36\xc0\xbf\x5d\xea\x36\x37"
15585 "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b"
15586 "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34",
15587 .rlen = 32,
15588 }, {
15589 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
15590 "\x23\x53\x60\x28\x74\x71\x35\x26"
15591 "\x31\x41\x59\x26\x53\x58\x97\x93",
15592 .klen = 24,
15593 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
15594 "\x00\x00\x00\x00\x00\x00\x00\x00",
15595 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
15596 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
15597 "\x10\x11\x12\x13\x14\x15\x16\x17"
15598 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
15599 "\x20\x21\x22\x23\x24\x25\x26\x27"
15600 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
15601 "\x30\x31\x32\x33\x34\x35\x36\x37"
15602 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
15603 "\x40\x41\x42\x43\x44\x45\x46\x47"
15604 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
15605 "\x50\x51\x52\x53\x54\x55\x56\x57"
15606 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
15607 "\x60\x61\x62\x63\x64\x65\x66\x67"
15608 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
15609 "\x70\x71\x72\x73\x74\x75\x76\x77"
15610 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
15611 "\x80\x81\x82\x83\x84\x85\x86\x87"
15612 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
15613 "\x90\x91\x92\x93\x94\x95\x96\x97"
15614 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
15615 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
15616 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
15617 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
15618 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
15619 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
15620 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
15621 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
15622 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
15623 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
15624 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
15625 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
15626 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
15627 "\x00\x01\x02\x03\x04\x05\x06\x07"
15628 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
15629 "\x10\x11\x12\x13\x14\x15\x16\x17"
15630 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
15631 "\x20\x21\x22\x23\x24\x25\x26\x27"
15632 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
15633 "\x30\x31\x32\x33\x34\x35\x36\x37"
15634 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
15635 "\x40\x41\x42\x43\x44\x45\x46\x47"
15636 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
15637 "\x50\x51\x52\x53\x54\x55\x56\x57"
15638 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
15639 "\x60\x61\x62\x63\x64\x65\x66\x67"
15640 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
15641 "\x70\x71\x72\x73\x74\x75\x76\x77"
15642 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
15643 "\x80\x81\x82\x83\x84\x85\x86\x87"
15644 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
15645 "\x90\x91\x92\x93\x94\x95\x96\x97"
15646 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
15647 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
15648 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
15649 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
15650 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
15651 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
15652 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
15653 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
15654 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
15655 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
15656 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
15657 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
15658 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
15659 .ilen = 512,
15660 .result = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e"
15661 "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09"
15662 "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3"
15663 "\x11\xc7\x39\x96\xd0\x95\xf4\x56"
15664 "\xf4\xdd\x03\x38\x01\x44\x2c\xcf"
15665 "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66"
15666 "\xfe\x3d\xc6\xfb\x01\x23\x51\x43"
15667 "\xd5\xd2\x13\x86\x94\x34\xe9\x62"
15668 "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef"
15669 "\x76\x35\x04\x3f\xdb\x23\x9d\x0b"
15670 "\x85\x42\xb9\x02\xd6\xcc\xdb\x96"
15671 "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d"
15672 "\xae\xd2\x04\xd5\xda\xc1\x7e\x24"
15673 "\x8c\x73\xbe\x48\x7e\xcf\x65\x28"
15674 "\x29\xe5\xbe\x54\x30\xcb\x46\x95"
15675 "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe"
15676 "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69"
15677 "\xa1\x09\x95\x71\x26\xe9\xc4\xdf"
15678 "\xe6\x31\xc3\x46\xda\xaf\x0b\x41"
15679 "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3"
15680 "\x82\xc0\x37\x27\xfc\x91\xa7\x05"
15681 "\xfb\xc5\xdc\x2b\x74\x96\x48\x43"
15682 "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f"
15683 "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a"
15684 "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c"
15685 "\x07\xff\xf3\x72\x74\x48\xb5\x40"
15686 "\x50\xb5\xdd\x90\x43\x31\x18\x15"
15687 "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a"
15688 "\x29\x93\x90\x8b\xda\x07\xf0\x35"
15689 "\x6d\x90\x88\x09\x4e\x83\xf5\x5b"
15690 "\x94\x12\xbb\x33\x27\x1d\x3f\x23"
15691 "\x51\xa8\x7c\x07\xa2\xae\x77\xa6"
15692 "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f"
15693 "\x66\xdd\xcd\x75\x24\x8b\x33\xf7"
15694 "\x20\xdb\x83\x9b\x4f\x11\x63\x6e"
15695 "\xcf\x37\xef\xc9\x11\x01\x5c\x45"
15696 "\x32\x99\x7c\x3c\x9e\x42\x89\xe3"
15697 "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05"
15698 "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc"
15699 "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d"
15700 "\xa0\xa8\x89\x3b\x73\x39\xa5\x94"
15701 "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89"
15702 "\x10\xff\xaf\xef\xca\xdd\x4f\x80"
15703 "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7"
15704 "\x33\xca\x00\x8b\x8b\x3f\xea\xec"
15705 "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f"
15706 "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5"
15707 "\x64\xa3\xf1\x1a\x76\x28\xcc\x35"
15708 "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b"
15709 "\xc7\x1b\x53\x17\x02\xea\xd1\xad"
15710 "\x13\x51\x73\xc0\xa0\xb2\x05\x32"
15711 "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19"
15712 "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d"
15713 "\x59\xda\xee\x1a\x22\x18\xda\x0d"
15714 "\x88\x0f\x55\x8b\x72\x62\xfd\xc1"
15715 "\x69\x13\xcd\x0d\x5f\xc1\x09\x52"
15716 "\xee\xd6\xe3\x84\x4d\xee\xf6\x88"
15717 "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f"
15718 "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54"
15719 "\x7d\x69\x8d\x00\x62\x77\x0d\x14"
15720 "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3"
15721 "\x50\xf7\x5f\xf4\xc2\xca\x41\x97"
15722 "\x37\xbe\x75\x74\xcd\xf0\x75\x6e"
15723 "\x25\x23\x94\xbd\xda\x8d\xb0\xd4",
15724 .rlen = 512,
15725 }, {
15726 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
15727 "\x23\x53\x60\x28\x74\x71\x35\x26"
15728 "\x62\x49\x77\x57\x24\x70\x93\x69"
15729 "\x99\x59\x57\x49\x66\x96\x76\x27",
15730 .klen = 32,
15731 .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
15732 "\x00\x00\x00\x00\x00\x00\x00\x00",
15733 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
15734 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
15735 "\x10\x11\x12\x13\x14\x15\x16\x17"
15736 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
15737 "\x20\x21\x22\x23\x24\x25\x26\x27"
15738 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
15739 "\x30\x31\x32\x33\x34\x35\x36\x37"
15740 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
15741 "\x40\x41\x42\x43\x44\x45\x46\x47"
15742 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
15743 "\x50\x51\x52\x53\x54\x55\x56\x57"
15744 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
15745 "\x60\x61\x62\x63\x64\x65\x66\x67"
15746 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
15747 "\x70\x71\x72\x73\x74\x75\x76\x77"
15748 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
15749 "\x80\x81\x82\x83\x84\x85\x86\x87"
15750 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
15751 "\x90\x91\x92\x93\x94\x95\x96\x97"
15752 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
15753 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
15754 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
15755 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
15756 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
15757 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
15758 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
15759 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
15760 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
15761 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
15762 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
15763 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
15764 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
15765 "\x00\x01\x02\x03\x04\x05\x06\x07"
15766 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
15767 "\x10\x11\x12\x13\x14\x15\x16\x17"
15768 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
15769 "\x20\x21\x22\x23\x24\x25\x26\x27"
15770 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
15771 "\x30\x31\x32\x33\x34\x35\x36\x37"
15772 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
15773 "\x40\x41\x42\x43\x44\x45\x46\x47"
15774 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
15775 "\x50\x51\x52\x53\x54\x55\x56\x57"
15776 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
15777 "\x60\x61\x62\x63\x64\x65\x66\x67"
15778 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
15779 "\x70\x71\x72\x73\x74\x75\x76\x77"
15780 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
15781 "\x80\x81\x82\x83\x84\x85\x86\x87"
15782 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
15783 "\x90\x91\x92\x93\x94\x95\x96\x97"
15784 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
15785 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
15786 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
15787 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
15788 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
15789 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
15790 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
15791 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
15792 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
15793 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
15794 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
15795 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
15796 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
15797 .ilen = 512,
15798 .result = "\x55\xed\x71\xd3\x02\x8e\x15\x3b"
15799 "\xc6\x71\x29\x2d\x3e\x89\x9f\x59"
15800 "\x68\x6a\xcc\x8a\x56\x97\xf3\x95"
15801 "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c"
15802 "\x78\x16\xea\x80\xdb\x33\x75\x94"
15803 "\xf9\x29\xc4\x2b\x76\x75\x97\xc7"
15804 "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b"
15805 "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee"
15806 "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a"
15807 "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c"
15808 "\xf5\xec\x32\x74\xa3\xb8\x03\x88"
15809 "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f"
15810 "\x84\x5e\x46\xed\x20\x89\xb6\x44"
15811 "\x8d\xd0\xed\x54\x47\x16\xbe\x95"
15812 "\x8a\xb3\x6b\x72\xc4\x32\x52\x13"
15813 "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6"
15814 "\x44\x18\xdd\x8c\x6e\xca\x6e\x45"
15815 "\x8f\x1e\x10\x07\x57\x25\x98\x7b"
15816 "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8"
15817 "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb"
15818 "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff"
15819 "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e"
15820 "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d"
15821 "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65"
15822 "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a"
15823 "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a"
15824 "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78"
15825 "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3"
15826 "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e"
15827 "\x35\x10\x30\x82\x0d\xe7\xc5\x9b"
15828 "\xde\x44\x18\xbd\x9f\xd1\x45\xa9"
15829 "\x7b\x7a\x4a\xad\x35\x65\x27\xca"
15830 "\xb2\xc3\xd4\x9b\x71\x86\x70\xee"
15831 "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf"
15832 "\xfc\x42\xc8\x31\x59\xbe\x16\x60"
15833 "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14"
15834 "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef"
15835 "\x52\x7f\x29\x51\x94\x20\x67\x3c"
15836 "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63"
15837 "\xe7\xff\x73\x25\xd1\xdd\x96\x8a"
15838 "\x98\x52\x6d\xf3\xac\x3e\xf2\x18"
15839 "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed"
15840 "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e"
15841 "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad"
15842 "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa"
15843 "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81"
15844 "\x65\x53\x0f\x41\x11\xbd\x98\x99"
15845 "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d"
15846 "\x84\x98\xf9\x34\xed\x33\x2a\x1f"
15847 "\x82\xed\xc1\x73\x98\xd3\x02\xdc"
15848 "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76"
15849 "\x63\x51\x34\x9d\x96\x12\xae\xce"
15850 "\x83\xc9\x76\x5e\xa4\x1b\x53\x37"
15851 "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d"
15852 "\x54\x27\x74\xbb\x10\x86\x57\x46"
15853 "\x68\xe1\xed\x14\xe7\x9d\xfc\x84"
15854 "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf"
15855 "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d"
15856 "\x7b\x4f\x38\x55\x36\x71\x64\xc1"
15857 "\xfc\x5c\x75\x52\x33\x02\x18\xf8"
15858 "\x17\xe1\x2b\xc2\x43\x39\xbd\x76"
15859 "\x9b\x63\x76\x32\x2f\x19\x72\x10"
15860 "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5"
15861 "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c",
15862 .rlen = 512,
15863 .also_non_np = 1,
15864 .np = 3,
15865 .tap = { 512 - 20, 4, 16 },
15866 }
15867};
15868
15869static const struct cipher_testvec speck64_xts_dec_tv_template[] = {
15870 {
15871 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
15872 "\x00\x00\x00\x00\x00\x00\x00\x00"
15873 "\x00\x00\x00\x00\x00\x00\x00\x00",
15874 .klen = 24,
15875 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
15876 "\x00\x00\x00\x00\x00\x00\x00\x00",
15877 .input = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6"
15878 "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2"
15879 "\x80\xf5\x72\xe7\xcd\xf0\x99\x22"
15880 "\x35\xa7\x2f\x06\xef\xdc\x51\xaa",
15881 .ilen = 32,
15882 .result = "\x00\x00\x00\x00\x00\x00\x00\x00"
15883 "\x00\x00\x00\x00\x00\x00\x00\x00"
15884 "\x00\x00\x00\x00\x00\x00\x00\x00"
15885 "\x00\x00\x00\x00\x00\x00\x00\x00",
15886 .rlen = 32,
15887 }, {
15888 .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
15889 "\x11\x11\x11\x11\x11\x11\x11\x11"
15890 "\x22\x22\x22\x22\x22\x22\x22\x22",
15891 .klen = 24,
15892 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
15893 "\x00\x00\x00\x00\x00\x00\x00\x00",
15894 .input = "\x12\x56\x73\xcd\x15\x87\xa8\x59"
15895 "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f"
15896 "\xb3\x12\x69\x7e\x36\xeb\x52\xff"
15897 "\x62\xdd\xba\x90\xb3\xe1\xee\x99",
15898 .ilen = 32,
15899 .result = "\x44\x44\x44\x44\x44\x44\x44\x44"
15900 "\x44\x44\x44\x44\x44\x44\x44\x44"
15901 "\x44\x44\x44\x44\x44\x44\x44\x44"
15902 "\x44\x44\x44\x44\x44\x44\x44\x44",
15903 .rlen = 32,
15904 }, {
15905 .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
15906 "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
15907 "\x22\x22\x22\x22\x22\x22\x22\x22",
15908 .klen = 24,
15909 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
15910 "\x00\x00\x00\x00\x00\x00\x00\x00",
15911 .input = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c"
15912 "\x27\x36\xc0\xbf\x5d\xea\x36\x37"
15913 "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b"
15914 "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34",
15915 .ilen = 32,
15916 .result = "\x44\x44\x44\x44\x44\x44\x44\x44"
15917 "\x44\x44\x44\x44\x44\x44\x44\x44"
15918 "\x44\x44\x44\x44\x44\x44\x44\x44"
15919 "\x44\x44\x44\x44\x44\x44\x44\x44",
15920 .rlen = 32,
15921 }, {
15922 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
15923 "\x23\x53\x60\x28\x74\x71\x35\x26"
15924 "\x31\x41\x59\x26\x53\x58\x97\x93",
15925 .klen = 24,
15926 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
15927 "\x00\x00\x00\x00\x00\x00\x00\x00",
15928 .input = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e"
15929 "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09"
15930 "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3"
15931 "\x11\xc7\x39\x96\xd0\x95\xf4\x56"
15932 "\xf4\xdd\x03\x38\x01\x44\x2c\xcf"
15933 "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66"
15934 "\xfe\x3d\xc6\xfb\x01\x23\x51\x43"
15935 "\xd5\xd2\x13\x86\x94\x34\xe9\x62"
15936 "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef"
15937 "\x76\x35\x04\x3f\xdb\x23\x9d\x0b"
15938 "\x85\x42\xb9\x02\xd6\xcc\xdb\x96"
15939 "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d"
15940 "\xae\xd2\x04\xd5\xda\xc1\x7e\x24"
15941 "\x8c\x73\xbe\x48\x7e\xcf\x65\x28"
15942 "\x29\xe5\xbe\x54\x30\xcb\x46\x95"
15943 "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe"
15944 "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69"
15945 "\xa1\x09\x95\x71\x26\xe9\xc4\xdf"
15946 "\xe6\x31\xc3\x46\xda\xaf\x0b\x41"
15947 "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3"
15948 "\x82\xc0\x37\x27\xfc\x91\xa7\x05"
15949 "\xfb\xc5\xdc\x2b\x74\x96\x48\x43"
15950 "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f"
15951 "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a"
15952 "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c"
15953 "\x07\xff\xf3\x72\x74\x48\xb5\x40"
15954 "\x50\xb5\xdd\x90\x43\x31\x18\x15"
15955 "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a"
15956 "\x29\x93\x90\x8b\xda\x07\xf0\x35"
15957 "\x6d\x90\x88\x09\x4e\x83\xf5\x5b"
15958 "\x94\x12\xbb\x33\x27\x1d\x3f\x23"
15959 "\x51\xa8\x7c\x07\xa2\xae\x77\xa6"
15960 "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f"
15961 "\x66\xdd\xcd\x75\x24\x8b\x33\xf7"
15962 "\x20\xdb\x83\x9b\x4f\x11\x63\x6e"
15963 "\xcf\x37\xef\xc9\x11\x01\x5c\x45"
15964 "\x32\x99\x7c\x3c\x9e\x42\x89\xe3"
15965 "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05"
15966 "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc"
15967 "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d"
15968 "\xa0\xa8\x89\x3b\x73\x39\xa5\x94"
15969 "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89"
15970 "\x10\xff\xaf\xef\xca\xdd\x4f\x80"
15971 "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7"
15972 "\x33\xca\x00\x8b\x8b\x3f\xea\xec"
15973 "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f"
15974 "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5"
15975 "\x64\xa3\xf1\x1a\x76\x28\xcc\x35"
15976 "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b"
15977 "\xc7\x1b\x53\x17\x02\xea\xd1\xad"
15978 "\x13\x51\x73\xc0\xa0\xb2\x05\x32"
15979 "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19"
15980 "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d"
15981 "\x59\xda\xee\x1a\x22\x18\xda\x0d"
15982 "\x88\x0f\x55\x8b\x72\x62\xfd\xc1"
15983 "\x69\x13\xcd\x0d\x5f\xc1\x09\x52"
15984 "\xee\xd6\xe3\x84\x4d\xee\xf6\x88"
15985 "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f"
15986 "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54"
15987 "\x7d\x69\x8d\x00\x62\x77\x0d\x14"
15988 "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3"
15989 "\x50\xf7\x5f\xf4\xc2\xca\x41\x97"
15990 "\x37\xbe\x75\x74\xcd\xf0\x75\x6e"
15991 "\x25\x23\x94\xbd\xda\x8d\xb0\xd4",
15992 .ilen = 512,
15993 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
15994 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
15995 "\x10\x11\x12\x13\x14\x15\x16\x17"
15996 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
15997 "\x20\x21\x22\x23\x24\x25\x26\x27"
15998 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
15999 "\x30\x31\x32\x33\x34\x35\x36\x37"
16000 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
16001 "\x40\x41\x42\x43\x44\x45\x46\x47"
16002 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
16003 "\x50\x51\x52\x53\x54\x55\x56\x57"
16004 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
16005 "\x60\x61\x62\x63\x64\x65\x66\x67"
16006 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
16007 "\x70\x71\x72\x73\x74\x75\x76\x77"
16008 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
16009 "\x80\x81\x82\x83\x84\x85\x86\x87"
16010 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
16011 "\x90\x91\x92\x93\x94\x95\x96\x97"
16012 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
16013 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
16014 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
16015 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
16016 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
16017 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
16018 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
16019 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
16020 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
16021 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
16022 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
16023 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
16024 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
16025 "\x00\x01\x02\x03\x04\x05\x06\x07"
16026 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
16027 "\x10\x11\x12\x13\x14\x15\x16\x17"
16028 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
16029 "\x20\x21\x22\x23\x24\x25\x26\x27"
16030 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
16031 "\x30\x31\x32\x33\x34\x35\x36\x37"
16032 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
16033 "\x40\x41\x42\x43\x44\x45\x46\x47"
16034 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
16035 "\x50\x51\x52\x53\x54\x55\x56\x57"
16036 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
16037 "\x60\x61\x62\x63\x64\x65\x66\x67"
16038 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
16039 "\x70\x71\x72\x73\x74\x75\x76\x77"
16040 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
16041 "\x80\x81\x82\x83\x84\x85\x86\x87"
16042 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
16043 "\x90\x91\x92\x93\x94\x95\x96\x97"
16044 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
16045 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
16046 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
16047 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
16048 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
16049 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
16050 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
16051 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
16052 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
16053 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
16054 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
16055 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
16056 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
16057 .rlen = 512,
16058 }, {
16059 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
16060 "\x23\x53\x60\x28\x74\x71\x35\x26"
16061 "\x62\x49\x77\x57\x24\x70\x93\x69"
16062 "\x99\x59\x57\x49\x66\x96\x76\x27",
16063 .klen = 32,
16064 .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
16065 "\x00\x00\x00\x00\x00\x00\x00\x00",
16066 .input = "\x55\xed\x71\xd3\x02\x8e\x15\x3b"
16067 "\xc6\x71\x29\x2d\x3e\x89\x9f\x59"
16068 "\x68\x6a\xcc\x8a\x56\x97\xf3\x95"
16069 "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c"
16070 "\x78\x16\xea\x80\xdb\x33\x75\x94"
16071 "\xf9\x29\xc4\x2b\x76\x75\x97\xc7"
16072 "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b"
16073 "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee"
16074 "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a"
16075 "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c"
16076 "\xf5\xec\x32\x74\xa3\xb8\x03\x88"
16077 "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f"
16078 "\x84\x5e\x46\xed\x20\x89\xb6\x44"
16079 "\x8d\xd0\xed\x54\x47\x16\xbe\x95"
16080 "\x8a\xb3\x6b\x72\xc4\x32\x52\x13"
16081 "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6"
16082 "\x44\x18\xdd\x8c\x6e\xca\x6e\x45"
16083 "\x8f\x1e\x10\x07\x57\x25\x98\x7b"
16084 "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8"
16085 "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb"
16086 "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff"
16087 "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e"
16088 "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d"
16089 "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65"
16090 "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a"
16091 "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a"
16092 "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78"
16093 "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3"
16094 "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e"
16095 "\x35\x10\x30\x82\x0d\xe7\xc5\x9b"
16096 "\xde\x44\x18\xbd\x9f\xd1\x45\xa9"
16097 "\x7b\x7a\x4a\xad\x35\x65\x27\xca"
16098 "\xb2\xc3\xd4\x9b\x71\x86\x70\xee"
16099 "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf"
16100 "\xfc\x42\xc8\x31\x59\xbe\x16\x60"
16101 "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14"
16102 "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef"
16103 "\x52\x7f\x29\x51\x94\x20\x67\x3c"
16104 "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63"
16105 "\xe7\xff\x73\x25\xd1\xdd\x96\x8a"
16106 "\x98\x52\x6d\xf3\xac\x3e\xf2\x18"
16107 "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed"
16108 "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e"
16109 "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad"
16110 "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa"
16111 "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81"
16112 "\x65\x53\x0f\x41\x11\xbd\x98\x99"
16113 "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d"
16114 "\x84\x98\xf9\x34\xed\x33\x2a\x1f"
16115 "\x82\xed\xc1\x73\x98\xd3\x02\xdc"
16116 "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76"
16117 "\x63\x51\x34\x9d\x96\x12\xae\xce"
16118 "\x83\xc9\x76\x5e\xa4\x1b\x53\x37"
16119 "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d"
16120 "\x54\x27\x74\xbb\x10\x86\x57\x46"
16121 "\x68\xe1\xed\x14\xe7\x9d\xfc\x84"
16122 "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf"
16123 "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d"
16124 "\x7b\x4f\x38\x55\x36\x71\x64\xc1"
16125 "\xfc\x5c\x75\x52\x33\x02\x18\xf8"
16126 "\x17\xe1\x2b\xc2\x43\x39\xbd\x76"
16127 "\x9b\x63\x76\x32\x2f\x19\x72\x10"
16128 "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5"
16129 "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c",
16130 .ilen = 512,
16131 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
16132 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
16133 "\x10\x11\x12\x13\x14\x15\x16\x17"
16134 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
16135 "\x20\x21\x22\x23\x24\x25\x26\x27"
16136 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
16137 "\x30\x31\x32\x33\x34\x35\x36\x37"
16138 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
16139 "\x40\x41\x42\x43\x44\x45\x46\x47"
16140 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
16141 "\x50\x51\x52\x53\x54\x55\x56\x57"
16142 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
16143 "\x60\x61\x62\x63\x64\x65\x66\x67"
16144 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
16145 "\x70\x71\x72\x73\x74\x75\x76\x77"
16146 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
16147 "\x80\x81\x82\x83\x84\x85\x86\x87"
16148 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
16149 "\x90\x91\x92\x93\x94\x95\x96\x97"
16150 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
16151 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
16152 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
16153 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
16154 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
16155 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
16156 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
16157 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
16158 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
16159 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
16160 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
16161 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
16162 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
16163 "\x00\x01\x02\x03\x04\x05\x06\x07"
16164 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
16165 "\x10\x11\x12\x13\x14\x15\x16\x17"
16166 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
16167 "\x20\x21\x22\x23\x24\x25\x26\x27"
16168 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
16169 "\x30\x31\x32\x33\x34\x35\x36\x37"
16170 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
16171 "\x40\x41\x42\x43\x44\x45\x46\x47"
16172 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
16173 "\x50\x51\x52\x53\x54\x55\x56\x57"
16174 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
16175 "\x60\x61\x62\x63\x64\x65\x66\x67"
16176 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
16177 "\x70\x71\x72\x73\x74\x75\x76\x77"
16178 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
16179 "\x80\x81\x82\x83\x84\x85\x86\x87"
16180 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
16181 "\x90\x91\x92\x93\x94\x95\x96\x97"
16182 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
16183 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
16184 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
16185 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
16186 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
16187 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
16188 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
16189 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
16190 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
16191 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
16192 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
16193 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
16194 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
16195 .rlen = 512,
16196 .also_non_np = 1,
16197 .np = 3,
16198 .tap = { 512 - 20, 4, 16 },
16199 }
16200};
16201
16202/* Cast6 test vectors from RFC 2612 */
16203static const struct cipher_testvec cast6_enc_tv_template[] = {
16204 {
diff --git a/crypto/xts.c b/crypto/xts.c
index f317c48b5e43..12284183bd20 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -357,78 +357,6 @@ static int decrypt(struct skcipher_request *req)
357 return do_decrypt(req, init_crypt(req, decrypt_done));
358}
359
360int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
361 struct scatterlist *ssrc, unsigned int nbytes,
362 struct xts_crypt_req *req)
363{
364 const unsigned int bsize = XTS_BLOCK_SIZE;
365 const unsigned int max_blks = req->tbuflen / bsize;
366 struct blkcipher_walk walk;
367 unsigned int nblocks;
368 le128 *src, *dst, *t;
369 le128 *t_buf = req->tbuf;
370 int err, i;
371
372 BUG_ON(max_blks < 1);
373
374 blkcipher_walk_init(&walk, sdst, ssrc, nbytes);
375
376 err = blkcipher_walk_virt(desc, &walk);
377 nbytes = walk.nbytes;
378 if (!nbytes)
379 return err;
380
381 nblocks = min(nbytes / bsize, max_blks);
382 src = (le128 *)walk.src.virt.addr;
383 dst = (le128 *)walk.dst.virt.addr;
384
385 /* calculate first value of T */
386 req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);
387
388 i = 0;
389 goto first;
390
391 for (;;) {
392 do {
393 for (i = 0; i < nblocks; i++) {
394 gf128mul_x_ble(&t_buf[i], t);
395first:
396 t = &t_buf[i];
397
398 /* PP <- T xor P */
399 le128_xor(dst + i, t, src + i);
400 }
401
402 /* CC <- E(Key2,PP) */
403 req->crypt_fn(req->crypt_ctx, (u8 *)dst,
404 nblocks * bsize);
405
406 /* C <- T xor CC */
407 for (i = 0; i < nblocks; i++)
408 le128_xor(dst + i, dst + i, &t_buf[i]);
409
410 src += nblocks;
411 dst += nblocks;
412 nbytes -= nblocks * bsize;
413 nblocks = min(nbytes / bsize, max_blks);
414 } while (nblocks > 0);
415
416 *(le128 *)walk.iv = *t;
417
418 err = blkcipher_walk_done(desc, &walk, nbytes);
419 nbytes = walk.nbytes;
420 if (!nbytes)
421 break;
422
423 nblocks = min(nbytes / bsize, max_blks);
424 src = (le128 *)walk.src.virt.addr;
425 dst = (le128 *)walk.dst.virt.addr;
426 }
427
428 return err;
429}
430EXPORT_SYMBOL_GPL(xts_crypt);
431
360static int init_tfm(struct crypto_skcipher *tfm)
361{
362 struct skcipher_instance *inst = skcipher_alg_instance(tfm);
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 4d0f571c15f9..d53541e96bee 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -452,3 +452,10 @@ config UML_RANDOM
452 (check your distro, or download from
453 http://sourceforge.net/projects/gkernel/). rngd periodically reads
454 /dev/hwrng and injects the entropy into /dev/random.
455
456config HW_RANDOM_KEYSTONE
457 depends on ARCH_KEYSTONE
458 default HW_RANDOM
459 tristate "TI Keystone NETCP SA Hardware random number generator"
460 help
461 This option enables Keystone's hardware random generator.
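The help text above mentions rngd feeding /dev/hwrng into the kernel entropy pool; the same character device can also be read directly. The short user-space sketch below is an editorial illustration, not part of this patch, and assumes a hw_random driver, such as the Keystone one added by this series, is loaded and selected as the current RNG.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n, i;
	int fd;

	fd = open("/dev/hwrng", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/hwrng");
		return 1;
	}

	n = read(fd, buf, sizeof(buf));	/* blocks until the RNG has data */
	if (n < 0) {
		perror("read");
		close(fd);
		return 1;
	}

	for (i = 0; i < n; i++)
		printf("%02x", buf[i]);
	printf("\n");

	close(fd);
	return 0;
}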
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index b780370bd4eb..533e913c93d1 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -38,3 +38,4 @@ obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o
38obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o
39obj-$(CONFIG_HW_RANDOM_MTK) += mtk-rng.o
40obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o
41obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 7a84cec30c3a..6767d965c36c 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -163,6 +163,8 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
163
164 /* Clock is optional on most platforms */
165 priv->clk = devm_clk_get(dev, NULL);
166 if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
167 return -EPROBE_DEFER;
168
169 priv->rng.name = pdev->name;
170 priv->rng.init = bcm2835_rng_init;
diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c
index dd1007aecb10..2d1352b67168 100644
--- a/drivers/char/hw_random/cavium-rng-vf.c
+++ b/drivers/char/hw_random/cavium-rng-vf.c
@@ -77,7 +77,7 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev,
77}
78
79/* Remove the VF */
80void cavium_rng_remove_vf(struct pci_dev *pdev)
80static void cavium_rng_remove_vf(struct pci_dev *pdev)
81{
82 struct cavium_rng *rng;
83
diff --git a/drivers/char/hw_random/cavium-rng.c b/drivers/char/hw_random/cavium-rng.c
index a944e0a47f42..63d6e68c24d2 100644
--- a/drivers/char/hw_random/cavium-rng.c
+++ b/drivers/char/hw_random/cavium-rng.c
@@ -62,7 +62,7 @@ static int cavium_rng_probe(struct pci_dev *pdev,
62}
63
64/* Disable VF and RNG Hardware */
65void cavium_rng_remove(struct pci_dev *pdev)
65static void cavium_rng_remove(struct pci_dev *pdev)
66{
67 struct cavium_rng_pf *rng;
68
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index eca87249bcff..250123bc4905 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -300,7 +300,7 @@ static int __maybe_unused imx_rngc_resume(struct device *dev)
300 return 0;
301}
302
303SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
303static SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
304
305static const struct of_device_id imx_rngc_dt_ids[] = {
306 { .compatible = "fsl,imx25-rngb", .data = NULL, },
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
new file mode 100644
index 000000000000..62c6696c1dbd
--- /dev/null
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -0,0 +1,257 @@
1/*
2 * Random Number Generator driver for the Keystone SOC
3 *
4 * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Sandeep Nair
7 * Vitaly Andrianov
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 */
18
19#include <linux/hw_random.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/io.h>
23#include <linux/platform_device.h>
24#include <linux/clk.h>
25#include <linux/pm_runtime.h>
26#include <linux/err.h>
27#include <linux/regmap.h>
28#include <linux/mfd/syscon.h>
29#include <linux/of.h>
30#include <linux/of_address.h>
31#include <linux/delay.h>
32
33#define SA_CMD_STATUS_OFS 0x8
34
35/* TRNG enable control in SA System module*/
36#define SA_CMD_STATUS_REG_TRNG_ENABLE BIT(3)
37
38/* TRNG start control in TRNG module */
39#define TRNG_CNTL_REG_TRNG_ENABLE BIT(10)
40
41/* Data ready indicator in STATUS register */
42#define TRNG_STATUS_REG_READY BIT(0)
43
44/* Data ready clear control in INTACK register */
45#define TRNG_INTACK_REG_READY BIT(0)
46
47/*
48 * Number of samples taken to gather entropy during startup.
49 * If value is 0, the number of samples is 2^24 else
50 * equals value times 2^8.
51 */
52#define TRNG_DEF_STARTUP_CYCLES 0
53#define TRNG_CNTL_REG_STARTUP_CYCLES_SHIFT 16
54
55/*
56 * Minimum number of samples taken to regenerate entropy
57 * If value is 0, the number of samples is 2^24 else
58 * equals value times 2^6.
59 */
60#define TRNG_DEF_MIN_REFILL_CYCLES 1
61#define TRNG_CFG_REG_MIN_REFILL_CYCLES_SHIFT 0
62
63/*
64 * Maximum number of samples taken to regenerate entropy
65 * If value is 0, the number of samples is 2^24 else
66 * equals value times 2^8.
67 */
68#define TRNG_DEF_MAX_REFILL_CYCLES 0
69#define TRNG_CFG_REG_MAX_REFILL_CYCLES_SHIFT 16
70
71/* Number of CLK input cycles between samples */
72#define TRNG_DEF_CLK_DIV_CYCLES 0
73#define TRNG_CFG_REG_SAMPLE_DIV_SHIFT 8
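/*
 * Editorial note, not part of the patch: with the default values above,
 * ks_sa_rng_init() below ends up programming
 *
 *   control = TRNG_DEF_STARTUP_CYCLES << 16                  = 0x0
 *   config  = (TRNG_DEF_MIN_REFILL_CYCLES << 0) |
 *             (TRNG_DEF_MAX_REFILL_CYCLES << 16) |
 *             (TRNG_DEF_CLK_DIV_CYCLES << 8)                  = 0x1
 *
 * which, per the comments above, selects a startup interval of 2^24
 * samples, a minimum refill interval of 1 * 2^6 = 64 samples, a maximum
 * refill interval of 2^24 samples and no extra clock division between
 * samples, before TRNG_CNTL_REG_TRNG_ENABLE (bit 10) is set.
 */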
74
75/* Maximum retries to get rng data */
76#define SA_MAX_RNG_DATA_RETRIES 5
77/* Delay between retries (in usecs) */
78#define SA_RNG_DATA_RETRY_DELAY 5
79
80struct trng_regs {
81 u32 output_l;
82 u32 output_h;
83 u32 status;
84 u32 intmask;
85 u32 intack;
86 u32 control;
87 u32 config;
88};
89
90struct ks_sa_rng {
91 struct device *dev;
92 struct hwrng rng;
93 struct clk *clk;
94 struct regmap *regmap_cfg;
95 struct trng_regs *reg_rng;
96};
97
98static int ks_sa_rng_init(struct hwrng *rng)
99{
100 u32 value;
101 struct device *dev = (struct device *)rng->priv;
102 struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
103
104 /* Enable RNG module */
105 regmap_write_bits(ks_sa_rng->regmap_cfg, SA_CMD_STATUS_OFS,
106 SA_CMD_STATUS_REG_TRNG_ENABLE,
107 SA_CMD_STATUS_REG_TRNG_ENABLE);
108
109 /* Configure RNG module */
110 writel(0, &ks_sa_rng->reg_rng->control);
111 value = TRNG_DEF_STARTUP_CYCLES << TRNG_CNTL_REG_STARTUP_CYCLES_SHIFT;
112 writel(value, &ks_sa_rng->reg_rng->control);
113
114 value = (TRNG_DEF_MIN_REFILL_CYCLES <<
115 TRNG_CFG_REG_MIN_REFILL_CYCLES_SHIFT) |
116 (TRNG_DEF_MAX_REFILL_CYCLES <<
117 TRNG_CFG_REG_MAX_REFILL_CYCLES_SHIFT) |
118 (TRNG_DEF_CLK_DIV_CYCLES <<
119 TRNG_CFG_REG_SAMPLE_DIV_SHIFT);
120
121 writel(value, &ks_sa_rng->reg_rng->config);
122
123 /* Disable all interrupts from TRNG */
124 writel(0, &ks_sa_rng->reg_rng->intmask);
125
126 /* Enable RNG */
127 value = readl(&ks_sa_rng->reg_rng->control);
128 value |= TRNG_CNTL_REG_TRNG_ENABLE;
129 writel(value, &ks_sa_rng->reg_rng->control);
130
131 return 0;
132}
133
134static void ks_sa_rng_cleanup(struct hwrng *rng)
135{
136 struct device *dev = (struct device *)rng->priv;
137 struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
138
139 /* Disable RNG */
140 writel(0, &ks_sa_rng->reg_rng->control);
141 regmap_write_bits(ks_sa_rng->regmap_cfg, SA_CMD_STATUS_OFS,
142 SA_CMD_STATUS_REG_TRNG_ENABLE, 0);
143}
144
145static int ks_sa_rng_data_read(struct hwrng *rng, u32 *data)
146{
147 struct device *dev = (struct device *)rng->priv;
148 struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
149
150 /* Read random data */
151 data[0] = readl(&ks_sa_rng->reg_rng->output_l);
152 data[1] = readl(&ks_sa_rng->reg_rng->output_h);
153
154 writel(TRNG_INTACK_REG_READY, &ks_sa_rng->reg_rng->intack);
155
156 return sizeof(u32) * 2;
157}
158
159static int ks_sa_rng_data_present(struct hwrng *rng, int wait)
160{
161 struct device *dev = (struct device *)rng->priv;
162 struct ks_sa_rng *ks_sa_rng = dev_get_drvdata(dev);
163
164 u32 ready;
165 int j;
166
167 for (j = 0; j < SA_MAX_RNG_DATA_RETRIES; j++) {
168 ready = readl(&ks_sa_rng->reg_rng->status);
169 ready &= TRNG_STATUS_REG_READY;
170
171 if (ready || !wait)
172 break;
173
174 udelay(SA_RNG_DATA_RETRY_DELAY);
175 }
176
177 return ready;
178}
179
180static int ks_sa_rng_probe(struct platform_device *pdev)
181{
182 struct ks_sa_rng *ks_sa_rng;
183 struct device *dev = &pdev->dev;
184 int ret;
185 struct resource *mem;
186
187 ks_sa_rng = devm_kzalloc(dev, sizeof(*ks_sa_rng), GFP_KERNEL);
188 if (!ks_sa_rng)
189 return -ENOMEM;
190
191 ks_sa_rng->dev = dev;
192 ks_sa_rng->rng = (struct hwrng) {
193 .name = "ks_sa_hwrng",
194 .init = ks_sa_rng_init,
195 .data_read = ks_sa_rng_data_read,
196 .data_present = ks_sa_rng_data_present,
197 .cleanup = ks_sa_rng_cleanup,
198 };
199 ks_sa_rng->rng.priv = (unsigned long)dev;
200
201 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
202 ks_sa_rng->reg_rng = devm_ioremap_resource(dev, mem);
203 if (IS_ERR(ks_sa_rng->reg_rng))
204 return PTR_ERR(ks_sa_rng->reg_rng);
205
206 ks_sa_rng->regmap_cfg =
207 syscon_regmap_lookup_by_phandle(dev->of_node,
208 "ti,syscon-sa-cfg");
209
210 if (IS_ERR(ks_sa_rng->regmap_cfg)) {
211 dev_err(dev, "syscon_node_to_regmap failed\n");
212 return -EINVAL;
213 }
214
215 pm_runtime_enable(dev);
216 ret = pm_runtime_get_sync(dev);
217 if (ret < 0) {
218 dev_err(dev, "Failed to enable SA power-domain\n");
219 pm_runtime_disable(dev);
220 return ret;
221 }
222
223 platform_set_drvdata(pdev, ks_sa_rng);
224
225 return devm_hwrng_register(&pdev->dev, &ks_sa_rng->rng);
226}
227
228static int ks_sa_rng_remove(struct platform_device *pdev)
229{
230 pm_runtime_put_sync(&pdev->dev);
231 pm_runtime_disable(&pdev->dev);
232
233 return 0;
234}
235
236static const struct of_device_id ks_sa_rng_dt_match[] = {
237 {
238 .compatible = "ti,keystone-rng",
239 },
240 { },
241};
242MODULE_DEVICE_TABLE(of, ks_sa_rng_dt_match);
243
244static struct platform_driver ks_sa_rng_driver = {
245 .driver = {
246 .name = "ks-sa-rng",
247 .of_match_table = ks_sa_rng_dt_match,
248 },
249 .probe = ks_sa_rng_probe,
250 .remove = ks_sa_rng_remove,
251};
252
253module_platform_driver(ks_sa_rng_driver);
254
255MODULE_DESCRIPTION("Keystone NETCP SA H/W Random Number Generator driver");
256MODULE_AUTHOR("Vitaly Andrianov <vitalya@ti.com>");
257MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
index 467362262651..f83bee513d91 100644
--- a/drivers/char/hw_random/mxc-rnga.c
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -16,16 +16,13 @@
16 * This driver is based on other RNG drivers.
17 */
18
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/kernel.h>
19#include <linux/clk.h>
23#include <linux/err.h>
24#include <linux/ioport.h>
25#include <linux/platform_device.h>
26#include <linux/hw_random.h>
20#include <linux/delay.h>
21#include <linux/hw_random.h>
22#include <linux/io.h>
23#include <linux/module.h>
24#include <linux/of.h>
25#include <linux/platform_device.h>
26
27/* RNGA Registers */
28#define RNGA_CONTROL 0x00
@@ -197,10 +194,18 @@ static int __exit mxc_rnga_remove(struct platform_device *pdev)
194 return 0;
195}
196
197static const struct of_device_id mxc_rnga_of_match[] = {
198 { .compatible = "fsl,imx21-rnga", },
199 { .compatible = "fsl,imx31-rnga", },
200 { /* sentinel */ },
201};
202MODULE_DEVICE_TABLE(of, mxc_rnga_of_match);
203
200static struct platform_driver mxc_rnga_driver = { 204static struct platform_driver mxc_rnga_driver = {
201 .driver = { 205 .driver = {
202 .name = "mxc_rnga", 206 .name = "mxc_rnga",
203 }, 207 .of_match_table = mxc_rnga_of_match,
208 },
204 .remove = __exit_p(mxc_rnga_remove), 209 .remove = __exit_p(mxc_rnga_remove),
205}; 210};
206 211
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 74d11ae6abe9..b65ff6962899 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -150,6 +150,7 @@ struct omap_rng_dev {
150 const struct omap_rng_pdata *pdata; 150 const struct omap_rng_pdata *pdata;
151 struct hwrng rng; 151 struct hwrng rng;
152 struct clk *clk; 152 struct clk *clk;
153 struct clk *clk_reg;
153}; 154};
154 155
155static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg) 156static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg)
@@ -480,6 +481,19 @@ static int omap_rng_probe(struct platform_device *pdev)
480 } 481 }
481 } 482 }
482 483
484 priv->clk_reg = devm_clk_get(&pdev->dev, "reg");
485 if (IS_ERR(priv->clk_reg) && PTR_ERR(priv->clk_reg) == -EPROBE_DEFER)
486 return -EPROBE_DEFER;
487 if (!IS_ERR(priv->clk_reg)) {
488 ret = clk_prepare_enable(priv->clk_reg);
489 if (ret) {
490 dev_err(&pdev->dev,
491 "Unable to enable the register clk: %d\n",
492 ret);
493 goto err_register;
494 }
495 }
496
483 ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) : 497 ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
484 get_omap_rng_device_details(priv); 498 get_omap_rng_device_details(priv);
485 if (ret) 499 if (ret)
@@ -499,8 +513,8 @@ err_register:
499 pm_runtime_put_sync(&pdev->dev); 513 pm_runtime_put_sync(&pdev->dev);
500 pm_runtime_disable(&pdev->dev); 514 pm_runtime_disable(&pdev->dev);
501 515
502 if (!IS_ERR(priv->clk)) 516 clk_disable_unprepare(priv->clk_reg);
503 clk_disable_unprepare(priv->clk); 517 clk_disable_unprepare(priv->clk);
504err_ioremap: 518err_ioremap:
505 dev_err(dev, "initialization failed.\n"); 519 dev_err(dev, "initialization failed.\n");
506 return ret; 520 return ret;
@@ -517,8 +531,8 @@ static int omap_rng_remove(struct platform_device *pdev)
517 pm_runtime_put_sync(&pdev->dev); 531 pm_runtime_put_sync(&pdev->dev);
518 pm_runtime_disable(&pdev->dev); 532 pm_runtime_disable(&pdev->dev);
519 533
520 if (!IS_ERR(priv->clk)) 534 clk_disable_unprepare(priv->clk);
521 clk_disable_unprepare(priv->clk); 535 clk_disable_unprepare(priv->clk_reg);
522 536
523 return 0; 537 return 0;
524} 538}
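The omap-rng hunk above adds an optional register-interface clock: -EPROBE_DEFER from devm_clk_get() must still be propagated so probing retries once the clock provider appears, any other error simply means this SoC has no such clock, and the error/remove paths can call clk_disable_unprepare() unconditionally because it tolerates NULL and error pointers (which is why the IS_ERR() guards are dropped above). A sketch of that pattern under those assumptions, with illustrative names:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_get_optional_clk(struct platform_device *pdev,
				 struct clk **out)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, "reg");
	if (IS_ERR(clk) && PTR_ERR(clk) == -EPROBE_DEFER)
		return -EPROBE_DEFER;		/* provider not ready yet */

	if (!IS_ERR(clk)) {
		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;
	}

	/* may still be an error pointer: clk_disable_unprepare() copes */
	*out = clk;
	return 0;
}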
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 63d84e6f1891..0d2328da3b76 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -16,15 +16,18 @@
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/hw_random.h> 17#include <linux/hw_random.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/iopoll.h>
19#include <linux/kernel.h> 20#include <linux/kernel.h>
20#include <linux/module.h> 21#include <linux/module.h>
21#include <linux/of_address.h> 22#include <linux/of_address.h>
22#include <linux/of_platform.h> 23#include <linux/of_platform.h>
23#include <linux/pm_runtime.h> 24#include <linux/pm_runtime.h>
25#include <linux/reset.h>
24#include <linux/slab.h> 26#include <linux/slab.h>
25 27
26#define RNG_CR 0x00 28#define RNG_CR 0x00
27#define RNG_CR_RNGEN BIT(2) 29#define RNG_CR_RNGEN BIT(2)
30#define RNG_CR_CED BIT(5)
28 31
29#define RNG_SR 0x04 32#define RNG_SR 0x04
30#define RNG_SR_SEIS BIT(6) 33#define RNG_SR_SEIS BIT(6)
@@ -33,19 +36,12 @@
33 36
34#define RNG_DR 0x08 37#define RNG_DR 0x08
35 38
36/*
37 * It takes 40 cycles @ 48MHz to generate each random number (e.g. <1us).
38 * At the time of writing STM32 parts max out at ~200MHz meaning a timeout
39 * of 500 leaves us a very comfortable margin for error. The loop to which
40 * the timeout applies takes at least 4 instructions per iteration so the
41 * timeout is enough to take us up to multi-GHz parts!
42 */
43#define RNG_TIMEOUT 500
44
45struct stm32_rng_private { 39struct stm32_rng_private {
46 struct hwrng rng; 40 struct hwrng rng;
47 void __iomem *base; 41 void __iomem *base;
48 struct clk *clk; 42 struct clk *clk;
43 struct reset_control *rst;
44 bool ced;
49}; 45};
50 46
51static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) 47static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
@@ -59,13 +55,16 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
59 55
60 while (max > sizeof(u32)) { 56 while (max > sizeof(u32)) {
61 sr = readl_relaxed(priv->base + RNG_SR); 57 sr = readl_relaxed(priv->base + RNG_SR);
58 /* Manage timeout which is based on timer and take */
59 /* care of initial delay time when enabling rng */
62 if (!sr && wait) { 60 if (!sr && wait) {
63 unsigned int timeout = RNG_TIMEOUT; 61 retval = readl_relaxed_poll_timeout_atomic(priv->base
64 62 + RNG_SR,
65 do { 63 sr, sr,
66 cpu_relax(); 64 10, 50000);
67 sr = readl_relaxed(priv->base + RNG_SR); 65 if (retval)
68 } while (!sr && --timeout); 66 dev_err((struct device *)priv->rng.priv,
67 "%s: timeout %x!\n", __func__, sr);
69 } 68 }
70 69
71 /* If error detected or data not ready... */ 70 /* If error detected or data not ready... */
@@ -99,7 +98,11 @@ static int stm32_rng_init(struct hwrng *rng)
99 if (err) 98 if (err)
100 return err; 99 return err;
101 100
102 writel_relaxed(RNG_CR_RNGEN, priv->base + RNG_CR); 101 if (priv->ced)
102 writel_relaxed(RNG_CR_RNGEN, priv->base + RNG_CR);
103 else
104 writel_relaxed(RNG_CR_RNGEN | RNG_CR_CED,
105 priv->base + RNG_CR);
103 106
104 /* clear error indicators */ 107 /* clear error indicators */
105 writel_relaxed(0, priv->base + RNG_SR); 108 writel_relaxed(0, priv->base + RNG_SR);
@@ -140,6 +143,15 @@ static int stm32_rng_probe(struct platform_device *ofdev)
140 if (IS_ERR(priv->clk)) 143 if (IS_ERR(priv->clk))
141 return PTR_ERR(priv->clk); 144 return PTR_ERR(priv->clk);
142 145
146 priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
147 if (!IS_ERR(priv->rst)) {
148 reset_control_assert(priv->rst);
149 udelay(2);
150 reset_control_deassert(priv->rst);
151 }
152
153 priv->ced = of_property_read_bool(np, "clock-error-detect");
154
143 dev_set_drvdata(dev, priv); 155 dev_set_drvdata(dev, priv);
144 156
145 priv->rng.name = dev_driver_string(dev), 157 priv->rng.name = dev_driver_string(dev),
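The stm32-rng rework above swaps a hand-rolled cpu_relax() loop for readl_relaxed_poll_timeout_atomic() from <linux/iopoll.h>, which polls a register until a condition holds or a microsecond timeout expires and returns -ETIMEDOUT otherwise; it also pulses an optional reset line at probe and honours the new clock-error-detect DT property. A minimal sketch of the polling helper (register offset and names are hypothetical):

#include <linux/io.h>
#include <linux/iopoll.h>

#define DEMO_SR		0x04	/* hypothetical status register offset */

static int demo_wait_for_data(void __iomem *base)
{
	u32 sr;

	/* poll every 10us, give up after 50ms, as the driver above does */
	return readl_relaxed_poll_timeout_atomic(base + DEMO_SR, sr,
						 sr != 0, 10, 50000);
}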
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4b741b83e23f..d1ea1a07cecb 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -464,13 +464,6 @@ if CRYPTO_DEV_UX500
464 source "drivers/crypto/ux500/Kconfig" 464 source "drivers/crypto/ux500/Kconfig"
465endif # if CRYPTO_DEV_UX500 465endif # if CRYPTO_DEV_UX500
466 466
467config CRYPTO_DEV_BFIN_CRC
468 tristate "Support for Blackfin CRC hardware"
469 depends on BF60x
470 help
471 Newer Blackfin processors have CRC hardware. Select this if you
472 want to use the Blackfin CRC module.
473
474config CRYPTO_DEV_ATMEL_AUTHENC 467config CRYPTO_DEV_ATMEL_AUTHENC
475 tristate "Support for Atmel IPSEC/SSL hw accelerator" 468 tristate "Support for Atmel IPSEC/SSL hw accelerator"
476 depends on HAS_DMA 469 depends on HAS_DMA
@@ -730,4 +723,31 @@ config CRYPTO_DEV_ARTPEC6
730 723
731 To compile this driver as a module, choose M here. 724 To compile this driver as a module, choose M here.
732 725
726config CRYPTO_DEV_CCREE
727 tristate "Support for ARM TrustZone CryptoCell family of security processors"
728 depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
729 default n
730 select CRYPTO_HASH
731 select CRYPTO_BLKCIPHER
732 select CRYPTO_DES
733 select CRYPTO_AEAD
734 select CRYPTO_AUTHENC
735 select CRYPTO_SHA1
736 select CRYPTO_MD5
737 select CRYPTO_SHA256
738 select CRYPTO_SHA512
739 select CRYPTO_HMAC
740 select CRYPTO_AES
741 select CRYPTO_CBC
742 select CRYPTO_ECB
743 select CRYPTO_CTR
744 select CRYPTO_XTS
745 help
746 Say 'Y' to enable a driver for the REE interface of the Arm
747 TrustZone CryptoCell family of processors. Currently the
748 CryptoCell 712, 710 and 630 are supported.
749 Choose this if you wish to use hardware acceleration of
750 cryptographic operations on the system REE.
751 If unsure say Y.
752
733endif # CRYPTO_HW 753endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 2513d13ea2c4..7ae87b4f6c8d 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -3,9 +3,9 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
3obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o 3obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
4obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o 4obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
5obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o 5obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o
6obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
7obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/ 6obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
8obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/ 7obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
8obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
9obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/ 9obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
10obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/ 10obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
11obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/ 11obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 691c6465b71e..801aeab5ab1e 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2155,7 +2155,7 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
2155 2155
2156badkey: 2156badkey:
2157 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 2157 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2158 memzero_explicit(&key, sizeof(keys)); 2158 memzero_explicit(&keys, sizeof(keys));
2159 return -EINVAL; 2159 return -EINVAL;
2160} 2160}
2161 2161
@@ -2602,16 +2602,13 @@ static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pd
2602 } 2602 }
2603 2603
2604 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 2604 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
2605 if (!pdata) { 2605 if (!pdata)
2606 dev_err(&pdev->dev, "could not allocate memory for pdata\n");
2607 return ERR_PTR(-ENOMEM); 2606 return ERR_PTR(-ENOMEM);
2608 }
2609 2607
2610 pdata->dma_slave = devm_kzalloc(&pdev->dev, 2608 pdata->dma_slave = devm_kzalloc(&pdev->dev,
2611 sizeof(*(pdata->dma_slave)), 2609 sizeof(*(pdata->dma_slave)),
2612 GFP_KERNEL); 2610 GFP_KERNEL);
2613 if (!pdata->dma_slave) { 2611 if (!pdata->dma_slave) {
2614 dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
2615 devm_kfree(&pdev->dev, pdata); 2612 devm_kfree(&pdev->dev, pdata);
2616 return ERR_PTR(-ENOMEM); 2613 return ERR_PTR(-ENOMEM);
2617 } 2614 }
@@ -2649,7 +2646,6 @@ static int atmel_aes_probe(struct platform_device *pdev)
2649 2646
2650 aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL); 2647 aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
2651 if (aes_dd == NULL) { 2648 if (aes_dd == NULL) {
2652 dev_err(dev, "unable to alloc data struct.\n");
2653 err = -ENOMEM; 2649 err = -ENOMEM;
2654 goto aes_dd_err; 2650 goto aes_dd_err;
2655 } 2651 }
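The first atmel-aes hunk above fixes the bad-key path: the old code wiped &key, the caller's pointer argument, with sizeof(keys), so the parsed authenc key material stayed on the stack; the fix clears the keys struct itself. memzero_explicit() is used because a plain memset() of a dying stack object may be optimised away. A sketch of the corrected idiom, assuming the usual crypto_authenc_extractkeys() flow (demo_setkey is hypothetical):

#include <crypto/authenc.h>
#include <linux/string.h>

static int demo_setkey(const u8 *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (err)
		return err;

	/* ... program keys.authkey / keys.enckey into the engine ... */

	/* wipe the parsed keys; the compiler cannot elide this clear */
	memzero_explicit(&keys, sizeof(keys));
	return 0;
}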
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 8874aa5ca0f7..4d43081120db 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2726,18 +2726,14 @@ static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pd
2726 } 2726 }
2727 2727
2728 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 2728 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
2729 if (!pdata) { 2729 if (!pdata)
2730 dev_err(&pdev->dev, "could not allocate memory for pdata\n");
2731 return ERR_PTR(-ENOMEM); 2730 return ERR_PTR(-ENOMEM);
2732 }
2733 2731
2734 pdata->dma_slave = devm_kzalloc(&pdev->dev, 2732 pdata->dma_slave = devm_kzalloc(&pdev->dev,
2735 sizeof(*(pdata->dma_slave)), 2733 sizeof(*(pdata->dma_slave)),
2736 GFP_KERNEL); 2734 GFP_KERNEL);
2737 if (!pdata->dma_slave) { 2735 if (!pdata->dma_slave)
2738 dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
2739 return ERR_PTR(-ENOMEM); 2736 return ERR_PTR(-ENOMEM);
2740 }
2741 2737
2742 return pdata; 2738 return pdata;
2743} 2739}
@@ -2758,7 +2754,6 @@ static int atmel_sha_probe(struct platform_device *pdev)
2758 2754
2759 sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL); 2755 sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
2760 if (sha_dd == NULL) { 2756 if (sha_dd == NULL) {
2761 dev_err(dev, "unable to alloc data struct.\n");
2762 err = -ENOMEM; 2757 err = -ENOMEM;
2763 goto sha_dd_err; 2758 goto sha_dd_err;
2764 } 2759 }
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 592124f8382b..97b0423efa7f 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1312,18 +1312,14 @@ static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *p
1312 } 1312 }
1313 1313
1314 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 1314 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1315 if (!pdata) { 1315 if (!pdata)
1316 dev_err(&pdev->dev, "could not allocate memory for pdata\n");
1317 return ERR_PTR(-ENOMEM); 1316 return ERR_PTR(-ENOMEM);
1318 }
1319 1317
1320 pdata->dma_slave = devm_kzalloc(&pdev->dev, 1318 pdata->dma_slave = devm_kzalloc(&pdev->dev,
1321 sizeof(*(pdata->dma_slave)), 1319 sizeof(*(pdata->dma_slave)),
1322 GFP_KERNEL); 1320 GFP_KERNEL);
1323 if (!pdata->dma_slave) { 1321 if (!pdata->dma_slave)
1324 dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
1325 return ERR_PTR(-ENOMEM); 1322 return ERR_PTR(-ENOMEM);
1326 }
1327 1323
1328 return pdata; 1324 return pdata;
1329} 1325}
@@ -1344,7 +1340,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
1344 1340
1345 tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL); 1341 tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
1346 if (tdes_dd == NULL) { 1342 if (tdes_dd == NULL) {
1347 dev_err(dev, "unable to alloc data struct.\n");
1348 err = -ENOMEM; 1343 err = -ENOMEM;
1349 goto tdes_dd_err; 1344 goto tdes_dd_err;
1350 } 1345 }
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 2b75f95bbe1b..309c67c7012f 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -818,7 +818,7 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
818 818
819 /* AES hashing keeps key size in type field, so need to copy it here */ 819 /* AES hashing keeps key size in type field, so need to copy it here */
820 if (hash_parms.alg == HASH_ALG_AES) 820 if (hash_parms.alg == HASH_ALG_AES)
821 hash_parms.type = cipher_parms.type; 821 hash_parms.type = (enum hash_type)cipher_parms.type;
822 else 822 else
823 hash_parms.type = spu->spu_hash_type(rctx->total_sent); 823 hash_parms.type = spu->spu_hash_type(rctx->total_sent);
824 824
@@ -1409,7 +1409,7 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
1409 rctx->iv_ctr_len); 1409 rctx->iv_ctr_len);
1410 1410
1411 if (ctx->auth.alg == HASH_ALG_AES) 1411 if (ctx->auth.alg == HASH_ALG_AES)
1412 hash_parms.type = ctx->cipher_type; 1412 hash_parms.type = (enum hash_type)ctx->cipher_type;
1413 1413
1414 /* General case AAD padding (CCM and RFC4543 special cases below) */ 1414 /* General case AAD padding (CCM and RFC4543 special cases below) */
1415 aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, 1415 aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
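The bcm/cipher.c casts above make an intentional cross-enum assignment explicit: as the in-line comment notes, for AES-based hashing the SPU hardware keeps the key size in the cipher "type" field, so that value is reused directly as the hash type. A small illustration of why the cast is benign here (enum names and values are made up, not Broadcom's):

/* two enums that happen to share a hardware encoding */
enum demo_cipher_type { DEMO_CIPHER_AES128 = 1, DEMO_CIPHER_AES256 = 3 };
enum demo_hash_type   { DEMO_HASH_AES128   = 1, DEMO_HASH_AES256   = 3 };

static enum demo_hash_type demo_hash_type_of(enum demo_cipher_type c)
{
	/* explicit cast documents the reuse and silences -Wenum-conversion */
	return (enum demo_hash_type)c;
}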
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index d543c010ccd9..a912c6ad3e85 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -279,7 +279,6 @@ int do_shash(unsigned char *name, unsigned char *result,
279 sdesc = kmalloc(size, GFP_KERNEL); 279 sdesc = kmalloc(size, GFP_KERNEL);
280 if (!sdesc) { 280 if (!sdesc) {
281 rc = -ENOMEM; 281 rc = -ENOMEM;
282 pr_err("%s: Memory allocation failure\n", __func__);
283 goto do_shash_err; 282 goto do_shash_err;
284 } 283 }
285 sdesc->shash.tfm = hash; 284 sdesc->shash.tfm = hash;
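The pr_err() removed above (and the matching dev_err() deletions in the atmel drivers earlier in this pull) follow the same rule: an allocation without __GFP_NOWARN already triggers an allocator warning with a backtrace when it fails, so a second per-driver message adds nothing. The resulting idiom is simply:

#include <linux/slab.h>

static void *demo_alloc_ctx(size_t size)
{
	void *ctx = kzalloc(size, GFP_KERNEL);

	if (!ctx)		/* no print: the allocator already warned */
		return NULL;

	return ctx;
}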
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
deleted file mode 100644
index bfbf8bf77f03..000000000000
--- a/drivers/crypto/bfin_crc.c
+++ /dev/null
@@ -1,743 +0,0 @@
1/*
2 * Cryptographic API.
3 *
4 * Support Blackfin CRC HW acceleration.
5 *
6 * Copyright 2012 Analog Devices Inc.
7 *
8 * Licensed under the GPL-2.
9 */
10
11#include <linux/err.h>
12#include <linux/device.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/errno.h>
16#include <linux/interrupt.h>
17#include <linux/kernel.h>
18#include <linux/irq.h>
19#include <linux/io.h>
20#include <linux/platform_device.h>
21#include <linux/scatterlist.h>
22#include <linux/dma-mapping.h>
23#include <linux/delay.h>
24#include <linux/crypto.h>
25#include <linux/cryptohash.h>
26#include <crypto/scatterwalk.h>
27#include <crypto/algapi.h>
28#include <crypto/hash.h>
29#include <crypto/internal/hash.h>
30#include <asm/unaligned.h>
31
32#include <asm/dma.h>
33#include <asm/portmux.h>
34#include <asm/io.h>
35
36#include "bfin_crc.h"
37
38#define CRC_CCRYPTO_QUEUE_LENGTH 5
39
40#define DRIVER_NAME "bfin-hmac-crc"
41#define CHKSUM_DIGEST_SIZE 4
42#define CHKSUM_BLOCK_SIZE 1
43
44#define CRC_MAX_DMA_DESC 100
45
46#define CRC_CRYPTO_STATE_UPDATE 1
47#define CRC_CRYPTO_STATE_FINALUPDATE 2
48#define CRC_CRYPTO_STATE_FINISH 3
49
50struct bfin_crypto_crc {
51 struct list_head list;
52 struct device *dev;
53 spinlock_t lock;
54
55 int irq;
56 int dma_ch;
57 u32 poly;
58 struct crc_register *regs;
59
60 struct ahash_request *req; /* current request in operation */
61 struct dma_desc_array *sg_cpu; /* virt addr of sg dma descriptors */
62 dma_addr_t sg_dma; /* phy addr of sg dma descriptors */
63 u8 *sg_mid_buf;
64 dma_addr_t sg_mid_dma; /* phy addr of sg mid buffer */
65
66 struct tasklet_struct done_task;
67 struct crypto_queue queue; /* waiting requests */
68
69 u8 busy:1; /* crc device in operation flag */
70};
71
72static struct bfin_crypto_crc_list {
73 struct list_head dev_list;
74 spinlock_t lock;
75} crc_list;
76
77struct bfin_crypto_crc_reqctx {
78 struct bfin_crypto_crc *crc;
79
80 unsigned int total; /* total request bytes */
81 size_t sg_buflen; /* bytes for this update */
82 unsigned int sg_nents;
83 struct scatterlist *sg; /* sg list head for this update*/
84 struct scatterlist bufsl[2]; /* chained sg list */
85
86 size_t bufnext_len;
87 size_t buflast_len;
 88	u8 bufnext[CHKSUM_DIGEST_SIZE]; /* extra bytes for next update */
 89	u8 buflast[CHKSUM_DIGEST_SIZE]; /* extra bytes from last update */
90
91 u8 flag;
92};
93
94struct bfin_crypto_crc_ctx {
95 struct bfin_crypto_crc *crc;
96 u32 key;
97};
98
99/*
100 * get element in scatter list by given index
101 */
102static struct scatterlist *sg_get(struct scatterlist *sg_list, unsigned int nents,
103 unsigned int index)
104{
105 struct scatterlist *sg = NULL;
106 int i;
107
108 for_each_sg(sg_list, sg, nents, i)
109 if (i == index)
110 break;
111
112 return sg;
113}
114
115static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key)
116{
117 writel(0, &crc->regs->datacntrld);
118 writel(MODE_CALC_CRC << OPMODE_OFFSET, &crc->regs->control);
119 writel(key, &crc->regs->curresult);
120
121 /* setup CRC interrupts */
122 writel(CMPERRI | DCNTEXPI, &crc->regs->status);
123 writel(CMPERRI | DCNTEXPI, &crc->regs->intrenset);
124
125 return 0;
126}
127
128static int bfin_crypto_crc_init(struct ahash_request *req)
129{
130 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
131 struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
132 struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
133 struct bfin_crypto_crc *crc;
134
135 dev_dbg(ctx->crc->dev, "crc_init\n");
136 spin_lock_bh(&crc_list.lock);
137 list_for_each_entry(crc, &crc_list.dev_list, list) {
138 crc_ctx->crc = crc;
139 break;
140 }
141 spin_unlock_bh(&crc_list.lock);
142
143 if (sg_nents(req->src) > CRC_MAX_DMA_DESC) {
144 dev_dbg(ctx->crc->dev, "init: requested sg list is too big > %d\n",
145 CRC_MAX_DMA_DESC);
146 return -EINVAL;
147 }
148
149 ctx->crc = crc;
150 ctx->bufnext_len = 0;
151 ctx->buflast_len = 0;
152 ctx->sg_buflen = 0;
153 ctx->total = 0;
154 ctx->flag = 0;
155
156 /* init crc results */
157 put_unaligned_le32(crc_ctx->key, req->result);
158
159 dev_dbg(ctx->crc->dev, "init: digest size: %d\n",
160 crypto_ahash_digestsize(tfm));
161
162 return bfin_crypto_crc_init_hw(crc, crc_ctx->key);
163}
164
165static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
166{
167 struct scatterlist *sg;
168 struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(crc->req);
169 int i = 0, j = 0;
170 unsigned long dma_config;
171 unsigned int dma_count;
172 unsigned int dma_addr;
173 unsigned int mid_dma_count = 0;
174 int dma_mod;
175
176 dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE);
177
178 for_each_sg(ctx->sg, sg, ctx->sg_nents, j) {
179 dma_addr = sg_dma_address(sg);
180 /* deduce extra bytes in last sg */
181 if (sg_is_last(sg))
182 dma_count = sg_dma_len(sg) - ctx->bufnext_len;
183 else
184 dma_count = sg_dma_len(sg);
185
186 if (mid_dma_count) {
187 /* Append last middle dma buffer to 4 bytes with first
188 bytes in current sg buffer. Move addr of current
189 sg and deduce the length of current sg.
190 */
191 memcpy(crc->sg_mid_buf +(i << 2) + mid_dma_count,
192 sg_virt(sg),
193 CHKSUM_DIGEST_SIZE - mid_dma_count);
194 dma_addr += CHKSUM_DIGEST_SIZE - mid_dma_count;
195 dma_count -= CHKSUM_DIGEST_SIZE - mid_dma_count;
196
197 dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 |
198 DMAEN | PSIZE_32 | WDSIZE_32;
199
200 /* setup new dma descriptor for next middle dma */
201 crc->sg_cpu[i].start_addr = crc->sg_mid_dma + (i << 2);
202 crc->sg_cpu[i].cfg = dma_config;
203 crc->sg_cpu[i].x_count = 1;
204 crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
205 dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
206 "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n",
207 i, crc->sg_cpu[i].start_addr,
208 crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
209 crc->sg_cpu[i].x_modify);
210 i++;
211 }
212
213 dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32;
214 /* chop current sg dma len to multiple of 32 bits */
215 mid_dma_count = dma_count % 4;
216 dma_count &= ~0x3;
217
218 if (dma_addr % 4 == 0) {
219 dma_config |= WDSIZE_32;
220 dma_count >>= 2;
221 dma_mod = 4;
222 } else if (dma_addr % 2 == 0) {
223 dma_config |= WDSIZE_16;
224 dma_count >>= 1;
225 dma_mod = 2;
226 } else {
227 dma_config |= WDSIZE_8;
228 dma_mod = 1;
229 }
230
231 crc->sg_cpu[i].start_addr = dma_addr;
232 crc->sg_cpu[i].cfg = dma_config;
233 crc->sg_cpu[i].x_count = dma_count;
234 crc->sg_cpu[i].x_modify = dma_mod;
235 dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
236 "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n",
237 i, crc->sg_cpu[i].start_addr,
238 crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
239 crc->sg_cpu[i].x_modify);
240 i++;
241
242 if (mid_dma_count) {
243 /* copy extra bytes to next middle dma buffer */
244 memcpy(crc->sg_mid_buf + (i << 2),
245 (u8*)sg_virt(sg) + (dma_count << 2),
246 mid_dma_count);
247 }
248 }
249
250 dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32 | WDSIZE_32;
251 /* For final update req, append the buffer for next update as well*/
252 if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
253 ctx->flag == CRC_CRYPTO_STATE_FINISH)) {
254 crc->sg_cpu[i].start_addr = dma_map_single(crc->dev, ctx->bufnext,
255 CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE);
256 crc->sg_cpu[i].cfg = dma_config;
257 crc->sg_cpu[i].x_count = 1;
258 crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
259 dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
260 "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n",
261 i, crc->sg_cpu[i].start_addr,
262 crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
263 crc->sg_cpu[i].x_modify);
264 i++;
265 }
266
267 if (i == 0)
268 return;
269
270 /* Set the last descriptor to stop mode */
271 crc->sg_cpu[i - 1].cfg &= ~(DMAFLOW | NDSIZE);
272 crc->sg_cpu[i - 1].cfg |= DI_EN;
273 set_dma_curr_desc_addr(crc->dma_ch, (unsigned long *)crc->sg_dma);
274 set_dma_x_count(crc->dma_ch, 0);
275 set_dma_x_modify(crc->dma_ch, 0);
276 set_dma_config(crc->dma_ch, dma_config);
277}
278
279static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
280 struct ahash_request *req)
281{
282 struct crypto_async_request *async_req, *backlog;
283 struct bfin_crypto_crc_reqctx *ctx;
284 struct scatterlist *sg;
285 int ret = 0;
286 int nsg, i, j;
287 unsigned int nextlen;
288 unsigned long flags;
289 u32 reg;
290
291 spin_lock_irqsave(&crc->lock, flags);
292 if (req)
293 ret = ahash_enqueue_request(&crc->queue, req);
294 if (crc->busy) {
295 spin_unlock_irqrestore(&crc->lock, flags);
296 return ret;
297 }
298 backlog = crypto_get_backlog(&crc->queue);
299 async_req = crypto_dequeue_request(&crc->queue);
300 if (async_req)
301 crc->busy = 1;
302 spin_unlock_irqrestore(&crc->lock, flags);
303
304 if (!async_req)
305 return ret;
306
307 if (backlog)
308 backlog->complete(backlog, -EINPROGRESS);
309
310 req = ahash_request_cast(async_req);
311 crc->req = req;
312 ctx = ahash_request_ctx(req);
313 ctx->sg = NULL;
314 ctx->sg_buflen = 0;
315 ctx->sg_nents = 0;
316
317 dev_dbg(crc->dev, "handling new req, flag=%u, nbytes: %d\n",
318 ctx->flag, req->nbytes);
319
320 if (ctx->flag == CRC_CRYPTO_STATE_FINISH) {
321 if (ctx->bufnext_len == 0) {
322 crc->busy = 0;
323 return 0;
324 }
325
326 /* Pack last crc update buffer to 32bit */
327 memset(ctx->bufnext + ctx->bufnext_len, 0,
328 CHKSUM_DIGEST_SIZE - ctx->bufnext_len);
329 } else {
330 /* Pack small data which is less than 32bit to buffer for next update. */
331 if (ctx->bufnext_len + req->nbytes < CHKSUM_DIGEST_SIZE) {
332 memcpy(ctx->bufnext + ctx->bufnext_len,
333 sg_virt(req->src), req->nbytes);
334 ctx->bufnext_len += req->nbytes;
335 if (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE &&
336 ctx->bufnext_len) {
337 goto finish_update;
338 } else {
339 crc->busy = 0;
340 return 0;
341 }
342 }
343
344 if (ctx->bufnext_len) {
345 /* Chain in extra bytes of last update */
346 ctx->buflast_len = ctx->bufnext_len;
347 memcpy(ctx->buflast, ctx->bufnext, ctx->buflast_len);
348
349 nsg = ctx->sg_buflen ? 2 : 1;
350 sg_init_table(ctx->bufsl, nsg);
351 sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len);
352 if (nsg > 1)
353 sg_chain(ctx->bufsl, nsg, req->src);
354 ctx->sg = ctx->bufsl;
355 } else
356 ctx->sg = req->src;
357
358 /* Chop crc buffer size to multiple of 32 bit */
359 nsg = sg_nents(ctx->sg);
360 ctx->sg_nents = nsg;
361 ctx->sg_buflen = ctx->buflast_len + req->nbytes;
362 ctx->bufnext_len = ctx->sg_buflen % 4;
363 ctx->sg_buflen &= ~0x3;
364
365 if (ctx->bufnext_len) {
366 /* copy extra bytes to buffer for next update */
367 memset(ctx->bufnext, 0, CHKSUM_DIGEST_SIZE);
368 nextlen = ctx->bufnext_len;
369 for (i = nsg - 1; i >= 0; i--) {
370 sg = sg_get(ctx->sg, nsg, i);
371 j = min(nextlen, sg_dma_len(sg));
372 memcpy(ctx->bufnext + nextlen - j,
373 sg_virt(sg) + sg_dma_len(sg) - j, j);
374 if (j == sg_dma_len(sg))
375 ctx->sg_nents--;
376 nextlen -= j;
377 if (nextlen == 0)
378 break;
379 }
380 }
381 }
382
383finish_update:
384 if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
385 ctx->flag == CRC_CRYPTO_STATE_FINISH))
386 ctx->sg_buflen += CHKSUM_DIGEST_SIZE;
387
388 /* set CRC data count before start DMA */
389 writel(ctx->sg_buflen >> 2, &crc->regs->datacnt);
390
391 /* setup and enable CRC DMA */
392 bfin_crypto_crc_config_dma(crc);
393
394 /* finally kick off CRC operation */
395 reg = readl(&crc->regs->control);
396 writel(reg | BLKEN, &crc->regs->control);
397
398 return -EINPROGRESS;
399}
400
401static int bfin_crypto_crc_update(struct ahash_request *req)
402{
403 struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
404
405 if (!req->nbytes)
406 return 0;
407
408 dev_dbg(ctx->crc->dev, "crc_update\n");
409 ctx->total += req->nbytes;
410 ctx->flag = CRC_CRYPTO_STATE_UPDATE;
411
412 return bfin_crypto_crc_handle_queue(ctx->crc, req);
413}
414
415static int bfin_crypto_crc_final(struct ahash_request *req)
416{
417 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
418 struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
419 struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
420
421 dev_dbg(ctx->crc->dev, "crc_final\n");
422 ctx->flag = CRC_CRYPTO_STATE_FINISH;
423 crc_ctx->key = 0;
424
425 return bfin_crypto_crc_handle_queue(ctx->crc, req);
426}
427
428static int bfin_crypto_crc_finup(struct ahash_request *req)
429{
430 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
431 struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
432 struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
433
434 dev_dbg(ctx->crc->dev, "crc_finishupdate\n");
435 ctx->total += req->nbytes;
436 ctx->flag = CRC_CRYPTO_STATE_FINALUPDATE;
437 crc_ctx->key = 0;
438
439 return bfin_crypto_crc_handle_queue(ctx->crc, req);
440}
441
442static int bfin_crypto_crc_digest(struct ahash_request *req)
443{
444 int ret;
445
446 ret = bfin_crypto_crc_init(req);
447 if (ret)
448 return ret;
449
450 return bfin_crypto_crc_finup(req);
451}
452
453static int bfin_crypto_crc_setkey(struct crypto_ahash *tfm, const u8 *key,
454 unsigned int keylen)
455{
456 struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
457
458 dev_dbg(crc_ctx->crc->dev, "crc_setkey\n");
459 if (keylen != CHKSUM_DIGEST_SIZE) {
460 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
461 return -EINVAL;
462 }
463
464 crc_ctx->key = get_unaligned_le32(key);
465
466 return 0;
467}
468
469static int bfin_crypto_crc_cra_init(struct crypto_tfm *tfm)
470{
471 struct bfin_crypto_crc_ctx *crc_ctx = crypto_tfm_ctx(tfm);
472
473 crc_ctx->key = 0;
474 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
475 sizeof(struct bfin_crypto_crc_reqctx));
476
477 return 0;
478}
479
480static void bfin_crypto_crc_cra_exit(struct crypto_tfm *tfm)
481{
482}
483
484static struct ahash_alg algs = {
485 .init = bfin_crypto_crc_init,
486 .update = bfin_crypto_crc_update,
487 .final = bfin_crypto_crc_final,
488 .finup = bfin_crypto_crc_finup,
489 .digest = bfin_crypto_crc_digest,
490 .setkey = bfin_crypto_crc_setkey,
491 .halg.digestsize = CHKSUM_DIGEST_SIZE,
492 .halg.base = {
493 .cra_name = "hmac(crc32)",
494 .cra_driver_name = DRIVER_NAME,
495 .cra_priority = 100,
496 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
497 CRYPTO_ALG_ASYNC |
498 CRYPTO_ALG_OPTIONAL_KEY,
499 .cra_blocksize = CHKSUM_BLOCK_SIZE,
500 .cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx),
501 .cra_alignmask = 3,
502 .cra_module = THIS_MODULE,
503 .cra_init = bfin_crypto_crc_cra_init,
504 .cra_exit = bfin_crypto_crc_cra_exit,
505 }
506};
507
508static void bfin_crypto_crc_done_task(unsigned long data)
509{
510 struct bfin_crypto_crc *crc = (struct bfin_crypto_crc *)data;
511
512 bfin_crypto_crc_handle_queue(crc, NULL);
513}
514
515static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id)
516{
517 struct bfin_crypto_crc *crc = dev_id;
518 u32 reg;
519
520 if (readl(&crc->regs->status) & DCNTEXP) {
521 writel(DCNTEXP, &crc->regs->status);
522
523 /* prepare results */
524 put_unaligned_le32(readl(&crc->regs->result),
525 crc->req->result);
526
527 reg = readl(&crc->regs->control);
528 writel(reg & ~BLKEN, &crc->regs->control);
529 crc->busy = 0;
530
531 if (crc->req->base.complete)
532 crc->req->base.complete(&crc->req->base, 0);
533
534 tasklet_schedule(&crc->done_task);
535
536 return IRQ_HANDLED;
537 } else
538 return IRQ_NONE;
539}
540
541#ifdef CONFIG_PM
542/**
543 * bfin_crypto_crc_suspend - suspend crc device
544 * @pdev: device being suspended
545 * @state: requested suspend state
546 */
547static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t state)
548{
549 struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
550 int i = 100000;
551
552 while ((readl(&crc->regs->control) & BLKEN) && --i)
553 cpu_relax();
554
555 if (i == 0)
556 return -EBUSY;
557
558 return 0;
559}
560#else
561# define bfin_crypto_crc_suspend NULL
562#endif
563
564#define bfin_crypto_crc_resume NULL
565
566/**
567 * bfin_crypto_crc_probe - Initialize module
568 *
569 */
570static int bfin_crypto_crc_probe(struct platform_device *pdev)
571{
572 struct device *dev = &pdev->dev;
573 struct resource *res;
574 struct bfin_crypto_crc *crc;
575 unsigned int timeout = 100000;
576 int ret;
577
578 crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
579 if (!crc) {
580 dev_err(&pdev->dev, "fail to malloc bfin_crypto_crc\n");
581 return -ENOMEM;
582 }
583
584 crc->dev = dev;
585
586 INIT_LIST_HEAD(&crc->list);
587 spin_lock_init(&crc->lock);
588 tasklet_init(&crc->done_task, bfin_crypto_crc_done_task, (unsigned long)crc);
589 crypto_init_queue(&crc->queue, CRC_CCRYPTO_QUEUE_LENGTH);
590
591 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
592 crc->regs = devm_ioremap_resource(dev, res);
593 if (IS_ERR((void *)crc->regs)) {
594 dev_err(&pdev->dev, "Cannot map CRC IO\n");
595 return PTR_ERR((void *)crc->regs);
596 }
597
598 crc->irq = platform_get_irq(pdev, 0);
599 if (crc->irq < 0) {
600 dev_err(&pdev->dev, "No CRC DCNTEXP IRQ specified\n");
601 return -ENOENT;
602 }
603
604 ret = devm_request_irq(dev, crc->irq, bfin_crypto_crc_handler,
605 IRQF_SHARED, dev_name(dev), crc);
606 if (ret) {
607 dev_err(&pdev->dev, "Unable to request blackfin crc irq\n");
608 return ret;
609 }
610
611 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
612 if (res == NULL) {
613 dev_err(&pdev->dev, "No CRC DMA channel specified\n");
614 return -ENOENT;
615 }
616 crc->dma_ch = res->start;
617
618 ret = request_dma(crc->dma_ch, dev_name(dev));
619 if (ret) {
620 dev_err(&pdev->dev, "Unable to attach Blackfin CRC DMA channel\n");
621 return ret;
622 }
623
624 crc->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &crc->sg_dma, GFP_KERNEL);
625 if (crc->sg_cpu == NULL) {
626 ret = -ENOMEM;
627 goto out_error_dma;
628 }
629 /*
630 * need at most CRC_MAX_DMA_DESC sg + CRC_MAX_DMA_DESC middle +
631 * 1 last + 1 next dma descriptors
632 */
633 crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1));
634 crc->sg_mid_dma = crc->sg_dma + sizeof(struct dma_desc_array)
635 * ((CRC_MAX_DMA_DESC + 1) << 1);
636
637 writel(0, &crc->regs->control);
638 crc->poly = (u32)pdev->dev.platform_data;
639 writel(crc->poly, &crc->regs->poly);
640
641 while (!(readl(&crc->regs->status) & LUTDONE) && (--timeout) > 0)
642 cpu_relax();
643
644 if (timeout == 0)
645 dev_info(&pdev->dev, "init crc poly timeout\n");
646
647 platform_set_drvdata(pdev, crc);
648
649 spin_lock(&crc_list.lock);
650 list_add(&crc->list, &crc_list.dev_list);
651 spin_unlock(&crc_list.lock);
652
653 if (list_is_singular(&crc_list.dev_list)) {
654 ret = crypto_register_ahash(&algs);
655 if (ret) {
656 dev_err(&pdev->dev,
657 "Can't register crypto ahash device\n");
658 goto out_error_dma;
659 }
660 }
661
662 dev_info(&pdev->dev, "initialized\n");
663
664 return 0;
665
666out_error_dma:
667 if (crc->sg_cpu)
668 dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma);
669 free_dma(crc->dma_ch);
670
671 return ret;
672}
673
674/**
675 * bfin_crypto_crc_remove - Initialize module
676 *
677 */
678static int bfin_crypto_crc_remove(struct platform_device *pdev)
679{
680 struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
681
682 if (!crc)
683 return -ENODEV;
684
685 spin_lock(&crc_list.lock);
686 list_del(&crc->list);
687 spin_unlock(&crc_list.lock);
688
689 crypto_unregister_ahash(&algs);
690 tasklet_kill(&crc->done_task);
691 free_dma(crc->dma_ch);
692
693 return 0;
694}
695
696static struct platform_driver bfin_crypto_crc_driver = {
697 .probe = bfin_crypto_crc_probe,
698 .remove = bfin_crypto_crc_remove,
699 .suspend = bfin_crypto_crc_suspend,
700 .resume = bfin_crypto_crc_resume,
701 .driver = {
702 .name = DRIVER_NAME,
703 },
704};
705
706/**
707 * bfin_crypto_crc_mod_init - Initialize module
708 *
709 * Checks the module params and registers the platform driver.
710 * Real work is in the platform probe function.
711 */
712static int __init bfin_crypto_crc_mod_init(void)
713{
714 int ret;
715
716 pr_info("Blackfin hardware CRC crypto driver\n");
717
718 INIT_LIST_HEAD(&crc_list.dev_list);
719 spin_lock_init(&crc_list.lock);
720
721 ret = platform_driver_register(&bfin_crypto_crc_driver);
722 if (ret) {
723 pr_err("unable to register driver\n");
724 return ret;
725 }
726
727 return 0;
728}
729
730/**
731 * bfin_crypto_crc_mod_exit - Deinitialize module
732 */
733static void __exit bfin_crypto_crc_mod_exit(void)
734{
735 platform_driver_unregister(&bfin_crypto_crc_driver);
736}
737
738module_init(bfin_crypto_crc_mod_init);
739module_exit(bfin_crypto_crc_mod_exit);
740
741MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
742MODULE_DESCRIPTION("Blackfin CRC hardware crypto driver");
743MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/bfin_crc.h b/drivers/crypto/bfin_crc.h
deleted file mode 100644
index 786ef746d109..000000000000
--- a/drivers/crypto/bfin_crc.h
+++ /dev/null
@@ -1,124 +0,0 @@
1/*
2 * bfin_crc.h - interface to Blackfin CRC controllers
3 *
4 * Copyright 2012 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#ifndef __BFIN_CRC_H__
10#define __BFIN_CRC_H__
11
 12/* Function drivers which use hardware crc must initialize the structure */
13struct crc_info {
14 /* Input data address */
15 unsigned char *in_addr;
16 /* Output data address */
17 unsigned char *out_addr;
18 /* Input or output bytes */
19 unsigned long datasize;
20 union {
21 /* CRC to compare with that of input buffer */
22 unsigned long crc_compare;
23 /* Value to compare with input data */
24 unsigned long val_verify;
25 /* Value to fill */
26 unsigned long val_fill;
27 };
28 /* Value to program the 32b CRC Polynomial */
29 unsigned long crc_poly;
30 union {
31 /* CRC calculated from the input data */
32 unsigned long crc_result;
33 /* First failed position to verify input data */
34 unsigned long pos_verify;
35 };
36 /* CRC mirror flags */
37 unsigned int bitmirr:1;
38 unsigned int bytmirr:1;
39 unsigned int w16swp:1;
40 unsigned int fdsel:1;
41 unsigned int rsltmirr:1;
42 unsigned int polymirr:1;
43 unsigned int cmpmirr:1;
44};
45
46/* Userspace interface */
47#define CRC_IOC_MAGIC 'C'
48#define CRC_IOC_CALC_CRC _IOWR('C', 0x01, unsigned int)
49#define CRC_IOC_MEMCPY_CRC _IOWR('C', 0x02, unsigned int)
50#define CRC_IOC_VERIFY_VAL _IOWR('C', 0x03, unsigned int)
51#define CRC_IOC_FILL_VAL _IOWR('C', 0x04, unsigned int)
52
53
54#ifdef __KERNEL__
55
56#include <linux/types.h>
57#include <linux/spinlock.h>
58
59struct crc_register {
60 u32 control;
61 u32 datacnt;
62 u32 datacntrld;
63 u32 __pad_1[2];
64 u32 compare;
65 u32 fillval;
66 u32 datafifo;
67 u32 intren;
68 u32 intrenset;
69 u32 intrenclr;
70 u32 poly;
71 u32 __pad_2[4];
72 u32 status;
73 u32 datacntcap;
74 u32 __pad_3;
75 u32 result;
76 u32 curresult;
77 u32 __pad_4[3];
78 u32 revid;
79};
80
81/* CRC_STATUS Masks */
82#define CMPERR 0x00000002 /* Compare error */
83#define DCNTEXP 0x00000010 /* datacnt register expired */
84#define IBR 0x00010000 /* Input buffer ready */
85#define OBR 0x00020000 /* Output buffer ready */
 86#define IRR 0x00040000 /* Immediate result ready */
87#define LUTDONE 0x00080000 /* Look-up table generation done */
88#define FSTAT 0x00700000 /* FIFO status */
89#define MAX_FIFO 4 /* Max fifo size */
90
91/* CRC_CONTROL Masks */
92#define BLKEN 0x00000001 /* Block enable */
93#define OPMODE 0x000000F0 /* Operation mode */
94#define OPMODE_OFFSET 4 /* Operation mode mask offset*/
95#define MODE_DMACPY_CRC 1 /* MTM CRC compute and compare */
96#define MODE_DATA_FILL 2 /* MTM data fill */
97#define MODE_CALC_CRC 3 /* MSM CRC compute and compare */
98#define MODE_DATA_VERIFY 4 /* MSM data verify */
99#define AUTOCLRZ 0x00000100 /* Auto clear to zero */
100#define AUTOCLRF 0x00000200 /* Auto clear to one */
101#define OBRSTALL 0x00001000 /* Stall on output buffer ready */
102#define IRRSTALL 0x00002000 /* Stall on immediate result ready */
103#define BITMIRR 0x00010000 /* Mirror bits within each byte of 32-bit input data */
104#define BITMIRR_OFFSET 16 /* Mirror bits offset */
105#define BYTMIRR 0x00020000 /* Mirror bytes of 32-bit input data */
106#define BYTMIRR_OFFSET 17 /* Mirror bytes offset */
 107#define W16SWP 0x00040000 /* Mirror upper and lower 16-bit word of 32-bit input data */
108#define W16SWP_OFFSET 18 /* Mirror 16-bit word offset */
109#define FDSEL 0x00080000 /* FIFO is written after input data is mirrored */
110#define FDSEL_OFFSET 19 /* Mirror FIFO offset */
111#define RSLTMIRR 0x00100000 /* CRC result registers are mirrored. */
112#define RSLTMIRR_OFFSET 20 /* Mirror CRC result offset. */
113#define POLYMIRR 0x00200000 /* CRC poly register is mirrored. */
114#define POLYMIRR_OFFSET 21 /* Mirror CRC poly offset. */
115#define CMPMIRR 0x00400000 /* CRC compare register is mirrored. */
116#define CMPMIRR_OFFSET 22 /* Mirror CRC compare offset. */
117
118/* CRC_INTREN Masks */
119#define CMPERRI 0x02 /* CRC_ERROR_INTR */
120#define DCNTEXPI 0x10 /* CRC_STATUS_INTR */
121
122#endif
123
124#endif
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 2188235be02d..7207a535942d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -328,6 +328,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
328{ 328{
329 struct caam_ctx *ctx = crypto_aead_ctx(aead); 329 struct caam_ctx *ctx = crypto_aead_ctx(aead);
330 struct device *jrdev = ctx->jrdev; 330 struct device *jrdev = ctx->jrdev;
331 unsigned int ivsize = crypto_aead_ivsize(aead);
331 u32 *desc; 332 u32 *desc;
332 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - 333 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
333 ctx->cdata.keylen; 334 ctx->cdata.keylen;
@@ -349,7 +350,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
349 } 350 }
350 351
351 desc = ctx->sh_desc_enc; 352 desc = ctx->sh_desc_enc;
352 cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize); 353 cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
353 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 354 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
354 desc_bytes(desc), ctx->dir); 355 desc_bytes(desc), ctx->dir);
355 356
@@ -366,7 +367,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
366 } 367 }
367 368
368 desc = ctx->sh_desc_dec; 369 desc = ctx->sh_desc_dec;
369 cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize); 370 cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
370 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 371 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
371 desc_bytes(desc), ctx->dir); 372 desc_bytes(desc), ctx->dir);
372 373
@@ -387,6 +388,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
387{ 388{
388 struct caam_ctx *ctx = crypto_aead_ctx(aead); 389 struct caam_ctx *ctx = crypto_aead_ctx(aead);
389 struct device *jrdev = ctx->jrdev; 390 struct device *jrdev = ctx->jrdev;
391 unsigned int ivsize = crypto_aead_ivsize(aead);
390 u32 *desc; 392 u32 *desc;
391 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - 393 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
392 ctx->cdata.keylen; 394 ctx->cdata.keylen;
@@ -408,7 +410,8 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
408 } 410 }
409 411
410 desc = ctx->sh_desc_enc; 412 desc = ctx->sh_desc_enc;
411 cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize); 413 cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
414 false);
412 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 415 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
413 desc_bytes(desc), ctx->dir); 416 desc_bytes(desc), ctx->dir);
414 417
@@ -425,7 +428,8 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
425 } 428 }
426 429
427 desc = ctx->sh_desc_dec; 430 desc = ctx->sh_desc_dec;
428 cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize); 431 cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
432 false);
429 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 433 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
430 desc_bytes(desc), ctx->dir); 434 desc_bytes(desc), ctx->dir);
431 435
@@ -447,6 +451,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
447{ 451{
448 struct caam_ctx *ctx = crypto_aead_ctx(aead); 452 struct caam_ctx *ctx = crypto_aead_ctx(aead);
449 struct device *jrdev = ctx->jrdev; 453 struct device *jrdev = ctx->jrdev;
454 unsigned int ivsize = crypto_aead_ivsize(aead);
450 u32 *desc; 455 u32 *desc;
451 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - 456 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
452 ctx->cdata.keylen; 457 ctx->cdata.keylen;
@@ -468,7 +473,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
468 } 473 }
469 474
470 desc = ctx->sh_desc_enc; 475 desc = ctx->sh_desc_enc;
471 cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize); 476 cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
477 false);
472 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 478 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
473 desc_bytes(desc), ctx->dir); 479 desc_bytes(desc), ctx->dir);
474 480
@@ -485,7 +491,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
485 } 491 }
486 492
487 desc = ctx->sh_desc_dec; 493 desc = ctx->sh_desc_dec;
488 cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize); 494 cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
495 false);
489 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 496 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
490 desc_bytes(desc), ctx->dir); 497 desc_bytes(desc), ctx->dir);
491 498
@@ -563,9 +570,11 @@ static int aead_setkey(struct crypto_aead *aead,
563 570
564skip_split_key: 571skip_split_key:
565 ctx->cdata.keylen = keys.enckeylen; 572 ctx->cdata.keylen = keys.enckeylen;
573 memzero_explicit(&keys, sizeof(keys));
566 return aead_set_sh_desc(aead); 574 return aead_set_sh_desc(aead);
567badkey: 575badkey:
568 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); 576 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
577 memzero_explicit(&keys, sizeof(keys));
569 return -EINVAL; 578 return -EINVAL;
570} 579}
571 580
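Two themes run through the caamalg.c hunk above: aead_setkey() now wipes the crypto_authenc_keys struct on both the success and failure paths (the same hardening as the other "don't leak pointers to authenc keys" patches in this pull), and the GCM/RFC4106/RFC4543 shared descriptors are now parameterised by the transform's actual IV size rather than an assumed constant, queried through the AEAD API. A small sketch of those geometry helpers (the demo_ name is hypothetical):

#include <crypto/aead.h>
#include <linux/printk.h>

static void demo_report_geometry(struct crypto_aead *tfm)
{
	/* per-transform parameters used to size the shared descriptors */
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int authsize = crypto_aead_authsize(tfm);

	pr_info("aead geometry: ivsize=%u authsize=%u\n", ivsize, authsize);
}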
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index ceb93fbb76e6..8ae7a1be7dfd 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -625,10 +625,13 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
625 * @desc: pointer to buffer used for descriptor construction 625 * @desc: pointer to buffer used for descriptor construction
626 * @cdata: pointer to block cipher transform definitions 626 * @cdata: pointer to block cipher transform definitions
627 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. 627 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
628 * @ivsize: initialization vector size
628 * @icvsize: integrity check value (ICV) size (truncated or full) 629 * @icvsize: integrity check value (ICV) size (truncated or full)
630 * @is_qi: true when called from caam/qi
629 */ 631 */
630void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, 632void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
631 unsigned int icvsize) 633 unsigned int ivsize, unsigned int icvsize,
634 const bool is_qi)
632{ 635{
633 u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1, 636 u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
634 *zero_assoc_jump_cmd2; 637 *zero_assoc_jump_cmd2;
@@ -650,11 +653,35 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
650 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | 653 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
651 OP_ALG_ENCRYPT); 654 OP_ALG_ENCRYPT);
652 655
656 if (is_qi) {
657 u32 *wait_load_cmd;
658
659 /* REG3 = assoclen */
660 append_seq_load(desc, 4, LDST_CLASS_DECO |
661 LDST_SRCDST_WORD_DECO_MATH3 |
662 (4 << LDST_OFFSET_SHIFT));
663
664 wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
665 JUMP_COND_CALM | JUMP_COND_NCP |
666 JUMP_COND_NOP | JUMP_COND_NIP |
667 JUMP_COND_NIFP);
668 set_jump_tgt_here(desc, wait_load_cmd);
669
670 append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
671 ivsize);
672 } else {
673 append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
674 CAAM_CMD_SZ);
675 }
676
653 /* if assoclen + cryptlen is ZERO, skip to ICV write */ 677 /* if assoclen + cryptlen is ZERO, skip to ICV write */
654 append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
655 zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | 678 zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
656 JUMP_COND_MATH_Z); 679 JUMP_COND_MATH_Z);
657 680
681 if (is_qi)
682 append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
683 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
684
658 /* if assoclen is ZERO, skip reading the assoc data */ 685 /* if assoclen is ZERO, skip reading the assoc data */
659 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); 686 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
660 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | 687 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
@@ -686,8 +713,11 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
686 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | 713 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
687 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); 714 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
688 715
689 /* jump the zero-payload commands */ 716 /* jump to ICV writing */
690 append_jump(desc, JUMP_TEST_ALL | 2); 717 if (is_qi)
718 append_jump(desc, JUMP_TEST_ALL | 4);
719 else
720 append_jump(desc, JUMP_TEST_ALL | 2);
691 721
692 /* zero-payload commands */ 722 /* zero-payload commands */
693 set_jump_tgt_here(desc, zero_payload_jump_cmd); 723 set_jump_tgt_here(desc, zero_payload_jump_cmd);
@@ -695,10 +725,18 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
695 /* read assoc data */ 725 /* read assoc data */
696 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | 726 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
697 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1); 727 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
728 if (is_qi)
729 /* jump to ICV writing */
730 append_jump(desc, JUMP_TEST_ALL | 2);
698 731
699 /* There is no input data */ 732 /* There is no input data */
700 set_jump_tgt_here(desc, zero_assoc_jump_cmd2); 733 set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
701 734
735 if (is_qi)
736 append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
737 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
738 FIFOLD_TYPE_LAST1);
739
702 /* write ICV */ 740 /* write ICV */
703 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | 741 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
704 LDST_SRCDST_BYTE_CONTEXT); 742 LDST_SRCDST_BYTE_CONTEXT);
@@ -715,10 +753,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
715 * @desc: pointer to buffer used for descriptor construction 753 * @desc: pointer to buffer used for descriptor construction
716 * @cdata: pointer to block cipher transform definitions 754 * @cdata: pointer to block cipher transform definitions
717 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. 755 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
756 * @ivsize: initialization vector size
718 * @icvsize: integrity check value (ICV) size (truncated or full) 757 * @icvsize: integrity check value (ICV) size (truncated or full)
758 * @is_qi: true when called from caam/qi
719 */ 759 */
720void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata, 760void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
721 unsigned int icvsize) 761 unsigned int ivsize, unsigned int icvsize,
762 const bool is_qi)
722{ 763{
723 u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1; 764 u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
724 765
@@ -739,6 +780,24 @@ void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
739 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | 780 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
740 OP_ALG_DECRYPT | OP_ALG_ICV_ON); 781 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
741 782
783 if (is_qi) {
784 u32 *wait_load_cmd;
785
786 /* REG3 = assoclen */
787 append_seq_load(desc, 4, LDST_CLASS_DECO |
788 LDST_SRCDST_WORD_DECO_MATH3 |
789 (4 << LDST_OFFSET_SHIFT));
790
791 wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
792 JUMP_COND_CALM | JUMP_COND_NCP |
793 JUMP_COND_NOP | JUMP_COND_NIP |
794 JUMP_COND_NIFP);
795 set_jump_tgt_here(desc, wait_load_cmd);
796
797 append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
798 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
799 }
800
742 /* if assoclen is ZERO, skip reading the assoc data */ 801 /* if assoclen is ZERO, skip reading the assoc data */
743 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); 802 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
744 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | 803 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
@@ -791,10 +850,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
791 * @desc: pointer to buffer used for descriptor construction 850 * @desc: pointer to buffer used for descriptor construction
792 * @cdata: pointer to block cipher transform definitions 851 * @cdata: pointer to block cipher transform definitions
793 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. 852 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
853 * @ivsize: initialization vector size
794 * @icvsize: integrity check value (ICV) size (truncated or full) 854 * @icvsize: integrity check value (ICV) size (truncated or full)
855 * @is_qi: true when called from caam/qi
795 */ 856 */
796void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata, 857void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
797 unsigned int icvsize) 858 unsigned int ivsize, unsigned int icvsize,
859 const bool is_qi)
798{ 860{
799 u32 *key_jump_cmd; 861 u32 *key_jump_cmd;
800 862
@@ -815,7 +877,29 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
815 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | 877 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
816 OP_ALG_ENCRYPT); 878 OP_ALG_ENCRYPT);
817 879
818 append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8); 880 if (is_qi) {
881 u32 *wait_load_cmd;
882
883 /* REG3 = assoclen */
884 append_seq_load(desc, 4, LDST_CLASS_DECO |
885 LDST_SRCDST_WORD_DECO_MATH3 |
886 (4 << LDST_OFFSET_SHIFT));
887
888 wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
889 JUMP_COND_CALM | JUMP_COND_NCP |
890 JUMP_COND_NOP | JUMP_COND_NIP |
891 JUMP_COND_NIFP);
892 set_jump_tgt_here(desc, wait_load_cmd);
893
894 /* Read salt and IV */
895 append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
896 cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
897 FIFOLD_TYPE_IV);
898 append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
899 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
900 }
901
902 append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
819 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); 903 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
820 904
821 /* Read assoc data */ 905 /* Read assoc data */
@@ -823,7 +907,7 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
823 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); 907 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
824 908
825 /* Skip IV */ 909 /* Skip IV */
826 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); 910 append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
827 911
828 /* Will read cryptlen bytes */ 912 /* Will read cryptlen bytes */
829 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); 913 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -862,10 +946,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
862 * @desc: pointer to buffer used for descriptor construction 946 * @desc: pointer to buffer used for descriptor construction
863 * @cdata: pointer to block cipher transform definitions 947 * @cdata: pointer to block cipher transform definitions
864 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. 948 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
949 * @ivsize: initialization vector size
865 * @icvsize: integrity check value (ICV) size (truncated or full) 950 * @icvsize: integrity check value (ICV) size (truncated or full)
951 * @is_qi: true when called from caam/qi
866 */ 952 */
867void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata, 953void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
868 unsigned int icvsize) 954 unsigned int ivsize, unsigned int icvsize,
955 const bool is_qi)
869{ 956{
870 u32 *key_jump_cmd; 957 u32 *key_jump_cmd;
871 958
@@ -887,7 +974,29 @@ void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
887 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | 974 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
888 OP_ALG_DECRYPT | OP_ALG_ICV_ON); 975 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
889 976
890 append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8); 977 if (is_qi) {
978 u32 *wait_load_cmd;
979
980 /* REG3 = assoclen */
981 append_seq_load(desc, 4, LDST_CLASS_DECO |
982 LDST_SRCDST_WORD_DECO_MATH3 |
983 (4 << LDST_OFFSET_SHIFT));
984
985 wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
986 JUMP_COND_CALM | JUMP_COND_NCP |
987 JUMP_COND_NOP | JUMP_COND_NIP |
988 JUMP_COND_NIFP);
989 set_jump_tgt_here(desc, wait_load_cmd);
990
991 /* Read salt and IV */
992 append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
993 cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
994 FIFOLD_TYPE_IV);
995 append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
996 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
997 }
998
999 append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
891 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); 1000 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
892 1001
893 /* Read assoc data */ 1002 /* Read assoc data */
@@ -895,7 +1004,7 @@ void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
895 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); 1004 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
896 1005
897 /* Skip IV */ 1006 /* Skip IV */
898 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); 1007 append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
899 1008
900 /* Will read cryptlen bytes */ 1009 /* Will read cryptlen bytes */
901 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ); 1010 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
@@ -934,10 +1043,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
934 * @desc: pointer to buffer used for descriptor construction 1043 * @desc: pointer to buffer used for descriptor construction
935 * @cdata: pointer to block cipher transform definitions 1044 * @cdata: pointer to block cipher transform definitions
936 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. 1045 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
1046 * @ivsize: initialization vector size
937 * @icvsize: integrity check value (ICV) size (truncated or full) 1047 * @icvsize: integrity check value (ICV) size (truncated or full)
1048 * @is_qi: true when called from caam/qi
938 */ 1049 */
939void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata, 1050void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
940 unsigned int icvsize) 1051 unsigned int ivsize, unsigned int icvsize,
1052 const bool is_qi)
941{ 1053{
942 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; 1054 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
943 1055
@@ -958,6 +1070,18 @@ void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
958 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | 1070 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
959 OP_ALG_ENCRYPT); 1071 OP_ALG_ENCRYPT);
960 1072
1073 if (is_qi) {
1074 /* assoclen is not needed, skip it */
1075 append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
1076
1077 /* Read salt and IV */
1078 append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
1079 cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
1080 FIFOLD_TYPE_IV);
1081 append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
1082 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
1083 }
1084
961 /* assoclen + cryptlen = seqinlen */ 1085 /* assoclen + cryptlen = seqinlen */
962 append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ); 1086 append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
963 1087
@@ -1004,10 +1128,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
1004 * @desc: pointer to buffer used for descriptor construction 1128 * @desc: pointer to buffer used for descriptor construction
1005 * @cdata: pointer to block cipher transform definitions 1129 * @cdata: pointer to block cipher transform definitions
1006 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. 1130 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
1131 * @ivsize: initialization vector size
1007 * @icvsize: integrity check value (ICV) size (truncated or full) 1132 * @icvsize: integrity check value (ICV) size (truncated or full)
1133 * @is_qi: true when called from caam/qi
1008 */ 1134 */
1009void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata, 1135void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
1010 unsigned int icvsize) 1136 unsigned int ivsize, unsigned int icvsize,
1137 const bool is_qi)
1011{ 1138{
1012 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; 1139 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
1013 1140
@@ -1028,6 +1155,18 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
1028 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | 1155 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
1029 OP_ALG_DECRYPT | OP_ALG_ICV_ON); 1156 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
1030 1157
1158 if (is_qi) {
1159 /* assoclen is not needed, skip it */
1160 append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
1161
1162 /* Read salt and IV */
1163 append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
1164 cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
1165 FIFOLD_TYPE_IV);
1166 append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
1167 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
1168 }
1169
1031 /* assoclen + cryptlen = seqoutlen */ 1170 /* assoclen + cryptlen = seqoutlen */
1032 append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ); 1171 append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
1033 1172
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index 5f9445ae2114..a917af5776ce 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -27,14 +27,20 @@
27#define DESC_GCM_BASE (3 * CAAM_CMD_SZ) 27#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
28#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ) 28#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
29#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ) 29#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
30#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
31#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
30 32
31#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ) 33#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
32#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) 34#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
33#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ) 35#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
36#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
37#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
34 38
35#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ) 39#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
36#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ) 40#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
37#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ) 41#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
42#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
43#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
38 44
39#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ) 45#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
40#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \ 46#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
@@ -67,22 +73,28 @@ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
67 const bool is_qi, int era); 73 const bool is_qi, int era);
68 74
69void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, 75void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
70 unsigned int icvsize); 76 unsigned int ivsize, unsigned int icvsize,
77 const bool is_qi);
71 78
72void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata, 79void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
73 unsigned int icvsize); 80 unsigned int ivsize, unsigned int icvsize,
81 const bool is_qi);
74 82
75void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata, 83void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
76 unsigned int icvsize); 84 unsigned int ivsize, unsigned int icvsize,
85 const bool is_qi);
77 86
78void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata, 87void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
79 unsigned int icvsize); 88 unsigned int ivsize, unsigned int icvsize,
89 const bool is_qi);
80 90
81void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata, 91void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
82 unsigned int icvsize); 92 unsigned int ivsize, unsigned int icvsize,
93 const bool is_qi);
83 94
84void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata, 95void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
85 unsigned int icvsize); 96 unsigned int ivsize, unsigned int icvsize,
97 const bool is_qi);
86 98
87void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata, 99void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
88 unsigned int ivsize, const bool is_rfc3686, 100 unsigned int ivsize, const bool is_rfc3686,
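The DESC_QI_* lengths added above feed the inline-key decision in the QI glue code: the shared descriptor, the job-descriptor overhead and (optionally) the inlined key must all fit in the 64-word descriptor buffer. A minimal standalone sketch of that check follows; the value used for DESC_JOB_IO_LEN is an assumption for illustration, while CAAM_CMD_SZ (taken here as one 32-bit command word) and the QI GCM length mirror the macros above.

#include <stdbool.h>
#include <stdio.h>

#define CAAM_CMD_SZ          4                    /* one 32-bit command word */
#define CAAM_DESC_BYTES_MAX  (64 * CAAM_CMD_SZ)   /* 64-word descriptor buffer */
#define DESC_JOB_IO_LEN      (9 * CAAM_CMD_SZ)    /* assumed job-descriptor overhead */
#define DESC_QI_GCM_ENC_LEN  ((3 + 16 + 6) * CAAM_CMD_SZ)  /* base + enc + QI extras */

static bool key_can_be_inlined(unsigned int keylen)
{
        /* Same shape of check the QI glue performs before building the descriptor */
        int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - keylen;

        return rem_bytes >= DESC_QI_GCM_ENC_LEN;
}

int main(void)
{
        printf("AES-128 key inlined: %d\n", key_can_be_inlined(16));
        printf("AES-256 key inlined: %d\n", key_can_be_inlined(32));
        return 0;
}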
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 4aecc9435f69..cacda0831390 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -278,12 +278,317 @@ skip_split_key:
278 } 278 }
279 } 279 }
280 280
281 memzero_explicit(&keys, sizeof(keys));
281 return ret; 282 return ret;
282badkey: 283badkey:
283 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); 284 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
285 memzero_explicit(&keys, sizeof(keys));
284 return -EINVAL; 286 return -EINVAL;
285} 287}
286 288
289static int gcm_set_sh_desc(struct crypto_aead *aead)
290{
291 struct caam_ctx *ctx = crypto_aead_ctx(aead);
292 unsigned int ivsize = crypto_aead_ivsize(aead);
293 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
294 ctx->cdata.keylen;
295
296 if (!ctx->cdata.keylen || !ctx->authsize)
297 return 0;
298
299 /*
300 * Job Descriptor and Shared Descriptor
301 * must fit into the 64-word Descriptor h/w Buffer
302 */
303 if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
304 ctx->cdata.key_inline = true;
305 ctx->cdata.key_virt = ctx->key;
306 } else {
307 ctx->cdata.key_inline = false;
308 ctx->cdata.key_dma = ctx->key_dma;
309 }
310
311 cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
312 ctx->authsize, true);
313
314 /*
315 * Job Descriptor and Shared Descriptor
316 * must fit into the 64-word Descriptor h/w Buffer
317 */
318 if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
319 ctx->cdata.key_inline = true;
320 ctx->cdata.key_virt = ctx->key;
321 } else {
322 ctx->cdata.key_inline = false;
323 ctx->cdata.key_dma = ctx->key_dma;
324 }
325
326 cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
327 ctx->authsize, true);
328
329 return 0;
330}
331
332static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
333{
334 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
335
336 ctx->authsize = authsize;
337 gcm_set_sh_desc(authenc);
338
339 return 0;
340}
341
342static int gcm_setkey(struct crypto_aead *aead,
343 const u8 *key, unsigned int keylen)
344{
345 struct caam_ctx *ctx = crypto_aead_ctx(aead);
346 struct device *jrdev = ctx->jrdev;
347 int ret;
348
349#ifdef DEBUG
350 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
351 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
352#endif
353
354 memcpy(ctx->key, key, keylen);
355 dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
356 ctx->cdata.keylen = keylen;
357
358 ret = gcm_set_sh_desc(aead);
359 if (ret)
360 return ret;
361
362 /* Now update the driver contexts with the new shared descriptor */
363 if (ctx->drv_ctx[ENCRYPT]) {
364 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
365 ctx->sh_desc_enc);
366 if (ret) {
367 dev_err(jrdev, "driver enc context update failed\n");
368 return ret;
369 }
370 }
371
372 if (ctx->drv_ctx[DECRYPT]) {
373 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
374 ctx->sh_desc_dec);
375 if (ret) {
376 dev_err(jrdev, "driver dec context update failed\n");
377 return ret;
378 }
379 }
380
381 return 0;
382}
383
384static int rfc4106_set_sh_desc(struct crypto_aead *aead)
385{
386 struct caam_ctx *ctx = crypto_aead_ctx(aead);
387 unsigned int ivsize = crypto_aead_ivsize(aead);
388 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
389 ctx->cdata.keylen;
390
391 if (!ctx->cdata.keylen || !ctx->authsize)
392 return 0;
393
394 ctx->cdata.key_virt = ctx->key;
395
396 /*
397 * Job Descriptor and Shared Descriptor
398 * must fit into the 64-word Descriptor h/w Buffer
399 */
400 if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
401 ctx->cdata.key_inline = true;
402 } else {
403 ctx->cdata.key_inline = false;
404 ctx->cdata.key_dma = ctx->key_dma;
405 }
406
407 cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
408 ctx->authsize, true);
409
410 /*
411 * Job Descriptor and Shared Descriptor
412 * must fit into the 64-word Descriptor h/w Buffer
413 */
414 if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
415 ctx->cdata.key_inline = true;
416 } else {
417 ctx->cdata.key_inline = false;
418 ctx->cdata.key_dma = ctx->key_dma;
419 }
420
421 cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
422 ctx->authsize, true);
423
424 return 0;
425}
426
427static int rfc4106_setauthsize(struct crypto_aead *authenc,
428 unsigned int authsize)
429{
430 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
431
432 ctx->authsize = authsize;
433 rfc4106_set_sh_desc(authenc);
434
435 return 0;
436}
437
438static int rfc4106_setkey(struct crypto_aead *aead,
439 const u8 *key, unsigned int keylen)
440{
441 struct caam_ctx *ctx = crypto_aead_ctx(aead);
442 struct device *jrdev = ctx->jrdev;
443 int ret;
444
445 if (keylen < 4)
446 return -EINVAL;
447
448#ifdef DEBUG
449 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
450 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
451#endif
452
453 memcpy(ctx->key, key, keylen);
454 /*
455 * The last four bytes of the key material are used as the salt value
456 * in the nonce. Update the AES key length.
457 */
458 ctx->cdata.keylen = keylen - 4;
459 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
460 ctx->dir);
461
462 ret = rfc4106_set_sh_desc(aead);
463 if (ret)
464 return ret;
465
466 /* Now update the driver contexts with the new shared descriptor */
467 if (ctx->drv_ctx[ENCRYPT]) {
468 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
469 ctx->sh_desc_enc);
470 if (ret) {
471 dev_err(jrdev, "driver enc context update failed\n");
472 return ret;
473 }
474 }
475
476 if (ctx->drv_ctx[DECRYPT]) {
477 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
478 ctx->sh_desc_dec);
479 if (ret) {
480 dev_err(jrdev, "driver dec context update failed\n");
481 return ret;
482 }
483 }
484
485 return 0;
486}
487
488static int rfc4543_set_sh_desc(struct crypto_aead *aead)
489{
490 struct caam_ctx *ctx = crypto_aead_ctx(aead);
491 unsigned int ivsize = crypto_aead_ivsize(aead);
492 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
493 ctx->cdata.keylen;
494
495 if (!ctx->cdata.keylen || !ctx->authsize)
496 return 0;
497
498 ctx->cdata.key_virt = ctx->key;
499
500 /*
501 * Job Descriptor and Shared Descriptor
502 * must fit into the 64-word Descriptor h/w Buffer
503 */
504 if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
505 ctx->cdata.key_inline = true;
506 } else {
507 ctx->cdata.key_inline = false;
508 ctx->cdata.key_dma = ctx->key_dma;
509 }
510
511 cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
512 ctx->authsize, true);
513
514 /*
515 * Job Descriptor and Shared Descriptor
516 * must fit into the 64-word Descriptor h/w Buffer
517 */
518 if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
519 ctx->cdata.key_inline = true;
520 } else {
521 ctx->cdata.key_inline = false;
522 ctx->cdata.key_dma = ctx->key_dma;
523 }
524
525 cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
526 ctx->authsize, true);
527
528 return 0;
529}
530
531static int rfc4543_setauthsize(struct crypto_aead *authenc,
532 unsigned int authsize)
533{
534 struct caam_ctx *ctx = crypto_aead_ctx(authenc);
535
536 ctx->authsize = authsize;
537 rfc4543_set_sh_desc(authenc);
538
539 return 0;
540}
541
542static int rfc4543_setkey(struct crypto_aead *aead,
543 const u8 *key, unsigned int keylen)
544{
545 struct caam_ctx *ctx = crypto_aead_ctx(aead);
546 struct device *jrdev = ctx->jrdev;
547 int ret;
548
549 if (keylen < 4)
550 return -EINVAL;
551
552#ifdef DEBUG
553 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
554 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
555#endif
556
557 memcpy(ctx->key, key, keylen);
558 /*
559 * The last four bytes of the key material are used as the salt value
560 * in the nonce. Update the AES key length.
561 */
562 ctx->cdata.keylen = keylen - 4;
563 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
564 ctx->dir);
565
566 ret = rfc4543_set_sh_desc(aead);
567 if (ret)
568 return ret;
569
570 /* Now update the driver contexts with the new shared descriptor */
571 if (ctx->drv_ctx[ENCRYPT]) {
572 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
573 ctx->sh_desc_enc);
574 if (ret) {
575 dev_err(jrdev, "driver enc context update failed\n");
576 return ret;
577 }
578 }
579
580 if (ctx->drv_ctx[DECRYPT]) {
581 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
582 ctx->sh_desc_dec);
583 if (ret) {
584 dev_err(jrdev, "driver dec context update failed\n");
585 return ret;
586 }
587 }
588
589 return 0;
590}
591
287static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, 592static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
288 const u8 *key, unsigned int keylen) 593 const u8 *key, unsigned int keylen)
289{ 594{
@@ -562,8 +867,18 @@ static void aead_done(struct caam_drv_req *drv_req, u32 status)
562 qidev = caam_ctx->qidev; 867 qidev = caam_ctx->qidev;
563 868
564 if (unlikely(status)) { 869 if (unlikely(status)) {
870 u32 ssrc = status & JRSTA_SSRC_MASK;
871 u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
872
565 caam_jr_strstatus(qidev, status); 873 caam_jr_strstatus(qidev, status);
566 ecode = -EIO; 874 /*
875 * verify hw auth check passed else return -EBADMSG
876 */
877 if (ssrc == JRSTA_SSRC_CCB_ERROR &&
878 err_id == JRSTA_CCBERR_ERRID_ICVCHK)
879 ecode = -EBADMSG;
880 else
881 ecode = -EIO;
567 } 882 }
568 883
569 edesc = container_of(drv_req, typeof(*edesc), drv_req); 884 edesc = container_of(drv_req, typeof(*edesc), drv_req);
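The -EBADMSG mapping above lets callers tell a failed ICV check apart from a generic hardware error. A hedged sketch of a synchronous caller making that distinction (the asynchronous caam/qi path delivers the same code through the request callback):

#include <crypto/aead.h>
#include <linux/printk.h>

static int check_decrypt_result(struct aead_request *req)
{
        int err = crypto_aead_decrypt(req);

        if (err == -EBADMSG)            /* ICV mismatch: message not authentic */
                pr_debug("AEAD: authentication failed\n");
        else if (err)
                pr_err("AEAD: decrypt error %d\n", err);
        return err;
}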
@@ -807,6 +1122,22 @@ static int aead_decrypt(struct aead_request *req)
807 return aead_crypt(req, false); 1122 return aead_crypt(req, false);
808} 1123}
809 1124
1125static int ipsec_gcm_encrypt(struct aead_request *req)
1126{
1127 if (req->assoclen < 8)
1128 return -EINVAL;
1129
1130 return aead_crypt(req, true);
1131}
1132
1133static int ipsec_gcm_decrypt(struct aead_request *req)
1134{
1135 if (req->assoclen < 8)
1136 return -EINVAL;
1137
1138 return aead_crypt(req, false);
1139}
1140
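A sketch of why ipsec_gcm_encrypt()/ipsec_gcm_decrypt() above reject assoclen < 8: for rfc4106/rfc4543 the crypto API carries the 8-byte explicit IV at the tail of the associated data, so assoclen must at least cover it (for ESP it is typically 16 or 20 bytes). The helper below is illustrative only, not part of the driver.

#include <stdio.h>

static unsigned int true_aad_len(unsigned int assoclen)
{
        const unsigned int ivsize = 8;  /* rfc4106/rfc4543 explicit IV */

        return assoclen >= ivsize ? assoclen - ivsize : 0;
}

int main(void)
{
        /* ESP with 32-bit sequence numbers: SPI(4) + seq(4) + IV(8) = 16 */
        printf("assoclen 16 -> AAD %u\n", true_aad_len(16));
        /* ESP with 64-bit ESN: SPI(4) + seq hi/lo(8) + IV(8) = 20 */
        printf("assoclen 20 -> AAD %u\n", true_aad_len(20));
        return 0;
}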
810static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status) 1141static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
811{ 1142{
812 struct ablkcipher_edesc *edesc; 1143 struct ablkcipher_edesc *edesc;
@@ -1327,6 +1658,61 @@ static struct caam_alg_template driver_algs[] = {
1327}; 1658};
1328 1659
1329static struct caam_aead_alg driver_aeads[] = { 1660static struct caam_aead_alg driver_aeads[] = {
1661 {
1662 .aead = {
1663 .base = {
1664 .cra_name = "rfc4106(gcm(aes))",
1665 .cra_driver_name = "rfc4106-gcm-aes-caam-qi",
1666 .cra_blocksize = 1,
1667 },
1668 .setkey = rfc4106_setkey,
1669 .setauthsize = rfc4106_setauthsize,
1670 .encrypt = ipsec_gcm_encrypt,
1671 .decrypt = ipsec_gcm_decrypt,
1672 .ivsize = 8,
1673 .maxauthsize = AES_BLOCK_SIZE,
1674 },
1675 .caam = {
1676 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1677 },
1678 },
1679 {
1680 .aead = {
1681 .base = {
1682 .cra_name = "rfc4543(gcm(aes))",
1683 .cra_driver_name = "rfc4543-gcm-aes-caam-qi",
1684 .cra_blocksize = 1,
1685 },
1686 .setkey = rfc4543_setkey,
1687 .setauthsize = rfc4543_setauthsize,
1688 .encrypt = ipsec_gcm_encrypt,
1689 .decrypt = ipsec_gcm_decrypt,
1690 .ivsize = 8,
1691 .maxauthsize = AES_BLOCK_SIZE,
1692 },
1693 .caam = {
1694 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1695 },
1696 },
1697 /* Galois Counter Mode */
1698 {
1699 .aead = {
1700 .base = {
1701 .cra_name = "gcm(aes)",
1702 .cra_driver_name = "gcm-aes-caam-qi",
1703 .cra_blocksize = 1,
1704 },
1705 .setkey = gcm_setkey,
1706 .setauthsize = gcm_setauthsize,
1707 .encrypt = aead_encrypt,
1708 .decrypt = aead_decrypt,
1709 .ivsize = 12,
1710 .maxauthsize = AES_BLOCK_SIZE,
1711 },
1712 .caam = {
1713 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1714 }
1715 },
1330 /* single-pass ipsec_esp descriptor */ 1716 /* single-pass ipsec_esp descriptor */
1331 { 1717 {
1332 .aead = { 1718 .aead = {
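For context, a minimal in-kernel sketch of how the algorithms registered above are reached through the standard AEAD API (allocation falls back to other providers if caam/qi is absent); error handling is trimmed and the request setup is elided.

#include <crypto/aead.h>

static int example_gcm(void)
{
        struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
        static const u8 key[16];
        int err;

        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_aead_setkey(tfm, key, sizeof(key));
        if (!err)
                err = crypto_aead_setauthsize(tfm, 16);

        /* ... build an aead_request, set src/dst scatterlists and the
         * 12-byte IV, then call crypto_aead_encrypt() ...
         */

        crypto_free_aead(tfm);
        return err;
}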
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index e843cf410373..e4cc636e1104 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -337,7 +337,8 @@ static int caam_remove(struct platform_device *pdev)
337 337
338 /* shut clocks off before finalizing shutdown */ 338 /* shut clocks off before finalizing shutdown */
339 clk_disable_unprepare(ctrlpriv->caam_ipg); 339 clk_disable_unprepare(ctrlpriv->caam_ipg);
340 clk_disable_unprepare(ctrlpriv->caam_mem); 340 if (ctrlpriv->caam_mem)
341 clk_disable_unprepare(ctrlpriv->caam_mem);
341 clk_disable_unprepare(ctrlpriv->caam_aclk); 342 clk_disable_unprepare(ctrlpriv->caam_aclk);
342 if (ctrlpriv->caam_emi_slow) 343 if (ctrlpriv->caam_emi_slow)
343 clk_disable_unprepare(ctrlpriv->caam_emi_slow); 344 clk_disable_unprepare(ctrlpriv->caam_emi_slow);
@@ -466,14 +467,17 @@ static int caam_probe(struct platform_device *pdev)
466 } 467 }
467 ctrlpriv->caam_ipg = clk; 468 ctrlpriv->caam_ipg = clk;
468 469
469 clk = caam_drv_identify_clk(&pdev->dev, "mem"); 470 if (!of_machine_is_compatible("fsl,imx7d") &&
470 if (IS_ERR(clk)) { 471 !of_machine_is_compatible("fsl,imx7s")) {
471 ret = PTR_ERR(clk); 472 clk = caam_drv_identify_clk(&pdev->dev, "mem");
472 dev_err(&pdev->dev, 473 if (IS_ERR(clk)) {
473 "can't identify CAAM mem clk: %d\n", ret); 474 ret = PTR_ERR(clk);
474 return ret; 475 dev_err(&pdev->dev,
476 "can't identify CAAM mem clk: %d\n", ret);
477 return ret;
478 }
479 ctrlpriv->caam_mem = clk;
475 } 480 }
476 ctrlpriv->caam_mem = clk;
477 481
478 clk = caam_drv_identify_clk(&pdev->dev, "aclk"); 482 clk = caam_drv_identify_clk(&pdev->dev, "aclk");
479 if (IS_ERR(clk)) { 483 if (IS_ERR(clk)) {
@@ -484,7 +488,9 @@ static int caam_probe(struct platform_device *pdev)
484 } 488 }
485 ctrlpriv->caam_aclk = clk; 489 ctrlpriv->caam_aclk = clk;
486 490
487 if (!of_machine_is_compatible("fsl,imx6ul")) { 491 if (!of_machine_is_compatible("fsl,imx6ul") &&
492 !of_machine_is_compatible("fsl,imx7d") &&
493 !of_machine_is_compatible("fsl,imx7s")) {
488 clk = caam_drv_identify_clk(&pdev->dev, "emi_slow"); 494 clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
489 if (IS_ERR(clk)) { 495 if (IS_ERR(clk)) {
490 ret = PTR_ERR(clk); 496 ret = PTR_ERR(clk);
@@ -501,11 +507,13 @@ static int caam_probe(struct platform_device *pdev)
501 return ret; 507 return ret;
502 } 508 }
503 509
504 ret = clk_prepare_enable(ctrlpriv->caam_mem); 510 if (ctrlpriv->caam_mem) {
505 if (ret < 0) { 511 ret = clk_prepare_enable(ctrlpriv->caam_mem);
506 dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n", 512 if (ret < 0) {
507 ret); 513 dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
508 goto disable_caam_ipg; 514 ret);
515 goto disable_caam_ipg;
516 }
509 } 517 }
510 518
511 ret = clk_prepare_enable(ctrlpriv->caam_aclk); 519 ret = clk_prepare_enable(ctrlpriv->caam_aclk);
@@ -815,9 +823,6 @@ static int caam_probe(struct platform_device *pdev)
815 return 0; 823 return 0;
816 824
817caam_remove: 825caam_remove:
818#ifdef CONFIG_DEBUG_FS
819 debugfs_remove_recursive(ctrlpriv->dfs_root);
820#endif
821 caam_remove(pdev); 826 caam_remove(pdev);
822 return ret; 827 return ret;
823 828
@@ -829,7 +834,8 @@ disable_caam_emi_slow:
829disable_caam_aclk: 834disable_caam_aclk:
830 clk_disable_unprepare(ctrlpriv->caam_aclk); 835 clk_disable_unprepare(ctrlpriv->caam_aclk);
831disable_caam_mem: 836disable_caam_mem:
832 clk_disable_unprepare(ctrlpriv->caam_mem); 837 if (ctrlpriv->caam_mem)
838 clk_disable_unprepare(ctrlpriv->caam_mem);
833disable_caam_ipg: 839disable_caam_ipg:
834 clk_disable_unprepare(ctrlpriv->caam_ipg); 840 clk_disable_unprepare(ctrlpriv->caam_ipg);
835 return ret; 841 return ret;
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index f9a44f485aac..b9480828da38 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -579,8 +579,15 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
579 579
580 fd = &dqrr->fd; 580 fd = &dqrr->fd;
581 status = be32_to_cpu(fd->status); 581 status = be32_to_cpu(fd->status);
582 if (unlikely(status)) 582 if (unlikely(status)) {
583 dev_err(qidev, "Error: %#x in CAAM response FD\n", status); 583 u32 ssrc = status & JRSTA_SSRC_MASK;
584 u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
585
586 if (ssrc != JRSTA_SSRC_CCB_ERROR ||
587 err_id != JRSTA_CCBERR_ERRID_ICVCHK)
588 dev_err(qidev, "Error: %#x in CAAM response FD\n",
589 status);
590 }
584 591
585 if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) { 592 if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
586 dev_err(qidev, "Non-compound FD from CAAM\n"); 593 dev_err(qidev, "Non-compound FD from CAAM\n");
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 34a6d8bf229e..06ad85ab5e86 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -436,7 +436,7 @@ static int cpt_device_init(struct cpt_device *cpt)
436 436
437 /* Reset the PF when probed first */ 437 /* Reset the PF when probed first */
438 cpt_reset(cpt); 438 cpt_reset(cpt);
439 mdelay(100); 439 msleep(100);
440 440
441 /*Check BIST status*/ 441 /*Check BIST status*/
442 bist = (u64)cpt_check_bist_status(cpt); 442 bist = (u64)cpt_check_bist_status(cpt);
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 60fc0fa26fd3..26687f318de6 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -46,7 +46,7 @@ static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
46 } 46 }
47 47
48 /* Update result area if supplied */ 48 /* Update result area if supplied */
49 if (req->result) 49 if (req->result && rctx->final)
50 memcpy(req->result, rctx->iv, digest_size); 50 memcpy(req->result, rctx->iv, digest_size);
51 51
52e_free: 52e_free:
diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
index e6db8672d89c..05850dfd7940 100644
--- a/drivers/crypto/ccp/ccp-crypto-rsa.c
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -60,10 +60,9 @@ static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
60 60
61static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm) 61static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
62{ 62{
63 if (ccp_version() > CCP_VERSION(3, 0)) 63 struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
64 return CCP5_RSA_MAXMOD; 64
65 else 65 return ctx->u.rsa.n_len;
66 return CCP_RSA_MAXMOD;
67} 66}
68 67
69static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt) 68static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 8b9b16d433f7..871c9628a2ee 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -47,7 +47,7 @@ static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
47 } 47 }
48 48
49 /* Update result area if supplied */ 49 /* Update result area if supplied */
50 if (req->result) 50 if (req->result && rctx->final)
51 memcpy(req->result, rctx->ctx, digest_size); 51 memcpy(req->result, rctx->ctx, digest_size);
52 52
53e_free: 53e_free:
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
index 59d4ca4e72d8..1a734bd2070a 100644
--- a/drivers/crypto/ccp/ccp-debugfs.c
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -278,7 +278,7 @@ static const struct file_operations ccp_debugfs_stats_ops = {
278}; 278};
279 279
280static struct dentry *ccp_debugfs_dir; 280static struct dentry *ccp_debugfs_dir;
281static DEFINE_RWLOCK(ccp_debugfs_lock); 281static DEFINE_MUTEX(ccp_debugfs_lock);
282 282
283#define MAX_NAME_LEN 20 283#define MAX_NAME_LEN 20
284 284
@@ -290,16 +290,15 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
290 struct dentry *debugfs_stats; 290 struct dentry *debugfs_stats;
291 struct dentry *debugfs_q_instance; 291 struct dentry *debugfs_q_instance;
292 struct dentry *debugfs_q_stats; 292 struct dentry *debugfs_q_stats;
293 unsigned long flags;
294 int i; 293 int i;
295 294
296 if (!debugfs_initialized()) 295 if (!debugfs_initialized())
297 return; 296 return;
298 297
299 write_lock_irqsave(&ccp_debugfs_lock, flags); 298 mutex_lock(&ccp_debugfs_lock);
300 if (!ccp_debugfs_dir) 299 if (!ccp_debugfs_dir)
301 ccp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); 300 ccp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
302 write_unlock_irqrestore(&ccp_debugfs_lock, flags); 301 mutex_unlock(&ccp_debugfs_lock);
303 if (!ccp_debugfs_dir) 302 if (!ccp_debugfs_dir)
304 return; 303 return;
305 304
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 8b9da58459df..67155cb21636 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -38,7 +38,7 @@ static unsigned int dma_chan_attr = CCP_DMA_DFLT;
38module_param(dma_chan_attr, uint, 0444); 38module_param(dma_chan_attr, uint, 0444);
39MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public"); 39MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");
40 40
41unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp) 41static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
42{ 42{
43 switch (dma_chan_attr) { 43 switch (dma_chan_attr) {
44 case CCP_DMA_DFLT: 44 case CCP_DMA_DFLT:
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 406b95329b3d..0ea43cdeb05f 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -178,14 +178,18 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
178 return 0; 178 return 0;
179} 179}
180 180
181static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, 181static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
182 struct scatterlist *sg, unsigned int sg_offset, 182 struct scatterlist *sg, unsigned int sg_offset,
183 unsigned int len) 183 unsigned int len)
184{ 184{
185 WARN_ON(!wa->address); 185 WARN_ON(!wa->address);
186 186
187 if (len > (wa->length - wa_offset))
188 return -EINVAL;
189
187 scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, 190 scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
188 0); 191 0);
192 return 0;
189} 193}
190 194
191static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, 195static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
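The new length check in ccp_set_dm_area() rejects copies that would run past the workarea instead of letting scatterwalk_map_and_copy() overflow it. A plain-C illustration of the check (not driver code):

#include <errno.h>
#include <stdio.h>

static int bounded_copy_ok(unsigned int wa_len, unsigned int wa_off,
                           unsigned int len)
{
        /* Mirrors the wa->length - wa_offset bound added above */
        return len > (wa_len - wa_off) ? -EINVAL : 0;
}

int main(void)
{
        printf("%d\n", bounded_copy_ok(32, 16, 8));   /*   0: fits     */
        printf("%d\n", bounded_copy_ok(32, 16, 32));  /* -22: rejected */
        return 0;
}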
@@ -205,8 +209,11 @@ static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
205 unsigned int len) 209 unsigned int len)
206{ 210{
207 u8 *p, *q; 211 u8 *p, *q;
212 int rc;
208 213
209 ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len); 214 rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
215 if (rc)
216 return rc;
210 217
211 p = wa->address + wa_offset; 218 p = wa->address + wa_offset;
212 q = p + len - 1; 219 q = p + len - 1;
@@ -509,7 +516,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
509 return ret; 516 return ret;
510 517
511 dm_offset = CCP_SB_BYTES - aes->key_len; 518 dm_offset = CCP_SB_BYTES - aes->key_len;
512 ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); 519 ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
520 if (ret)
521 goto e_key;
513 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, 522 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
514 CCP_PASSTHRU_BYTESWAP_256BIT); 523 CCP_PASSTHRU_BYTESWAP_256BIT);
515 if (ret) { 524 if (ret) {
@@ -528,7 +537,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
528 goto e_key; 537 goto e_key;
529 538
530 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; 539 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
531 ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 540 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
541 if (ret)
542 goto e_ctx;
532 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 543 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
533 CCP_PASSTHRU_BYTESWAP_256BIT); 544 CCP_PASSTHRU_BYTESWAP_256BIT);
534 if (ret) { 545 if (ret) {
@@ -556,8 +567,10 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
556 goto e_src; 567 goto e_src;
557 } 568 }
558 569
559 ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0, 570 ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
560 aes->cmac_key_len); 571 aes->cmac_key_len);
572 if (ret)
573 goto e_src;
561 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 574 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
562 CCP_PASSTHRU_BYTESWAP_256BIT); 575 CCP_PASSTHRU_BYTESWAP_256BIT);
563 if (ret) { 576 if (ret) {
@@ -666,7 +679,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
666 return ret; 679 return ret;
667 680
668 dm_offset = CCP_SB_BYTES - aes->key_len; 681 dm_offset = CCP_SB_BYTES - aes->key_len;
669 ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); 682 ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
683 if (ret)
684 goto e_key;
670 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, 685 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
671 CCP_PASSTHRU_BYTESWAP_256BIT); 686 CCP_PASSTHRU_BYTESWAP_256BIT);
672 if (ret) { 687 if (ret) {
@@ -685,7 +700,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
685 goto e_key; 700 goto e_key;
686 701
687 dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len; 702 dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
688 ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 703 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
704 if (ret)
705 goto e_ctx;
689 706
690 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 707 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
691 CCP_PASSTHRU_BYTESWAP_256BIT); 708 CCP_PASSTHRU_BYTESWAP_256BIT);
@@ -777,7 +794,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
777 goto e_dst; 794 goto e_dst;
778 } 795 }
779 796
780 ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 797 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
798 if (ret)
799 goto e_dst;
781 800
782 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 801 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
783 CCP_PASSTHRU_BYTESWAP_256BIT); 802 CCP_PASSTHRU_BYTESWAP_256BIT);
@@ -820,7 +839,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
820 DMA_BIDIRECTIONAL); 839 DMA_BIDIRECTIONAL);
821 if (ret) 840 if (ret)
822 goto e_tag; 841 goto e_tag;
823 ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE); 842 ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
843 if (ret)
844 goto e_tag;
824 845
825 ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE); 846 ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
826 ccp_dm_free(&tag); 847 ccp_dm_free(&tag);
@@ -914,7 +935,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
914 return ret; 935 return ret;
915 936
916 dm_offset = CCP_SB_BYTES - aes->key_len; 937 dm_offset = CCP_SB_BYTES - aes->key_len;
917 ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); 938 ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
939 if (ret)
940 goto e_key;
918 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, 941 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
919 CCP_PASSTHRU_BYTESWAP_256BIT); 942 CCP_PASSTHRU_BYTESWAP_256BIT);
920 if (ret) { 943 if (ret) {
@@ -935,7 +958,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
935 if (aes->mode != CCP_AES_MODE_ECB) { 958 if (aes->mode != CCP_AES_MODE_ECB) {
936 /* Load the AES context - convert to LE */ 959 /* Load the AES context - convert to LE */
937 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; 960 dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
938 ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); 961 ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
962 if (ret)
963 goto e_ctx;
939 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 964 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
940 CCP_PASSTHRU_BYTESWAP_256BIT); 965 CCP_PASSTHRU_BYTESWAP_256BIT);
941 if (ret) { 966 if (ret) {
@@ -1113,8 +1138,12 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
1113 * big endian to little endian. 1138 * big endian to little endian.
1114 */ 1139 */
1115 dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128; 1140 dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
1116 ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len); 1141 ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
1117 ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len); 1142 if (ret)
1143 goto e_key;
1144 ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
1145 if (ret)
1146 goto e_key;
1118 } else { 1147 } else {
1119 /* Version 5 CCPs use a 512-bit space for the key: each portion 1148 /* Version 5 CCPs use a 512-bit space for the key: each portion
1120 * occupies 256 bits, or one entire slot, and is zero-padded. 1149 * occupies 256 bits, or one entire slot, and is zero-padded.
@@ -1123,9 +1152,13 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
1123 1152
1124 dm_offset = CCP_SB_BYTES; 1153 dm_offset = CCP_SB_BYTES;
1125 pad = dm_offset - xts->key_len; 1154 pad = dm_offset - xts->key_len;
1126 ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len); 1155 ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
1127 ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len, 1156 if (ret)
1128 xts->key_len); 1157 goto e_key;
1158 ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key,
1159 xts->key_len, xts->key_len);
1160 if (ret)
1161 goto e_key;
1129 } 1162 }
1130 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, 1163 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
1131 CCP_PASSTHRU_BYTESWAP_256BIT); 1164 CCP_PASSTHRU_BYTESWAP_256BIT);
@@ -1144,7 +1177,9 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
1144 if (ret) 1177 if (ret)
1145 goto e_key; 1178 goto e_key;
1146 1179
1147 ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len); 1180 ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
1181 if (ret)
1182 goto e_ctx;
1148 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1183 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
1149 CCP_PASSTHRU_BYTESWAP_NOOP); 1184 CCP_PASSTHRU_BYTESWAP_NOOP);
1150 if (ret) { 1185 if (ret) {
@@ -1287,12 +1322,18 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1287 dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */ 1322 dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */
1288 1323
1289 len_singlekey = des3->key_len / 3; 1324 len_singlekey = des3->key_len / 3;
1290 ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey, 1325 ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
1291 des3->key, 0, len_singlekey); 1326 des3->key, 0, len_singlekey);
1292 ccp_set_dm_area(&key, dm_offset + len_singlekey, 1327 if (ret)
1293 des3->key, len_singlekey, len_singlekey); 1328 goto e_key;
1294 ccp_set_dm_area(&key, dm_offset, 1329 ret = ccp_set_dm_area(&key, dm_offset + len_singlekey,
1295 des3->key, 2 * len_singlekey, len_singlekey); 1330 des3->key, len_singlekey, len_singlekey);
1331 if (ret)
1332 goto e_key;
1333 ret = ccp_set_dm_area(&key, dm_offset,
1334 des3->key, 2 * len_singlekey, len_singlekey);
1335 if (ret)
1336 goto e_key;
1296 1337
1297 /* Copy the key to the SB */ 1338 /* Copy the key to the SB */
1298 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, 1339 ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
@@ -1320,7 +1361,10 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1320 1361
1321 /* Load the context into the LSB */ 1362 /* Load the context into the LSB */
1322 dm_offset = CCP_SB_BYTES - des3->iv_len; 1363 dm_offset = CCP_SB_BYTES - des3->iv_len;
1323 ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, des3->iv_len); 1364 ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0,
1365 des3->iv_len);
1366 if (ret)
1367 goto e_ctx;
1324 1368
1325 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) 1369 if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
1326 load_mode = CCP_PASSTHRU_BYTESWAP_NOOP; 1370 load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
@@ -1604,8 +1648,10 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1604 } 1648 }
1605 } else { 1649 } else {
1606 /* Restore the context */ 1650 /* Restore the context */
1607 ccp_set_dm_area(&ctx, 0, sha->ctx, 0, 1651 ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
1608 sb_count * CCP_SB_BYTES); 1652 sb_count * CCP_SB_BYTES);
1653 if (ret)
1654 goto e_ctx;
1609 } 1655 }
1610 1656
1611 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, 1657 ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
@@ -1927,7 +1973,9 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
1927 if (ret) 1973 if (ret)
1928 return ret; 1974 return ret;
1929 1975
1930 ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len); 1976 ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
1977 if (ret)
1978 goto e_mask;
1931 ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key, 1979 ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
1932 CCP_PASSTHRU_BYTESWAP_NOOP); 1980 CCP_PASSTHRU_BYTESWAP_NOOP);
1933 if (ret) { 1981 if (ret) {
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index b3afb6cc9d72..d95ec526587a 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -367,8 +367,6 @@ e_free:
367 367
368void *psp_copy_user_blob(u64 __user uaddr, u32 len) 368void *psp_copy_user_blob(u64 __user uaddr, u32 len)
369{ 369{
370 void *data;
371
372 if (!uaddr || !len) 370 if (!uaddr || !len)
373 return ERR_PTR(-EINVAL); 371 return ERR_PTR(-EINVAL);
374 372
@@ -376,18 +374,7 @@ void *psp_copy_user_blob(u64 __user uaddr, u32 len)
376 if (len > SEV_FW_BLOB_MAX_SIZE) 374 if (len > SEV_FW_BLOB_MAX_SIZE)
377 return ERR_PTR(-EINVAL); 375 return ERR_PTR(-EINVAL);
378 376
379 data = kmalloc(len, GFP_KERNEL); 377 return memdup_user((void __user *)(uintptr_t)uaddr, len);
380 if (!data)
381 return ERR_PTR(-ENOMEM);
382
383 if (copy_from_user(data, (void __user *)(uintptr_t)uaddr, len))
384 goto e_free;
385
386 return data;
387
388e_free:
389 kfree(data);
390 return ERR_PTR(-EFAULT);
391} 378}
392EXPORT_SYMBOL_GPL(psp_copy_user_blob); 379EXPORT_SYMBOL_GPL(psp_copy_user_blob);
393 380
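The conversion above leans on memdup_user(), whose semantics match the removed open-coded sequence: allocate, copy from user space, and return an ERR_PTR on failure. A sketch of that equivalence (illustrative, not the kernel's implementation):

#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>

static void *copy_blob(const void __user *uaddr, size_t len)
{
        void *data = kmalloc(len, GFP_KERNEL);

        if (!data)
                return ERR_PTR(-ENOMEM);
        if (copy_from_user(data, uaddr, len)) {
                kfree(data);
                return ERR_PTR(-EFAULT);
        }
        return data;    /* memdup_user(uaddr, len) behaves the same way */
}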
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
index eb0da6572720..e0459002eb71 100644
--- a/drivers/crypto/ccp/sp-dev.c
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -252,12 +252,12 @@ struct sp_device *sp_get_psp_master_device(void)
252 goto unlock; 252 goto unlock;
253 253
254 list_for_each_entry(i, &sp_units, entry) { 254 list_for_each_entry(i, &sp_units, entry) {
255 if (i->psp_data) 255 if (i->psp_data && i->get_psp_master_device) {
256 ret = i->get_psp_master_device();
256 break; 257 break;
258 }
257 } 259 }
258 260
259 if (i->get_psp_master_device)
260 ret = i->get_psp_master_device();
261unlock: 261unlock:
262 write_unlock_irqrestore(&sp_unit_lock, flags); 262 write_unlock_irqrestore(&sp_unit_lock, flags);
263 return ret; 263 return ret;
diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile
new file mode 100644
index 000000000000..bdc27970f95f
--- /dev/null
+++ b/drivers/crypto/ccree/Makefile
@@ -0,0 +1,7 @@
1# SPDX-License-Identifier: GPL-2.0
2
3obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
4ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
5ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o
6ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
7ccree-$(CONFIG_PM) += cc_pm.o
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
new file mode 100644
index 000000000000..03f4b9fce556
--- /dev/null
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -0,0 +1,2718 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <crypto/algapi.h>
7#include <crypto/internal/aead.h>
8#include <crypto/authenc.h>
9#include <crypto/des.h>
10#include <linux/rtnetlink.h>
11#include "cc_driver.h"
12#include "cc_buffer_mgr.h"
13#include "cc_aead.h"
14#include "cc_request_mgr.h"
15#include "cc_hash.h"
16#include "cc_sram_mgr.h"
17
18#define template_aead template_u.aead
19
20#define MAX_AEAD_SETKEY_SEQ 12
21#define MAX_AEAD_PROCESS_SEQ 23
22
23#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
24#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
25
26#define AES_CCM_RFC4309_NONCE_SIZE 3
27#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
28
29/* Value of each ICV_CMP byte (of 8) in case of success */
30#define ICV_VERIF_OK 0x01
31
32struct cc_aead_handle {
33 cc_sram_addr_t sram_workspace_addr;
34 struct list_head aead_list;
35};
36
37struct cc_hmac_s {
38 u8 *padded_authkey;
39 u8 *ipad_opad; /* IPAD, OPAD*/
40 dma_addr_t padded_authkey_dma_addr;
41 dma_addr_t ipad_opad_dma_addr;
42};
43
44struct cc_xcbc_s {
45 u8 *xcbc_keys; /* K1,K2,K3 */
46 dma_addr_t xcbc_keys_dma_addr;
47};
48
49struct cc_aead_ctx {
50 struct cc_drvdata *drvdata;
51 u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
52 u8 *enckey;
53 dma_addr_t enckey_dma_addr;
54 union {
55 struct cc_hmac_s hmac;
56 struct cc_xcbc_s xcbc;
57 } auth_state;
58 unsigned int enc_keylen;
59 unsigned int auth_keylen;
 60 unsigned int authsize; /* Actual (possibly truncated) size of the MAC/ICV */
61 enum drv_cipher_mode cipher_mode;
62 enum cc_flow_mode flow_mode;
63 enum drv_hash_mode auth_mode;
64};
65
66static inline bool valid_assoclen(struct aead_request *req)
67{
68 return ((req->assoclen == 16) || (req->assoclen == 20));
69}
70
71static void cc_aead_exit(struct crypto_aead *tfm)
72{
73 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
74 struct device *dev = drvdata_to_dev(ctx->drvdata);
75
76 dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
77 crypto_tfm_alg_name(&tfm->base));
78
79 /* Unmap enckey buffer */
80 if (ctx->enckey) {
81 dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
82 ctx->enckey_dma_addr);
83 dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
84 &ctx->enckey_dma_addr);
85 ctx->enckey_dma_addr = 0;
86 ctx->enckey = NULL;
87 }
88
 89 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
90 struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
91
92 if (xcbc->xcbc_keys) {
93 dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
94 xcbc->xcbc_keys,
95 xcbc->xcbc_keys_dma_addr);
96 }
97 dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
98 &xcbc->xcbc_keys_dma_addr);
99 xcbc->xcbc_keys_dma_addr = 0;
100 xcbc->xcbc_keys = NULL;
101 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
102 struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
103
104 if (hmac->ipad_opad) {
105 dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
106 hmac->ipad_opad,
107 hmac->ipad_opad_dma_addr);
108 dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
109 &hmac->ipad_opad_dma_addr);
110 hmac->ipad_opad_dma_addr = 0;
111 hmac->ipad_opad = NULL;
112 }
113 if (hmac->padded_authkey) {
114 dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
115 hmac->padded_authkey,
116 hmac->padded_authkey_dma_addr);
117 dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
118 &hmac->padded_authkey_dma_addr);
119 hmac->padded_authkey_dma_addr = 0;
120 hmac->padded_authkey = NULL;
121 }
122 }
123}
124
125static int cc_aead_init(struct crypto_aead *tfm)
126{
127 struct aead_alg *alg = crypto_aead_alg(tfm);
128 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
129 struct cc_crypto_alg *cc_alg =
130 container_of(alg, struct cc_crypto_alg, aead_alg);
131 struct device *dev = drvdata_to_dev(cc_alg->drvdata);
132
133 dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
134 crypto_tfm_alg_name(&tfm->base));
135
136 /* Initialize modes in instance */
137 ctx->cipher_mode = cc_alg->cipher_mode;
138 ctx->flow_mode = cc_alg->flow_mode;
139 ctx->auth_mode = cc_alg->auth_mode;
140 ctx->drvdata = cc_alg->drvdata;
141 crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
142
143 /* Allocate key buffer, cache line aligned */
144 ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
145 &ctx->enckey_dma_addr, GFP_KERNEL);
146 if (!ctx->enckey) {
147 dev_err(dev, "Failed allocating key buffer\n");
148 goto init_failed;
149 }
150 dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
151 ctx->enckey);
152
153 /* Set default authlen value */
154
 155 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
156 struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
157 const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;
158
159 /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
160 /* (and temporary for user key - up to 256b) */
161 xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
162 &xcbc->xcbc_keys_dma_addr,
163 GFP_KERNEL);
164 if (!xcbc->xcbc_keys) {
165 dev_err(dev, "Failed allocating buffer for XCBC keys\n");
166 goto init_failed;
167 }
168 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
169 struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
170 const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
171 dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;
172
173 /* Allocate dma-coherent buffer for IPAD + OPAD */
174 hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
175 &hmac->ipad_opad_dma_addr,
176 GFP_KERNEL);
177
178 if (!hmac->ipad_opad) {
179 dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
180 goto init_failed;
181 }
182
183 dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
184 hmac->ipad_opad);
185
186 hmac->padded_authkey = dma_alloc_coherent(dev,
187 MAX_HMAC_BLOCK_SIZE,
188 pkey_dma,
189 GFP_KERNEL);
190
191 if (!hmac->padded_authkey) {
192 dev_err(dev, "failed to allocate padded_authkey\n");
193 goto init_failed;
194 }
195 } else {
196 ctx->auth_state.hmac.ipad_opad = NULL;
197 ctx->auth_state.hmac.padded_authkey = NULL;
198 }
199
200 return 0;
201
202init_failed:
203 cc_aead_exit(tfm);
204 return -ENOMEM;
205}
206
207static void cc_aead_complete(struct device *dev, void *cc_req, int err)
208{
209 struct aead_request *areq = (struct aead_request *)cc_req;
210 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
211 struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
212 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
213
214 cc_unmap_aead_request(dev, areq);
215
216 /* Restore ordinary iv pointer */
217 areq->iv = areq_ctx->backup_iv;
218
219 if (err)
220 goto done;
221
222 if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
223 if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
224 ctx->authsize) != 0) {
225 dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
226 ctx->authsize, ctx->cipher_mode);
 227 /* In case of payload authentication failure, the decrypted
 228 * message must not be revealed --> zero its memory.
229 */
230 cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
231 err = -EBADMSG;
232 }
233 } else { /*ENCRYPT*/
234 if (areq_ctx->is_icv_fragmented) {
235 u32 skip = areq->cryptlen + areq_ctx->dst_offset;
236
237 cc_copy_sg_portion(dev, areq_ctx->mac_buf,
238 areq_ctx->dst_sgl, skip,
239 (skip + ctx->authsize),
240 CC_SG_FROM_BUF);
241 }
242
243 /* If an IV was generated, copy it back to the user provided
244 * buffer.
245 */
246 if (areq_ctx->backup_giv) {
247 if (ctx->cipher_mode == DRV_CIPHER_CTR)
248 memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
249 CTR_RFC3686_NONCE_SIZE,
250 CTR_RFC3686_IV_SIZE);
251 else if (ctx->cipher_mode == DRV_CIPHER_CCM)
252 memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
253 CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
254 }
255 }
256done:
257 aead_request_complete(areq, err);
258}
259
260static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
261 struct cc_aead_ctx *ctx)
262{
263 /* Load the AES key */
264 hw_desc_init(&desc[0]);
 265 /* The source/user key reuses the same buffer as the output keys,
 266 * because after this key-loading step the user key is not needed
 267 * anymore.
268 */
269 set_din_type(&desc[0], DMA_DLLI,
270 ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
271 NS_BIT);
272 set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
273 set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
274 set_key_size_aes(&desc[0], ctx->auth_keylen);
275 set_flow_mode(&desc[0], S_DIN_to_AES);
276 set_setup_mode(&desc[0], SETUP_LOAD_KEY0);
277
278 hw_desc_init(&desc[1]);
279 set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
280 set_flow_mode(&desc[1], DIN_AES_DOUT);
281 set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
282 AES_KEYSIZE_128, NS_BIT, 0);
283
284 hw_desc_init(&desc[2]);
285 set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
286 set_flow_mode(&desc[2], DIN_AES_DOUT);
287 set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
288 + AES_KEYSIZE_128),
289 AES_KEYSIZE_128, NS_BIT, 0);
290
291 hw_desc_init(&desc[3]);
292 set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
293 set_flow_mode(&desc[3], DIN_AES_DOUT);
294 set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
295 + 2 * AES_KEYSIZE_128),
296 AES_KEYSIZE_128, NS_BIT, 0);
297
298 return 4;
299}
300
301static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
302{
303 unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
304 unsigned int digest_ofs = 0;
305 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
306 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
307 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
308 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
309 struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
310
311 unsigned int idx = 0;
312 int i;
313
314 /* calc derived HMAC key */
315 for (i = 0; i < 2; i++) {
316 /* Load hash initial state */
317 hw_desc_init(&desc[idx]);
318 set_cipher_mode(&desc[idx], hash_mode);
319 set_din_sram(&desc[idx],
320 cc_larval_digest_addr(ctx->drvdata,
321 ctx->auth_mode),
322 digest_size);
323 set_flow_mode(&desc[idx], S_DIN_to_HASH);
324 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
325 idx++;
326
327 /* Load the hash current length*/
328 hw_desc_init(&desc[idx]);
329 set_cipher_mode(&desc[idx], hash_mode);
330 set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
331 set_flow_mode(&desc[idx], S_DIN_to_HASH);
332 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
333 idx++;
334
335 /* Prepare ipad key */
336 hw_desc_init(&desc[idx]);
337 set_xor_val(&desc[idx], hmac_pad_const[i]);
338 set_cipher_mode(&desc[idx], hash_mode);
339 set_flow_mode(&desc[idx], S_DIN_to_HASH);
340 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
341 idx++;
342
343 /* Perform HASH update */
344 hw_desc_init(&desc[idx]);
345 set_din_type(&desc[idx], DMA_DLLI,
346 hmac->padded_authkey_dma_addr,
347 SHA256_BLOCK_SIZE, NS_BIT);
348 set_cipher_mode(&desc[idx], hash_mode);
349 set_xor_active(&desc[idx]);
350 set_flow_mode(&desc[idx], DIN_HASH);
351 idx++;
352
353 /* Get the digest */
354 hw_desc_init(&desc[idx]);
355 set_cipher_mode(&desc[idx], hash_mode);
356 set_dout_dlli(&desc[idx],
357 (hmac->ipad_opad_dma_addr + digest_ofs),
358 digest_size, NS_BIT, 0);
359 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
360 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
361 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
362 idx++;
363
364 digest_ofs += digest_size;
365 }
366
367 return idx;
368}
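The loop above precomputes the HMAC inner and outer states: the block-sized key is XOR-ed with the usual ipad (0x36) and opad (0x5c) constants, one hash block is run over each result, and the two digest-sized states are stored back-to-back at ipad_opad_dma_addr. A minimal sketch of the XOR step (the hashing itself is left to the HASH engine):

#include <stdint.h>
#include <stddef.h>

#define HMAC_IPAD_BYTE 0x36
#define HMAC_OPAD_BYTE 0x5c

/* Build the two block-sized inputs whose (unpadded) hash states become
 * the precomputed HMAC ipad/opad digests. */
static void hmac_build_pads(const uint8_t *padded_key, size_t block_size,
			    uint8_t *ipad_block, uint8_t *opad_block)
{
	size_t i;

	for (i = 0; i < block_size; i++) {
		ipad_block[i] = padded_key[i] ^ HMAC_IPAD_BYTE;
		opad_block[i] = padded_key[i] ^ HMAC_OPAD_BYTE;
	}
}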
369
370static int validate_keys_sizes(struct cc_aead_ctx *ctx)
371{
372 struct device *dev = drvdata_to_dev(ctx->drvdata);
373
374 dev_dbg(dev, "enc_keylen=%u authkeylen=%u\n",
375 ctx->enc_keylen, ctx->auth_keylen);
376
377 switch (ctx->auth_mode) {
378 case DRV_HASH_SHA1:
379 case DRV_HASH_SHA256:
380 break;
381 case DRV_HASH_XCBC_MAC:
382 if (ctx->auth_keylen != AES_KEYSIZE_128 &&
383 ctx->auth_keylen != AES_KEYSIZE_192 &&
384 ctx->auth_keylen != AES_KEYSIZE_256)
385 return -ENOTSUPP;
386 break;
387 case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
388 if (ctx->auth_keylen > 0)
389 return -EINVAL;
390 break;
391 default:
392 dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
393 return -EINVAL;
394 }
395 /* Check cipher key size */
396 if (ctx->flow_mode == S_DIN_to_DES) {
397 if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
398 dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
399 ctx->enc_keylen);
400 return -EINVAL;
401 }
402 } else { /* Default assumed to be AES ciphers */
403 if (ctx->enc_keylen != AES_KEYSIZE_128 &&
404 ctx->enc_keylen != AES_KEYSIZE_192 &&
405 ctx->enc_keylen != AES_KEYSIZE_256) {
406 dev_err(dev, "Invalid cipher(AES) key size: %u\n",
407 ctx->enc_keylen);
408 return -EINVAL;
409 }
410 }
411
412 return 0; /* All tests of keys sizes passed */
413}
414
415/* This function prepares the user key so it can be passed to the HMAC
416 * processing (copy to internal buffer, or hash it if longer than a block).
417 */
418static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
419 unsigned int keylen)
420{
421 dma_addr_t key_dma_addr = 0;
422 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
423 struct device *dev = drvdata_to_dev(ctx->drvdata);
424 u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
425 struct cc_crypto_req cc_req = {};
426 unsigned int blocksize;
427 unsigned int digestsize;
428 unsigned int hashmode;
429 unsigned int idx = 0;
430 int rc = 0;
431 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
432 dma_addr_t padded_authkey_dma_addr =
433 ctx->auth_state.hmac.padded_authkey_dma_addr;
434
435 switch (ctx->auth_mode) { /* auth_key required and >0 */
436 case DRV_HASH_SHA1:
437 blocksize = SHA1_BLOCK_SIZE;
438 digestsize = SHA1_DIGEST_SIZE;
439 hashmode = DRV_HASH_HW_SHA1;
440 break;
441 case DRV_HASH_SHA256:
442 default:
443 blocksize = SHA256_BLOCK_SIZE;
444 digestsize = SHA256_DIGEST_SIZE;
445 hashmode = DRV_HASH_HW_SHA256;
446 }
447
448 if (keylen != 0) {
449 key_dma_addr = dma_map_single(dev, (void *)key, keylen,
450 DMA_TO_DEVICE);
451 if (dma_mapping_error(dev, key_dma_addr)) {
452 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
453 key, keylen);
454 return -ENOMEM;
455 }
456 if (keylen > blocksize) {
457 /* Load hash initial state */
458 hw_desc_init(&desc[idx]);
459 set_cipher_mode(&desc[idx], hashmode);
460 set_din_sram(&desc[idx], larval_addr, digestsize);
461 set_flow_mode(&desc[idx], S_DIN_to_HASH);
462 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
463 idx++;
464
465 /* Load the hash current length */
466 hw_desc_init(&desc[idx]);
467 set_cipher_mode(&desc[idx], hashmode);
468 set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
469 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
470 set_flow_mode(&desc[idx], S_DIN_to_HASH);
471 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
472 idx++;
473
474 hw_desc_init(&desc[idx]);
475 set_din_type(&desc[idx], DMA_DLLI,
476 key_dma_addr, keylen, NS_BIT);
477 set_flow_mode(&desc[idx], DIN_HASH);
478 idx++;
479
480 /* Get hashed key */
481 hw_desc_init(&desc[idx]);
482 set_cipher_mode(&desc[idx], hashmode);
483 set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
484 digestsize, NS_BIT, 0);
485 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
486 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
487 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
488 set_cipher_config0(&desc[idx],
489 HASH_DIGEST_RESULT_LITTLE_ENDIAN);
490 idx++;
491
492 hw_desc_init(&desc[idx]);
493 set_din_const(&desc[idx], 0, (blocksize - digestsize));
494 set_flow_mode(&desc[idx], BYPASS);
495 set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
496 digestsize), (blocksize - digestsize),
497 NS_BIT, 0);
498 idx++;
499 } else {
500 hw_desc_init(&desc[idx]);
501 set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
502 keylen, NS_BIT);
503 set_flow_mode(&desc[idx], BYPASS);
504 set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
505 keylen, NS_BIT, 0);
506 idx++;
507
508 if ((blocksize - keylen) != 0) {
509 hw_desc_init(&desc[idx]);
510 set_din_const(&desc[idx], 0,
511 (blocksize - keylen));
512 set_flow_mode(&desc[idx], BYPASS);
513 set_dout_dlli(&desc[idx],
514 (padded_authkey_dma_addr +
515 keylen),
516 (blocksize - keylen), NS_BIT, 0);
517 idx++;
518 }
519 }
520 } else {
521 hw_desc_init(&desc[idx]);
522 set_din_const(&desc[idx], 0, (blocksize - keylen));
523 set_flow_mode(&desc[idx], BYPASS);
524 set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
525 blocksize, NS_BIT, 0);
526 idx++;
527 }
528
529 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
530 if (rc)
531 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
532
533 if (key_dma_addr)
534 dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
535
536 return rc;
537}
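In plain terms, the descriptor sequences above build the block-sized key that standard HMAC expects: a key longer than the hash block size is hashed first, a shorter one is copied as-is, and the remainder is zero-filled. A minimal sketch, with digest() standing in for the HASH engine as a hypothetical caller-supplied primitive:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

typedef void (*digest_fn)(const uint8_t *in, size_t len, uint8_t *out);

/* Normalize the user auth key to exactly one hash block. */
static void build_padded_authkey(digest_fn digest,
				 const uint8_t *key, size_t keylen,
				 uint8_t *padded, size_t block_size)
{
	memset(padded, 0, block_size);		/* zero-fill the remainder */
	if (keylen > block_size)
		digest(key, keylen, padded);	/* long keys are hashed first */
	else if (keylen)
		memcpy(padded, key, keylen);	/* short keys are used as-is */
}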
538
539static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
540 unsigned int keylen)
541{
542 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
543 struct rtattr *rta = (struct rtattr *)key;
544 struct cc_crypto_req cc_req = {};
545 struct crypto_authenc_key_param *param;
546 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
547 int rc = -EINVAL;
548 unsigned int seq_len = 0;
549 struct device *dev = drvdata_to_dev(ctx->drvdata);
550
551 dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
552 ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
553
554 /* STAT_PHASE_0: Init and sanity checks */
555
556 if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
557 if (!RTA_OK(rta, keylen))
558 goto badkey;
559 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
560 goto badkey;
561 if (RTA_PAYLOAD(rta) < sizeof(*param))
562 goto badkey;
563 param = RTA_DATA(rta);
564 ctx->enc_keylen = be32_to_cpu(param->enckeylen);
565 key += RTA_ALIGN(rta->rta_len);
566 keylen -= RTA_ALIGN(rta->rta_len);
567 if (keylen < ctx->enc_keylen)
568 goto badkey;
569 ctx->auth_keylen = keylen - ctx->enc_keylen;
570
571 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
572 /* the nonce is stored in bytes at end of key */
573 if (ctx->enc_keylen <
574 (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
575 goto badkey;
576 /* Copy nonce from last 4 bytes in CTR key to
577 * first 4 bytes in CTR IV
578 */
579 memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
580 ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
581 CTR_RFC3686_NONCE_SIZE);
582 /* Set CTR key size */
583 ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
584 }
585 } else { /* non-authenc - has just one key */
586 ctx->enc_keylen = keylen;
587 ctx->auth_keylen = 0;
588 }
589
590 rc = validate_keys_sizes(ctx);
591 if (rc)
592 goto badkey;
593
594 /* STAT_PHASE_1: Copy key to ctx */
595
596 /* Get key material */
597 memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
598 if (ctx->enc_keylen == 24)
599 memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
600 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
601 memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
602 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
603 rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
604 if (rc)
605 goto badkey;
606 }
607
608 /* STAT_PHASE_2: Create sequence */
609
610 switch (ctx->auth_mode) {
611 case DRV_HASH_SHA1:
612 case DRV_HASH_SHA256:
613 seq_len = hmac_setkey(desc, ctx);
614 break;
615 case DRV_HASH_XCBC_MAC:
616 seq_len = xcbc_setkey(desc, ctx);
617 break;
618 case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
619 break; /* No auth. key setup */
620 default:
621 dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
622 rc = -ENOTSUPP;
623 goto badkey;
624 }
625
626 /* STAT_PHASE_3: Submit sequence to HW */
627
628 if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
629 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
630 if (rc) {
631 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
632 goto setkey_error;
633 }
634 }
635
636 /* Update STAT_PHASE_3 */
637 return rc;
638
639badkey:
640 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
641
642setkey_error:
643 return rc;
644}
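For reference, the key blob parsed above follows the generic authenc() layout: an rtattr header whose CRYPTO_AUTHENC_KEYA_PARAM payload is the big-endian cipher key length, followed by the authentication key and then the cipher key. A rough host-side sketch of that layout (the struct below is written out for illustration only and is not the kernel's rtattr definition):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct authenc_key_hdr {	/* rtattr + crypto_authenc_key_param, flattened */
	uint16_t rta_len;	/* header + payload length (4-byte aligned) */
	uint16_t rta_type;	/* 1 == CRYPTO_AUTHENC_KEYA_PARAM */
	uint8_t enckeylen_be[4];/* cipher key length, big endian */
};

/* Split key into authkey || enckey the way cc_aead_setkey() does. */
static int parse_authenc_key(const uint8_t *key, size_t keylen,
			     const uint8_t **authkey, size_t *authkeylen,
			     const uint8_t **enckey, size_t *enckeylen)
{
	struct authenc_key_hdr hdr;

	if (keylen < sizeof(hdr))
		return -1;
	memcpy(&hdr, key, sizeof(hdr));
	if (hdr.rta_type != 1)
		return -1;
	*enckeylen = ((size_t)hdr.enckeylen_be[0] << 24) |
		     ((size_t)hdr.enckeylen_be[1] << 16) |
		     ((size_t)hdr.enckeylen_be[2] << 8) |
		     (size_t)hdr.enckeylen_be[3];
	key += sizeof(hdr);
	keylen -= sizeof(hdr);
	if (keylen < *enckeylen)
		return -1;
	*authkeylen = keylen - *enckeylen;
	*authkey = key;
	*enckey = key + *authkeylen;
	return 0;
}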
645
646static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
647 unsigned int keylen)
648{
649 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
650
651 if (keylen < 3)
652 return -EINVAL;
653
654 keylen -= 3;
655 memcpy(ctx->ctr_nonce, key + keylen, 3);
656
657 return cc_aead_setkey(tfm, key, keylen);
658}
659
660static int cc_aead_setauthsize(struct crypto_aead *authenc,
661 unsigned int authsize)
662{
663 struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
664 struct device *dev = drvdata_to_dev(ctx->drvdata);
665
666 /* Unsupported auth. sizes */
667 if (authsize == 0 ||
668 authsize > crypto_aead_maxauthsize(authenc)) {
669 return -ENOTSUPP;
670 }
671
672 ctx->authsize = authsize;
673 dev_dbg(dev, "authlen=%d\n", ctx->authsize);
674
675 return 0;
676}
677
678static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
679 unsigned int authsize)
680{
681 switch (authsize) {
682 case 8:
683 case 12:
684 case 16:
685 break;
686 default:
687 return -EINVAL;
688 }
689
690 return cc_aead_setauthsize(authenc, authsize);
691}
692
693static int cc_ccm_setauthsize(struct crypto_aead *authenc,
694 unsigned int authsize)
695{
696 switch (authsize) {
697 case 4:
698 case 6:
699 case 8:
700 case 10:
701 case 12:
702 case 14:
703 case 16:
704 break;
705 default:
706 return -EINVAL;
707 }
708
709 return cc_aead_setauthsize(authenc, authsize);
710}
711
712static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
713 struct cc_hw_desc desc[], unsigned int *seq_size)
714{
715 struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
716 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
717 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
718 enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
719 unsigned int idx = *seq_size;
720 struct device *dev = drvdata_to_dev(ctx->drvdata);
721
722 switch (assoc_dma_type) {
723 case CC_DMA_BUF_DLLI:
724 dev_dbg(dev, "ASSOC buffer type DLLI\n");
725 hw_desc_init(&desc[idx]);
726 set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
727 areq->assoclen, NS_BIT);
728 set_flow_mode(&desc[idx], flow_mode);
729 if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
730 areq_ctx->cryptlen > 0)
731 set_din_not_last_indication(&desc[idx]);
732 break;
733 case CC_DMA_BUF_MLLI:
734 dev_dbg(dev, "ASSOC buffer type MLLI\n");
735 hw_desc_init(&desc[idx]);
736 set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
737 areq_ctx->assoc.mlli_nents, NS_BIT);
738 set_flow_mode(&desc[idx], flow_mode);
739 if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
740 areq_ctx->cryptlen > 0)
741 set_din_not_last_indication(&desc[idx]);
742 break;
743 case CC_DMA_BUF_NULL:
744 default:
745 dev_err(dev, "Invalid ASSOC buffer type\n");
746 }
747
748 *seq_size = (++idx);
749}
750
751static void cc_proc_authen_desc(struct aead_request *areq,
752 unsigned int flow_mode,
753 struct cc_hw_desc desc[],
754 unsigned int *seq_size, int direct)
755{
756 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
757 enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
758 unsigned int idx = *seq_size;
759 struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
760 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
761 struct device *dev = drvdata_to_dev(ctx->drvdata);
762
763 switch (data_dma_type) {
764 case CC_DMA_BUF_DLLI:
765 {
766 struct scatterlist *cipher =
767 (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
768 areq_ctx->dst_sgl : areq_ctx->src_sgl;
769
770 unsigned int offset =
771 (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
772 areq_ctx->dst_offset : areq_ctx->src_offset;
773 dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
774 hw_desc_init(&desc[idx]);
775 set_din_type(&desc[idx], DMA_DLLI,
776 (sg_dma_address(cipher) + offset),
777 areq_ctx->cryptlen, NS_BIT);
778 set_flow_mode(&desc[idx], flow_mode);
779 break;
780 }
781 case CC_DMA_BUF_MLLI:
782 {
783 /* DOUBLE-PASS flow (the default):
784 * assoc. + iv + data are compacted into one table;
785 * if assoclen is zero, only the IV is processed.
786 */
787 cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
788 u32 mlli_nents = areq_ctx->assoc.mlli_nents;
789
790 if (areq_ctx->is_single_pass) {
791 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
792 mlli_addr = areq_ctx->dst.sram_addr;
793 mlli_nents = areq_ctx->dst.mlli_nents;
794 } else {
795 mlli_addr = areq_ctx->src.sram_addr;
796 mlli_nents = areq_ctx->src.mlli_nents;
797 }
798 }
799
800 dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
801 hw_desc_init(&desc[idx]);
802 set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
803 NS_BIT);
804 set_flow_mode(&desc[idx], flow_mode);
805 break;
806 }
807 case CC_DMA_BUF_NULL:
808 default:
809 dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
810 }
811
812 *seq_size = (++idx);
813}
814
815static void cc_proc_cipher_desc(struct aead_request *areq,
816 unsigned int flow_mode,
817 struct cc_hw_desc desc[],
818 unsigned int *seq_size)
819{
820 unsigned int idx = *seq_size;
821 struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
822 enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
823 struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
824 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
825 struct device *dev = drvdata_to_dev(ctx->drvdata);
826
827 if (areq_ctx->cryptlen == 0)
828 return; /*null processing*/
829
830 switch (data_dma_type) {
831 case CC_DMA_BUF_DLLI:
832 dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
833 hw_desc_init(&desc[idx]);
834 set_din_type(&desc[idx], DMA_DLLI,
835 (sg_dma_address(areq_ctx->src_sgl) +
836 areq_ctx->src_offset), areq_ctx->cryptlen,
837 NS_BIT);
838 set_dout_dlli(&desc[idx],
839 (sg_dma_address(areq_ctx->dst_sgl) +
840 areq_ctx->dst_offset),
841 areq_ctx->cryptlen, NS_BIT, 0);
842 set_flow_mode(&desc[idx], flow_mode);
843 break;
844 case CC_DMA_BUF_MLLI:
845 dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
846 hw_desc_init(&desc[idx]);
847 set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
848 areq_ctx->src.mlli_nents, NS_BIT);
849 set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
850 areq_ctx->dst.mlli_nents, NS_BIT, 0);
851 set_flow_mode(&desc[idx], flow_mode);
852 break;
853 case CC_DMA_BUF_NULL:
854 default:
855 dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
856 }
857
858 *seq_size = (++idx);
859}
860
861static void cc_proc_digest_desc(struct aead_request *req,
862 struct cc_hw_desc desc[],
863 unsigned int *seq_size)
864{
865 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
866 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
867 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
868 unsigned int idx = *seq_size;
869 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
870 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
871 int direct = req_ctx->gen_ctx.op_type;
872
873 /* Get final ICV result */
874 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
875 hw_desc_init(&desc[idx]);
876 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
877 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
878 set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
879 NS_BIT, 1);
880 set_queue_last_ind(ctx->drvdata, &desc[idx]);
881 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
882 set_aes_not_hash_mode(&desc[idx]);
883 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
884 } else {
885 set_cipher_config0(&desc[idx],
886 HASH_DIGEST_RESULT_LITTLE_ENDIAN);
887 set_cipher_mode(&desc[idx], hash_mode);
888 }
889 } else { /*Decrypt*/
890 /* Get ICV out from hardware */
891 hw_desc_init(&desc[idx]);
892 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
893 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
894 set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
895 ctx->authsize, NS_BIT, 1);
896 set_queue_last_ind(ctx->drvdata, &desc[idx]);
897 set_cipher_config0(&desc[idx],
898 HASH_DIGEST_RESULT_LITTLE_ENDIAN);
899 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
900 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
901 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
902 set_aes_not_hash_mode(&desc[idx]);
903 } else {
904 set_cipher_mode(&desc[idx], hash_mode);
905 }
906 }
907
908 *seq_size = (++idx);
909}
910
911static void cc_set_cipher_desc(struct aead_request *req,
912 struct cc_hw_desc desc[],
913 unsigned int *seq_size)
914{
915 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
916 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
917 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
918 unsigned int hw_iv_size = req_ctx->hw_iv_size;
919 unsigned int idx = *seq_size;
920 int direct = req_ctx->gen_ctx.op_type;
921
922 /* Setup cipher state */
923 hw_desc_init(&desc[idx]);
924 set_cipher_config0(&desc[idx], direct);
925 set_flow_mode(&desc[idx], ctx->flow_mode);
926 set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
927 hw_iv_size, NS_BIT);
928 if (ctx->cipher_mode == DRV_CIPHER_CTR)
929 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
930 else
931 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
932 set_cipher_mode(&desc[idx], ctx->cipher_mode);
933 idx++;
934
935 /* Setup enc. key */
936 hw_desc_init(&desc[idx]);
937 set_cipher_config0(&desc[idx], direct);
938 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
939 set_flow_mode(&desc[idx], ctx->flow_mode);
940 if (ctx->flow_mode == S_DIN_to_AES) {
941 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
942 ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
943 ctx->enc_keylen), NS_BIT);
944 set_key_size_aes(&desc[idx], ctx->enc_keylen);
945 } else {
946 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
947 ctx->enc_keylen, NS_BIT);
948 set_key_size_des(&desc[idx], ctx->enc_keylen);
949 }
950 set_cipher_mode(&desc[idx], ctx->cipher_mode);
951 idx++;
952
953 *seq_size = idx;
954}
955
956static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
957 unsigned int *seq_size, unsigned int data_flow_mode)
958{
959 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
960 int direct = req_ctx->gen_ctx.op_type;
961 unsigned int idx = *seq_size;
962
963 if (req_ctx->cryptlen == 0)
964 return; /*null processing*/
965
966 cc_set_cipher_desc(req, desc, &idx);
967 cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
968 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
969 /* We must wait for the DMA to finish writing all the cipher data */
970 hw_desc_init(&desc[idx]);
971 set_din_no_dma(&desc[idx], 0, 0xfffff0);
972 set_dout_no_dma(&desc[idx], 0, 0, 1);
973 idx++;
974 }
975
976 *seq_size = idx;
977}
978
979static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
980 unsigned int *seq_size)
981{
982 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
983 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
984 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
985 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
986 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
987 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
988 unsigned int idx = *seq_size;
989
990 /* Loading hash ipad xor key state */
991 hw_desc_init(&desc[idx]);
992 set_cipher_mode(&desc[idx], hash_mode);
993 set_din_type(&desc[idx], DMA_DLLI,
994 ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
995 NS_BIT);
996 set_flow_mode(&desc[idx], S_DIN_to_HASH);
997 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
998 idx++;
999
1000 /* Load init. digest len (64 bytes) */
1001 hw_desc_init(&desc[idx]);
1002 set_cipher_mode(&desc[idx], hash_mode);
1003 set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
1004 ctx->drvdata->hash_len_sz);
1005 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1006 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1007 idx++;
1008
1009 *seq_size = idx;
1010}
1011
1012static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
1013 unsigned int *seq_size)
1014{
1015 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1016 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1017 unsigned int idx = *seq_size;
1018
1019 /* Loading MAC state */
1020 hw_desc_init(&desc[idx]);
1021 set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
1022 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1023 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1024 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1025 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1026 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1027 set_aes_not_hash_mode(&desc[idx]);
1028 idx++;
1029
1030 /* Setup XCBC MAC K1 */
1031 hw_desc_init(&desc[idx]);
1032 set_din_type(&desc[idx], DMA_DLLI,
1033 ctx->auth_state.xcbc.xcbc_keys_dma_addr,
1034 AES_KEYSIZE_128, NS_BIT);
1035 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1036 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1037 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1038 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1039 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1040 set_aes_not_hash_mode(&desc[idx]);
1041 idx++;
1042
1043 /* Setup XCBC MAC K2 */
1044 hw_desc_init(&desc[idx]);
1045 set_din_type(&desc[idx], DMA_DLLI,
1046 (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1047 AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
1048 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1049 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1050 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1051 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1052 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1053 set_aes_not_hash_mode(&desc[idx]);
1054 idx++;
1055
1056 /* Setup XCBC MAC K3 */
1057 hw_desc_init(&desc[idx]);
1058 set_din_type(&desc[idx], DMA_DLLI,
1059 (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1060 2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
1061 set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
1062 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1063 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1064 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1065 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1066 set_aes_not_hash_mode(&desc[idx]);
1067 idx++;
1068
1069 *seq_size = idx;
1070}
1071
1072static void cc_proc_header_desc(struct aead_request *req,
1073 struct cc_hw_desc desc[],
1074 unsigned int *seq_size)
1075{
1076 unsigned int idx = *seq_size;
1077 /* Hash associated data */
1078 if (req->assoclen > 0)
1079 cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1080
1081 /* Hash IV */
1082 *seq_size = idx;
1083}
1084
1085static void cc_proc_scheme_desc(struct aead_request *req,
1086 struct cc_hw_desc desc[],
1087 unsigned int *seq_size)
1088{
1089 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1090 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1091 struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
1092 unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
1093 DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
1094 unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
1095 CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
1096 unsigned int idx = *seq_size;
1097
1098 hw_desc_init(&desc[idx]);
1099 set_cipher_mode(&desc[idx], hash_mode);
1100 set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
1101 ctx->drvdata->hash_len_sz);
1102 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1103 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
1104 set_cipher_do(&desc[idx], DO_PAD);
1105 idx++;
1106
1107 /* Get final ICV result */
1108 hw_desc_init(&desc[idx]);
1109 set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
1110 digest_size);
1111 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1112 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1113 set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1114 set_cipher_mode(&desc[idx], hash_mode);
1115 idx++;
1116
1117 /* Loading hash opad xor key state */
1118 hw_desc_init(&desc[idx]);
1119 set_cipher_mode(&desc[idx], hash_mode);
1120 set_din_type(&desc[idx], DMA_DLLI,
1121 (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
1122 digest_size, NS_BIT);
1123 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1124 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1125 idx++;
1126
1127 /* Load init. digest len (64 bytes) */
1128 hw_desc_init(&desc[idx]);
1129 set_cipher_mode(&desc[idx], hash_mode);
1130 set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
1131 ctx->drvdata->hash_len_sz);
1132 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1133 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1134 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1135 idx++;
1136
1137 /* Perform HASH update */
1138 hw_desc_init(&desc[idx]);
1139 set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
1140 digest_size);
1141 set_flow_mode(&desc[idx], DIN_HASH);
1142 idx++;
1143
1144 *seq_size = idx;
1145}
1146
1147static void cc_mlli_to_sram(struct aead_request *req,
1148 struct cc_hw_desc desc[], unsigned int *seq_size)
1149{
1150 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1151 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1152 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1153 struct device *dev = drvdata_to_dev(ctx->drvdata);
1154
1155 if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1156 req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
1157 !req_ctx->is_single_pass) {
1158 dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
1159 (unsigned int)ctx->drvdata->mlli_sram_addr,
1160 req_ctx->mlli_params.mlli_len);
1161 /* Copy MLLI table host-to-sram */
1162 hw_desc_init(&desc[*seq_size]);
1163 set_din_type(&desc[*seq_size], DMA_DLLI,
1164 req_ctx->mlli_params.mlli_dma_addr,
1165 req_ctx->mlli_params.mlli_len, NS_BIT);
1166 set_dout_sram(&desc[*seq_size],
1167 ctx->drvdata->mlli_sram_addr,
1168 req_ctx->mlli_params.mlli_len);
1169 set_flow_mode(&desc[*seq_size], BYPASS);
1170 (*seq_size)++;
1171 }
1172}
1173
1174static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
1175 enum cc_flow_mode setup_flow_mode,
1176 bool is_single_pass)
1177{
1178 enum cc_flow_mode data_flow_mode;
1179
1180 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1181 if (setup_flow_mode == S_DIN_to_AES)
1182 data_flow_mode = is_single_pass ?
1183 AES_to_HASH_and_DOUT : DIN_AES_DOUT;
1184 else
1185 data_flow_mode = is_single_pass ?
1186 DES_to_HASH_and_DOUT : DIN_DES_DOUT;
1187 } else { /* Decrypt */
1188 if (setup_flow_mode == S_DIN_to_AES)
1189 data_flow_mode = is_single_pass ?
1190 AES_and_HASH : DIN_AES_DOUT;
1191 else
1192 data_flow_mode = is_single_pass ?
1193 DES_and_HASH : DIN_DES_DOUT;
1194 }
1195
1196 return data_flow_mode;
1197}
1198
1199static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1200 unsigned int *seq_size)
1201{
1202 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1203 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1204 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1205 int direct = req_ctx->gen_ctx.op_type;
1206 unsigned int data_flow_mode =
1207 cc_get_data_flow(direct, ctx->flow_mode,
1208 req_ctx->is_single_pass);
1209
1210 if (req_ctx->is_single_pass) {
1211 /**
1212 * Single-pass flow
1213 */
1214 cc_set_hmac_desc(req, desc, seq_size);
1215 cc_set_cipher_desc(req, desc, seq_size);
1216 cc_proc_header_desc(req, desc, seq_size);
1217 cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1218 cc_proc_scheme_desc(req, desc, seq_size);
1219 cc_proc_digest_desc(req, desc, seq_size);
1220 return;
1221 }
1222
1223 /**
1224 * Double-pass flow
1225 * Fallback for unsupported single-pass modes,
1226 * i.e. when the assoc. data length is not a multiple of a word.
1227 */
1228 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1229 /* encrypt first.. */
1230 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1231 /* authenc after..*/
1232 cc_set_hmac_desc(req, desc, seq_size);
1233 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1234 cc_proc_scheme_desc(req, desc, seq_size);
1235 cc_proc_digest_desc(req, desc, seq_size);
1236
1237 } else { /*DECRYPT*/
1238 /* authenc first..*/
1239 cc_set_hmac_desc(req, desc, seq_size);
1240 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1241 cc_proc_scheme_desc(req, desc, seq_size);
1242 /* decrypt after.. */
1243 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1244 /* Reading the digest result (with the completion bit set)
1245 * must come after the cipher operation.
1246 */
1247 cc_proc_digest_desc(req, desc, seq_size);
1248 }
1249}
1250
1251static void
1252cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1253 unsigned int *seq_size)
1254{
1255 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1256 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1257 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1258 int direct = req_ctx->gen_ctx.op_type;
1259 unsigned int data_flow_mode =
1260 cc_get_data_flow(direct, ctx->flow_mode,
1261 req_ctx->is_single_pass);
1262
1263 if (req_ctx->is_single_pass) {
1264 /**
1265 * Single-pass flow
1266 */
1267 cc_set_xcbc_desc(req, desc, seq_size);
1268 cc_set_cipher_desc(req, desc, seq_size);
1269 cc_proc_header_desc(req, desc, seq_size);
1270 cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1271 cc_proc_digest_desc(req, desc, seq_size);
1272 return;
1273 }
1274
1275 /**
1276 * Double-pass flow
1277 * Fallback for unsupported single-pass modes,
1278 * i.e. when the assoc. data length is not a multiple of a word.
1279 */
1280 if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1281 /* encrypt first.. */
1282 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1283 /* authenc after.. */
1284 cc_set_xcbc_desc(req, desc, seq_size);
1285 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1286 cc_proc_digest_desc(req, desc, seq_size);
1287 } else { /*DECRYPT*/
1288 /* authenc first.. */
1289 cc_set_xcbc_desc(req, desc, seq_size);
1290 cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1291 /* decrypt after..*/
1292 cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1293 /* Reading the digest result (with the completion bit set)
1294 * must come after the cipher operation.
1295 */
1296 cc_proc_digest_desc(req, desc, seq_size);
1297 }
1298}
1299
1300static int validate_data_size(struct cc_aead_ctx *ctx,
1301 enum drv_crypto_direction direct,
1302 struct aead_request *req)
1303{
1304 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1305 struct device *dev = drvdata_to_dev(ctx->drvdata);
1306 unsigned int assoclen = req->assoclen;
1307 unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1308 (req->cryptlen - ctx->authsize) : req->cryptlen;
1309
1310 if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
1311 req->cryptlen < ctx->authsize)
1312 goto data_size_err;
1313
1314 areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
1315
1316 switch (ctx->flow_mode) {
1317 case S_DIN_to_AES:
1318 if (ctx->cipher_mode == DRV_CIPHER_CBC &&
1319 !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
1320 goto data_size_err;
1321 if (ctx->cipher_mode == DRV_CIPHER_CCM)
1322 break;
1323 if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1324 if (areq_ctx->plaintext_authenticate_only)
1325 areq_ctx->is_single_pass = false;
1326 break;
1327 }
1328
1329 if (!IS_ALIGNED(assoclen, sizeof(u32)))
1330 areq_ctx->is_single_pass = false;
1331
1332 if (ctx->cipher_mode == DRV_CIPHER_CTR &&
1333 !IS_ALIGNED(cipherlen, sizeof(u32)))
1334 areq_ctx->is_single_pass = false;
1335
1336 break;
1337 case S_DIN_to_DES:
1338 if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
1339 goto data_size_err;
1340 if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
1341 areq_ctx->is_single_pass = false;
1342 break;
1343 default:
1344 dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
1345 goto data_size_err;
1346 }
1347
1348 return 0;
1349
1350data_size_err:
1351 return -EINVAL;
1352}
1353
1354static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
1355{
1356 unsigned int len = 0;
1357
1358 if (header_size == 0)
1359 return 0;
1360
1361 if (header_size < ((1UL << 16) - (1UL << 8))) {
1362 len = 2;
1363
1364 pa0_buff[0] = (header_size >> 8) & 0xFF;
1365 pa0_buff[1] = header_size & 0xFF;
1366 } else {
1367 len = 6;
1368
1369 pa0_buff[0] = 0xFF;
1370 pa0_buff[1] = 0xFE;
1371 pa0_buff[2] = (header_size >> 24) & 0xFF;
1372 pa0_buff[3] = (header_size >> 16) & 0xFF;
1373 pa0_buff[4] = (header_size >> 8) & 0xFF;
1374 pa0_buff[5] = header_size & 0xFF;
1375 }
1376
1377 return len;
1378}
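As a worked example of the encoding above (RFC 3610 section 2.2): an associated-data length below 2^16 - 2^8 is written in two octets, anything larger (up to 2^32 - 1) uses the 0xff 0xfe escape followed by four octets.

#include <stdint.h>

/* Expected a0 prefixes for two sample associated-data lengths. */
static const uint8_t a0_len_24[]    = { 0x00, 0x18 };		/* l(a) = 24 */
static const uint8_t a0_len_70000[] = { 0xff, 0xfe,		/* l(a) = 70000 */
					0x00, 0x01, 0x11, 0x70 };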
1379
1380static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
1381{
1382 __be32 data;
1383
1384 memset(block, 0, csize);
1385 block += csize;
1386
1387 if (csize >= 4)
1388 csize = 4;
1389 else if (msglen > (1 << (8 * csize)))
1390 return -EOVERFLOW;
1391
1392 data = cpu_to_be32(msglen);
1393 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
1394
1395 return 0;
1396}
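A worked example for set_msg_len(): the message length is written big-endian into the last octets of the L-sized length field, with at most four octets significant and any higher octets left zero. For msglen = 0x012345:

#include <stdint.h>

static const uint8_t msglen_l3[] = { 0x01, 0x23, 0x45 };	/* csize (L) = 3 */
static const uint8_t msglen_l8[] = { 0x00, 0x00, 0x00, 0x00,	/* csize (L) = 8 */
				     0x00, 0x01, 0x23, 0x45 };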
1397
1398static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
1399 unsigned int *seq_size)
1400{
1401 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1402 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1403 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1404 unsigned int idx = *seq_size;
1405 unsigned int cipher_flow_mode;
1406 dma_addr_t mac_result;
1407
1408 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1409 cipher_flow_mode = AES_to_HASH_and_DOUT;
1410 mac_result = req_ctx->mac_buf_dma_addr;
1411 } else { /* Encrypt */
1412 cipher_flow_mode = AES_and_HASH;
1413 mac_result = req_ctx->icv_dma_addr;
1414 }
1415
1416 /* load key */
1417 hw_desc_init(&desc[idx]);
1418 set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1419 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1420 ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
1421 ctx->enc_keylen), NS_BIT);
1422 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1423 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1424 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1425 set_flow_mode(&desc[idx], S_DIN_to_AES);
1426 idx++;
1427
1428 /* load ctr state */
1429 hw_desc_init(&desc[idx]);
1430 set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1431 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1432 set_din_type(&desc[idx], DMA_DLLI,
1433 req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
1434 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1435 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1436 set_flow_mode(&desc[idx], S_DIN_to_AES);
1437 idx++;
1438
1439 /* load MAC key */
1440 hw_desc_init(&desc[idx]);
1441 set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1442 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1443 ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
1444 ctx->enc_keylen), NS_BIT);
1445 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1446 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1447 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1448 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1449 set_aes_not_hash_mode(&desc[idx]);
1450 idx++;
1451
1452 /* load MAC state */
1453 hw_desc_init(&desc[idx]);
1454 set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1455 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1456 set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1457 AES_BLOCK_SIZE, NS_BIT);
1458 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1459 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1460 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1461 set_aes_not_hash_mode(&desc[idx]);
1462 idx++;
1463
1464 /* process assoc data */
1465 if (req->assoclen > 0) {
1466 cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1467 } else {
1468 hw_desc_init(&desc[idx]);
1469 set_din_type(&desc[idx], DMA_DLLI,
1470 sg_dma_address(&req_ctx->ccm_adata_sg),
1471 AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
1472 set_flow_mode(&desc[idx], DIN_HASH);
1473 idx++;
1474 }
1475
1476 /* process the cipher */
1477 if (req_ctx->cryptlen)
1478 cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
1479
1480 /* Read temporal MAC */
1481 hw_desc_init(&desc[idx]);
1482 set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1483 set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
1484 NS_BIT, 0);
1485 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1486 set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1487 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1488 set_aes_not_hash_mode(&desc[idx]);
1489 idx++;
1490
1491 /* load AES-CTR state (for last MAC calculation) */
1492 hw_desc_init(&desc[idx]);
1493 set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1494 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1495 set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
1496 AES_BLOCK_SIZE, NS_BIT);
1497 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1498 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1499 set_flow_mode(&desc[idx], S_DIN_to_AES);
1500 idx++;
1501
1502 hw_desc_init(&desc[idx]);
1503 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1504 set_dout_no_dma(&desc[idx], 0, 0, 1);
1505 idx++;
1506
1507 /* encrypt the "T" value and store MAC in mac_state */
1508 hw_desc_init(&desc[idx]);
1509 set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1510 ctx->authsize, NS_BIT);
1511 set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1512 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1513 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1514 idx++;
1515
1516 *seq_size = idx;
1517 return 0;
1518}
1519
1520static int config_ccm_adata(struct aead_request *req)
1521{
1522 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1523 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1524 struct device *dev = drvdata_to_dev(ctx->drvdata);
1525 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1526 //unsigned int size_of_a = 0, rem_a_size = 0;
1527 unsigned int lp = req->iv[0];
1528 /* Note: The code assumes that req->iv[0] already contains the value
1529 * of L' of RFC 3610
1530 */
1531 unsigned int l = lp + 1; /* This is L of RFC 3610 (L' + 1). */
1532 unsigned int m = ctx->authsize; /* This is M (the tag size) of RFC 3610. */
1533 u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
1534 u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
1535 u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1536 unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1537 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1538 req->cryptlen :
1539 (req->cryptlen - ctx->authsize);
1540 int rc;
1541
1542 memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1543 memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
1544
1545 /* taken from crypto/ccm.c */
1546 /* 2 <= L <= 8, so 1 <= L' <= 7. */
1547 if (l < 2 || l > 8) {
1548 dev_err(dev, "illegal iv value %X\n", req->iv[0]);
1549 return -EINVAL;
1550 }
1551 memcpy(b0, req->iv, AES_BLOCK_SIZE);
1552
1553 /* format control info per RFC 3610 and
1554 * NIST Special Publication 800-38C
1555 */
1556 *b0 |= (8 * ((m - 2) / 2));
1557 if (req->assoclen > 0)
1558 *b0 |= 64; /* Enable bit 6 if Adata exists. */
1559
1560 rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write l(m), the message length, into the last L octets. */
1561 if (rc) {
1562 dev_err(dev, "message len overflow detected");
1563 return rc;
1564 }
1565 /* END of "taken from crypto/ccm.c" */
1566
1567 /* l(a) - size of associated data. */
1568 req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
1569
1570 memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
1571 req->iv[15] = 1;
1572
1573 memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
1574 ctr_count_0[15] = 0;
1575
1576 return 0;
1577}
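The B0 flags byte assembled above follows RFC 3610 section 2.2: bit 6 marks the presence of associated data, bits 3-5 carry M' = (M - 2) / 2, and bits 0-2 carry L' = L - 1 (already present in req->iv[0]). A small sketch of the same computation:

#include <stdint.h>

/* Build the CCM B0 flags byte from the tag size M and length-field size L. */
static uint8_t ccm_b0_flags(int have_adata, unsigned int m, unsigned int l)
{
	return (have_adata ? 0x40 : 0x00) |
	       (uint8_t)(8 * ((m - 2) / 2)) |	/* M' in bits 3..5 */
	       (uint8_t)(l - 1);		/* L' in bits 0..2 */
}

/* Example: M = 8, L = 4, Adata present  ->  0x40 | 0x18 | 0x03 = 0x5b */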
1578
1579static void cc_proc_rfc4309_ccm(struct aead_request *req)
1580{
1581 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1582 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1583 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1584
1585 /* L' */
1586 memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
1587 /* For RFC 4309, always use 4 bytes for message length
1588 * (at most 2^32-1 bytes).
1589 */
1590 areq_ctx->ctr_iv[0] = 3;
1591
1592 /* In RFC 4309 there is an 11-byte nonce+IV part
1593 * that we build here.
1594 */
1595 memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
1596 CCM_BLOCK_NONCE_SIZE);
1597 memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
1598 CCM_BLOCK_IV_SIZE);
1599 req->iv = areq_ctx->ctr_iv;
1600 req->assoclen -= CCM_BLOCK_IV_SIZE;
1601}
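Laid out explicitly, the RFC 4309 counter block built above places L' = 3 in byte 0, the 3-byte salt taken from the key in bytes 1-3 and the 8-byte per-request IV in bytes 4-11, leaving the counter octets zero for now. A minimal sketch (offsets assumed to match the CCM_BLOCK_NONCE_OFFSET/CCM_BLOCK_IV_OFFSET constants used above):

#include <stdint.h>
#include <string.h>

/* Assemble the 16-byte RFC 4309 CCM counter block from salt and IV. */
static void rfc4309_build_ctr_iv(uint8_t ctr_iv[16], const uint8_t salt[3],
				 const uint8_t iv[8])
{
	memset(ctr_iv, 0, 16);
	ctr_iv[0] = 3;			/* L' = 3, i.e. a 4-octet length field */
	memcpy(ctr_iv + 1, salt, 3);	/* nonce[0..2]: salt from setkey */
	memcpy(ctr_iv + 4, iv, 8);	/* nonce[3..10]: per-request IV */
}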
1602
1603static void cc_set_ghash_desc(struct aead_request *req,
1604 struct cc_hw_desc desc[], unsigned int *seq_size)
1605{
1606 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1607 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1608 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1609 unsigned int idx = *seq_size;
1610
1611 /* load key to AES */
1612 hw_desc_init(&desc[idx]);
1613 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1614 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1615 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1616 ctx->enc_keylen, NS_BIT);
1617 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1618 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1619 set_flow_mode(&desc[idx], S_DIN_to_AES);
1620 idx++;
1621
1622 /* process one zero block to generate hkey */
1623 hw_desc_init(&desc[idx]);
1624 set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1625 set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
1626 NS_BIT, 0);
1627 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1628 idx++;
1629
1630 /* Memory Barrier */
1631 hw_desc_init(&desc[idx]);
1632 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1633 set_dout_no_dma(&desc[idx], 0, 0, 1);
1634 idx++;
1635
1636 /* Load GHASH subkey */
1637 hw_desc_init(&desc[idx]);
1638 set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
1639 AES_BLOCK_SIZE, NS_BIT);
1640 set_dout_no_dma(&desc[idx], 0, 0, 1);
1641 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1642 set_aes_not_hash_mode(&desc[idx]);
1643 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1644 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1645 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1646 idx++;
1647
1648 /* Configure the Hash Engine to work with GHASH.
1649 * Since it was not possible to extend the HASH submodes to add GHASH,
1650 * the following command is necessary in order to
1651 * select GHASH (according to the HW designers).
1652 */
1653 hw_desc_init(&desc[idx]);
1654 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1655 set_dout_no_dma(&desc[idx], 0, 0, 1);
1656 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1657 set_aes_not_hash_mode(&desc[idx]);
1658 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1659 set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
1660 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1661 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1662 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1663 idx++;
1664
1665 /* Load GHASH initial STATE (which is 0). (for any hash there is an
1666 * initial state)
1667 */
1668 hw_desc_init(&desc[idx]);
1669 set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1670 set_dout_no_dma(&desc[idx], 0, 0, 1);
1671 set_flow_mode(&desc[idx], S_DIN_to_HASH);
1672 set_aes_not_hash_mode(&desc[idx]);
1673 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1674 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1675 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1676 idx++;
1677
1678 *seq_size = idx;
1679}
1680
1681static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
1682 unsigned int *seq_size)
1683{
1684 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1685 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1686 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1687 unsigned int idx = *seq_size;
1688
1689 /* load key to AES */
1690 hw_desc_init(&desc[idx]);
1691 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1692 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1693 set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1694 ctx->enc_keylen, NS_BIT);
1695 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1696 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1697 set_flow_mode(&desc[idx], S_DIN_to_AES);
1698 idx++;
1699
1700 if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
1701 /* load the initial AES/CTR value, incremented by 2 */
1702 hw_desc_init(&desc[idx]);
1703 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1704 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1705 set_din_type(&desc[idx], DMA_DLLI,
1706 req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
1707 NS_BIT);
1708 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1709 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1710 set_flow_mode(&desc[idx], S_DIN_to_AES);
1711 idx++;
1712 }
1713
1714 *seq_size = idx;
1715}
1716
1717static void cc_proc_gcm_result(struct aead_request *req,
1718 struct cc_hw_desc desc[],
1719 unsigned int *seq_size)
1720{
1721 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1722 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1723 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1724 dma_addr_t mac_result;
1725 unsigned int idx = *seq_size;
1726
1727 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1728 mac_result = req_ctx->mac_buf_dma_addr;
1729 } else { /* Encrypt */
1730 mac_result = req_ctx->icv_dma_addr;
1731 }
1732
1733 /* process(ghash) gcm_block_len */
1734 hw_desc_init(&desc[idx]);
1735 set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
1736 AES_BLOCK_SIZE, NS_BIT);
1737 set_flow_mode(&desc[idx], DIN_HASH);
1738 idx++;
1739
1740 /* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */
1741 hw_desc_init(&desc[idx]);
1742 set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1743 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1744 set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
1745 NS_BIT, 0);
1746 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1747 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1748 set_aes_not_hash_mode(&desc[idx]);
1749
1750 idx++;
1751
1752 /* load the initial AES/CTR value, incremented by 1 */
1753 hw_desc_init(&desc[idx]);
1754 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1755 set_key_size_aes(&desc[idx], ctx->enc_keylen);
1756 set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
1757 AES_BLOCK_SIZE, NS_BIT);
1758 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1759 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1760 set_flow_mode(&desc[idx], S_DIN_to_AES);
1761 idx++;
1762
1763 /* Memory Barrier */
1764 hw_desc_init(&desc[idx]);
1765 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1766 set_dout_no_dma(&desc[idx], 0, 0, 1);
1767 idx++;
1768
1769 /* process GCTR on stored GHASH and store MAC in mac_state */
1770 hw_desc_init(&desc[idx]);
1771 set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1772 set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1773 AES_BLOCK_SIZE, NS_BIT);
1774 set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1775 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1776 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1777 idx++;
1778
1779 *seq_size = idx;
1780}
1781
1782static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
1783 unsigned int *seq_size)
1784{
1785 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1786 unsigned int cipher_flow_mode;
1787
1788 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1789 cipher_flow_mode = AES_and_HASH;
1790 } else { /* Encrypt */
1791 cipher_flow_mode = AES_to_HASH_and_DOUT;
1792 }
1793
1794 /* In RFC 4543 there is no data to encrypt; just copy data from src to dst. */
1795 if (req_ctx->plaintext_authenticate_only) {
1796 cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
1797 cc_set_ghash_desc(req, desc, seq_size);
1798 /* process(ghash) assoc data */
1799 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1800 cc_set_gctr_desc(req, desc, seq_size);
1801 cc_proc_gcm_result(req, desc, seq_size);
1802 return 0;
1803 }
1804
1805 /* for GCM and RFC 4106 */
1806 cc_set_ghash_desc(req, desc, seq_size);
1807 /* process(ghash) assoc data */
1808 if (req->assoclen > 0)
1809 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1810 cc_set_gctr_desc(req, desc, seq_size);
1811 /* process(gctr+ghash) */
1812 if (req_ctx->cryptlen)
1813 cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
1814 cc_proc_gcm_result(req, desc, seq_size);
1815
1816 return 0;
1817}
1818
1819static int config_gcm_context(struct aead_request *req)
1820{
1821 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1822 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1823 struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1824 struct device *dev = drvdata_to_dev(ctx->drvdata);
1825
1826 unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1827 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1828 req->cryptlen :
1829 (req->cryptlen - ctx->authsize);
1830 __be32 counter = cpu_to_be32(2);
1831
1832 dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
1833 __func__, cryptlen, req->assoclen, ctx->authsize);
1834
1835 memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1836
1837 memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1838
1839 memcpy(req->iv + 12, &counter, 4);
1840 memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
1841
1842 counter = cpu_to_be32(1);
1843 memcpy(req->iv + 12, &counter, 4);
1844 memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
1845
1846 if (!req_ctx->plaintext_authenticate_only) {
1847 __be64 temp64;
1848
1849 temp64 = cpu_to_be64(req->assoclen * 8);
1850 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1851 temp64 = cpu_to_be64(cryptlen * 8);
1852 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1853 } else {
1854 /* RFC 4543: all data (AAD, IV, plaintext) is considered additional
1855 * data, i.e. nothing is encrypted.
1856 */
1857 __be64 temp64;
1858
1859 temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
1860 cryptlen) * 8);
1861 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1862 temp64 = 0;
1863 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1864 }
1865
1866 return 0;
1867}
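The gcm_len_block prepared above is the final GHASH input: len(A) and len(C) as 64-bit big-endian bit counts (with len(C) forced to zero and everything counted as AAD in the RFC 4543 case). A portable sketch of the same block:

#include <stdint.h>

/* Fill the 16-byte GHASH length block: len(A) || len(C), in bits, big endian. */
static void gcm_fill_len_block(uint8_t len_block[16], uint64_t assoclen,
			       uint64_t cryptlen)
{
	uint64_t bits = assoclen * 8;
	int i;

	for (i = 0; i < 8; i++)
		len_block[i] = (uint8_t)(bits >> (56 - 8 * i));
	bits = cryptlen * 8;
	for (i = 0; i < 8; i++)
		len_block[8 + i] = (uint8_t)(bits >> (56 - 8 * i));
}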
1868
1869static void cc_proc_rfc4_gcm(struct aead_request *req)
1870{
1871 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1872 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1873 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1874
1875 memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
1876 ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1877 memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
1878 GCM_BLOCK_RFC4_IV_SIZE);
1879 req->iv = areq_ctx->ctr_iv;
1880 req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
1881}
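For RFC 4106/4543 the effective 12-byte GCM IV is the 4-byte salt from setkey followed by the 8-byte per-request IV; config_gcm_context() then appends a 32-bit block counter of 1 (for the final tag) or 2 (for the first data block), which is what gcm_iv_inc1/gcm_iv_inc2 hold. A minimal sketch of that layout (offsets assumed to match the GCM_BLOCK_RFC4_* constants used above):

#include <stdint.h>
#include <string.h>

/* Assemble a 16-byte RFC 4106 counter block: salt || IV || 32-bit counter. */
static void rfc4106_build_ctr_iv(uint8_t ctr_iv[16], const uint8_t salt[4],
				 const uint8_t iv[8], uint32_t counter)
{
	memcpy(ctr_iv, salt, 4);		/* nonce[0..3]: salt from setkey */
	memcpy(ctr_iv + 4, iv, 8);		/* nonce[4..11]: per-request IV */
	ctr_iv[12] = (uint8_t)(counter >> 24);	/* big-endian block counter */
	ctr_iv[13] = (uint8_t)(counter >> 16);
	ctr_iv[14] = (uint8_t)(counter >> 8);
	ctr_iv[15] = (uint8_t)counter;
}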
1882
1883static int cc_proc_aead(struct aead_request *req,
1884 enum drv_crypto_direction direct)
1885{
1886 int rc = 0;
1887 int seq_len = 0;
1888 struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
1889 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1890 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1891 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1892 struct device *dev = drvdata_to_dev(ctx->drvdata);
1893 struct cc_crypto_req cc_req = {};
1894
1895 dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptlen=%d\n",
1896 ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
1897 ctx, req, req->iv, sg_virt(req->src), req->src->offset,
1898 sg_virt(req->dst), req->dst->offset, req->cryptlen);
1899
1900 /* STAT_PHASE_0: Init and sanity checks */
1901
1902 /* Check data length according to mode */
1903 if (validate_data_size(ctx, direct, req)) {
1904 dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
1905 req->cryptlen, req->assoclen);
1906 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
1907 return -EINVAL;
1908 }
1909
1910 /* Setup request structure */
1911 cc_req.user_cb = (void *)cc_aead_complete;
1912 cc_req.user_arg = (void *)req;
1913
1914 /* Setup request context */
1915 areq_ctx->gen_ctx.op_type = direct;
1916 areq_ctx->req_authsize = ctx->authsize;
1917 areq_ctx->cipher_mode = ctx->cipher_mode;
1918
1919 /* STAT_PHASE_1: Map buffers */
1920
1921 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1922 /* Build CTR IV - Copy nonce from last 4 bytes in
1923 * CTR key to first 4 bytes in CTR IV
1924 */
1925 memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
1926 CTR_RFC3686_NONCE_SIZE);
1927 if (!areq_ctx->backup_giv) /* User-provided, not generated, IV */
1928 memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
1929 req->iv, CTR_RFC3686_IV_SIZE);
1930 /* Initialize counter portion of counter block */
1931 *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
1932 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1933
1934 /* Replace with counter iv */
1935 req->iv = areq_ctx->ctr_iv;
1936 areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
1937 } else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
1938 (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
1939 areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
1940 if (areq_ctx->ctr_iv != req->iv) {
1941 memcpy(areq_ctx->ctr_iv, req->iv,
1942 crypto_aead_ivsize(tfm));
1943 req->iv = areq_ctx->ctr_iv;
1944 }
1945 } else {
1946 areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
1947 }
1948
1949 if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1950 rc = config_ccm_adata(req);
1951 if (rc) {
1952 dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
1953 rc);
1954 goto exit;
1955 }
1956 } else {
1957 areq_ctx->ccm_hdr_size = ccm_header_size_null;
1958 }
1959
1960 if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1961 rc = config_gcm_context(req);
1962 if (rc) {
1963 dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
1964 rc);
1965 goto exit;
1966 }
1967 }
1968
1969 rc = cc_map_aead_request(ctx->drvdata, req);
1970 if (rc) {
1971 dev_err(dev, "map_request() failed\n");
1972 goto exit;
1973 }
1974
1975 /* do we need to generate IV? */
1976 if (areq_ctx->backup_giv) {
1977 /* set the DMA mapped IV address*/
1978 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1979 cc_req.ivgen_dma_addr[0] =
1980 areq_ctx->gen_ctx.iv_dma_addr +
1981 CTR_RFC3686_NONCE_SIZE;
1982 cc_req.ivgen_dma_addr_len = 1;
1983 } else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1984 /* In CCM, the IV needs to exist both inside B0 and
1985 * inside the counter. It is also copied to iv_dma_addr
1986 * for other reasons (like returning it to the user).
1987 * So, use 3 (identical) IV outputs.
1988 */
1989 cc_req.ivgen_dma_addr[0] =
1990 areq_ctx->gen_ctx.iv_dma_addr +
1991 CCM_BLOCK_IV_OFFSET;
1992 cc_req.ivgen_dma_addr[1] =
1993 sg_dma_address(&areq_ctx->ccm_adata_sg) +
1994 CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
1995 cc_req.ivgen_dma_addr[2] =
1996 sg_dma_address(&areq_ctx->ccm_adata_sg) +
1997 CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
1998 cc_req.ivgen_dma_addr_len = 3;
1999 } else {
2000 cc_req.ivgen_dma_addr[0] =
2001 areq_ctx->gen_ctx.iv_dma_addr;
2002 cc_req.ivgen_dma_addr_len = 1;
2003 }
2004
2005 /* set the IV size (8/16 B long)*/
2006 cc_req.ivgen_size = crypto_aead_ivsize(tfm);
2007 }
2008
2009 /* STAT_PHASE_2: Create sequence */
2010
2011 /* Load MLLI tables to SRAM if necessary */
2012 cc_mlli_to_sram(req, desc, &seq_len);
2013
2014 /*TODO: move seq len by reference */
2015 switch (ctx->auth_mode) {
2016 case DRV_HASH_SHA1:
2017 case DRV_HASH_SHA256:
2018 cc_hmac_authenc(req, desc, &seq_len);
2019 break;
2020 case DRV_HASH_XCBC_MAC:
2021 cc_xcbc_authenc(req, desc, &seq_len);
2022 break;
2023 case DRV_HASH_NULL:
2024 if (ctx->cipher_mode == DRV_CIPHER_CCM)
2025 cc_ccm(req, desc, &seq_len);
2026 if (ctx->cipher_mode == DRV_CIPHER_GCTR)
2027 cc_gcm(req, desc, &seq_len);
2028 break;
2029 default:
2030 dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
2031 cc_unmap_aead_request(dev, req);
2032 rc = -ENOTSUPP;
2033 goto exit;
2034 }
2035
2036 /* STAT_PHASE_3: Lock HW and push sequence */
2037
2038 rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
2039
2040 if (rc != -EINPROGRESS && rc != -EBUSY) {
2041 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
2042 cc_unmap_aead_request(dev, req);
2043 }
2044
2045exit:
2046 return rc;
2047}
2048
2049static int cc_aead_encrypt(struct aead_request *req)
2050{
2051 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2052 int rc;
2053
2054 /* No generated IV required */
2055 areq_ctx->backup_iv = req->iv;
2056 areq_ctx->backup_giv = NULL;
2057 areq_ctx->is_gcm4543 = false;
2058
2059 areq_ctx->plaintext_authenticate_only = false;
2060
2061 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2062 if (rc != -EINPROGRESS && rc != -EBUSY)
2063 req->iv = areq_ctx->backup_iv;
2064
2065 return rc;
2066}
2067
2068static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
2069{
2070 /* Very similar to cc_aead_encrypt() above. */
2071
2072 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2073 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2074 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2075 struct device *dev = drvdata_to_dev(ctx->drvdata);
2076 int rc = -EINVAL;
2077
2078 if (!valid_assoclen(req)) {
2079 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2080 goto out;
2081 }
2082
2083 /* No generated IV required */
2084 areq_ctx->backup_iv = req->iv;
2085 areq_ctx->backup_giv = NULL;
2086 areq_ctx->is_gcm4543 = true;
2087
2088 cc_proc_rfc4309_ccm(req);
2089
2090 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2091 if (rc != -EINPROGRESS && rc != -EBUSY)
2092 req->iv = areq_ctx->backup_iv;
2093out:
2094 return rc;
2095}
2096
2097static int cc_aead_decrypt(struct aead_request *req)
2098{
2099 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2100 int rc;
2101
2102 /* No generated IV required */
2103 areq_ctx->backup_iv = req->iv;
2104 areq_ctx->backup_giv = NULL;
2105 areq_ctx->is_gcm4543 = false;
2106
2107 areq_ctx->plaintext_authenticate_only = false;
2108
2109 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2110 if (rc != -EINPROGRESS && rc != -EBUSY)
2111 req->iv = areq_ctx->backup_iv;
2112
2113 return rc;
2114}
2115
2116static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
2117{
2118 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2119 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2120 struct device *dev = drvdata_to_dev(ctx->drvdata);
2121 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2122 int rc = -EINVAL;
2123
2124 if (!valid_assoclen(req)) {
2125 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2126 goto out;
2127 }
2128
2129 /* No generated IV required */
2130 areq_ctx->backup_iv = req->iv;
2131 areq_ctx->backup_giv = NULL;
2132
2133 areq_ctx->is_gcm4543 = true;
2134 cc_proc_rfc4309_ccm(req);
2135
2136 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2137 if (rc != -EINPROGRESS && rc != -EBUSY)
2138 req->iv = areq_ctx->backup_iv;
2139
2140out:
2141 return rc;
2142}
2143
2144static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2145 unsigned int keylen)
2146{
2147 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2148 struct device *dev = drvdata_to_dev(ctx->drvdata);
2149
2150 dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
2151
2152 if (keylen < 4)
2153 return -EINVAL;
2154
2155 keylen -= 4;
2156 memcpy(ctx->ctr_nonce, key + keylen, 4);
2157
2158 return cc_aead_setkey(tfm, key, keylen);
2159}
2160
2161static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2162 unsigned int keylen)
2163{
2164 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2165 struct device *dev = drvdata_to_dev(ctx->drvdata);
2166
2167 dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
2168
2169 if (keylen < 4)
2170 return -EINVAL;
2171
2172 keylen -= 4;
2173 memcpy(ctx->ctr_nonce, key + keylen, 4);
2174
2175 return cc_aead_setkey(tfm, key, keylen);
2176}
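/*
 * Both rfc4106 and rfc4543 hand in their key as [ AES key | 4-byte salt ]
 * (per RFC 4106, section 8.1), which is why the two setkey helpers above
 * peel the trailing 4 bytes into ctx->ctr_nonce before passing the rest to
 * cc_aead_setkey().
 */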
2177
2178static int cc_gcm_setauthsize(struct crypto_aead *authenc,
2179 unsigned int authsize)
2180{
2181 switch (authsize) {
2182 case 4:
2183 case 8:
2184 case 12:
2185 case 13:
2186 case 14:
2187 case 15:
2188 case 16:
2189 break;
2190 default:
2191 return -EINVAL;
2192 }
2193
2194 return cc_aead_setauthsize(authenc, authsize);
2195}
2196
2197static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
2198 unsigned int authsize)
2199{
2200 struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2201 struct device *dev = drvdata_to_dev(ctx->drvdata);
2202
2203 dev_dbg(dev, "authsize %d\n", authsize);
2204
2205 switch (authsize) {
2206 case 8:
2207 case 12:
2208 case 16:
2209 break;
2210 default:
2211 return -EINVAL;
2212 }
2213
2214 return cc_aead_setauthsize(authenc, authsize);
2215}
2216
2217static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
2218 unsigned int authsize)
2219{
2220 struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2221 struct device *dev = drvdata_to_dev(ctx->drvdata);
2222
2223 dev_dbg(dev, "authsize %d\n", authsize);
2224
2225 if (authsize != 16)
2226 return -EINVAL;
2227
2228 return cc_aead_setauthsize(authenc, authsize);
2229}
2230
2231static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
2232{
2233 /* Very similar to cc_aead_encrypt() above. */
2234
2235 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2236 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2237 struct device *dev = drvdata_to_dev(ctx->drvdata);
2238 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2239 int rc = -EINVAL;
2240
2241 if (!valid_assoclen(req)) {
2242 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2243 goto out;
2244 }
2245
2246 /* No generated IV required */
2247 areq_ctx->backup_iv = req->iv;
2248 areq_ctx->backup_giv = NULL;
2249
2250 areq_ctx->plaintext_authenticate_only = false;
2251
2252 cc_proc_rfc4_gcm(req);
2253 areq_ctx->is_gcm4543 = true;
2254
2255 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2256 if (rc != -EINPROGRESS && rc != -EBUSY)
2257 req->iv = areq_ctx->backup_iv;
2258out:
2259 return rc;
2260}
2261
2262static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
2263{
2264 /* Very similar to cc_aead_encrypt() above. */
2265
2266 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2267 int rc;
2268
2269 	//plaintext is not encrypted with rfc4543
2270 areq_ctx->plaintext_authenticate_only = true;
2271
2272 /* No generated IV required */
2273 areq_ctx->backup_iv = req->iv;
2274 areq_ctx->backup_giv = NULL;
2275
2276 cc_proc_rfc4_gcm(req);
2277 areq_ctx->is_gcm4543 = true;
2278
2279 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2280 if (rc != -EINPROGRESS && rc != -EBUSY)
2281 req->iv = areq_ctx->backup_iv;
2282
2283 return rc;
2284}
2285
2286static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
2287{
2288 /* Very similar to cc_aead_decrypt() above. */
2289
2290 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2291 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2292 struct device *dev = drvdata_to_dev(ctx->drvdata);
2293 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2294 int rc = -EINVAL;
2295
2296 if (!valid_assoclen(req)) {
2297 dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2298 goto out;
2299 }
2300
2301 /* No generated IV required */
2302 areq_ctx->backup_iv = req->iv;
2303 areq_ctx->backup_giv = NULL;
2304
2305 areq_ctx->plaintext_authenticate_only = false;
2306
2307 cc_proc_rfc4_gcm(req);
2308 areq_ctx->is_gcm4543 = true;
2309
2310 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2311 if (rc != -EINPROGRESS && rc != -EBUSY)
2312 req->iv = areq_ctx->backup_iv;
2313out:
2314 return rc;
2315}
2316
2317static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
2318{
2319 /* Very similar to cc_aead_decrypt() above. */
2320
2321 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2322 int rc;
2323
2324 	//plaintext is not decrypted with rfc4543
2325 areq_ctx->plaintext_authenticate_only = true;
2326
2327 /* No generated IV required */
2328 areq_ctx->backup_iv = req->iv;
2329 areq_ctx->backup_giv = NULL;
2330
2331 cc_proc_rfc4_gcm(req);
2332 areq_ctx->is_gcm4543 = true;
2333
2334 rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2335 if (rc != -EINPROGRESS && rc != -EBUSY)
2336 req->iv = areq_ctx->backup_iv;
2337
2338 return rc;
2339}
2340
2341/* aead alg */
2342static struct cc_alg_template aead_algs[] = {
2343 {
2344 .name = "authenc(hmac(sha1),cbc(aes))",
2345 .driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
2346 .blocksize = AES_BLOCK_SIZE,
2347 .type = CRYPTO_ALG_TYPE_AEAD,
2348 .template_aead = {
2349 .setkey = cc_aead_setkey,
2350 .setauthsize = cc_aead_setauthsize,
2351 .encrypt = cc_aead_encrypt,
2352 .decrypt = cc_aead_decrypt,
2353 .init = cc_aead_init,
2354 .exit = cc_aead_exit,
2355 .ivsize = AES_BLOCK_SIZE,
2356 .maxauthsize = SHA1_DIGEST_SIZE,
2357 },
2358 .cipher_mode = DRV_CIPHER_CBC,
2359 .flow_mode = S_DIN_to_AES,
2360 .auth_mode = DRV_HASH_SHA1,
2361 .min_hw_rev = CC_HW_REV_630,
2362 },
2363 {
2364 .name = "authenc(hmac(sha1),cbc(des3_ede))",
2365 .driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
2366 .blocksize = DES3_EDE_BLOCK_SIZE,
2367 .type = CRYPTO_ALG_TYPE_AEAD,
2368 .template_aead = {
2369 .setkey = cc_aead_setkey,
2370 .setauthsize = cc_aead_setauthsize,
2371 .encrypt = cc_aead_encrypt,
2372 .decrypt = cc_aead_decrypt,
2373 .init = cc_aead_init,
2374 .exit = cc_aead_exit,
2375 .ivsize = DES3_EDE_BLOCK_SIZE,
2376 .maxauthsize = SHA1_DIGEST_SIZE,
2377 },
2378 .cipher_mode = DRV_CIPHER_CBC,
2379 .flow_mode = S_DIN_to_DES,
2380 .auth_mode = DRV_HASH_SHA1,
2381 .min_hw_rev = CC_HW_REV_630,
2382 },
2383 {
2384 .name = "authenc(hmac(sha256),cbc(aes))",
2385 .driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
2386 .blocksize = AES_BLOCK_SIZE,
2387 .type = CRYPTO_ALG_TYPE_AEAD,
2388 .template_aead = {
2389 .setkey = cc_aead_setkey,
2390 .setauthsize = cc_aead_setauthsize,
2391 .encrypt = cc_aead_encrypt,
2392 .decrypt = cc_aead_decrypt,
2393 .init = cc_aead_init,
2394 .exit = cc_aead_exit,
2395 .ivsize = AES_BLOCK_SIZE,
2396 .maxauthsize = SHA256_DIGEST_SIZE,
2397 },
2398 .cipher_mode = DRV_CIPHER_CBC,
2399 .flow_mode = S_DIN_to_AES,
2400 .auth_mode = DRV_HASH_SHA256,
2401 .min_hw_rev = CC_HW_REV_630,
2402 },
2403 {
2404 .name = "authenc(hmac(sha256),cbc(des3_ede))",
2405 .driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
2406 .blocksize = DES3_EDE_BLOCK_SIZE,
2407 .type = CRYPTO_ALG_TYPE_AEAD,
2408 .template_aead = {
2409 .setkey = cc_aead_setkey,
2410 .setauthsize = cc_aead_setauthsize,
2411 .encrypt = cc_aead_encrypt,
2412 .decrypt = cc_aead_decrypt,
2413 .init = cc_aead_init,
2414 .exit = cc_aead_exit,
2415 .ivsize = DES3_EDE_BLOCK_SIZE,
2416 .maxauthsize = SHA256_DIGEST_SIZE,
2417 },
2418 .cipher_mode = DRV_CIPHER_CBC,
2419 .flow_mode = S_DIN_to_DES,
2420 .auth_mode = DRV_HASH_SHA256,
2421 .min_hw_rev = CC_HW_REV_630,
2422 },
2423 {
2424 .name = "authenc(xcbc(aes),cbc(aes))",
2425 .driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
2426 .blocksize = AES_BLOCK_SIZE,
2427 .type = CRYPTO_ALG_TYPE_AEAD,
2428 .template_aead = {
2429 .setkey = cc_aead_setkey,
2430 .setauthsize = cc_aead_setauthsize,
2431 .encrypt = cc_aead_encrypt,
2432 .decrypt = cc_aead_decrypt,
2433 .init = cc_aead_init,
2434 .exit = cc_aead_exit,
2435 .ivsize = AES_BLOCK_SIZE,
2436 .maxauthsize = AES_BLOCK_SIZE,
2437 },
2438 .cipher_mode = DRV_CIPHER_CBC,
2439 .flow_mode = S_DIN_to_AES,
2440 .auth_mode = DRV_HASH_XCBC_MAC,
2441 .min_hw_rev = CC_HW_REV_630,
2442 },
2443 {
2444 .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
2445 .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
2446 .blocksize = 1,
2447 .type = CRYPTO_ALG_TYPE_AEAD,
2448 .template_aead = {
2449 .setkey = cc_aead_setkey,
2450 .setauthsize = cc_aead_setauthsize,
2451 .encrypt = cc_aead_encrypt,
2452 .decrypt = cc_aead_decrypt,
2453 .init = cc_aead_init,
2454 .exit = cc_aead_exit,
2455 .ivsize = CTR_RFC3686_IV_SIZE,
2456 .maxauthsize = SHA1_DIGEST_SIZE,
2457 },
2458 .cipher_mode = DRV_CIPHER_CTR,
2459 .flow_mode = S_DIN_to_AES,
2460 .auth_mode = DRV_HASH_SHA1,
2461 .min_hw_rev = CC_HW_REV_630,
2462 },
2463 {
2464 .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
2465 .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
2466 .blocksize = 1,
2467 .type = CRYPTO_ALG_TYPE_AEAD,
2468 .template_aead = {
2469 .setkey = cc_aead_setkey,
2470 .setauthsize = cc_aead_setauthsize,
2471 .encrypt = cc_aead_encrypt,
2472 .decrypt = cc_aead_decrypt,
2473 .init = cc_aead_init,
2474 .exit = cc_aead_exit,
2475 .ivsize = CTR_RFC3686_IV_SIZE,
2476 .maxauthsize = SHA256_DIGEST_SIZE,
2477 },
2478 .cipher_mode = DRV_CIPHER_CTR,
2479 .flow_mode = S_DIN_to_AES,
2480 .auth_mode = DRV_HASH_SHA256,
2481 .min_hw_rev = CC_HW_REV_630,
2482 },
2483 {
2484 .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
2485 .driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
2486 .blocksize = 1,
2487 .type = CRYPTO_ALG_TYPE_AEAD,
2488 .template_aead = {
2489 .setkey = cc_aead_setkey,
2490 .setauthsize = cc_aead_setauthsize,
2491 .encrypt = cc_aead_encrypt,
2492 .decrypt = cc_aead_decrypt,
2493 .init = cc_aead_init,
2494 .exit = cc_aead_exit,
2495 .ivsize = CTR_RFC3686_IV_SIZE,
2496 .maxauthsize = AES_BLOCK_SIZE,
2497 },
2498 .cipher_mode = DRV_CIPHER_CTR,
2499 .flow_mode = S_DIN_to_AES,
2500 .auth_mode = DRV_HASH_XCBC_MAC,
2501 .min_hw_rev = CC_HW_REV_630,
2502 },
2503 {
2504 .name = "ccm(aes)",
2505 .driver_name = "ccm-aes-ccree",
2506 .blocksize = 1,
2507 .type = CRYPTO_ALG_TYPE_AEAD,
2508 .template_aead = {
2509 .setkey = cc_aead_setkey,
2510 .setauthsize = cc_ccm_setauthsize,
2511 .encrypt = cc_aead_encrypt,
2512 .decrypt = cc_aead_decrypt,
2513 .init = cc_aead_init,
2514 .exit = cc_aead_exit,
2515 .ivsize = AES_BLOCK_SIZE,
2516 .maxauthsize = AES_BLOCK_SIZE,
2517 },
2518 .cipher_mode = DRV_CIPHER_CCM,
2519 .flow_mode = S_DIN_to_AES,
2520 .auth_mode = DRV_HASH_NULL,
2521 .min_hw_rev = CC_HW_REV_630,
2522 },
2523 {
2524 .name = "rfc4309(ccm(aes))",
2525 .driver_name = "rfc4309-ccm-aes-ccree",
2526 .blocksize = 1,
2527 .type = CRYPTO_ALG_TYPE_AEAD,
2528 .template_aead = {
2529 .setkey = cc_rfc4309_ccm_setkey,
2530 .setauthsize = cc_rfc4309_ccm_setauthsize,
2531 .encrypt = cc_rfc4309_ccm_encrypt,
2532 .decrypt = cc_rfc4309_ccm_decrypt,
2533 .init = cc_aead_init,
2534 .exit = cc_aead_exit,
2535 .ivsize = CCM_BLOCK_IV_SIZE,
2536 .maxauthsize = AES_BLOCK_SIZE,
2537 },
2538 .cipher_mode = DRV_CIPHER_CCM,
2539 .flow_mode = S_DIN_to_AES,
2540 .auth_mode = DRV_HASH_NULL,
2541 .min_hw_rev = CC_HW_REV_630,
2542 },
2543 {
2544 .name = "gcm(aes)",
2545 .driver_name = "gcm-aes-ccree",
2546 .blocksize = 1,
2547 .type = CRYPTO_ALG_TYPE_AEAD,
2548 .template_aead = {
2549 .setkey = cc_aead_setkey,
2550 .setauthsize = cc_gcm_setauthsize,
2551 .encrypt = cc_aead_encrypt,
2552 .decrypt = cc_aead_decrypt,
2553 .init = cc_aead_init,
2554 .exit = cc_aead_exit,
2555 .ivsize = 12,
2556 .maxauthsize = AES_BLOCK_SIZE,
2557 },
2558 .cipher_mode = DRV_CIPHER_GCTR,
2559 .flow_mode = S_DIN_to_AES,
2560 .auth_mode = DRV_HASH_NULL,
2561 .min_hw_rev = CC_HW_REV_630,
2562 },
2563 {
2564 .name = "rfc4106(gcm(aes))",
2565 .driver_name = "rfc4106-gcm-aes-ccree",
2566 .blocksize = 1,
2567 .type = CRYPTO_ALG_TYPE_AEAD,
2568 .template_aead = {
2569 .setkey = cc_rfc4106_gcm_setkey,
2570 .setauthsize = cc_rfc4106_gcm_setauthsize,
2571 .encrypt = cc_rfc4106_gcm_encrypt,
2572 .decrypt = cc_rfc4106_gcm_decrypt,
2573 .init = cc_aead_init,
2574 .exit = cc_aead_exit,
2575 .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2576 .maxauthsize = AES_BLOCK_SIZE,
2577 },
2578 .cipher_mode = DRV_CIPHER_GCTR,
2579 .flow_mode = S_DIN_to_AES,
2580 .auth_mode = DRV_HASH_NULL,
2581 .min_hw_rev = CC_HW_REV_630,
2582 },
2583 {
2584 .name = "rfc4543(gcm(aes))",
2585 .driver_name = "rfc4543-gcm-aes-ccree",
2586 .blocksize = 1,
2587 .type = CRYPTO_ALG_TYPE_AEAD,
2588 .template_aead = {
2589 .setkey = cc_rfc4543_gcm_setkey,
2590 .setauthsize = cc_rfc4543_gcm_setauthsize,
2591 .encrypt = cc_rfc4543_gcm_encrypt,
2592 .decrypt = cc_rfc4543_gcm_decrypt,
2593 .init = cc_aead_init,
2594 .exit = cc_aead_exit,
2595 .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2596 .maxauthsize = AES_BLOCK_SIZE,
2597 },
2598 .cipher_mode = DRV_CIPHER_GCTR,
2599 .flow_mode = S_DIN_to_AES,
2600 .auth_mode = DRV_HASH_NULL,
2601 .min_hw_rev = CC_HW_REV_630,
2602 },
2603};
2604
2605static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
2606 struct device *dev)
2607{
2608 struct cc_crypto_alg *t_alg;
2609 struct aead_alg *alg;
2610
2611 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
2612 if (!t_alg)
2613 return ERR_PTR(-ENOMEM);
2614
2615 alg = &tmpl->template_aead;
2616
2617 snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
2618 snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2619 tmpl->driver_name);
2620 alg->base.cra_module = THIS_MODULE;
2621 alg->base.cra_priority = CC_CRA_PRIO;
2622
2623 alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
2624 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2625 tmpl->type;
2626 alg->init = cc_aead_init;
2627 alg->exit = cc_aead_exit;
2628
2629 t_alg->aead_alg = *alg;
2630
2631 t_alg->cipher_mode = tmpl->cipher_mode;
2632 t_alg->flow_mode = tmpl->flow_mode;
2633 t_alg->auth_mode = tmpl->auth_mode;
2634
2635 return t_alg;
2636}
2637
2638int cc_aead_free(struct cc_drvdata *drvdata)
2639{
2640 struct cc_crypto_alg *t_alg, *n;
2641 struct cc_aead_handle *aead_handle =
2642 (struct cc_aead_handle *)drvdata->aead_handle;
2643
2644 if (aead_handle) {
2645 /* Remove registered algs */
2646 list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
2647 entry) {
2648 crypto_unregister_aead(&t_alg->aead_alg);
2649 list_del(&t_alg->entry);
2650 kfree(t_alg);
2651 }
2652 kfree(aead_handle);
2653 drvdata->aead_handle = NULL;
2654 }
2655
2656 return 0;
2657}
2658
2659int cc_aead_alloc(struct cc_drvdata *drvdata)
2660{
2661 struct cc_aead_handle *aead_handle;
2662 struct cc_crypto_alg *t_alg;
2663 int rc = -ENOMEM;
2664 int alg;
2665 struct device *dev = drvdata_to_dev(drvdata);
2666
2667 aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
2668 if (!aead_handle) {
2669 rc = -ENOMEM;
2670 goto fail0;
2671 }
2672
2673 INIT_LIST_HEAD(&aead_handle->aead_list);
2674 drvdata->aead_handle = aead_handle;
2675
2676 aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
2677 MAX_HMAC_DIGEST_SIZE);
2678
2679 if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
2680 dev_err(dev, "SRAM pool exhausted\n");
2681 rc = -ENOMEM;
2682 goto fail1;
2683 }
2684
2685 /* Linux crypto */
2686 for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
2687 if (aead_algs[alg].min_hw_rev > drvdata->hw_rev)
2688 continue;
2689
2690 t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
2691 if (IS_ERR(t_alg)) {
2692 rc = PTR_ERR(t_alg);
2693 dev_err(dev, "%s alg allocation failed\n",
2694 aead_algs[alg].driver_name);
2695 goto fail1;
2696 }
2697 t_alg->drvdata = drvdata;
2698 rc = crypto_register_aead(&t_alg->aead_alg);
2699 if (rc) {
2700 dev_err(dev, "%s alg registration failed\n",
2701 t_alg->aead_alg.base.cra_driver_name);
2702 goto fail2;
2703 } else {
2704 list_add_tail(&t_alg->entry, &aead_handle->aead_list);
2705 dev_dbg(dev, "Registered %s\n",
2706 t_alg->aead_alg.base.cra_driver_name);
2707 }
2708 }
2709
2710 return 0;
2711
2712fail2:
2713 kfree(t_alg);
2714fail1:
2715 cc_aead_free(drvdata);
2716fail0:
2717 return rc;
2718}
diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h
new file mode 100644
index 000000000000..5edf3b351fa4
--- /dev/null
+++ b/drivers/crypto/ccree/cc_aead.h
@@ -0,0 +1,109 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4/* \file cc_aead.h
5 * ARM CryptoCell AEAD Crypto API
6 */
7
8#ifndef __CC_AEAD_H__
9#define __CC_AEAD_H__
10
11#include <linux/kernel.h>
12#include <crypto/algapi.h>
13#include <crypto/ctr.h>
14
15/* mac_cmp - HW writes 8 B but all bytes hold the same value */
16#define ICV_CMP_SIZE 8
17#define CCM_CONFIG_BUF_SIZE (AES_BLOCK_SIZE * 3)
18#define MAX_MAC_SIZE SHA256_DIGEST_SIZE
19
20/* defines for AES GCM configuration buffer */
21#define GCM_BLOCK_LEN_SIZE 8
22
23#define GCM_BLOCK_RFC4_IV_OFFSET 4
24#define GCM_BLOCK_RFC4_IV_SIZE 8 /* IV size for the rfc4106/rfc4543 modes */
25#define GCM_BLOCK_RFC4_NONCE_OFFSET 0
26#define GCM_BLOCK_RFC4_NONCE_SIZE 4
27
28/* Offsets into AES CCM configuration buffer */
29#define CCM_B0_OFFSET 0
30#define CCM_A0_OFFSET 16
31#define CCM_CTR_COUNT_0_OFFSET 32
32/* CCM B0 and CTR_COUNT constants. */
33#define CCM_BLOCK_NONCE_OFFSET 1 /* Nonce offset inside B0 and CTR_COUNT */
34#define CCM_BLOCK_NONCE_SIZE 3 /* Nonce size inside B0 and CTR_COUNT */
35#define CCM_BLOCK_IV_OFFSET 4 /* IV offset inside B0 and CTR_COUNT */
36#define CCM_BLOCK_IV_SIZE 8 /* IV size inside B0 and CTR_COUNT */
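/*
 * Layout of the ccm_config[] buffer implied by the offsets above (three
 * AES blocks of 16 bytes each):
 *   bytes  0..15  B0 block                  (nonce at +1, IV at +4)
 *   bytes 16..31  A0 / adata header block
 *   bytes 32..47  initial CTR_COUNT_0 block (nonce at +1, IV at +4)
 */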
37
38enum aead_ccm_header_size {
39 ccm_header_size_null = -1,
40 ccm_header_size_zero = 0,
41 ccm_header_size_2 = 2,
42 ccm_header_size_6 = 6,
43 ccm_header_size_max = S32_MAX
44};
45
46struct aead_req_ctx {
47	/* Allocate a cache line although only 4 bytes are needed, to
48	 * ensure the next field falls on a cache line boundary.
49	 * Used for both the HW digest compare and the CCM/GCM MAC value.
50 */
51 u8 mac_buf[MAX_MAC_SIZE] ____cacheline_aligned;
52 u8 ctr_iv[AES_BLOCK_SIZE] ____cacheline_aligned;
53
54 //used in gcm
55 u8 gcm_iv_inc1[AES_BLOCK_SIZE] ____cacheline_aligned;
56 u8 gcm_iv_inc2[AES_BLOCK_SIZE] ____cacheline_aligned;
57 u8 hkey[AES_BLOCK_SIZE] ____cacheline_aligned;
58 struct {
59 u8 len_a[GCM_BLOCK_LEN_SIZE] ____cacheline_aligned;
60 u8 len_c[GCM_BLOCK_LEN_SIZE];
61 } gcm_len_block;
62
63 u8 ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned;
64 /* HW actual size input */
65 unsigned int hw_iv_size ____cacheline_aligned;
66 /* used to prevent cache coherence problem */
67 u8 backup_mac[MAX_MAC_SIZE];
68 u8 *backup_iv; /*store iv for generated IV flow*/
69 u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
70 dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
71 /* buffer for internal ccm configurations */
72 dma_addr_t ccm_iv0_dma_addr;
73 dma_addr_t icv_dma_addr; /* Phys. address of ICV */
74
75 //used in gcm
76 /* buffer for internal gcm configurations */
77 dma_addr_t gcm_iv_inc1_dma_addr;
78 /* buffer for internal gcm configurations */
79 dma_addr_t gcm_iv_inc2_dma_addr;
80 dma_addr_t hkey_dma_addr; /* Phys. address of hkey */
81 dma_addr_t gcm_block_len_dma_addr; /* Phys. address of gcm block len */
82 bool is_gcm4543;
83
84 u8 *icv_virt_addr; /* Virt. address of ICV */
85 struct async_gen_req_ctx gen_ctx;
86 struct cc_mlli assoc;
87 struct cc_mlli src;
88 struct cc_mlli dst;
89 struct scatterlist *src_sgl;
90 struct scatterlist *dst_sgl;
91 unsigned int src_offset;
92 unsigned int dst_offset;
93 enum cc_req_dma_buf_type assoc_buff_type;
94 enum cc_req_dma_buf_type data_buff_type;
95 struct mlli_params mlli_params;
96 unsigned int cryptlen;
97 struct scatterlist ccm_adata_sg;
98 enum aead_ccm_header_size ccm_hdr_size;
99 unsigned int req_authsize;
100 enum drv_cipher_mode cipher_mode;
101 bool is_icv_fragmented;
102 bool is_single_pass;
103 bool plaintext_authenticate_only; //for gcm_rfc4543
104};
105
106int cc_aead_alloc(struct cc_drvdata *drvdata);
107int cc_aead_free(struct cc_drvdata *drvdata);
108
109#endif /*__CC_AEAD_H__*/
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
new file mode 100644
index 000000000000..b32577477b4c
--- /dev/null
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -0,0 +1,1651 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#include <crypto/internal/aead.h>
5#include <crypto/authenc.h>
6#include <crypto/scatterwalk.h>
7#include <linux/dmapool.h>
8#include <linux/dma-mapping.h>
9
10#include "cc_buffer_mgr.h"
11#include "cc_lli_defs.h"
12#include "cc_cipher.h"
13#include "cc_hash.h"
14#include "cc_aead.h"
15
16enum dma_buffer_type {
17 DMA_NULL_TYPE = -1,
18 DMA_SGL_TYPE = 1,
19 DMA_BUFF_TYPE = 2,
20};
21
22struct buff_mgr_handle {
23 struct dma_pool *mlli_buffs_pool;
24};
25
26union buffer_array_entry {
27 struct scatterlist *sgl;
28 dma_addr_t buffer_dma;
29};
30
31struct buffer_array {
32 unsigned int num_of_buffers;
33 union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
34 unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
35 int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
36 int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
37 enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
38 bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
39 u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
40};
41
42static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
43{
44 switch (type) {
45 case CC_DMA_BUF_NULL:
46 return "BUF_NULL";
47 case CC_DMA_BUF_DLLI:
48 return "BUF_DLLI";
49 case CC_DMA_BUF_MLLI:
50 return "BUF_MLLI";
51 default:
52 return "BUF_INVALID";
53 }
54}
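/*
 * In this driver a DLLI buffer is a single contiguous DMA address that the
 * HW descriptor points at directly, whereas an MLLI buffer is described by
 * a table of link-list entries (built by cc_generate_mlli() below) that is
 * later loaded into SRAM to cover fragmented scatterlists.
 */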
55
56/**
57 * cc_copy_mac() - Copy MAC to temporary location
58 *
59 * @dev: device object
60 * @req: aead request object
61 * @dir: [IN] copy from/to sgl
62 */
63static void cc_copy_mac(struct device *dev, struct aead_request *req,
64 enum cc_sg_cpy_direct dir)
65{
66 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
67 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
68 u32 skip = req->assoclen + req->cryptlen;
69
70 if (areq_ctx->is_gcm4543)
71 skip += crypto_aead_ivsize(tfm);
72
73 cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
74 (skip - areq_ctx->req_authsize), skip, dir);
75}
76
77/**
78 * cc_get_sgl_nents() - Get scatterlist number of entries.
79 *
80 * @sg_list: SG list
81 * @nbytes: [IN] Total SGL data bytes.
82 * @lbytes: [OUT] Returns the amount of bytes at the last entry
83 */
84static unsigned int cc_get_sgl_nents(struct device *dev,
85 struct scatterlist *sg_list,
86 unsigned int nbytes, u32 *lbytes,
87 bool *is_chained)
88{
89 unsigned int nents = 0;
90
91 while (nbytes && sg_list) {
92 if (sg_list->length) {
93 nents++;
94 /* get the number of bytes in the last entry */
95 *lbytes = nbytes;
96 nbytes -= (sg_list->length > nbytes) ?
97 nbytes : sg_list->length;
98 sg_list = sg_next(sg_list);
99 } else {
100 sg_list = (struct scatterlist *)sg_page(sg_list);
101 if (is_chained)
102 *is_chained = true;
103 }
104 }
105 dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
106 return nents;
107}
108
109/**
110 * cc_zero_sgl() - Zero scatter list data.
111 *
112 * @sgl: scatterlist whose first data_len bytes are zeroed
113 */
114void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
115{
116 struct scatterlist *current_sg = sgl;
117 int sg_index = 0;
118
119 while (sg_index <= data_len) {
120 if (!current_sg) {
121 /* reached the end of the sgl --> just return back */
122 return;
123 }
124 memset(sg_virt(current_sg), 0, current_sg->length);
125 sg_index += current_sg->length;
126 current_sg = sg_next(current_sg);
127 }
128}
129
130/**
131 * cc_copy_sg_portion() - Copy scatter list data,
132 * from to_skip to end, to dest and vice versa
133 *
134 * @dest: flat buffer to copy to/from
135 * @sg: scatterlist to copy from/to
136 * @to_skip: offset within the scatterlist data to start at
137 * @end: offset within the scatterlist data to stop at
138 * @direct: CC_SG_TO_BUF copies sg data into @dest, CC_SG_FROM_BUF the reverse
139 */
140void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
141 u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
142{
143 u32 nents, lbytes;
144
145 nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
146 sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
147 (direct == CC_SG_TO_BUF));
148}
149
150static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
151 u32 buff_size, u32 *curr_nents,
152 u32 **mlli_entry_pp)
153{
154 u32 *mlli_entry_p = *mlli_entry_pp;
155 u32 new_nents;
156
157 /* Verify there is no memory overflow*/
158 new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
159 if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
160 return -ENOMEM;
161
162 /*handle buffer longer than 64 kbytes */
163 while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
164 cc_lli_set_addr(mlli_entry_p, buff_dma);
165 cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
166 dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
167 *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
168 mlli_entry_p[LLI_WORD1_OFFSET]);
169 buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
170 buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
171 mlli_entry_p = mlli_entry_p + 2;
172 (*curr_nents)++;
173 }
174 /*Last entry */
175 cc_lli_set_addr(mlli_entry_p, buff_dma);
176 cc_lli_set_size(mlli_entry_p, buff_size);
177 dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
178 *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
179 mlli_entry_p[LLI_WORD1_OFFSET]);
180 mlli_entry_p = mlli_entry_p + 2;
181 *mlli_entry_pp = mlli_entry_p;
182 (*curr_nents)++;
183 return 0;
184}
185
186static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
187 u32 sgl_data_len, u32 sgl_offset,
188 u32 *curr_nents, u32 **mlli_entry_pp)
189{
190 struct scatterlist *curr_sgl = sgl;
191 u32 *mlli_entry_p = *mlli_entry_pp;
192 s32 rc = 0;
193
194 for ( ; (curr_sgl && sgl_data_len);
195 curr_sgl = sg_next(curr_sgl)) {
196 u32 entry_data_len =
197 (sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
198 sg_dma_len(curr_sgl) - sgl_offset :
199 sgl_data_len;
200 sgl_data_len -= entry_data_len;
201 rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
202 sgl_offset, entry_data_len,
203 curr_nents, &mlli_entry_p);
204 if (rc)
205 return rc;
206
207 sgl_offset = 0;
208 }
209 *mlli_entry_pp = mlli_entry_p;
210 return 0;
211}
212
213static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
214 struct mlli_params *mlli_params, gfp_t flags)
215{
216 u32 *mlli_p;
217 u32 total_nents = 0, prev_total_nents = 0;
218 int rc = 0, i;
219
220 dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
221
222 /* Allocate memory from the pointed pool */
223 mlli_params->mlli_virt_addr =
224 dma_pool_alloc(mlli_params->curr_pool, flags,
225 &mlli_params->mlli_dma_addr);
226 if (!mlli_params->mlli_virt_addr) {
227 dev_err(dev, "dma_pool_alloc() failed\n");
228 rc = -ENOMEM;
229 goto build_mlli_exit;
230 }
231 /* Point to start of MLLI */
232 mlli_p = (u32 *)mlli_params->mlli_virt_addr;
233	/* go over all SGs and link them into one MLLI table */
234 for (i = 0; i < sg_data->num_of_buffers; i++) {
235 union buffer_array_entry *entry = &sg_data->entry[i];
236 u32 tot_len = sg_data->total_data_len[i];
237 u32 offset = sg_data->offset[i];
238
239 if (sg_data->type[i] == DMA_SGL_TYPE)
240 rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
241 offset, &total_nents,
242 &mlli_p);
243 else /*DMA_BUFF_TYPE*/
244 rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
245 tot_len, &total_nents,
246 &mlli_p);
247 if (rc)
248 return rc;
249
250 /* set last bit in the current table */
251 if (sg_data->mlli_nents[i]) {
252 /*Calculate the current MLLI table length for the
253 *length field in the descriptor
254 */
255 *sg_data->mlli_nents[i] +=
256 (total_nents - prev_total_nents);
257 prev_total_nents = total_nents;
258 }
259 }
260
261 /* Set MLLI size for the bypass operation */
262 mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
263
264 dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
265 mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
266 mlli_params->mlli_len);
267
268build_mlli_exit:
269 return rc;
270}
271
272static void cc_add_buffer_entry(struct device *dev,
273 struct buffer_array *sgl_data,
274 dma_addr_t buffer_dma, unsigned int buffer_len,
275 bool is_last_entry, u32 *mlli_nents)
276{
277 unsigned int index = sgl_data->num_of_buffers;
278
279 dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
280 index, &buffer_dma, buffer_len, is_last_entry);
281 sgl_data->nents[index] = 1;
282 sgl_data->entry[index].buffer_dma = buffer_dma;
283 sgl_data->offset[index] = 0;
284 sgl_data->total_data_len[index] = buffer_len;
285 sgl_data->type[index] = DMA_BUFF_TYPE;
286 sgl_data->is_last[index] = is_last_entry;
287 sgl_data->mlli_nents[index] = mlli_nents;
288 if (sgl_data->mlli_nents[index])
289 *sgl_data->mlli_nents[index] = 0;
290 sgl_data->num_of_buffers++;
291}
292
293static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
294 unsigned int nents, struct scatterlist *sgl,
295 unsigned int data_len, unsigned int data_offset,
296 bool is_last_table, u32 *mlli_nents)
297{
298 unsigned int index = sgl_data->num_of_buffers;
299
300 dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
301 index, nents, sgl, data_len, is_last_table);
302 sgl_data->nents[index] = nents;
303 sgl_data->entry[index].sgl = sgl;
304 sgl_data->offset[index] = data_offset;
305 sgl_data->total_data_len[index] = data_len;
306 sgl_data->type[index] = DMA_SGL_TYPE;
307 sgl_data->is_last[index] = is_last_table;
308 sgl_data->mlli_nents[index] = mlli_nents;
309 if (sgl_data->mlli_nents[index])
310 *sgl_data->mlli_nents[index] = 0;
311 sgl_data->num_of_buffers++;
312}
313
314static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
315 enum dma_data_direction direction)
316{
317 u32 i, j;
318 struct scatterlist *l_sg = sg;
319
320 for (i = 0; i < nents; i++) {
321 if (!l_sg)
322 break;
323 if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
324 dev_err(dev, "dma_map_page() sg buffer failed\n");
325 goto err;
326 }
327 l_sg = sg_next(l_sg);
328 }
329 return nents;
330
331err:
332 /* Restore mapped parts */
333 for (j = 0; j < i; j++) {
334 if (!sg)
335 break;
336 dma_unmap_sg(dev, sg, 1, direction);
337 sg = sg_next(sg);
338 }
339 return 0;
340}
341
342static int cc_map_sg(struct device *dev, struct scatterlist *sg,
343 unsigned int nbytes, int direction, u32 *nents,
344 u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
345{
346 bool is_chained = false;
347
348 if (sg_is_last(sg)) {
349 /* One entry only case -set to DLLI */
350 if (dma_map_sg(dev, sg, 1, direction) != 1) {
351 dev_err(dev, "dma_map_sg() single buffer failed\n");
352 return -ENOMEM;
353 }
354 dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
355 &sg_dma_address(sg), sg_page(sg), sg_virt(sg),
356 sg->offset, sg->length);
357 *lbytes = nbytes;
358 *nents = 1;
359 *mapped_nents = 1;
360	} else { /* !sg_is_last */
361 *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
362 &is_chained);
363 if (*nents > max_sg_nents) {
364 *nents = 0;
365 dev_err(dev, "Too many fragments. current %d max %d\n",
366 *nents, max_sg_nents);
367 return -ENOMEM;
368 }
369 if (!is_chained) {
370			/* With an IOMMU, the number of mapped nents might
371			 * differ from the original sgl nents
372 */
373 *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
374 if (*mapped_nents == 0) {
375 *nents = 0;
376 dev_err(dev, "dma_map_sg() sg buffer failed\n");
377 return -ENOMEM;
378 }
379 } else {
380 /*In this case the driver maps entry by entry so it
381 * must have the same nents before and after map
382 */
383 *mapped_nents = cc_dma_map_sg(dev, sg, *nents,
384 direction);
385 if (*mapped_nents != *nents) {
386 *nents = *mapped_nents;
387 dev_err(dev, "dma_map_sg() sg buffer failed\n");
388 return -ENOMEM;
389 }
390 }
391 }
392
393 return 0;
394}
395
396static int
397cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
398 u8 *config_data, struct buffer_array *sg_data,
399 unsigned int assoclen)
400{
401 dev_dbg(dev, " handle additional data config set to DLLI\n");
402 /* create sg for the current buffer */
403 sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
404 AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
405 if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
406 dev_err(dev, "dma_map_sg() config buffer failed\n");
407 return -ENOMEM;
408 }
409 dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
410 &sg_dma_address(&areq_ctx->ccm_adata_sg),
411 sg_page(&areq_ctx->ccm_adata_sg),
412 sg_virt(&areq_ctx->ccm_adata_sg),
413 areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
414 /* prepare for case of MLLI */
415 if (assoclen > 0) {
416 cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
417 (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
418 0, false, NULL);
419 }
420 return 0;
421}
422
423static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
424 u8 *curr_buff, u32 curr_buff_cnt,
425 struct buffer_array *sg_data)
426{
427 dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
428 /* create sg for the current buffer */
429 sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
430 if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
431 dev_err(dev, "dma_map_sg() src buffer failed\n");
432 return -ENOMEM;
433 }
434 dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
435 &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
436 sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
437 areq_ctx->buff_sg->length);
438 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
439 areq_ctx->curr_sg = areq_ctx->buff_sg;
440 areq_ctx->in_nents = 0;
441 /* prepare for case of MLLI */
442 cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
443 false, NULL);
444 return 0;
445}
446
447void cc_unmap_cipher_request(struct device *dev, void *ctx,
448 unsigned int ivsize, struct scatterlist *src,
449 struct scatterlist *dst)
450{
451 struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
452
453 if (req_ctx->gen_ctx.iv_dma_addr) {
454 dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
455 &req_ctx->gen_ctx.iv_dma_addr, ivsize);
456 dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
457 ivsize,
458 req_ctx->is_giv ? DMA_BIDIRECTIONAL :
459 DMA_TO_DEVICE);
460 }
461 /* Release pool */
462 if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
463 req_ctx->mlli_params.mlli_virt_addr) {
464 dma_pool_free(req_ctx->mlli_params.curr_pool,
465 req_ctx->mlli_params.mlli_virt_addr,
466 req_ctx->mlli_params.mlli_dma_addr);
467 }
468
469 dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
470 dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
471
472 if (src != dst) {
473 dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
474 dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
475 }
476}
477
478int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
479 unsigned int ivsize, unsigned int nbytes,
480 void *info, struct scatterlist *src,
481 struct scatterlist *dst, gfp_t flags)
482{
483 struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
484 struct mlli_params *mlli_params = &req_ctx->mlli_params;
485 struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
486 struct device *dev = drvdata_to_dev(drvdata);
487 struct buffer_array sg_data;
488 u32 dummy = 0;
489 int rc = 0;
490 u32 mapped_nents = 0;
491
492 req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
493 mlli_params->curr_pool = NULL;
494 sg_data.num_of_buffers = 0;
495
496 /* Map IV buffer */
497 if (ivsize) {
498 dump_byte_array("iv", (u8 *)info, ivsize);
499 req_ctx->gen_ctx.iv_dma_addr =
500 dma_map_single(dev, (void *)info,
501 ivsize,
502 req_ctx->is_giv ? DMA_BIDIRECTIONAL :
503 DMA_TO_DEVICE);
504 if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
505 dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
506 ivsize, info);
507 return -ENOMEM;
508 }
509 dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
510 ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
511 } else {
512 req_ctx->gen_ctx.iv_dma_addr = 0;
513 }
514
515 /* Map the src SGL */
516 rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
517 LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
518 if (rc) {
519 rc = -ENOMEM;
520 goto cipher_exit;
521 }
522 if (mapped_nents > 1)
523 req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
524
525 if (src == dst) {
526 /* Handle inplace operation */
527 if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
528 req_ctx->out_nents = 0;
529 cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
530 nbytes, 0, true,
531 &req_ctx->in_mlli_nents);
532 }
533 } else {
534 /* Map the dst sg */
535 if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
536 &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
537 &dummy, &mapped_nents)) {
538 rc = -ENOMEM;
539 goto cipher_exit;
540 }
541 if (mapped_nents > 1)
542 req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
543
544 if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
545 cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
546 nbytes, 0, true,
547 &req_ctx->in_mlli_nents);
548 cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
549 nbytes, 0, true,
550 &req_ctx->out_mlli_nents);
551 }
552 }
553
554 if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
555 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
556 rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
557 if (rc)
558 goto cipher_exit;
559 }
560
561 dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
562 cc_dma_buf_type(req_ctx->dma_buf_type));
563
564 return 0;
565
566cipher_exit:
567 cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
568 return rc;
569}
570
571void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
572{
573 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
574 unsigned int hw_iv_size = areq_ctx->hw_iv_size;
575 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
576 struct cc_drvdata *drvdata = dev_get_drvdata(dev);
577 u32 dummy;
578 bool chained;
579 u32 size_to_unmap = 0;
580
581 if (areq_ctx->mac_buf_dma_addr) {
582 dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
583 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
584 }
585
586 if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
587 if (areq_ctx->hkey_dma_addr) {
588 dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
589 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
590 }
591
592 if (areq_ctx->gcm_block_len_dma_addr) {
593 dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
594 AES_BLOCK_SIZE, DMA_TO_DEVICE);
595 }
596
597 if (areq_ctx->gcm_iv_inc1_dma_addr) {
598 dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
599 AES_BLOCK_SIZE, DMA_TO_DEVICE);
600 }
601
602 if (areq_ctx->gcm_iv_inc2_dma_addr) {
603 dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
604 AES_BLOCK_SIZE, DMA_TO_DEVICE);
605 }
606 }
607
608 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
609 if (areq_ctx->ccm_iv0_dma_addr) {
610 dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
611 AES_BLOCK_SIZE, DMA_TO_DEVICE);
612 }
613
614 dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
615 }
616 if (areq_ctx->gen_ctx.iv_dma_addr) {
617 dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
618 hw_iv_size, DMA_BIDIRECTIONAL);
619 }
620
621	/* If a pool was set, a table was
622	 * allocated and should be released.
623 */
624 if (areq_ctx->mlli_params.curr_pool) {
625 dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
626 &areq_ctx->mlli_params.mlli_dma_addr,
627 areq_ctx->mlli_params.mlli_virt_addr);
628 dma_pool_free(areq_ctx->mlli_params.curr_pool,
629 areq_ctx->mlli_params.mlli_virt_addr,
630 areq_ctx->mlli_params.mlli_dma_addr);
631 }
632
633 dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
634 sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
635 req->assoclen, req->cryptlen);
636 size_to_unmap = req->assoclen + req->cryptlen;
637 if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
638 size_to_unmap += areq_ctx->req_authsize;
639 if (areq_ctx->is_gcm4543)
640 size_to_unmap += crypto_aead_ivsize(tfm);
641
642 dma_unmap_sg(dev, req->src,
643 cc_get_sgl_nents(dev, req->src, size_to_unmap,
644 &dummy, &chained),
645 DMA_BIDIRECTIONAL);
646 if (req->src != req->dst) {
647 dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
648 sg_virt(req->dst));
649 dma_unmap_sg(dev, req->dst,
650 cc_get_sgl_nents(dev, req->dst, size_to_unmap,
651 &dummy, &chained),
652 DMA_BIDIRECTIONAL);
653 }
654 if (drvdata->coherent &&
655 areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
656 req->src == req->dst) {
657		/* Copy the MAC back from its temporary location to deal
658		 * with possible data overwrites caused by cache coherence
659		 * issues.
660 */
661 cc_copy_mac(dev, req, CC_SG_FROM_BUF);
662 }
663}
664
665static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
666 unsigned int sgl_nents, unsigned int authsize,
667 u32 last_entry_data_size,
668 bool *is_icv_fragmented)
669{
670 unsigned int icv_max_size = 0;
671 unsigned int icv_required_size = authsize > last_entry_data_size ?
672 (authsize - last_entry_data_size) :
673 authsize;
674 unsigned int nents;
675 unsigned int i;
676
677 if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
678 *is_icv_fragmented = false;
679 return 0;
680 }
681
682 for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
683 if (!sgl)
684 break;
685 sgl = sg_next(sgl);
686 }
687
688 if (sgl)
689 icv_max_size = sgl->length;
690
691 if (last_entry_data_size > authsize) {
692 /* ICV attached to data in last entry (not fragmented!) */
693 nents = 0;
694 *is_icv_fragmented = false;
695 } else if (last_entry_data_size == authsize) {
696 /* ICV placed in whole last entry (not fragmented!) */
697 nents = 1;
698 *is_icv_fragmented = false;
699 } else if (icv_max_size > icv_required_size) {
700 nents = 1;
701 *is_icv_fragmented = true;
702 } else if (icv_max_size == icv_required_size) {
703 nents = 2;
704 *is_icv_fragmented = true;
705 } else {
706 dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
707 MAX_ICV_NENTS_SUPPORTED);
708 nents = -1; /*unsupported*/
709 }
710 dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
711 (*is_icv_fragmented ? "true" : "false"), nents);
712
713 return nents;
714}
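/*
 * The helper above works out whether the ICV lies contiguously in the mapped
 * scatterlist or is split across entries (reported via *is_icv_fragmented),
 * and returns the number of trailing entries involved, or -1 when more than
 * MAX_ICV_NENTS_SUPPORTED fragments would be required.
 */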
715
716static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
717 struct aead_request *req,
718 struct buffer_array *sg_data,
719 bool is_last, bool do_chain)
720{
721 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
722 unsigned int hw_iv_size = areq_ctx->hw_iv_size;
723 struct device *dev = drvdata_to_dev(drvdata);
724 int rc = 0;
725
726 if (!req->iv) {
727 areq_ctx->gen_ctx.iv_dma_addr = 0;
728 goto chain_iv_exit;
729 }
730
731 areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
732 hw_iv_size,
733 DMA_BIDIRECTIONAL);
734 if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
735 dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
736 hw_iv_size, req->iv);
737 rc = -ENOMEM;
738 goto chain_iv_exit;
739 }
740
741 dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
742 hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
743 // TODO: what about CTR?? ask Ron
744 if (do_chain && areq_ctx->plaintext_authenticate_only) {
745 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
746 unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
747 unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
748 /* Chain to given list */
749 cc_add_buffer_entry(dev, sg_data,
750 (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
751 iv_size_to_authenc, is_last,
752 &areq_ctx->assoc.mlli_nents);
753 areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
754 }
755
756chain_iv_exit:
757 return rc;
758}
759
760static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
761 struct aead_request *req,
762 struct buffer_array *sg_data,
763 bool is_last, bool do_chain)
764{
765 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
766 int rc = 0;
767 u32 mapped_nents = 0;
768 struct scatterlist *current_sg = req->src;
769 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
770 unsigned int sg_index = 0;
771 u32 size_of_assoc = req->assoclen;
772 struct device *dev = drvdata_to_dev(drvdata);
773
774 if (areq_ctx->is_gcm4543)
775 size_of_assoc += crypto_aead_ivsize(tfm);
776
777 if (!sg_data) {
778 rc = -EINVAL;
779 goto chain_assoc_exit;
780 }
781
782 if (req->assoclen == 0) {
783 areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
784 areq_ctx->assoc.nents = 0;
785 areq_ctx->assoc.mlli_nents = 0;
786 dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
787 cc_dma_buf_type(areq_ctx->assoc_buff_type),
788 areq_ctx->assoc.nents);
789 goto chain_assoc_exit;
790 }
791
792 //iterate over the sgl to see how many entries are for associated data
793	//it is assumed that if we reach here, the sgl is already mapped
794 sg_index = current_sg->length;
795 //the first entry in the scatter list contains all the associated data
796 if (sg_index > size_of_assoc) {
797 mapped_nents++;
798 } else {
799 while (sg_index <= size_of_assoc) {
800 current_sg = sg_next(current_sg);
801			/* if we have reached the end of the sgl, then this is
802			 * unexpected
803 */
804 if (!current_sg) {
805 dev_err(dev, "reached end of sg list. unexpected\n");
806 return -EINVAL;
807 }
808 sg_index += current_sg->length;
809 mapped_nents++;
810 }
811 }
812 if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
813 dev_err(dev, "Too many fragments. current %d max %d\n",
814 mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
815 return -ENOMEM;
816 }
817 areq_ctx->assoc.nents = mapped_nents;
818
819	/* In the CCM case we have an additional entry for
820	 * the ccm header configuration
821 */
822 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
823 if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
824 dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
825 (areq_ctx->assoc.nents + 1),
826 LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
827 rc = -ENOMEM;
828 goto chain_assoc_exit;
829 }
830 }
831
832 if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
833 areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
834 else
835 areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
836
837 if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
838 dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
839 cc_dma_buf_type(areq_ctx->assoc_buff_type),
840 areq_ctx->assoc.nents);
841 cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
842 req->assoclen, 0, is_last,
843 &areq_ctx->assoc.mlli_nents);
844 areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
845 }
846
847chain_assoc_exit:
848 return rc;
849}
850
851static void cc_prepare_aead_data_dlli(struct aead_request *req,
852 u32 *src_last_bytes, u32 *dst_last_bytes)
853{
854 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
855 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
856 unsigned int authsize = areq_ctx->req_authsize;
857
858 areq_ctx->is_icv_fragmented = false;
859 if (req->src == req->dst) {
860 /*INPLACE*/
861 areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
862 (*src_last_bytes - authsize);
863 areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
864 (*src_last_bytes - authsize);
865 } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
866 /*NON-INPLACE and DECRYPT*/
867 areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
868 (*src_last_bytes - authsize);
869 areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
870 (*src_last_bytes - authsize);
871 } else {
872 /*NON-INPLACE and ENCRYPT*/
873 areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
874 (*dst_last_bytes - authsize);
875 areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
876 (*dst_last_bytes - authsize);
877 }
878}
879
880static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
881 struct aead_request *req,
882 struct buffer_array *sg_data,
883 u32 *src_last_bytes, u32 *dst_last_bytes,
884 bool is_last_table)
885{
886 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
887 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
888 unsigned int authsize = areq_ctx->req_authsize;
889 int rc = 0, icv_nents;
890 struct device *dev = drvdata_to_dev(drvdata);
891 struct scatterlist *sg;
892
893 if (req->src == req->dst) {
894 /*INPLACE*/
895 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
896 areq_ctx->src_sgl, areq_ctx->cryptlen,
897 areq_ctx->src_offset, is_last_table,
898 &areq_ctx->src.mlli_nents);
899
900 icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
901 areq_ctx->src.nents,
902 authsize, *src_last_bytes,
903 &areq_ctx->is_icv_fragmented);
904 if (icv_nents < 0) {
905 rc = -ENOTSUPP;
906 goto prepare_data_mlli_exit;
907 }
908
909 if (areq_ctx->is_icv_fragmented) {
910			/* Backup happens only when the ICV is fragmented; ICV
911			 * verification is then done by a CPU compare in order to
912			 * simplify MAC verification upon request completion
913 */
914 if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
915				/* On coherent platforms (e.g. ACP) the
916				 * ICV is already copied for any in-place
917				 * decrypt operation, so this copy must be
918				 * skipped here.
919 */
920 if (!drvdata->coherent)
921 cc_copy_mac(dev, req, CC_SG_TO_BUF);
922
923 areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
924 } else {
925 areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
926 areq_ctx->icv_dma_addr =
927 areq_ctx->mac_buf_dma_addr;
928 }
929 } else { /* Contig. ICV */
930 sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
931			/* Should handle the case where the sg is not contiguous. */
932 areq_ctx->icv_dma_addr = sg_dma_address(sg) +
933 (*src_last_bytes - authsize);
934 areq_ctx->icv_virt_addr = sg_virt(sg) +
935 (*src_last_bytes - authsize);
936 }
937
938 } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
939 /*NON-INPLACE and DECRYPT*/
940 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
941 areq_ctx->src_sgl, areq_ctx->cryptlen,
942 areq_ctx->src_offset, is_last_table,
943 &areq_ctx->src.mlli_nents);
944 cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
945 areq_ctx->dst_sgl, areq_ctx->cryptlen,
946 areq_ctx->dst_offset, is_last_table,
947 &areq_ctx->dst.mlli_nents);
948
949 icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
950 areq_ctx->src.nents,
951 authsize, *src_last_bytes,
952 &areq_ctx->is_icv_fragmented);
953 if (icv_nents < 0) {
954 rc = -ENOTSUPP;
955 goto prepare_data_mlli_exit;
956 }
957
958		/* Backup happens only when the ICV is fragmented; ICV
959		 * verification is then done by a CPU compare in order to simplify
960		 * MAC verification upon request completion
961 */
962 if (areq_ctx->is_icv_fragmented) {
963 cc_copy_mac(dev, req, CC_SG_TO_BUF);
964 areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
965
966 } else { /* Contig. ICV */
967 sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
968			/* Should handle the case where the sg is not contiguous. */
969 areq_ctx->icv_dma_addr = sg_dma_address(sg) +
970 (*src_last_bytes - authsize);
971 areq_ctx->icv_virt_addr = sg_virt(sg) +
972 (*src_last_bytes - authsize);
973 }
974
975 } else {
976 /*NON-INPLACE and ENCRYPT*/
977 cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
978 areq_ctx->dst_sgl, areq_ctx->cryptlen,
979 areq_ctx->dst_offset, is_last_table,
980 &areq_ctx->dst.mlli_nents);
981 cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
982 areq_ctx->src_sgl, areq_ctx->cryptlen,
983 areq_ctx->src_offset, is_last_table,
984 &areq_ctx->src.mlli_nents);
985
986 icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
987 areq_ctx->dst.nents,
988 authsize, *dst_last_bytes,
989 &areq_ctx->is_icv_fragmented);
990 if (icv_nents < 0) {
991 rc = -ENOTSUPP;
992 goto prepare_data_mlli_exit;
993 }
994
995 if (!areq_ctx->is_icv_fragmented) {
996 sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
997 /* Contig. ICV */
998 areq_ctx->icv_dma_addr = sg_dma_address(sg) +
999 (*dst_last_bytes - authsize);
1000 areq_ctx->icv_virt_addr = sg_virt(sg) +
1001 (*dst_last_bytes - authsize);
1002 } else {
1003 areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
1004 areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
1005 }
1006 }
1007
1008prepare_data_mlli_exit:
1009 return rc;
1010}
1011
1012static int cc_aead_chain_data(struct cc_drvdata *drvdata,
1013 struct aead_request *req,
1014 struct buffer_array *sg_data,
1015 bool is_last_table, bool do_chain)
1016{
1017 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1018 struct device *dev = drvdata_to_dev(drvdata);
1019 enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
1020 unsigned int authsize = areq_ctx->req_authsize;
1021 unsigned int src_last_bytes = 0, dst_last_bytes = 0;
1022 int rc = 0;
1023 u32 src_mapped_nents = 0, dst_mapped_nents = 0;
1024 u32 offset = 0;
1025 /* non-inplace mode */
1026 unsigned int size_for_map = req->assoclen + req->cryptlen;
1027 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1028 u32 sg_index = 0;
1029 bool chained = false;
1030 bool is_gcm4543 = areq_ctx->is_gcm4543;
1031 u32 size_to_skip = req->assoclen;
1032
1033 if (is_gcm4543)
1034 size_to_skip += crypto_aead_ivsize(tfm);
1035
1036 offset = size_to_skip;
1037
1038 if (!sg_data)
1039 return -EINVAL;
1040
1041 areq_ctx->src_sgl = req->src;
1042 areq_ctx->dst_sgl = req->dst;
1043
1044 if (is_gcm4543)
1045 size_for_map += crypto_aead_ivsize(tfm);
1046
1047 size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1048 authsize : 0;
1049 src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
1050 &src_last_bytes, &chained);
1051 sg_index = areq_ctx->src_sgl->length;
1052 //check where the data starts
1053 while (sg_index <= size_to_skip) {
1054 offset -= areq_ctx->src_sgl->length;
1055 areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
1056		//if we have reached the end of the sgl, then this is unexpected
1057 if (!areq_ctx->src_sgl) {
1058 dev_err(dev, "reached end of sg list. unexpected\n");
1059 return -EINVAL;
1060 }
1061 sg_index += areq_ctx->src_sgl->length;
1062 src_mapped_nents--;
1063 }
1064 if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
1065 dev_err(dev, "Too many fragments. current %d max %d\n",
1066 src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1067 return -ENOMEM;
1068 }
1069
1070 areq_ctx->src.nents = src_mapped_nents;
1071
1072 areq_ctx->src_offset = offset;
1073
1074 if (req->src != req->dst) {
1075 size_for_map = req->assoclen + req->cryptlen;
1076 size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1077 authsize : 0;
1078 if (is_gcm4543)
1079 size_for_map += crypto_aead_ivsize(tfm);
1080
1081 rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
1082 &areq_ctx->dst.nents,
1083 LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
1084 &dst_mapped_nents);
1085 if (rc) {
1086 rc = -ENOMEM;
1087 goto chain_data_exit;
1088 }
1089 }
1090
1091 dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
1092 &dst_last_bytes, &chained);
1093 sg_index = areq_ctx->dst_sgl->length;
1094 offset = size_to_skip;
1095
1096 //check where the data starts
1097 while (sg_index <= size_to_skip) {
1098 offset -= areq_ctx->dst_sgl->length;
1099 areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
1100		//if we have reached the end of the sgl, this is unexpected
1101 if (!areq_ctx->dst_sgl) {
1102 dev_err(dev, "reached end of sg list. unexpected\n");
1103 return -EINVAL;
1104 }
1105 sg_index += areq_ctx->dst_sgl->length;
1106 dst_mapped_nents--;
1107 }
1108 if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
1109 dev_err(dev, "Too many fragments. current %d max %d\n",
1110 dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
1111 return -ENOMEM;
1112 }
1113 areq_ctx->dst.nents = dst_mapped_nents;
1114 areq_ctx->dst_offset = offset;
1115 if (src_mapped_nents > 1 ||
1116 dst_mapped_nents > 1 ||
1117 do_chain) {
1118 areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
1119 rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
1120 &src_last_bytes,
1121 &dst_last_bytes, is_last_table);
1122 } else {
1123 areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
1124 cc_prepare_aead_data_dlli(req, &src_last_bytes,
1125 &dst_last_bytes);
1126 }
1127
1128chain_data_exit:
1129 return rc;
1130}
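/*
 * Illustrative summary (not driver logic): the buffer type selected above
 * boils down to
 *
 *   src_mapped_nents == 1 && dst_mapped_nents == 1 && !do_chain  ->  DLLI
 *   otherwise                                                    ->  MLLI
 *
 * i.e. only a request whose mapped source and destination each occupy a
 * single scatterlist entry, with no chaining requested, avoids building
 * an MLLI table.
 */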
1131
1132static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
1133 struct aead_request *req)
1134{
1135 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1136 u32 curr_mlli_size = 0;
1137
1138 if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
1139 areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
1140 curr_mlli_size = areq_ctx->assoc.mlli_nents *
1141 LLI_ENTRY_BYTE_SIZE;
1142 }
1143
1144 if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
1145		/* In-place case: dst nents equal src nents */
1146 if (req->src == req->dst) {
1147 areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
1148 areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
1149 curr_mlli_size;
1150 areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
1151 if (!areq_ctx->is_single_pass)
1152 areq_ctx->assoc.mlli_nents +=
1153 areq_ctx->src.mlli_nents;
1154 } else {
1155 if (areq_ctx->gen_ctx.op_type ==
1156 DRV_CRYPTO_DIRECTION_DECRYPT) {
1157 areq_ctx->src.sram_addr =
1158 drvdata->mlli_sram_addr +
1159 curr_mlli_size;
1160 areq_ctx->dst.sram_addr =
1161 areq_ctx->src.sram_addr +
1162 areq_ctx->src.mlli_nents *
1163 LLI_ENTRY_BYTE_SIZE;
1164 if (!areq_ctx->is_single_pass)
1165 areq_ctx->assoc.mlli_nents +=
1166 areq_ctx->src.mlli_nents;
1167 } else {
1168 areq_ctx->dst.sram_addr =
1169 drvdata->mlli_sram_addr +
1170 curr_mlli_size;
1171 areq_ctx->src.sram_addr =
1172 areq_ctx->dst.sram_addr +
1173 areq_ctx->dst.mlli_nents *
1174 LLI_ENTRY_BYTE_SIZE;
1175 if (!areq_ctx->is_single_pass)
1176 areq_ctx->assoc.mlli_nents +=
1177 areq_ctx->dst.mlli_nents;
1178 }
1179 }
1180 }
1181}
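/*
 * Layout sketch (illustrative, assuming LLI_ENTRY_BYTE_SIZE is 8 and a
 * non-inplace encrypt with 4 assoc, 3 dst and 5 src MLLI entries):
 *
 *   assoc table @ mlli_sram_addr           (4 * 8 = 32 bytes)
 *   dst table   @ mlli_sram_addr + 32      (3 * 8 = 24 bytes)
 *   src table   @ mlli_sram_addr + 32 + 24
 *
 * For decrypt the src table precedes the dst table, and for in-place
 * requests the src table is reused as the dst table.
 */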
1182
1183int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
1184{
1185 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1186 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1187 struct device *dev = drvdata_to_dev(drvdata);
1188 struct buffer_array sg_data;
1189 unsigned int authsize = areq_ctx->req_authsize;
1190 struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1191 int rc = 0;
1192 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1193 bool is_gcm4543 = areq_ctx->is_gcm4543;
1194 dma_addr_t dma_addr;
1195 u32 mapped_nents = 0;
1196 u32 dummy = 0; /*used for the assoc data fragments */
1197 u32 size_to_map = 0;
1198 gfp_t flags = cc_gfp_flags(&req->base);
1199
1200 mlli_params->curr_pool = NULL;
1201 sg_data.num_of_buffers = 0;
1202
1203	/* copy the mac to a temporary location to deal with a possible
1204	 * data memory overwrite caused by a cache coherency problem.
1205 */
1206 if (drvdata->coherent &&
1207 areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
1208 req->src == req->dst)
1209 cc_copy_mac(dev, req, CC_SG_TO_BUF);
1210
1211	/* calculate the size for the cipher; the ICV is removed in decrypt */
1212 areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
1213 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1214 req->cryptlen :
1215 (req->cryptlen - authsize);
1216
1217 dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
1218 DMA_BIDIRECTIONAL);
1219 if (dma_mapping_error(dev, dma_addr)) {
1220 dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
1221 MAX_MAC_SIZE, areq_ctx->mac_buf);
1222 rc = -ENOMEM;
1223 goto aead_map_failure;
1224 }
1225 areq_ctx->mac_buf_dma_addr = dma_addr;
1226
1227 if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
1228 void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1229
1230 dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
1231 DMA_TO_DEVICE);
1232
1233 if (dma_mapping_error(dev, dma_addr)) {
1234			dev_err(dev, "Mapping ccm_config %u B at va=%pK for DMA failed\n",
1235 AES_BLOCK_SIZE, addr);
1236 areq_ctx->ccm_iv0_dma_addr = 0;
1237 rc = -ENOMEM;
1238 goto aead_map_failure;
1239 }
1240 areq_ctx->ccm_iv0_dma_addr = dma_addr;
1241
1242 if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
1243 &sg_data, req->assoclen)) {
1244 rc = -ENOMEM;
1245 goto aead_map_failure;
1246 }
1247 }
1248
1249 if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
1250 dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
1251 DMA_BIDIRECTIONAL);
1252 if (dma_mapping_error(dev, dma_addr)) {
1253 dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
1254 AES_BLOCK_SIZE, areq_ctx->hkey);
1255 rc = -ENOMEM;
1256 goto aead_map_failure;
1257 }
1258 areq_ctx->hkey_dma_addr = dma_addr;
1259
1260 dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
1261 AES_BLOCK_SIZE, DMA_TO_DEVICE);
1262 if (dma_mapping_error(dev, dma_addr)) {
1263 dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
1264 AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
1265 rc = -ENOMEM;
1266 goto aead_map_failure;
1267 }
1268 areq_ctx->gcm_block_len_dma_addr = dma_addr;
1269
1270 dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
1271 AES_BLOCK_SIZE, DMA_TO_DEVICE);
1272
1273 if (dma_mapping_error(dev, dma_addr)) {
1274 dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
1275 AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
1276 areq_ctx->gcm_iv_inc1_dma_addr = 0;
1277 rc = -ENOMEM;
1278 goto aead_map_failure;
1279 }
1280 areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;
1281
1282 dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
1283 AES_BLOCK_SIZE, DMA_TO_DEVICE);
1284
1285 if (dma_mapping_error(dev, dma_addr)) {
1286 dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
1287 AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
1288 areq_ctx->gcm_iv_inc2_dma_addr = 0;
1289 rc = -ENOMEM;
1290 goto aead_map_failure;
1291 }
1292 areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
1293 }
1294
1295 size_to_map = req->cryptlen + req->assoclen;
1296 if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
1297 size_to_map += authsize;
1298
1299 if (is_gcm4543)
1300 size_to_map += crypto_aead_ivsize(tfm);
1301 rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
1302 &areq_ctx->src.nents,
1303 (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
1304 LLI_MAX_NUM_OF_DATA_ENTRIES),
1305 &dummy, &mapped_nents);
1306 if (rc) {
1307 rc = -ENOMEM;
1308 goto aead_map_failure;
1309 }
1310
1311 if (areq_ctx->is_single_pass) {
1312 /*
1313 * Create MLLI table for:
1314 * (1) Assoc. data
1315 * (2) Src/Dst SGLs
1316		 * Note: IV is a contiguous buffer (not an SGL)
1317 */
1318 rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
1319 if (rc)
1320 goto aead_map_failure;
1321 rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
1322 if (rc)
1323 goto aead_map_failure;
1324 rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
1325 if (rc)
1326 goto aead_map_failure;
1327 } else { /* DOUBLE-PASS flow */
1328 /*
1329 * Prepare MLLI table(s) in this order:
1330 *
1331 * If ENCRYPT/DECRYPT (inplace):
1332 * (1) MLLI table for assoc
1333 * (2) IV entry (chained right after end of assoc)
1334 * (3) MLLI for src/dst (inplace operation)
1335 *
1336 * If ENCRYPT (non-inplace)
1337 * (1) MLLI table for assoc
1338 * (2) IV entry (chained right after end of assoc)
1339 * (3) MLLI for dst
1340 * (4) MLLI for src
1341 *
1342 * If DECRYPT (non-inplace)
1343 * (1) MLLI table for assoc
1344 * (2) IV entry (chained right after end of assoc)
1345 * (3) MLLI for src
1346 * (4) MLLI for dst
1347 */
1348 rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
1349 if (rc)
1350 goto aead_map_failure;
1351 rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
1352 if (rc)
1353 goto aead_map_failure;
1354 rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
1355 if (rc)
1356 goto aead_map_failure;
1357 }
1358
1359	/* MLLI support - start building the MLLI according to the above
1360 * results
1361 */
1362 if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1363 areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
1364 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1365 rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
1366 if (rc)
1367 goto aead_map_failure;
1368
1369 cc_update_aead_mlli_nents(drvdata, req);
1370 dev_dbg(dev, "assoc params mn %d\n",
1371 areq_ctx->assoc.mlli_nents);
1372 dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
1373 dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
1374 }
1375 return 0;
1376
1377aead_map_failure:
1378 cc_unmap_aead_request(dev, req);
1379 return rc;
1380}
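/*
 * Call-pattern summary (illustrative): the boolean pair handed to the
 * chain helpers above is (is_last_table, do_chain):
 *
 *   single pass:  assoc(true, false), iv(true, false), data(true, false)
 *   double pass:  assoc(false, true), iv(false, true), data(true, true)
 */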
1381
1382int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
1383 struct scatterlist *src, unsigned int nbytes,
1384 bool do_update, gfp_t flags)
1385{
1386 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1387 struct device *dev = drvdata_to_dev(drvdata);
1388 u8 *curr_buff = cc_hash_buf(areq_ctx);
1389 u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1390 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1391 struct buffer_array sg_data;
1392 struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1393 u32 dummy = 0;
1394 u32 mapped_nents = 0;
1395
1396 dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
1397 curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1398 /* Init the type of the dma buffer */
1399 areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1400 mlli_params->curr_pool = NULL;
1401 sg_data.num_of_buffers = 0;
1402 areq_ctx->in_nents = 0;
1403
1404 if (nbytes == 0 && *curr_buff_cnt == 0) {
1405 /* nothing to do */
1406 return 0;
1407 }
1408
1409	/* TODO: copy the data when the buffer is large enough for the operation */
1410 /* map the previous buffer */
1411 if (*curr_buff_cnt) {
1412 if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1413 &sg_data)) {
1414 return -ENOMEM;
1415 }
1416 }
1417
1418 if (src && nbytes > 0 && do_update) {
1419 if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
1420 &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
1421 &dummy, &mapped_nents)) {
1422 goto unmap_curr_buff;
1423 }
1424 if (src && mapped_nents == 1 &&
1425 areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1426 memcpy(areq_ctx->buff_sg, src,
1427 sizeof(struct scatterlist));
1428 areq_ctx->buff_sg->length = nbytes;
1429 areq_ctx->curr_sg = areq_ctx->buff_sg;
1430 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1431 } else {
1432 areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1433 }
1434 }
1435
1436 /*build mlli */
1437 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1438 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1439 /* add the src data to the sg_data */
1440 cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
1441 0, true, &areq_ctx->mlli_nents);
1442 if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
1443 goto fail_unmap_din;
1444 }
1445 /* change the buffer index for the unmap function */
1446 areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
1447 dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
1448 cc_dma_buf_type(areq_ctx->data_dma_buf_type));
1449 return 0;
1450
1451fail_unmap_din:
1452 dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1453
1454unmap_curr_buff:
1455 if (*curr_buff_cnt)
1456 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1457
1458 return -ENOMEM;
1459}
1460
1461int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
1462 struct scatterlist *src, unsigned int nbytes,
1463 unsigned int block_size, gfp_t flags)
1464{
1465 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1466 struct device *dev = drvdata_to_dev(drvdata);
1467 u8 *curr_buff = cc_hash_buf(areq_ctx);
1468 u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
1469 u8 *next_buff = cc_next_buf(areq_ctx);
1470 u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
1471 struct mlli_params *mlli_params = &areq_ctx->mlli_params;
1472 unsigned int update_data_len;
1473 u32 total_in_len = nbytes + *curr_buff_cnt;
1474 struct buffer_array sg_data;
1475 struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
1476 unsigned int swap_index = 0;
1477 u32 dummy = 0;
1478 u32 mapped_nents = 0;
1479
1480 dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
1481 curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
1482 /* Init the type of the dma buffer */
1483 areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
1484 mlli_params->curr_pool = NULL;
1485 areq_ctx->curr_sg = NULL;
1486 sg_data.num_of_buffers = 0;
1487 areq_ctx->in_nents = 0;
1488
1489 if (total_in_len < block_size) {
1490 dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
1491 curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
1492 areq_ctx->in_nents =
1493 cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
1494 sg_copy_to_buffer(src, areq_ctx->in_nents,
1495 &curr_buff[*curr_buff_cnt], nbytes);
1496 *curr_buff_cnt += nbytes;
1497 return 1;
1498 }
1499
1500 /* Calculate the residue size*/
1501 *next_buff_cnt = total_in_len & (block_size - 1);
1502 /* update data len */
1503 update_data_len = total_in_len - *next_buff_cnt;
1504
1505 dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
1506 *next_buff_cnt, update_data_len);
1507
1508 /* Copy the new residue to next buffer */
1509 if (*next_buff_cnt) {
1510 dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
1511 next_buff, (update_data_len - *curr_buff_cnt),
1512 *next_buff_cnt);
1513 cc_copy_sg_portion(dev, next_buff, src,
1514 (update_data_len - *curr_buff_cnt),
1515 nbytes, CC_SG_TO_BUF);
1516 /* change the buffer index for next operation */
1517 swap_index = 1;
1518 }
1519
1520 if (*curr_buff_cnt) {
1521 if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
1522 &sg_data)) {
1523 return -ENOMEM;
1524 }
1525 /* change the buffer index for next operation */
1526 swap_index = 1;
1527 }
1528
1529 if (update_data_len > *curr_buff_cnt) {
1530 if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
1531 DMA_TO_DEVICE, &areq_ctx->in_nents,
1532 LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
1533 &mapped_nents)) {
1534 goto unmap_curr_buff;
1535 }
1536 if (mapped_nents == 1 &&
1537 areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
1538 /* only one entry in the SG and no previous data */
1539 memcpy(areq_ctx->buff_sg, src,
1540 sizeof(struct scatterlist));
1541 areq_ctx->buff_sg->length = update_data_len;
1542 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
1543 areq_ctx->curr_sg = areq_ctx->buff_sg;
1544 } else {
1545 areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
1546 }
1547 }
1548
1549 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
1550 mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
1551 /* add the src data to the sg_data */
1552 cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
1553 (update_data_len - *curr_buff_cnt), 0, true,
1554 &areq_ctx->mlli_nents);
1555 if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
1556 goto fail_unmap_din;
1557 }
1558 areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
1559
1560 return 0;
1561
1562fail_unmap_din:
1563 dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
1564
1565unmap_curr_buff:
1566 if (*curr_buff_cnt)
1567 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1568
1569 return -ENOMEM;
1570}
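/*
 * Worked example (illustrative, block_size assumed to be a power of two
 * as required by the mask arithmetic above): with *curr_buff_cnt = 36,
 * nbytes = 100 and block_size = 64,
 *
 *   total_in_len    = 136
 *   *next_buff_cnt  = 136 & 63 = 8     (residue saved for the next call)
 *   update_data_len = 136 - 8  = 128   (hashed in this update)
 */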
1571
1572void cc_unmap_hash_request(struct device *dev, void *ctx,
1573 struct scatterlist *src, bool do_revert)
1574{
1575 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
1576 u32 *prev_len = cc_next_buf_cnt(areq_ctx);
1577
1578	/* In case a pool was set, a table was
1579	 * allocated and should be released
1580 */
1581 if (areq_ctx->mlli_params.curr_pool) {
1582 dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
1583 &areq_ctx->mlli_params.mlli_dma_addr,
1584 areq_ctx->mlli_params.mlli_virt_addr);
1585 dma_pool_free(areq_ctx->mlli_params.curr_pool,
1586 areq_ctx->mlli_params.mlli_virt_addr,
1587 areq_ctx->mlli_params.mlli_dma_addr);
1588 }
1589
1590 if (src && areq_ctx->in_nents) {
1591 dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
1592 sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
1593 dma_unmap_sg(dev, src,
1594 areq_ctx->in_nents, DMA_TO_DEVICE);
1595 }
1596
1597 if (*prev_len) {
1598 dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
1599 sg_virt(areq_ctx->buff_sg),
1600 &sg_dma_address(areq_ctx->buff_sg),
1601 sg_dma_len(areq_ctx->buff_sg));
1602 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
1603 if (!do_revert) {
1604 /* clean the previous data length for update
1605 * operation
1606 */
1607 *prev_len = 0;
1608 } else {
1609 areq_ctx->buff_index ^= 1;
1610 }
1611 }
1612}
1613
1614int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
1615{
1616 struct buff_mgr_handle *buff_mgr_handle;
1617 struct device *dev = drvdata_to_dev(drvdata);
1618
1619 buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
1620 if (!buff_mgr_handle)
1621 return -ENOMEM;
1622
1623 drvdata->buff_mgr_handle = buff_mgr_handle;
1624
1625 buff_mgr_handle->mlli_buffs_pool =
1626 dma_pool_create("dx_single_mlli_tables", dev,
1627 MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
1628 LLI_ENTRY_BYTE_SIZE,
1629 MLLI_TABLE_MIN_ALIGNMENT, 0);
1630
1631 if (!buff_mgr_handle->mlli_buffs_pool)
1632 goto error;
1633
1634 return 0;
1635
1636error:
1637 cc_buffer_mgr_fini(drvdata);
1638 return -ENOMEM;
1639}
1640
1641int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
1642{
1643 struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
1644
1645 if (buff_mgr_handle) {
1646 dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
1647 kfree(drvdata->buff_mgr_handle);
1648 drvdata->buff_mgr_handle = NULL;
1649 }
1650 return 0;
1651}
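/*
 * Usage sketch (illustrative): the DMA pool created in cc_buffer_mgr_init()
 * backs the MLLI tables handed out through mlli_params->curr_pool, so the
 * two calls are expected to bracket the driver's lifetime, roughly
 *
 *   rc = cc_buffer_mgr_init(drvdata);   // at probe time
 *   ...
 *   cc_buffer_mgr_fini(drvdata);        // at remove time
 */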
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
new file mode 100644
index 000000000000..3ec4b4db5247
--- /dev/null
+++ b/drivers/crypto/ccree/cc_buffer_mgr.h
@@ -0,0 +1,71 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4/* \file cc_buffer_mgr.h
5 * Buffer Manager
6 */
7
8#ifndef __CC_BUFFER_MGR_H__
9#define __CC_BUFFER_MGR_H__
10
11#include <crypto/algapi.h>
12
13#include "cc_driver.h"
14
15enum cc_req_dma_buf_type {
16 CC_DMA_BUF_NULL = 0,
17 CC_DMA_BUF_DLLI,
18 CC_DMA_BUF_MLLI
19};
20
21enum cc_sg_cpy_direct {
22 CC_SG_TO_BUF = 0,
23 CC_SG_FROM_BUF = 1
24};
25
26struct cc_mlli {
27 cc_sram_addr_t sram_addr;
28 unsigned int nents; //sg nents
29	unsigned int mlli_nents; //mlli nents may differ from the above
30};
31
32struct mlli_params {
33 struct dma_pool *curr_pool;
34 u8 *mlli_virt_addr;
35 dma_addr_t mlli_dma_addr;
36 u32 mlli_len;
37};
38
39int cc_buffer_mgr_init(struct cc_drvdata *drvdata);
40
41int cc_buffer_mgr_fini(struct cc_drvdata *drvdata);
42
43int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
44 unsigned int ivsize, unsigned int nbytes,
45 void *info, struct scatterlist *src,
46 struct scatterlist *dst, gfp_t flags);
47
48void cc_unmap_cipher_request(struct device *dev, void *ctx, unsigned int ivsize,
49 struct scatterlist *src, struct scatterlist *dst);
50
51int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req);
52
53void cc_unmap_aead_request(struct device *dev, struct aead_request *req);
54
55int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
56 struct scatterlist *src, unsigned int nbytes,
57 bool do_update, gfp_t flags);
58
59int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
60 struct scatterlist *src, unsigned int nbytes,
61 unsigned int block_size, gfp_t flags);
62
63void cc_unmap_hash_request(struct device *dev, void *ctx,
64 struct scatterlist *src, bool do_revert);
65
66void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
67 u32 to_skip, u32 end, enum cc_sg_cpy_direct direct);
68
69void cc_zero_sgl(struct scatterlist *sgl, u32 data_len);
70
71#endif /*__CC_BUFFER_MGR_H__*/
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
new file mode 100644
index 000000000000..df98f7afe645
--- /dev/null
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -0,0 +1,1150 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <crypto/algapi.h>
7#include <crypto/internal/skcipher.h>
8#include <crypto/des.h>
9#include <crypto/xts.h>
10#include <crypto/scatterwalk.h>
11
12#include "cc_driver.h"
13#include "cc_lli_defs.h"
14#include "cc_buffer_mgr.h"
15#include "cc_cipher.h"
16#include "cc_request_mgr.h"
17
18#define MAX_ABLKCIPHER_SEQ_LEN 6
19
20#define template_skcipher template_u.skcipher
21
22#define CC_MIN_AES_XTS_SIZE 0x10
23#define CC_MAX_AES_XTS_SIZE 0x2000
24struct cc_cipher_handle {
25 struct list_head alg_list;
26};
27
28struct cc_user_key_info {
29 u8 *key;
30 dma_addr_t key_dma_addr;
31};
32
33struct cc_hw_key_info {
34 enum cc_hw_crypto_key key1_slot;
35 enum cc_hw_crypto_key key2_slot;
36};
37
38struct cc_cipher_ctx {
39 struct cc_drvdata *drvdata;
40 int keylen;
41 int key_round_number;
42 int cipher_mode;
43 int flow_mode;
44 unsigned int flags;
45 struct cc_user_key_info user;
46 struct cc_hw_key_info hw;
47 struct crypto_shash *shash_tfm;
48};
49
50static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
51
52static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
53{
54 switch (ctx_p->flow_mode) {
55 case S_DIN_to_AES:
56 switch (size) {
57 case CC_AES_128_BIT_KEY_SIZE:
58 case CC_AES_192_BIT_KEY_SIZE:
59 if (ctx_p->cipher_mode != DRV_CIPHER_XTS &&
60 ctx_p->cipher_mode != DRV_CIPHER_ESSIV &&
61 ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)
62 return 0;
63 break;
64 case CC_AES_256_BIT_KEY_SIZE:
65 return 0;
66 case (CC_AES_192_BIT_KEY_SIZE * 2):
67 case (CC_AES_256_BIT_KEY_SIZE * 2):
68 if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
69 ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
70 ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)
71 return 0;
72 break;
73 default:
74 break;
75		}
		break;
76	case S_DIN_to_DES:
77 if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
78 return 0;
79 break;
80 default:
81 break;
82 }
83 return -EINVAL;
84}
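/*
 * Examples of what the checks above accept (illustrative): 128/192-bit
 * keys for the plain AES modes, 256-bit keys for any AES mode (for XTS
 * this is the combined 2 x 128-bit key), 384/512-bit combined keys only
 * for XTS/ESSIV/BITLOCKER, and 64-bit DES or 192-bit 3DES keys on the
 * DES path.
 */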
85
86static int validate_data_size(struct cc_cipher_ctx *ctx_p,
87 unsigned int size)
88{
89 switch (ctx_p->flow_mode) {
90 case S_DIN_to_AES:
91 switch (ctx_p->cipher_mode) {
92 case DRV_CIPHER_XTS:
93 if (size >= CC_MIN_AES_XTS_SIZE &&
94 size <= CC_MAX_AES_XTS_SIZE &&
95 IS_ALIGNED(size, AES_BLOCK_SIZE))
96 return 0;
97 break;
98 case DRV_CIPHER_CBC_CTS:
99 if (size >= AES_BLOCK_SIZE)
100 return 0;
101 break;
102 case DRV_CIPHER_OFB:
103 case DRV_CIPHER_CTR:
104 return 0;
105 case DRV_CIPHER_ECB:
106 case DRV_CIPHER_CBC:
107 case DRV_CIPHER_ESSIV:
108 case DRV_CIPHER_BITLOCKER:
109 if (IS_ALIGNED(size, AES_BLOCK_SIZE))
110 return 0;
111 break;
112 default:
113 break;
114 }
115 break;
116 case S_DIN_to_DES:
117 if (IS_ALIGNED(size, DES_BLOCK_SIZE))
118 return 0;
119 break;
120 default:
121 break;
122 }
123 return -EINVAL;
124}
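/*
 * For example (illustrative): an XTS request must be between 0x10 and
 * 0x2000 bytes and a multiple of the 16-byte AES block, CTS-CBC needs at
 * least one AES block, CTR/OFB take any length, ECB/CBC/ESSIV/BITLOCKER
 * must be block aligned, and DES data must align to 8 bytes.
 */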
125
126static int cc_cipher_init(struct crypto_tfm *tfm)
127{
128 struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
129 struct cc_crypto_alg *cc_alg =
130 container_of(tfm->__crt_alg, struct cc_crypto_alg,
131 skcipher_alg.base);
132 struct device *dev = drvdata_to_dev(cc_alg->drvdata);
133 unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
134 int rc = 0;
135
136 dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
137 crypto_tfm_alg_name(tfm));
138
139 crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
140 sizeof(struct cipher_req_ctx));
141
142 ctx_p->cipher_mode = cc_alg->cipher_mode;
143 ctx_p->flow_mode = cc_alg->flow_mode;
144 ctx_p->drvdata = cc_alg->drvdata;
145
146 /* Allocate key buffer, cache line aligned */
147 ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
148 if (!ctx_p->user.key)
149 return -ENOMEM;
150
151 dev_dbg(dev, "Allocated key buffer in context. key=@%p\n",
152 ctx_p->user.key);
153
154 /* Map key buffer */
155 ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key,
156 max_key_buf_size,
157 DMA_TO_DEVICE);
158 if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
159 dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
160 max_key_buf_size, ctx_p->user.key);
161 return -ENOMEM;
162 }
163 dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
164 max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
165
166 if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
167 /* Alloc hash tfm for essiv */
168 ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
169 if (IS_ERR(ctx_p->shash_tfm)) {
170 dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
171 return PTR_ERR(ctx_p->shash_tfm);
172 }
173 }
174
175 return rc;
176}
177
178static void cc_cipher_exit(struct crypto_tfm *tfm)
179{
180 struct crypto_alg *alg = tfm->__crt_alg;
181 struct cc_crypto_alg *cc_alg =
182 container_of(alg, struct cc_crypto_alg,
183 skcipher_alg.base);
184 unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
185 struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
186 struct device *dev = drvdata_to_dev(ctx_p->drvdata);
187
188 dev_dbg(dev, "Clearing context @%p for %s\n",
189 crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));
190
191 if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
192 /* Free hash tfm for essiv */
193 crypto_free_shash(ctx_p->shash_tfm);
194 ctx_p->shash_tfm = NULL;
195 }
196
197 /* Unmap key buffer */
198 dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
199 DMA_TO_DEVICE);
200 dev_dbg(dev, "Unmapped key buffer key_dma_addr=%pad\n",
201 &ctx_p->user.key_dma_addr);
202
203 /* Free key buffer in context */
204 kzfree(ctx_p->user.key);
205 dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key);
206}
207
208struct tdes_keys {
209 u8 key1[DES_KEY_SIZE];
210 u8 key2[DES_KEY_SIZE];
211 u8 key3[DES_KEY_SIZE];
212};
213
214static enum cc_hw_crypto_key hw_key_to_cc_hw_key(int slot_num)
215{
216 switch (slot_num) {
217 case 0:
218 return KFDE0_KEY;
219 case 1:
220 return KFDE1_KEY;
221 case 2:
222 return KFDE2_KEY;
223 case 3:
224 return KFDE3_KEY;
225 }
226 return END_OF_KEYS;
227}
228
229static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
230 unsigned int keylen)
231{
232 struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
233 struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
234 struct device *dev = drvdata_to_dev(ctx_p->drvdata);
235 u32 tmp[DES3_EDE_EXPKEY_WORDS];
236 struct cc_crypto_alg *cc_alg =
237 container_of(tfm->__crt_alg, struct cc_crypto_alg,
238 skcipher_alg.base);
239 unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
240
241 dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
242 ctx_p, crypto_tfm_alg_name(tfm), keylen);
243 dump_byte_array("key", (u8 *)key, keylen);
244
245 /* STAT_PHASE_0: Init and sanity checks */
246
247 if (validate_keys_sizes(ctx_p, keylen)) {
248 dev_err(dev, "Unsupported key size %d.\n", keylen);
249 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
250 return -EINVAL;
251 }
252
253 if (cc_is_hw_key(tfm)) {
254 /* setting HW key slots */
255 struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;
256
257 if (ctx_p->flow_mode != S_DIN_to_AES) {
258 dev_err(dev, "HW key not supported for non-AES flows\n");
259 return -EINVAL;
260 }
261
262 ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
263 if (ctx_p->hw.key1_slot == END_OF_KEYS) {
264 dev_err(dev, "Unsupported hw key1 number (%d)\n",
265 hki->hw_key1);
266 return -EINVAL;
267 }
268
269 if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
270 ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
271 ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
272 if (hki->hw_key1 == hki->hw_key2) {
273 dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
274 hki->hw_key1, hki->hw_key2);
275 return -EINVAL;
276 }
277 ctx_p->hw.key2_slot =
278 hw_key_to_cc_hw_key(hki->hw_key2);
279 if (ctx_p->hw.key2_slot == END_OF_KEYS) {
280 dev_err(dev, "Unsupported hw key2 number (%d)\n",
281 hki->hw_key2);
282 return -EINVAL;
283 }
284 }
285
286 ctx_p->keylen = keylen;
287 dev_dbg(dev, "cc_is_hw_key ret 0");
288
289 return 0;
290 }
291
292 /*
293 * Verify DES weak keys
294 * Note that we're dropping the expanded key since the
295 * HW does the expansion on its own.
296 */
297 if (ctx_p->flow_mode == S_DIN_to_DES) {
298 if (keylen == DES3_EDE_KEY_SIZE &&
299 __des3_ede_setkey(tmp, &tfm->crt_flags, key,
300 DES3_EDE_KEY_SIZE)) {
301 dev_dbg(dev, "weak 3DES key");
302 return -EINVAL;
303 } else if (!des_ekey(tmp, key) &&
304 (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
305 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
306 dev_dbg(dev, "weak DES key");
307 return -EINVAL;
308 }
309 }
310
311 if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
312 xts_check_key(tfm, key, keylen)) {
313 dev_dbg(dev, "weak XTS key");
314 return -EINVAL;
315 }
316
317 /* STAT_PHASE_1: Copy key to ctx */
318 dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
319 max_key_buf_size, DMA_TO_DEVICE);
320
321 memcpy(ctx_p->user.key, key, keylen);
322 if (keylen == 24)
323 memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
324
325 if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
326 /* sha256 for key2 - use sw implementation */
327 int key_len = keylen >> 1;
328 int err;
329
330 SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
331
332 desc->tfm = ctx_p->shash_tfm;
333
334 err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
335 ctx_p->user.key + key_len);
336 if (err) {
337 dev_err(dev, "Failed to hash ESSIV key.\n");
338 return err;
339 }
340 }
341 dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
342 max_key_buf_size, DMA_TO_DEVICE);
343 ctx_p->keylen = keylen;
344
345 dev_dbg(dev, "return safely");
346 return 0;
347}
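/*
 * ESSIV key handling sketch (illustrative): for a 512-bit combined key,
 * the user-supplied lower half is kept as the cipher key and the upper
 * half is overwritten with SHA-256 of that lower half, i.e.
 *
 *   key[0..31]  : user key (unchanged)
 *   key[32..63] : sha256(key[0..31])
 *
 * so whatever the caller put in the upper half is discarded in favour of
 * the derived tweak key.
 */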
348
349static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
350 struct cipher_req_ctx *req_ctx,
351 unsigned int ivsize, unsigned int nbytes,
352 struct cc_hw_desc desc[],
353 unsigned int *seq_size)
354{
355 struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
356 struct device *dev = drvdata_to_dev(ctx_p->drvdata);
357 int cipher_mode = ctx_p->cipher_mode;
358 int flow_mode = ctx_p->flow_mode;
359 int direction = req_ctx->gen_ctx.op_type;
360 dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
361 unsigned int key_len = ctx_p->keylen;
362 dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
363 unsigned int du_size = nbytes;
364
365 struct cc_crypto_alg *cc_alg =
366 container_of(tfm->__crt_alg, struct cc_crypto_alg,
367 skcipher_alg.base);
368
369 if (cc_alg->data_unit)
370 du_size = cc_alg->data_unit;
371
372 switch (cipher_mode) {
373 case DRV_CIPHER_CBC:
374 case DRV_CIPHER_CBC_CTS:
375 case DRV_CIPHER_CTR:
376 case DRV_CIPHER_OFB:
377 /* Load cipher state */
378 hw_desc_init(&desc[*seq_size]);
379 set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize,
380 NS_BIT);
381 set_cipher_config0(&desc[*seq_size], direction);
382 set_flow_mode(&desc[*seq_size], flow_mode);
383 set_cipher_mode(&desc[*seq_size], cipher_mode);
384 if (cipher_mode == DRV_CIPHER_CTR ||
385 cipher_mode == DRV_CIPHER_OFB) {
386 set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
387 } else {
388 set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
389 }
390 (*seq_size)++;
391 /*FALLTHROUGH*/
392 case DRV_CIPHER_ECB:
393 /* Load key */
394 hw_desc_init(&desc[*seq_size]);
395 set_cipher_mode(&desc[*seq_size], cipher_mode);
396 set_cipher_config0(&desc[*seq_size], direction);
397 if (flow_mode == S_DIN_to_AES) {
398 if (cc_is_hw_key(tfm)) {
399 set_hw_crypto_key(&desc[*seq_size],
400 ctx_p->hw.key1_slot);
401 } else {
402 set_din_type(&desc[*seq_size], DMA_DLLI,
403 key_dma_addr, ((key_len == 24) ?
404 AES_MAX_KEY_SIZE :
405 key_len), NS_BIT);
406 }
407 set_key_size_aes(&desc[*seq_size], key_len);
408 } else {
409 /*des*/
410 set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
411 key_len, NS_BIT);
412 set_key_size_des(&desc[*seq_size], key_len);
413 }
414 set_flow_mode(&desc[*seq_size], flow_mode);
415 set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
416 (*seq_size)++;
417 break;
418 case DRV_CIPHER_XTS:
419 case DRV_CIPHER_ESSIV:
420 case DRV_CIPHER_BITLOCKER:
421 /* Load AES key */
422 hw_desc_init(&desc[*seq_size]);
423 set_cipher_mode(&desc[*seq_size], cipher_mode);
424 set_cipher_config0(&desc[*seq_size], direction);
425 if (cc_is_hw_key(tfm)) {
426 set_hw_crypto_key(&desc[*seq_size],
427 ctx_p->hw.key1_slot);
428 } else {
429 set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
430 (key_len / 2), NS_BIT);
431 }
432 set_key_size_aes(&desc[*seq_size], (key_len / 2));
433 set_flow_mode(&desc[*seq_size], flow_mode);
434 set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
435 (*seq_size)++;
436
437 /* load XEX key */
438 hw_desc_init(&desc[*seq_size]);
439 set_cipher_mode(&desc[*seq_size], cipher_mode);
440 set_cipher_config0(&desc[*seq_size], direction);
441 if (cc_is_hw_key(tfm)) {
442 set_hw_crypto_key(&desc[*seq_size],
443 ctx_p->hw.key2_slot);
444 } else {
445 set_din_type(&desc[*seq_size], DMA_DLLI,
446 (key_dma_addr + (key_len / 2)),
447 (key_len / 2), NS_BIT);
448 }
449 set_xex_data_unit_size(&desc[*seq_size], du_size);
450 set_flow_mode(&desc[*seq_size], S_DIN_to_AES2);
451 set_key_size_aes(&desc[*seq_size], (key_len / 2));
452 set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
453 (*seq_size)++;
454
455 /* Set state */
456 hw_desc_init(&desc[*seq_size]);
457 set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
458 set_cipher_mode(&desc[*seq_size], cipher_mode);
459 set_cipher_config0(&desc[*seq_size], direction);
460 set_key_size_aes(&desc[*seq_size], (key_len / 2));
461 set_flow_mode(&desc[*seq_size], flow_mode);
462 set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr,
463 CC_AES_BLOCK_SIZE, NS_BIT);
464 (*seq_size)++;
465 break;
466 default:
467 dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
468 }
469}
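/*
 * Descriptor-count sketch (illustrative): the CBC/CTR/OFB family emits a
 * state-load plus a key-load descriptor, plain ECB emits only the
 * key-load, and the XTS/ESSIV/BITLOCKER family emits three descriptors
 * (AES key, XEX key, tweak state); together with the data descriptors
 * added below this stays within MAX_ABLKCIPHER_SEQ_LEN.
 */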
470
471static void cc_setup_cipher_data(struct crypto_tfm *tfm,
472 struct cipher_req_ctx *req_ctx,
473 struct scatterlist *dst,
474 struct scatterlist *src, unsigned int nbytes,
475 void *areq, struct cc_hw_desc desc[],
476 unsigned int *seq_size)
477{
478 struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
479 struct device *dev = drvdata_to_dev(ctx_p->drvdata);
480 unsigned int flow_mode = ctx_p->flow_mode;
481
482 switch (ctx_p->flow_mode) {
483 case S_DIN_to_AES:
484 flow_mode = DIN_AES_DOUT;
485 break;
486 case S_DIN_to_DES:
487 flow_mode = DIN_DES_DOUT;
488 break;
489 default:
490 dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
491 return;
492 }
493 /* Process */
494 if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
495 dev_dbg(dev, " data params addr %pad length 0x%X\n",
496 &sg_dma_address(src), nbytes);
497 dev_dbg(dev, " data params addr %pad length 0x%X\n",
498 &sg_dma_address(dst), nbytes);
499 hw_desc_init(&desc[*seq_size]);
500 set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
501 nbytes, NS_BIT);
502 set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
503 nbytes, NS_BIT, (!areq ? 0 : 1));
504 if (areq)
505 set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
506
507 set_flow_mode(&desc[*seq_size], flow_mode);
508 (*seq_size)++;
509 } else {
510 /* bypass */
511 dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
512 &req_ctx->mlli_params.mlli_dma_addr,
513 req_ctx->mlli_params.mlli_len,
514 (unsigned int)ctx_p->drvdata->mlli_sram_addr);
515 hw_desc_init(&desc[*seq_size]);
516 set_din_type(&desc[*seq_size], DMA_DLLI,
517 req_ctx->mlli_params.mlli_dma_addr,
518 req_ctx->mlli_params.mlli_len, NS_BIT);
519 set_dout_sram(&desc[*seq_size],
520 ctx_p->drvdata->mlli_sram_addr,
521 req_ctx->mlli_params.mlli_len);
522 set_flow_mode(&desc[*seq_size], BYPASS);
523 (*seq_size)++;
524
525 hw_desc_init(&desc[*seq_size]);
526 set_din_type(&desc[*seq_size], DMA_MLLI,
527 ctx_p->drvdata->mlli_sram_addr,
528 req_ctx->in_mlli_nents, NS_BIT);
529 if (req_ctx->out_nents == 0) {
530 dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
531 (unsigned int)ctx_p->drvdata->mlli_sram_addr,
532 (unsigned int)ctx_p->drvdata->mlli_sram_addr);
533 set_dout_mlli(&desc[*seq_size],
534 ctx_p->drvdata->mlli_sram_addr,
535 req_ctx->in_mlli_nents, NS_BIT,
536 (!areq ? 0 : 1));
537 } else {
538 dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
539 (unsigned int)ctx_p->drvdata->mlli_sram_addr,
540 (unsigned int)ctx_p->drvdata->mlli_sram_addr +
541 (u32)LLI_ENTRY_BYTE_SIZE * req_ctx->in_nents);
542 set_dout_mlli(&desc[*seq_size],
543 (ctx_p->drvdata->mlli_sram_addr +
544 (LLI_ENTRY_BYTE_SIZE *
545 req_ctx->in_mlli_nents)),
546 req_ctx->out_mlli_nents, NS_BIT,
547 (!areq ? 0 : 1));
548 }
549 if (areq)
550 set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
551
552 set_flow_mode(&desc[*seq_size], flow_mode);
553 (*seq_size)++;
554 }
555}
556
557static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
558{
559 struct skcipher_request *req = (struct skcipher_request *)cc_req;
560 struct scatterlist *dst = req->dst;
561 struct scatterlist *src = req->src;
562 struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
563 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
564 unsigned int ivsize = crypto_skcipher_ivsize(tfm);
565
566 cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
567 kzfree(req_ctx->iv);
568
569 /*
570 * The crypto API expects us to set the req->iv to the last
571 * ciphertext block. For encrypt, simply copy from the result.
572 * For decrypt, we must copy from a saved buffer since this
573 * could be an in-place decryption operation and the src is
574 * lost by this point.
575 */
576 if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
577 memcpy(req->iv, req_ctx->backup_info, ivsize);
578 kzfree(req_ctx->backup_info);
579 } else if (!err) {
580 scatterwalk_map_and_copy(req->iv, req->dst,
581 (req->cryptlen - ivsize),
582 ivsize, 0);
583 }
584
585 skcipher_request_complete(req, err);
586}
587
588static int cc_cipher_process(struct skcipher_request *req,
589 enum drv_crypto_direction direction)
590{
591 struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
592 struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
593 struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
594 unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
595 struct scatterlist *dst = req->dst;
596 struct scatterlist *src = req->src;
597 unsigned int nbytes = req->cryptlen;
598 void *iv = req->iv;
599 struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
600 struct device *dev = drvdata_to_dev(ctx_p->drvdata);
601 struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
602 struct cc_crypto_req cc_req = {};
603 int rc, cts_restore_flag = 0;
604 unsigned int seq_len = 0;
605 gfp_t flags = cc_gfp_flags(&req->base);
606
607 dev_dbg(dev, "%s req=%p iv=%p nbytes=%d\n",
608 ((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
609 "Encrypt" : "Decrypt"), req, iv, nbytes);
610
611 /* STAT_PHASE_0: Init and sanity checks */
612
613 /* TODO: check data length according to mode */
614 if (validate_data_size(ctx_p, nbytes)) {
615 dev_err(dev, "Unsupported data size %d.\n", nbytes);
616 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
617 rc = -EINVAL;
618 goto exit_process;
619 }
620 if (nbytes == 0) {
621 /* No data to process is valid */
622 rc = 0;
623 goto exit_process;
624 }
625
626	/* The IV we are handed may be allocated from the stack, so
627 * we must copy it to a DMAable buffer before use.
628 */
629 req_ctx->iv = kmemdup(iv, ivsize, flags);
630 if (!req_ctx->iv) {
631 rc = -ENOMEM;
632 goto exit_process;
633 }
634
635	/* For CTS, when the data size is aligned to 16 bytes, use CBC mode */
636 if (((nbytes % AES_BLOCK_SIZE) == 0) &&
637 ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS) {
638 ctx_p->cipher_mode = DRV_CIPHER_CBC;
639 cts_restore_flag = 1;
640 }
641
642 /* Setup request structure */
643 cc_req.user_cb = (void *)cc_cipher_complete;
644 cc_req.user_arg = (void *)req;
645
646#ifdef ENABLE_CYCLE_COUNT
647 cc_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
648 STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
649
650#endif
651
652 /* Setup request context */
653 req_ctx->gen_ctx.op_type = direction;
654
655 /* STAT_PHASE_1: Map buffers */
656
657 rc = cc_map_cipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes,
658 req_ctx->iv, src, dst, flags);
659 if (rc) {
660 dev_err(dev, "map_request() failed\n");
661 goto exit_process;
662 }
663
664 /* STAT_PHASE_2: Create sequence */
665
666 /* Setup processing */
667 cc_setup_cipher_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
668 /* Data processing */
669 cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
670 &seq_len);
671
672 /* do we need to generate IV? */
673 if (req_ctx->is_giv) {
674 cc_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
675 cc_req.ivgen_dma_addr_len = 1;
676 /* set the IV size (8/16 B long)*/
677 cc_req.ivgen_size = ivsize;
678 }
679
680 /* STAT_PHASE_3: Lock HW and push sequence */
681
682 rc = cc_send_request(ctx_p->drvdata, &cc_req, desc, seq_len,
683 &req->base);
684 if (rc != -EINPROGRESS && rc != -EBUSY) {
685 /* Failed to send the request or request completed
686 * synchronously
687 */
688 cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
689 }
690
691exit_process:
692 if (cts_restore_flag)
693 ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
694
695 if (rc != -EINPROGRESS && rc != -EBUSY) {
696 kzfree(req_ctx->backup_info);
697 kzfree(req_ctx->iv);
698 }
699
700 return rc;
701}
702
703static int cc_cipher_encrypt(struct skcipher_request *req)
704{
705 struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
706
707 req_ctx->is_giv = false;
708 req_ctx->backup_info = NULL;
709
710 return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
711}
712
713static int cc_cipher_decrypt(struct skcipher_request *req)
714{
715 struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
716 struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
717 unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
718 gfp_t flags = cc_gfp_flags(&req->base);
719
720 /*
721 * Allocate and save the last IV sized bytes of the source, which will
722 * be lost in case of in-place decryption and might be needed for CTS.
723 */
724 req_ctx->backup_info = kmalloc(ivsize, flags);
725 if (!req_ctx->backup_info)
726 return -ENOMEM;
727
728 scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
729 (req->cryptlen - ivsize), ivsize, 0);
730 req_ctx->is_giv = false;
731
732 return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
733}
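/*
 * Illustrative note: backup_info saves the last ciphertext block before
 * the HW may overwrite it in place, so that cc_cipher_complete() can
 * still return it as the chained IV; e.g. for a 48-byte CBC decrypt the
 * bytes at offset 32..47 of req->src are copied aside here and copied
 * into req->iv on completion.
 */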
734
735/* Block cipher alg */
736static const struct cc_alg_template skcipher_algs[] = {
737 {
738 .name = "xts(aes)",
739 .driver_name = "xts-aes-ccree",
740 .blocksize = AES_BLOCK_SIZE,
741 .template_skcipher = {
742 .setkey = cc_cipher_setkey,
743 .encrypt = cc_cipher_encrypt,
744 .decrypt = cc_cipher_decrypt,
745 .min_keysize = AES_MIN_KEY_SIZE * 2,
746 .max_keysize = AES_MAX_KEY_SIZE * 2,
747 .ivsize = AES_BLOCK_SIZE,
748 },
749 .cipher_mode = DRV_CIPHER_XTS,
750 .flow_mode = S_DIN_to_AES,
751 .min_hw_rev = CC_HW_REV_630,
752 },
753 {
754 .name = "xts512(aes)",
755 .driver_name = "xts-aes-du512-ccree",
756 .blocksize = AES_BLOCK_SIZE,
757 .template_skcipher = {
758 .setkey = cc_cipher_setkey,
759 .encrypt = cc_cipher_encrypt,
760 .decrypt = cc_cipher_decrypt,
761 .min_keysize = AES_MIN_KEY_SIZE * 2,
762 .max_keysize = AES_MAX_KEY_SIZE * 2,
763 .ivsize = AES_BLOCK_SIZE,
764 },
765 .cipher_mode = DRV_CIPHER_XTS,
766 .flow_mode = S_DIN_to_AES,
767 .data_unit = 512,
768 .min_hw_rev = CC_HW_REV_712,
769 },
770 {
771 .name = "xts4096(aes)",
772 .driver_name = "xts-aes-du4096-ccree",
773 .blocksize = AES_BLOCK_SIZE,
774 .template_skcipher = {
775 .setkey = cc_cipher_setkey,
776 .encrypt = cc_cipher_encrypt,
777 .decrypt = cc_cipher_decrypt,
778 .min_keysize = AES_MIN_KEY_SIZE * 2,
779 .max_keysize = AES_MAX_KEY_SIZE * 2,
780 .ivsize = AES_BLOCK_SIZE,
781 },
782 .cipher_mode = DRV_CIPHER_XTS,
783 .flow_mode = S_DIN_to_AES,
784 .data_unit = 4096,
785 .min_hw_rev = CC_HW_REV_712,
786 },
787 {
788 .name = "essiv(aes)",
789 .driver_name = "essiv-aes-ccree",
790 .blocksize = AES_BLOCK_SIZE,
791 .template_skcipher = {
792 .setkey = cc_cipher_setkey,
793 .encrypt = cc_cipher_encrypt,
794 .decrypt = cc_cipher_decrypt,
795 .min_keysize = AES_MIN_KEY_SIZE * 2,
796 .max_keysize = AES_MAX_KEY_SIZE * 2,
797 .ivsize = AES_BLOCK_SIZE,
798 },
799 .cipher_mode = DRV_CIPHER_ESSIV,
800 .flow_mode = S_DIN_to_AES,
801 .min_hw_rev = CC_HW_REV_712,
802 },
803 {
804 .name = "essiv512(aes)",
805 .driver_name = "essiv-aes-du512-ccree",
806 .blocksize = AES_BLOCK_SIZE,
807 .template_skcipher = {
808 .setkey = cc_cipher_setkey,
809 .encrypt = cc_cipher_encrypt,
810 .decrypt = cc_cipher_decrypt,
811 .min_keysize = AES_MIN_KEY_SIZE * 2,
812 .max_keysize = AES_MAX_KEY_SIZE * 2,
813 .ivsize = AES_BLOCK_SIZE,
814 },
815 .cipher_mode = DRV_CIPHER_ESSIV,
816 .flow_mode = S_DIN_to_AES,
817 .data_unit = 512,
818 .min_hw_rev = CC_HW_REV_712,
819 },
820 {
821 .name = "essiv4096(aes)",
822 .driver_name = "essiv-aes-du4096-ccree",
823 .blocksize = AES_BLOCK_SIZE,
824 .template_skcipher = {
825 .setkey = cc_cipher_setkey,
826 .encrypt = cc_cipher_encrypt,
827 .decrypt = cc_cipher_decrypt,
828 .min_keysize = AES_MIN_KEY_SIZE * 2,
829 .max_keysize = AES_MAX_KEY_SIZE * 2,
830 .ivsize = AES_BLOCK_SIZE,
831 },
832 .cipher_mode = DRV_CIPHER_ESSIV,
833 .flow_mode = S_DIN_to_AES,
834 .data_unit = 4096,
835 .min_hw_rev = CC_HW_REV_712,
836 },
837 {
838 .name = "bitlocker(aes)",
839 .driver_name = "bitlocker-aes-ccree",
840 .blocksize = AES_BLOCK_SIZE,
841 .template_skcipher = {
842 .setkey = cc_cipher_setkey,
843 .encrypt = cc_cipher_encrypt,
844 .decrypt = cc_cipher_decrypt,
845 .min_keysize = AES_MIN_KEY_SIZE * 2,
846 .max_keysize = AES_MAX_KEY_SIZE * 2,
847 .ivsize = AES_BLOCK_SIZE,
848 },
849 .cipher_mode = DRV_CIPHER_BITLOCKER,
850 .flow_mode = S_DIN_to_AES,
851 .min_hw_rev = CC_HW_REV_712,
852 },
853 {
854 .name = "bitlocker512(aes)",
855 .driver_name = "bitlocker-aes-du512-ccree",
856 .blocksize = AES_BLOCK_SIZE,
857 .template_skcipher = {
858 .setkey = cc_cipher_setkey,
859 .encrypt = cc_cipher_encrypt,
860 .decrypt = cc_cipher_decrypt,
861 .min_keysize = AES_MIN_KEY_SIZE * 2,
862 .max_keysize = AES_MAX_KEY_SIZE * 2,
863 .ivsize = AES_BLOCK_SIZE,
864 },
865 .cipher_mode = DRV_CIPHER_BITLOCKER,
866 .flow_mode = S_DIN_to_AES,
867 .data_unit = 512,
868 .min_hw_rev = CC_HW_REV_712,
869 },
870 {
871 .name = "bitlocker4096(aes)",
872 .driver_name = "bitlocker-aes-du4096-ccree",
873 .blocksize = AES_BLOCK_SIZE,
874 .template_skcipher = {
875 .setkey = cc_cipher_setkey,
876 .encrypt = cc_cipher_encrypt,
877 .decrypt = cc_cipher_decrypt,
878 .min_keysize = AES_MIN_KEY_SIZE * 2,
879 .max_keysize = AES_MAX_KEY_SIZE * 2,
880 .ivsize = AES_BLOCK_SIZE,
881 },
882 .cipher_mode = DRV_CIPHER_BITLOCKER,
883 .flow_mode = S_DIN_to_AES,
884 .data_unit = 4096,
885 .min_hw_rev = CC_HW_REV_712,
886 },
887 {
888 .name = "ecb(aes)",
889 .driver_name = "ecb-aes-ccree",
890 .blocksize = AES_BLOCK_SIZE,
891 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
892 .template_skcipher = {
893 .setkey = cc_cipher_setkey,
894 .encrypt = cc_cipher_encrypt,
895 .decrypt = cc_cipher_decrypt,
896 .min_keysize = AES_MIN_KEY_SIZE,
897 .max_keysize = AES_MAX_KEY_SIZE,
898 .ivsize = 0,
899 },
900 .cipher_mode = DRV_CIPHER_ECB,
901 .flow_mode = S_DIN_to_AES,
902 .min_hw_rev = CC_HW_REV_630,
903 },
904 {
905 .name = "cbc(aes)",
906 .driver_name = "cbc-aes-ccree",
907 .blocksize = AES_BLOCK_SIZE,
908 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
909 .template_skcipher = {
910 .setkey = cc_cipher_setkey,
911 .encrypt = cc_cipher_encrypt,
912 .decrypt = cc_cipher_decrypt,
913 .min_keysize = AES_MIN_KEY_SIZE,
914 .max_keysize = AES_MAX_KEY_SIZE,
915 .ivsize = AES_BLOCK_SIZE,
916 },
917 .cipher_mode = DRV_CIPHER_CBC,
918 .flow_mode = S_DIN_to_AES,
919 .min_hw_rev = CC_HW_REV_630,
920 },
921 {
922 .name = "ofb(aes)",
923 .driver_name = "ofb-aes-ccree",
924 .blocksize = AES_BLOCK_SIZE,
925 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
926 .template_skcipher = {
927 .setkey = cc_cipher_setkey,
928 .encrypt = cc_cipher_encrypt,
929 .decrypt = cc_cipher_decrypt,
930 .min_keysize = AES_MIN_KEY_SIZE,
931 .max_keysize = AES_MAX_KEY_SIZE,
932 .ivsize = AES_BLOCK_SIZE,
933 },
934 .cipher_mode = DRV_CIPHER_OFB,
935 .flow_mode = S_DIN_to_AES,
936 .min_hw_rev = CC_HW_REV_630,
937 },
938 {
939 .name = "cts1(cbc(aes))",
940 .driver_name = "cts1-cbc-aes-ccree",
941 .blocksize = AES_BLOCK_SIZE,
942 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
943 .template_skcipher = {
944 .setkey = cc_cipher_setkey,
945 .encrypt = cc_cipher_encrypt,
946 .decrypt = cc_cipher_decrypt,
947 .min_keysize = AES_MIN_KEY_SIZE,
948 .max_keysize = AES_MAX_KEY_SIZE,
949 .ivsize = AES_BLOCK_SIZE,
950 },
951 .cipher_mode = DRV_CIPHER_CBC_CTS,
952 .flow_mode = S_DIN_to_AES,
953 .min_hw_rev = CC_HW_REV_630,
954 },
955 {
956 .name = "ctr(aes)",
957 .driver_name = "ctr-aes-ccree",
958 .blocksize = 1,
959 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
960 .template_skcipher = {
961 .setkey = cc_cipher_setkey,
962 .encrypt = cc_cipher_encrypt,
963 .decrypt = cc_cipher_decrypt,
964 .min_keysize = AES_MIN_KEY_SIZE,
965 .max_keysize = AES_MAX_KEY_SIZE,
966 .ivsize = AES_BLOCK_SIZE,
967 },
968 .cipher_mode = DRV_CIPHER_CTR,
969 .flow_mode = S_DIN_to_AES,
970 .min_hw_rev = CC_HW_REV_630,
971 },
972 {
973 .name = "cbc(des3_ede)",
974 .driver_name = "cbc-3des-ccree",
975 .blocksize = DES3_EDE_BLOCK_SIZE,
976 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
977 .template_skcipher = {
978 .setkey = cc_cipher_setkey,
979 .encrypt = cc_cipher_encrypt,
980 .decrypt = cc_cipher_decrypt,
981 .min_keysize = DES3_EDE_KEY_SIZE,
982 .max_keysize = DES3_EDE_KEY_SIZE,
983 .ivsize = DES3_EDE_BLOCK_SIZE,
984 },
985 .cipher_mode = DRV_CIPHER_CBC,
986 .flow_mode = S_DIN_to_DES,
987 .min_hw_rev = CC_HW_REV_630,
988 },
989 {
990 .name = "ecb(des3_ede)",
991 .driver_name = "ecb-3des-ccree",
992 .blocksize = DES3_EDE_BLOCK_SIZE,
993 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
994 .template_skcipher = {
995 .setkey = cc_cipher_setkey,
996 .encrypt = cc_cipher_encrypt,
997 .decrypt = cc_cipher_decrypt,
998 .min_keysize = DES3_EDE_KEY_SIZE,
999 .max_keysize = DES3_EDE_KEY_SIZE,
1000 .ivsize = 0,
1001 },
1002 .cipher_mode = DRV_CIPHER_ECB,
1003 .flow_mode = S_DIN_to_DES,
1004 .min_hw_rev = CC_HW_REV_630,
1005 },
1006 {
1007 .name = "cbc(des)",
1008 .driver_name = "cbc-des-ccree",
1009 .blocksize = DES_BLOCK_SIZE,
1010 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1011 .template_skcipher = {
1012 .setkey = cc_cipher_setkey,
1013 .encrypt = cc_cipher_encrypt,
1014 .decrypt = cc_cipher_decrypt,
1015 .min_keysize = DES_KEY_SIZE,
1016 .max_keysize = DES_KEY_SIZE,
1017 .ivsize = DES_BLOCK_SIZE,
1018 },
1019 .cipher_mode = DRV_CIPHER_CBC,
1020 .flow_mode = S_DIN_to_DES,
1021 .min_hw_rev = CC_HW_REV_630,
1022 },
1023 {
1024 .name = "ecb(des)",
1025 .driver_name = "ecb-des-ccree",
1026 .blocksize = DES_BLOCK_SIZE,
1027 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1028 .template_skcipher = {
1029 .setkey = cc_cipher_setkey,
1030 .encrypt = cc_cipher_encrypt,
1031 .decrypt = cc_cipher_decrypt,
1032 .min_keysize = DES_KEY_SIZE,
1033 .max_keysize = DES_KEY_SIZE,
1034 .ivsize = 0,
1035 },
1036 .cipher_mode = DRV_CIPHER_ECB,
1037 .flow_mode = S_DIN_to_DES,
1038 .min_hw_rev = CC_HW_REV_630,
1039 },
1040};
1041
1042static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
1043 struct device *dev)
1044{
1045 struct cc_crypto_alg *t_alg;
1046 struct skcipher_alg *alg;
1047
1048 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
1049 if (!t_alg)
1050 return ERR_PTR(-ENOMEM);
1051
1052 alg = &t_alg->skcipher_alg;
1053
1054 memcpy(alg, &tmpl->template_skcipher, sizeof(*alg));
1055
1056 snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1057 snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1058 tmpl->driver_name);
1059 alg->base.cra_module = THIS_MODULE;
1060 alg->base.cra_priority = CC_CRA_PRIO;
1061 alg->base.cra_blocksize = tmpl->blocksize;
1062 alg->base.cra_alignmask = 0;
1063 alg->base.cra_ctxsize = sizeof(struct cc_cipher_ctx);
1064
1065 alg->base.cra_init = cc_cipher_init;
1066 alg->base.cra_exit = cc_cipher_exit;
1067 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
1068 CRYPTO_ALG_TYPE_SKCIPHER;
1069
1070 t_alg->cipher_mode = tmpl->cipher_mode;
1071 t_alg->flow_mode = tmpl->flow_mode;
1072 t_alg->data_unit = tmpl->data_unit;
1073
1074 return t_alg;
1075}
1076
1077int cc_cipher_free(struct cc_drvdata *drvdata)
1078{
1079 struct cc_crypto_alg *t_alg, *n;
1080 struct cc_cipher_handle *cipher_handle = drvdata->cipher_handle;
1081
1082 if (cipher_handle) {
1083 /* Remove registered algs */
1084 list_for_each_entry_safe(t_alg, n, &cipher_handle->alg_list,
1085 entry) {
1086 crypto_unregister_skcipher(&t_alg->skcipher_alg);
1087 list_del(&t_alg->entry);
1088 kfree(t_alg);
1089 }
1090 kfree(cipher_handle);
1091 drvdata->cipher_handle = NULL;
1092 }
1093 return 0;
1094}
1095
1096int cc_cipher_alloc(struct cc_drvdata *drvdata)
1097{
1098 struct cc_cipher_handle *cipher_handle;
1099 struct cc_crypto_alg *t_alg;
1100 struct device *dev = drvdata_to_dev(drvdata);
1101 int rc = -ENOMEM;
1102 int alg;
1103
1104 cipher_handle = kmalloc(sizeof(*cipher_handle), GFP_KERNEL);
1105 if (!cipher_handle)
1106 return -ENOMEM;
1107
1108 INIT_LIST_HEAD(&cipher_handle->alg_list);
1109 drvdata->cipher_handle = cipher_handle;
1110
1111 /* Linux crypto */
1112 dev_dbg(dev, "Number of algorithms = %zu\n",
1113 ARRAY_SIZE(skcipher_algs));
1114 for (alg = 0; alg < ARRAY_SIZE(skcipher_algs); alg++) {
1115 if (skcipher_algs[alg].min_hw_rev > drvdata->hw_rev)
1116 continue;
1117
1118 dev_dbg(dev, "creating %s\n", skcipher_algs[alg].driver_name);
1119 t_alg = cc_create_alg(&skcipher_algs[alg], dev);
1120 if (IS_ERR(t_alg)) {
1121 rc = PTR_ERR(t_alg);
1122 dev_err(dev, "%s alg allocation failed\n",
1123 skcipher_algs[alg].driver_name);
1124 goto fail0;
1125 }
1126 t_alg->drvdata = drvdata;
1127
1128 dev_dbg(dev, "registering %s\n",
1129 skcipher_algs[alg].driver_name);
1130 rc = crypto_register_skcipher(&t_alg->skcipher_alg);
1131 dev_dbg(dev, "%s alg registration rc = %x\n",
1132 t_alg->skcipher_alg.base.cra_driver_name, rc);
1133 if (rc) {
1134 dev_err(dev, "%s alg registration failed\n",
1135 t_alg->skcipher_alg.base.cra_driver_name);
1136 kfree(t_alg);
1137 goto fail0;
1138 } else {
1139 list_add_tail(&t_alg->entry,
1140 &cipher_handle->alg_list);
1141 dev_dbg(dev, "Registered %s\n",
1142 t_alg->skcipher_alg.base.cra_driver_name);
1143 }
1144 }
1145 return 0;
1146
1147fail0:
1148 cc_cipher_free(drvdata);
1149 return rc;
1150}
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
new file mode 100644
index 000000000000..2a2a6f46c515
--- /dev/null
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -0,0 +1,59 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4/* \file cc_cipher.h
5 * ARM CryptoCell Cipher Crypto API
6 */
7
8#ifndef __CC_CIPHER_H__
9#define __CC_CIPHER_H__
10
11#include <linux/kernel.h>
12#include <crypto/algapi.h>
13#include "cc_driver.h"
14#include "cc_buffer_mgr.h"
15
16/* Crypto cipher flags */
17#define CC_CRYPTO_CIPHER_KEY_KFDE0 BIT(0)
18#define CC_CRYPTO_CIPHER_KEY_KFDE1 BIT(1)
19#define CC_CRYPTO_CIPHER_KEY_KFDE2 BIT(2)
20#define CC_CRYPTO_CIPHER_KEY_KFDE3 BIT(3)
21#define CC_CRYPTO_CIPHER_DU_SIZE_512B BIT(4)
22
23#define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | \
24 CC_CRYPTO_CIPHER_KEY_KFDE1 | \
25 CC_CRYPTO_CIPHER_KEY_KFDE2 | \
26 CC_CRYPTO_CIPHER_KEY_KFDE3)
27
28struct cipher_req_ctx {
29 struct async_gen_req_ctx gen_ctx;
30 enum cc_req_dma_buf_type dma_buf_type;
31 u32 in_nents;
32 u32 in_mlli_nents;
33 u32 out_nents;
34 u32 out_mlli_nents;
35 u8 *backup_info; /*store iv for generated IV flow*/
36 u8 *iv;
37 bool is_giv;
38 struct mlli_params mlli_params;
39};
40
41int cc_cipher_alloc(struct cc_drvdata *drvdata);
42
43int cc_cipher_free(struct cc_drvdata *drvdata);
44
45struct arm_hw_key_info {
46 int hw_key1;
47 int hw_key2;
48};
49
50/*
51 * This is a stub function that will be replaced when we
52 * implement secure keys
53 */
54static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
55{
56 return false;
57}
58
59#endif /*__CC_CIPHER_H__*/
diff --git a/drivers/crypto/ccree/cc_crypto_ctx.h b/drivers/crypto/ccree/cc_crypto_ctx.h
new file mode 100644
index 000000000000..e032544f4e31
--- /dev/null
+++ b/drivers/crypto/ccree/cc_crypto_ctx.h
@@ -0,0 +1,133 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#ifndef _CC_CRYPTO_CTX_H_
5#define _CC_CRYPTO_CTX_H_
6
7#include <linux/types.h>
8
9#define CC_DRV_DES_IV_SIZE 8
10#define CC_DRV_DES_BLOCK_SIZE 8
11
12#define CC_DRV_DES_ONE_KEY_SIZE 8
13#define CC_DRV_DES_DOUBLE_KEY_SIZE 16
14#define CC_DRV_DES_TRIPLE_KEY_SIZE 24
15#define CC_DRV_DES_KEY_SIZE_MAX CC_DRV_DES_TRIPLE_KEY_SIZE
16
17#define CC_AES_IV_SIZE 16
18#define CC_AES_IV_SIZE_WORDS (CC_AES_IV_SIZE >> 2)
19
20#define CC_AES_BLOCK_SIZE 16
21#define CC_AES_BLOCK_SIZE_WORDS 4
22
23#define CC_AES_128_BIT_KEY_SIZE 16
24#define CC_AES_128_BIT_KEY_SIZE_WORDS (CC_AES_128_BIT_KEY_SIZE >> 2)
25#define CC_AES_192_BIT_KEY_SIZE 24
26#define CC_AES_192_BIT_KEY_SIZE_WORDS (CC_AES_192_BIT_KEY_SIZE >> 2)
27#define CC_AES_256_BIT_KEY_SIZE 32
28#define CC_AES_256_BIT_KEY_SIZE_WORDS (CC_AES_256_BIT_KEY_SIZE >> 2)
29#define CC_AES_KEY_SIZE_MAX CC_AES_256_BIT_KEY_SIZE
30#define CC_AES_KEY_SIZE_WORDS_MAX (CC_AES_KEY_SIZE_MAX >> 2)
31
32#define CC_MD5_DIGEST_SIZE 16
33#define CC_SHA1_DIGEST_SIZE 20
34#define CC_SHA224_DIGEST_SIZE 28
35#define CC_SHA256_DIGEST_SIZE 32
36#define CC_SHA256_DIGEST_SIZE_IN_WORDS 8
37#define CC_SHA384_DIGEST_SIZE 48
38#define CC_SHA512_DIGEST_SIZE 64
39
40#define CC_SHA1_BLOCK_SIZE 64
41#define CC_SHA1_BLOCK_SIZE_IN_WORDS 16
42#define CC_MD5_BLOCK_SIZE 64
43#define CC_MD5_BLOCK_SIZE_IN_WORDS 16
44#define CC_SHA224_BLOCK_SIZE 64
45#define CC_SHA256_BLOCK_SIZE 64
46#define CC_SHA256_BLOCK_SIZE_IN_WORDS 16
47#define CC_SHA1_224_256_BLOCK_SIZE 64
48#define CC_SHA384_BLOCK_SIZE 128
49#define CC_SHA512_BLOCK_SIZE 128
50
51#define CC_DIGEST_SIZE_MAX CC_SHA512_DIGEST_SIZE
52#define CC_HASH_BLOCK_SIZE_MAX CC_SHA512_BLOCK_SIZE /*1024b*/
53
54#define CC_HMAC_BLOCK_SIZE_MAX CC_HASH_BLOCK_SIZE_MAX
55
56#define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX
57
58enum drv_engine_type {
59 DRV_ENGINE_NULL = 0,
60 DRV_ENGINE_AES = 1,
61 DRV_ENGINE_DES = 2,
62 DRV_ENGINE_HASH = 3,
63 DRV_ENGINE_RC4 = 4,
64 DRV_ENGINE_DOUT = 5,
65 DRV_ENGINE_RESERVE32B = S32_MAX,
66};
67
68enum drv_crypto_alg {
69 DRV_CRYPTO_ALG_NULL = -1,
70 DRV_CRYPTO_ALG_AES = 0,
71 DRV_CRYPTO_ALG_DES = 1,
72 DRV_CRYPTO_ALG_HASH = 2,
73 DRV_CRYPTO_ALG_C2 = 3,
74 DRV_CRYPTO_ALG_HMAC = 4,
75 DRV_CRYPTO_ALG_AEAD = 5,
76 DRV_CRYPTO_ALG_BYPASS = 6,
77 DRV_CRYPTO_ALG_NUM = 7,
78 DRV_CRYPTO_ALG_RESERVE32B = S32_MAX
79};
80
81enum drv_crypto_direction {
82 DRV_CRYPTO_DIRECTION_NULL = -1,
83 DRV_CRYPTO_DIRECTION_ENCRYPT = 0,
84 DRV_CRYPTO_DIRECTION_DECRYPT = 1,
85 DRV_CRYPTO_DIRECTION_DECRYPT_ENCRYPT = 3,
86 DRV_CRYPTO_DIRECTION_RESERVE32B = S32_MAX
87};
88
89enum drv_cipher_mode {
90 DRV_CIPHER_NULL_MODE = -1,
91 DRV_CIPHER_ECB = 0,
92 DRV_CIPHER_CBC = 1,
93 DRV_CIPHER_CTR = 2,
94 DRV_CIPHER_CBC_MAC = 3,
95 DRV_CIPHER_XTS = 4,
96 DRV_CIPHER_XCBC_MAC = 5,
97 DRV_CIPHER_OFB = 6,
98 DRV_CIPHER_CMAC = 7,
99 DRV_CIPHER_CCM = 8,
100 DRV_CIPHER_CBC_CTS = 11,
101 DRV_CIPHER_GCTR = 12,
102 DRV_CIPHER_ESSIV = 13,
103 DRV_CIPHER_BITLOCKER = 14,
104 DRV_CIPHER_RESERVE32B = S32_MAX
105};
106
107enum drv_hash_mode {
108 DRV_HASH_NULL = -1,
109 DRV_HASH_SHA1 = 0,
110 DRV_HASH_SHA256 = 1,
111 DRV_HASH_SHA224 = 2,
112 DRV_HASH_SHA512 = 3,
113 DRV_HASH_SHA384 = 4,
114 DRV_HASH_MD5 = 5,
115 DRV_HASH_CBC_MAC = 6,
116 DRV_HASH_XCBC_MAC = 7,
117 DRV_HASH_CMAC = 8,
118 DRV_HASH_MODE_NUM = 9,
119 DRV_HASH_RESERVE32B = S32_MAX
120};
121
122enum drv_hash_hw_mode {
123 DRV_HASH_HW_MD5 = 0,
124 DRV_HASH_HW_SHA1 = 1,
125 DRV_HASH_HW_SHA256 = 2,
126 DRV_HASH_HW_SHA224 = 10,
127 DRV_HASH_HW_SHA512 = 4,
128 DRV_HASH_HW_SHA384 = 12,
129 DRV_HASH_HW_GHASH = 6,
130 DRV_HASH_HW_RESERVE32B = S32_MAX
131};
132
133#endif /* _CC_CRYPTO_CTX_H_ */
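
The *_WORDS helpers above are plain byte-to-32-bit-word conversions (size >> 2); for example, a 256-bit AES key is 32 bytes, i.e. 8 words. A small compile-time sketch of that arithmetic (illustrative only, not part of the patch):

#include <linux/build_bug.h>

static inline void cc_word_size_examples(void)
{
	BUILD_BUG_ON(CC_AES_256_BIT_KEY_SIZE_WORDS != 8);	/* 32 bytes >> 2 */
	BUILD_BUG_ON(CC_AES_IV_SIZE_WORDS != 4);		/* 16 bytes >> 2 */
	BUILD_BUG_ON(CC_SHA256_DIGEST_SIZE_IN_WORDS != 8);	/* 32-byte digest */
}
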
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
new file mode 100644
index 000000000000..08f8db489cf0
--- /dev/null
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -0,0 +1,101 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#include <linux/kernel.h>
5#include <linux/debugfs.h>
6#include <linux/stringify.h>
7#include "cc_driver.h"
8#include "cc_crypto_ctx.h"
9#include "cc_debugfs.h"
10
11struct cc_debugfs_ctx {
12 struct dentry *dir;
13};
14
15#define CC_DEBUG_REG(_X) { \
16 .name = __stringify(_X),\
17 .offset = CC_REG(_X) \
18 }
19
20/*
21 * This is a global var for the dentry of the
22 * debugfs ccree/ dir. It is not tied down to
23 * a specific instance of ccree, hence it is
24 * global.
25 */
26static struct dentry *cc_debugfs_dir;
27
28static struct debugfs_reg32 debug_regs[] = {
29 CC_DEBUG_REG(HOST_SIGNATURE),
30 CC_DEBUG_REG(HOST_IRR),
31 CC_DEBUG_REG(HOST_POWER_DOWN_EN),
32 CC_DEBUG_REG(AXIM_MON_ERR),
33 CC_DEBUG_REG(DSCRPTR_QUEUE_CONTENT),
34 CC_DEBUG_REG(HOST_IMR),
35 CC_DEBUG_REG(AXIM_CFG),
36 CC_DEBUG_REG(AXIM_CACHE_PARAMS),
37 CC_DEBUG_REG(HOST_VERSION),
38 CC_DEBUG_REG(GPR_HOST),
39 CC_DEBUG_REG(AXIM_MON_COMP),
40};
41
42int __init cc_debugfs_global_init(void)
43{
44 cc_debugfs_dir = debugfs_create_dir("ccree", NULL);
45
46 return !cc_debugfs_dir;
47}
48
49void __exit cc_debugfs_global_fini(void)
50{
51 debugfs_remove(cc_debugfs_dir);
52}
53
54int cc_debugfs_init(struct cc_drvdata *drvdata)
55{
56 struct device *dev = drvdata_to_dev(drvdata);
57 struct cc_debugfs_ctx *ctx;
58 struct debugfs_regset32 *regset;
59 struct dentry *file;
60
61 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
62 if (!ctx)
63 return -ENOMEM;
64
65 regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
66 if (!regset)
67 return -ENOMEM;
68
69 regset->regs = debug_regs;
70 regset->nregs = ARRAY_SIZE(debug_regs);
71 regset->base = drvdata->cc_base;
72
73 ctx->dir = debugfs_create_dir(drvdata->plat_dev->name, cc_debugfs_dir);
74 if (!ctx->dir)
75 return -ENFILE;
76
77 file = debugfs_create_regset32("regs", 0400, ctx->dir, regset);
78 if (!file) {
79 debugfs_remove(ctx->dir);
80 return -ENFILE;
81 }
82
83 file = debugfs_create_bool("coherent", 0400, ctx->dir,
84 &drvdata->coherent);
85
86 if (!file) {
87 debugfs_remove_recursive(ctx->dir);
88 return -ENFILE;
89 }
90
91 drvdata->debugfs = ctx;
92
93 return 0;
94}
95
96void cc_debugfs_fini(struct cc_drvdata *drvdata)
97{
98 struct cc_debugfs_ctx *ctx = (struct cc_debugfs_ctx *)drvdata->debugfs;
99
100 debugfs_remove_recursive(ctx->dir);
101}
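
The debug_regs[] table above is built with CC_DEBUG_REG(), which stringifies the register name and resolves its offset through the CC_REG() name-mangling macro defined later in cc_driver.h. A sketch of what a single entry expands to (illustrative only, not part of the patch; the offset symbol comes from cc_host_regs.h):

#include <linux/debugfs.h>

/* CC_DEBUG_REG(HOST_IRR) is equivalent to writing: */
static struct debugfs_reg32 host_irr_entry = {
	.name = "HOST_IRR",
	.offset = CC_HOST_IRR_REG_OFFSET,
};
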
diff --git a/drivers/crypto/ccree/cc_debugfs.h b/drivers/crypto/ccree/cc_debugfs.h
new file mode 100644
index 000000000000..5b5320eca7d2
--- /dev/null
+++ b/drivers/crypto/ccree/cc_debugfs.h
@@ -0,0 +1,32 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#ifndef __CC_DEBUGFS_H__
5#define __CC_DEBUGFS_H__
6
7#ifdef CONFIG_DEBUG_FS
8int cc_debugfs_global_init(void);
9void cc_debugfs_global_fini(void);
10
11int cc_debugfs_init(struct cc_drvdata *drvdata);
12void cc_debugfs_fini(struct cc_drvdata *drvdata);
13
14#else
15
16static inline int cc_debugfs_global_init(void)
17{
18 return 0;
19}
20
21static inline void cc_debugfs_global_fini(void) {}
22
23static inline int cc_debugfs_init(struct cc_drvdata *drvdata)
24{
25 return 0;
26}
27
28static inline void cc_debugfs_fini(struct cc_drvdata *drvdata) {}
29
30#endif
31
32#endif /*__CC_DEBUGFS_H__*/
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
new file mode 100644
index 000000000000..89ce013ae093
--- /dev/null
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -0,0 +1,518 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#include <linux/kernel.h>
5#include <linux/module.h>
6
7#include <linux/crypto.h>
8#include <linux/moduleparam.h>
9#include <linux/types.h>
10#include <linux/interrupt.h>
11#include <linux/platform_device.h>
12#include <linux/slab.h>
13#include <linux/spinlock.h>
14#include <linux/of.h>
15#include <linux/clk.h>
16#include <linux/of_address.h>
17
18#include "cc_driver.h"
19#include "cc_request_mgr.h"
20#include "cc_buffer_mgr.h"
21#include "cc_debugfs.h"
22#include "cc_cipher.h"
23#include "cc_aead.h"
24#include "cc_hash.h"
25#include "cc_ivgen.h"
26#include "cc_sram_mgr.h"
27#include "cc_pm.h"
28#include "cc_fips.h"
29
30bool cc_dump_desc;
31module_param_named(dump_desc, cc_dump_desc, bool, 0600);
32MODULE_PARM_DESC(cc_dump_desc, "Dump descriptors to kernel log as debugging aid");
33
34bool cc_dump_bytes;
35module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
36MODULE_PARM_DESC(cc_dump_bytes, "Dump buffers to kernel log as debugging aid");
37
38struct cc_hw_data {
39 char *name;
40 enum cc_hw_rev rev;
41 u32 sig;
42};
43
44/* Hardware revisions defs. */
45
46static const struct cc_hw_data cc712_hw = {
47 .name = "712", .rev = CC_HW_REV_712, .sig = 0xDCC71200U
48};
49
50static const struct cc_hw_data cc710_hw = {
51 .name = "710", .rev = CC_HW_REV_710, .sig = 0xDCC63200U
52};
53
54static const struct cc_hw_data cc630p_hw = {
55 .name = "630P", .rev = CC_HW_REV_630, .sig = 0xDCC63000U
56};
57
58static const struct of_device_id arm_ccree_dev_of_match[] = {
59 { .compatible = "arm,cryptocell-712-ree", .data = &cc712_hw },
60 { .compatible = "arm,cryptocell-710-ree", .data = &cc710_hw },
61 { .compatible = "arm,cryptocell-630p-ree", .data = &cc630p_hw },
62 {}
63};
64MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);
65
66void __dump_byte_array(const char *name, const u8 *buf, size_t len)
67{
68 char prefix[64];
69
70 if (!buf)
71 return;
72
73 snprintf(prefix, sizeof(prefix), "%s[%zu]: ", name, len);
74
75 print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_ADDRESS, 16, 1, buf,
76 len, false);
77}
78
79static irqreturn_t cc_isr(int irq, void *dev_id)
80{
81 struct cc_drvdata *drvdata = (struct cc_drvdata *)dev_id;
82 struct device *dev = drvdata_to_dev(drvdata);
83 u32 irr;
84 u32 imr;
85
86 /* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
87
88 /* read the interrupt status */
89 irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
90 dev_dbg(dev, "Got IRR=0x%08X\n", irr);
91 if (irr == 0) { /* Probably shared interrupt line */
92 dev_err(dev, "Got interrupt with empty IRR\n");
93 return IRQ_NONE;
94 }
95 imr = cc_ioread(drvdata, CC_REG(HOST_IMR));
96
97 /* clear interrupt - must be before processing events */
98 cc_iowrite(drvdata, CC_REG(HOST_ICR), irr);
99
100 drvdata->irq = irr;
101 /* Completion interrupt - most probable */
102 if (irr & CC_COMP_IRQ_MASK) {
103 /* Mask AXI completion interrupt - will be unmasked in
104 * Deferred service handler
105 */
106 cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_COMP_IRQ_MASK);
107 irr &= ~CC_COMP_IRQ_MASK;
108 complete_request(drvdata);
109 }
110#ifdef CONFIG_CRYPTO_FIPS
111 /* TEE FIPS interrupt */
112 if (irr & CC_GPR0_IRQ_MASK) {
113 /* Mask interrupt - will be unmasked in Deferred service
114 * handler
115 */
116 cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_GPR0_IRQ_MASK);
117 irr &= ~CC_GPR0_IRQ_MASK;
118 fips_handler(drvdata);
119 }
120#endif
121 /* AXI error interrupt */
122 if (irr & CC_AXI_ERR_IRQ_MASK) {
123 u32 axi_err;
124
125 /* Read the AXI error ID */
126 axi_err = cc_ioread(drvdata, CC_REG(AXIM_MON_ERR));
127 dev_dbg(dev, "AXI completion error: axim_mon_err=0x%08X\n",
128 axi_err);
129
130 irr &= ~CC_AXI_ERR_IRQ_MASK;
131 }
132
133 if (irr) {
134 dev_dbg(dev, "IRR includes unknown cause bits (0x%08X)\n",
135 irr);
136 /* Just warning */
137 }
138
139 return IRQ_HANDLED;
140}
141
142int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
143{
144 unsigned int val, cache_params;
145 struct device *dev = drvdata_to_dev(drvdata);
146
147 /* Unmask all AXI interrupt sources AXI_CFG1 register */
148 val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
149 cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
150 dev_dbg(dev, "AXIM_CFG=0x%08X\n",
151 cc_ioread(drvdata, CC_REG(AXIM_CFG)));
152
153 /* Clear all pending interrupts */
154 val = cc_ioread(drvdata, CC_REG(HOST_IRR));
155 dev_dbg(dev, "IRR=0x%08X\n", val);
156 cc_iowrite(drvdata, CC_REG(HOST_ICR), val);
157
158 /* Unmask relevant interrupt cause */
159 val = CC_COMP_IRQ_MASK | CC_AXI_ERR_IRQ_MASK;
160
161 if (drvdata->hw_rev >= CC_HW_REV_712)
162 val |= CC_GPR0_IRQ_MASK;
163
164 cc_iowrite(drvdata, CC_REG(HOST_IMR), ~val);
165
166 cache_params = (drvdata->coherent ? CC_COHERENT_CACHE_PARAMS : 0x0);
167
168 val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
169
170 if (is_probe)
171 dev_info(dev, "Cache params previous: 0x%08X\n", val);
172
173 cc_iowrite(drvdata, CC_REG(AXIM_CACHE_PARAMS), cache_params);
174 val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
175
176 if (is_probe)
177 dev_info(dev, "Cache params current: 0x%08X (expect: 0x%08X)\n",
178 val, cache_params);
179
180 return 0;
181}
182
183static int init_cc_resources(struct platform_device *plat_dev)
184{
185 struct resource *req_mem_cc_regs = NULL;
186 struct cc_drvdata *new_drvdata;
187 struct device *dev = &plat_dev->dev;
188 struct device_node *np = dev->of_node;
189 u32 signature_val;
190 u64 dma_mask;
191 const struct cc_hw_data *hw_rev;
192 const struct of_device_id *dev_id;
193 int rc = 0;
194
195 new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
196 if (!new_drvdata)
197 return -ENOMEM;
198
199 dev_id = of_match_node(arm_ccree_dev_of_match, np);
200 if (!dev_id)
201 return -ENODEV;
202
203 hw_rev = (struct cc_hw_data *)dev_id->data;
204 new_drvdata->hw_rev_name = hw_rev->name;
205 new_drvdata->hw_rev = hw_rev->rev;
206
207 if (hw_rev->rev >= CC_HW_REV_712) {
208 new_drvdata->hash_len_sz = HASH_LEN_SIZE_712;
209 new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP);
210 } else {
211 new_drvdata->hash_len_sz = HASH_LEN_SIZE_630;
212 new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP8);
213 }
214
215 platform_set_drvdata(plat_dev, new_drvdata);
216 new_drvdata->plat_dev = plat_dev;
217
218 new_drvdata->clk = of_clk_get(np, 0);
219 new_drvdata->coherent = of_dma_is_coherent(np);
220
221 /* Get device resources */
222 /* First CC registers space */
223 req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
224 /* Map registers space */
225 new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
226 if (IS_ERR(new_drvdata->cc_base)) {
227 dev_err(dev, "Failed to ioremap registers");
228 return PTR_ERR(new_drvdata->cc_base);
229 }
230
231 dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
232 req_mem_cc_regs);
233 dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
234 &req_mem_cc_regs->start, new_drvdata->cc_base);
235
236 /* Then IRQ */
237 new_drvdata->irq = platform_get_irq(plat_dev, 0);
238 if (new_drvdata->irq < 0) {
239 dev_err(dev, "Failed getting IRQ resource\n");
240 return new_drvdata->irq;
241 }
242
243 rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
244 IRQF_SHARED, "ccree", new_drvdata);
245 if (rc) {
246 dev_err(dev, "Could not register to interrupt %d\n",
247 new_drvdata->irq);
248 return rc;
249 }
250 dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
251
252 init_completion(&new_drvdata->hw_queue_avail);
253
254 if (!plat_dev->dev.dma_mask)
255 plat_dev->dev.dma_mask = &plat_dev->dev.coherent_dma_mask;
256
257 dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
258 while (dma_mask > 0x7fffffffUL) {
259 if (dma_supported(&plat_dev->dev, dma_mask)) {
260 rc = dma_set_coherent_mask(&plat_dev->dev, dma_mask);
261 if (!rc)
262 break;
263 }
264 dma_mask >>= 1;
265 }
266
267 if (rc) {
268 dev_err(dev, "Failed in dma_set_mask, mask=%pad\n", &dma_mask);
269 return rc;
270 }
271
272 rc = cc_clk_on(new_drvdata);
273 if (rc) {
274 dev_err(dev, "Failed to enable clock");
275 return rc;
276 }
277
278 /* Verify correct mapping */
279 signature_val = cc_ioread(new_drvdata, CC_REG(HOST_SIGNATURE));
280 if (signature_val != hw_rev->sig) {
281 dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
282 signature_val, hw_rev->sig);
283 rc = -EINVAL;
284 goto post_clk_err;
285 }
286 dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);
287
288 /* Display HW versions */
289 dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
290 hw_rev->name, cc_ioread(new_drvdata, CC_REG(HOST_VERSION)),
291 DRV_MODULE_VERSION);
292
293 rc = init_cc_regs(new_drvdata, true);
294 if (rc) {
295 dev_err(dev, "init_cc_regs failed\n");
296 goto post_clk_err;
297 }
298
299 rc = cc_debugfs_init(new_drvdata);
300 if (rc) {
301 dev_err(dev, "Failed registering debugfs interface\n");
302 goto post_regs_err;
303 }
304
305 rc = cc_fips_init(new_drvdata);
306 if (rc) {
307 dev_err(dev, "CC_FIPS_INIT failed 0x%x\n", rc);
308 goto post_debugfs_err;
309 }
310 rc = cc_sram_mgr_init(new_drvdata);
311 if (rc) {
312 dev_err(dev, "cc_sram_mgr_init failed\n");
313 goto post_fips_init_err;
314 }
315
316 new_drvdata->mlli_sram_addr =
317 cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
318 if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
319 dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
320 rc = -ENOMEM;
321 goto post_sram_mgr_err;
322 }
323
324 rc = cc_req_mgr_init(new_drvdata);
325 if (rc) {
326 dev_err(dev, "cc_req_mgr_init failed\n");
327 goto post_sram_mgr_err;
328 }
329
330 rc = cc_buffer_mgr_init(new_drvdata);
331 if (rc) {
332 dev_err(dev, "buffer_mgr_init failed\n");
333 goto post_req_mgr_err;
334 }
335
336 rc = cc_pm_init(new_drvdata);
337 if (rc) {
338 dev_err(dev, "ssi_power_mgr_init failed\n");
339 goto post_buf_mgr_err;
340 }
341
342 rc = cc_ivgen_init(new_drvdata);
343 if (rc) {
344 dev_err(dev, "cc_ivgen_init failed\n");
345 goto post_power_mgr_err;
346 }
347
348 /* Allocate crypto algs */
349 rc = cc_cipher_alloc(new_drvdata);
350 if (rc) {
351 dev_err(dev, "cc_cipher_alloc failed\n");
352 goto post_ivgen_err;
353 }
354
355 /* hash must be allocated before aead since hash exports APIs */
356 rc = cc_hash_alloc(new_drvdata);
357 if (rc) {
358 dev_err(dev, "cc_hash_alloc failed\n");
359 goto post_cipher_err;
360 }
361
362 rc = cc_aead_alloc(new_drvdata);
363 if (rc) {
364 dev_err(dev, "cc_aead_alloc failed\n");
365 goto post_hash_err;
366 }
367
368 /* If we got here and FIPS mode is enabled
369 * it means all FIPS tests passed, so let TEE
370 * know we're good.
371 */
372 cc_set_ree_fips_status(new_drvdata, true);
373
374 return 0;
375
376post_hash_err:
377 cc_hash_free(new_drvdata);
378post_cipher_err:
379 cc_cipher_free(new_drvdata);
380post_ivgen_err:
381 cc_ivgen_fini(new_drvdata);
382post_power_mgr_err:
383 cc_pm_fini(new_drvdata);
384post_buf_mgr_err:
385 cc_buffer_mgr_fini(new_drvdata);
386post_req_mgr_err:
387 cc_req_mgr_fini(new_drvdata);
388post_sram_mgr_err:
389 cc_sram_mgr_fini(new_drvdata);
390post_fips_init_err:
391 cc_fips_fini(new_drvdata);
392post_debugfs_err:
393 cc_debugfs_fini(new_drvdata);
394post_regs_err:
395 fini_cc_regs(new_drvdata);
396post_clk_err:
397 cc_clk_off(new_drvdata);
398 return rc;
399}
400
401void fini_cc_regs(struct cc_drvdata *drvdata)
402{
403 /* Mask all interrupts */
404 cc_iowrite(drvdata, CC_REG(HOST_IMR), 0xFFFFFFFF);
405}
406
407static void cleanup_cc_resources(struct platform_device *plat_dev)
408{
409 struct cc_drvdata *drvdata =
410 (struct cc_drvdata *)platform_get_drvdata(plat_dev);
411
412 cc_aead_free(drvdata);
413 cc_hash_free(drvdata);
414 cc_cipher_free(drvdata);
415 cc_ivgen_fini(drvdata);
416 cc_pm_fini(drvdata);
417 cc_buffer_mgr_fini(drvdata);
418 cc_req_mgr_fini(drvdata);
419 cc_sram_mgr_fini(drvdata);
420 cc_fips_fini(drvdata);
421 cc_debugfs_fini(drvdata);
422 fini_cc_regs(drvdata);
423 cc_clk_off(drvdata);
424}
425
426int cc_clk_on(struct cc_drvdata *drvdata)
427{
428 struct clk *clk = drvdata->clk;
429 int rc;
430
431 if (IS_ERR(clk))
432 /* Not all devices have a clock associated with CCREE */
433 return 0;
434
435 rc = clk_prepare_enable(clk);
436 if (rc)
437 return rc;
438
439 return 0;
440}
441
442void cc_clk_off(struct cc_drvdata *drvdata)
443{
444 struct clk *clk = drvdata->clk;
445
446 if (IS_ERR(clk))
447 /* Not all devices have a clock associated with CCREE */
448 return;
449
450 clk_disable_unprepare(clk);
451}
452
453static int ccree_probe(struct platform_device *plat_dev)
454{
455 int rc;
456 struct device *dev = &plat_dev->dev;
457
458 /* Map registers space */
459 rc = init_cc_resources(plat_dev);
460 if (rc)
461 return rc;
462
463 dev_info(dev, "ARM ccree device initialized\n");
464
465 return 0;
466}
467
468static int ccree_remove(struct platform_device *plat_dev)
469{
470 struct device *dev = &plat_dev->dev;
471
472 dev_dbg(dev, "Releasing ccree resources...\n");
473
474 cleanup_cc_resources(plat_dev);
475
476 dev_info(dev, "ARM ccree device terminated\n");
477
478 return 0;
479}
480
481static struct platform_driver ccree_driver = {
482 .driver = {
483 .name = "ccree",
484 .of_match_table = arm_ccree_dev_of_match,
485#ifdef CONFIG_PM
486 .pm = &ccree_pm,
487#endif
488 },
489 .probe = ccree_probe,
490 .remove = ccree_remove,
491};
492
493static int __init ccree_init(void)
494{
495 int ret;
496
497 cc_hash_global_init();
498
499 ret = cc_debugfs_global_init();
500 if (ret)
501 return ret;
502
503 return platform_driver_register(&ccree_driver);
504}
505module_init(ccree_init);
506
507static void __exit ccree_exit(void)
508{
509 platform_driver_unregister(&ccree_driver);
510 cc_debugfs_global_fini();
511}
512module_exit(ccree_exit);
513
514/* Module description */
515MODULE_DESCRIPTION("ARM TrustZone CryptoCell REE Driver");
516MODULE_VERSION(DRV_MODULE_VERSION);
517MODULE_AUTHOR("ARM");
518MODULE_LICENSE("GPL v2");
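
init_cc_resources() above negotiates the coherent DMA mask by starting from the widest mask the IP can drive (the 48-bit DMA_BIT_MASK_LEN from cc_driver.h) and halving until the platform accepts it. A minimal standalone sketch of that loop against a generic struct device, with a hypothetical helper name that is not part of the patch:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int negotiate_coherent_mask(struct device *dev)
{
	u64 mask = DMA_BIT_MASK(48);	/* widest mask the IP supports */
	int rc = -EIO;			/* reported if nothing below fits */

	/* Halve the mask until the platform can honour it */
	while (mask > DMA_BIT_MASK(31)) {
		if (dma_supported(dev, mask)) {
			rc = dma_set_coherent_mask(dev, mask);
			if (!rc)
				break;
		}
		mask >>= 1;
	}
	return rc;
}
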
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
new file mode 100644
index 000000000000..2048fdeb9579
--- /dev/null
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -0,0 +1,208 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4/* \file cc_driver.h
5 * ARM CryptoCell Linux Crypto Driver
6 */
7
8#ifndef __CC_DRIVER_H__
9#define __CC_DRIVER_H__
10
11#ifdef COMP_IN_WQ
12#include <linux/workqueue.h>
13#else
14#include <linux/interrupt.h>
15#endif
16#include <linux/dma-mapping.h>
17#include <crypto/algapi.h>
18#include <crypto/internal/skcipher.h>
19#include <crypto/aes.h>
20#include <crypto/sha.h>
21#include <crypto/aead.h>
22#include <crypto/authenc.h>
23#include <crypto/hash.h>
24#include <crypto/skcipher.h>
25#include <linux/version.h>
26#include <linux/clk.h>
27#include <linux/platform_device.h>
28
29/* Registers definitions from shared/hw/ree_include */
30#include "cc_host_regs.h"
31#define CC_DEV_SHA_MAX 512
32#include "cc_crypto_ctx.h"
33#include "cc_hw_queue_defs.h"
34#include "cc_sram_mgr.h"
35
36extern bool cc_dump_desc;
37extern bool cc_dump_bytes;
38
39#define DRV_MODULE_VERSION "4.0"
40
41enum cc_hw_rev {
42 CC_HW_REV_630 = 630,
43 CC_HW_REV_710 = 710,
44 CC_HW_REV_712 = 712
45};
46
47#define CC_COHERENT_CACHE_PARAMS 0xEEE
48
49/* Maximum DMA mask supported by IP */
50#define DMA_BIT_MASK_LEN 48
51
52#define CC_AXI_IRQ_MASK ((1 << CC_AXIM_CFG_BRESPMASK_BIT_SHIFT) | \
53 (1 << CC_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
54 (1 << CC_AXIM_CFG_INFLTMASK_BIT_SHIFT) | \
55 (1 << CC_AXIM_CFG_COMPMASK_BIT_SHIFT))
56
57#define CC_AXI_ERR_IRQ_MASK BIT(CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)
58
59#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
60
61#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
62 CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
63 CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
64
65/* Register name mangling macro */
66#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET
67
68/* TEE FIPS status interrupt */
69#define CC_GPR0_IRQ_MASK BIT(CC_HOST_IRR_GPR0_BIT_SHIFT)
70
71#define CC_CRA_PRIO 400
72
73#define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */
74
75#define MAX_REQUEST_QUEUE_SIZE 4096
76#define MAX_MLLI_BUFF_SIZE 2080
77#define MAX_ICV_NENTS_SUPPORTED 2
78
79/* Definitions for HW descriptors DIN/DOUT fields */
80#define NS_BIT 1
81#define AXI_ID 0
82/* AXI_ID is not actually the AXI ID of the transaction but the value of AXI_ID
83 * field in the HW descriptor. The DMA engine adds 8 to that value.
84 */
85
86#define CC_MAX_IVGEN_DMA_ADDRESSES 3
87struct cc_crypto_req {
88 void (*user_cb)(struct device *dev, void *req, int err);
89 void *user_arg;
90 dma_addr_t ivgen_dma_addr[CC_MAX_IVGEN_DMA_ADDRESSES];
91 /* For the first 'ivgen_dma_addr_len' addresses of this array,
92 * the generated IV is placed in them by send_request().
93 * The same generated IV is used for all addresses.
94 */
95 /* Amount of 'ivgen_dma_addr' elements to be filled. */
96 unsigned int ivgen_dma_addr_len;
97 /* The generated IV size required, 8/16 B allowed. */
98 unsigned int ivgen_size;
99 struct completion seq_compl; /* request completion */
100};
101
102/**
103 * struct cc_drvdata - driver private data context
104 * @cc_base: virt address of the CC registers
105 * @irq: device IRQ number
106 * @irq_mask: Interrupt mask shadow (1 for masked interrupts)
107 * @fw_ver: SeP loaded firmware version
108 */
109struct cc_drvdata {
110 void __iomem *cc_base;
111 int irq;
112 u32 irq_mask;
113 u32 fw_ver;
114 struct completion hw_queue_avail; /* wait for HW queue availability */
115 struct platform_device *plat_dev;
116 cc_sram_addr_t mlli_sram_addr;
117 void *buff_mgr_handle;
118 void *cipher_handle;
119 void *hash_handle;
120 void *aead_handle;
121 void *request_mgr_handle;
122 void *fips_handle;
123 void *ivgen_handle;
124 void *sram_mgr_handle;
125 void *debugfs;
126 struct clk *clk;
127 bool coherent;
128 char *hw_rev_name;
129 enum cc_hw_rev hw_rev;
130 u32 hash_len_sz;
131 u32 axim_mon_offset;
132};
133
134struct cc_crypto_alg {
135 struct list_head entry;
136 int cipher_mode;
137 int flow_mode; /* Note: currently, refers to the cipher mode only. */
138 int auth_mode;
139 unsigned int data_unit;
140 struct cc_drvdata *drvdata;
141 struct skcipher_alg skcipher_alg;
142 struct aead_alg aead_alg;
143};
144
145struct cc_alg_template {
146 char name[CRYPTO_MAX_ALG_NAME];
147 char driver_name[CRYPTO_MAX_ALG_NAME];
148 unsigned int blocksize;
149 u32 type;
150 union {
151 struct skcipher_alg skcipher;
152 struct aead_alg aead;
153 } template_u;
154 int cipher_mode;
155 int flow_mode; /* Note: currently, refers to the cipher mode only. */
156 int auth_mode;
157 u32 min_hw_rev;
158 unsigned int data_unit;
159 struct cc_drvdata *drvdata;
160};
161
162struct async_gen_req_ctx {
163 dma_addr_t iv_dma_addr;
164 enum drv_crypto_direction op_type;
165};
166
167static inline struct device *drvdata_to_dev(struct cc_drvdata *drvdata)
168{
169 return &drvdata->plat_dev->dev;
170}
171
172void __dump_byte_array(const char *name, const u8 *buf, size_t len);
173static inline void dump_byte_array(const char *name, const u8 *the_array,
174 size_t size)
175{
176 if (cc_dump_bytes)
177 __dump_byte_array(name, the_array, size);
178}
179
180int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
181void fini_cc_regs(struct cc_drvdata *drvdata);
182int cc_clk_on(struct cc_drvdata *drvdata);
183void cc_clk_off(struct cc_drvdata *drvdata);
184
185static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
186{
187 iowrite32(val, (drvdata->cc_base + reg));
188}
189
190static inline u32 cc_ioread(struct cc_drvdata *drvdata, u32 reg)
191{
192 return ioread32(drvdata->cc_base + reg);
193}
194
195static inline gfp_t cc_gfp_flags(struct crypto_async_request *req)
196{
197 return (req->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
198 GFP_KERNEL : GFP_ATOMIC;
199}
200
201static inline void set_queue_last_ind(struct cc_drvdata *drvdata,
202 struct cc_hw_desc *pdesc)
203{
204 if (drvdata->hw_rev >= CC_HW_REV_712)
205 set_queue_last_ind_bit(pdesc);
206}
207
208#endif /*__CC_DRIVER_H__*/
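
cc_gfp_flags() above encodes the usual crypto-API allocation rule: use GFP_KERNEL only when the submitter set CRYPTO_TFM_REQ_MAY_SLEEP, otherwise fall back to GFP_ATOMIC. A short sketch of how a request path would apply it, using a hypothetical helper that is not part of the patch:

#include <linux/slab.h>

/* Scratch allocation sized for the current request; may sleep only if
 * the submitter allowed it via CRYPTO_TFM_REQ_MAY_SLEEP.
 */
static inline void *cc_alloc_req_scratch(struct crypto_async_request *req,
					 size_t len)
{
	return kmalloc(len, cc_gfp_flags(req));
}
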
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
new file mode 100644
index 000000000000..b4d0a6d983e0
--- /dev/null
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -0,0 +1,120 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#include <linux/kernel.h>
5#include <linux/fips.h>
6
7#include "cc_driver.h"
8#include "cc_fips.h"
9
10static void fips_dsr(unsigned long devarg);
11
12struct cc_fips_handle {
13 struct tasklet_struct tasklet;
14};
15
16/* This function is called once at driver entry point to check
17 * whether a TEE FIPS error occurred.
18 */
19static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata)
20{
21 u32 reg;
22
23 reg = cc_ioread(drvdata, CC_REG(GPR_HOST));
24 return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK));
25}
26
27/*
28 * This function should push the FIPS REE library status towards the TEE library
29 * by writing the error state to HOST_GPR0 register.
30 */
31void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool status)
32{
33 int val = CC_FIPS_SYNC_REE_STATUS;
34
35 if (drvdata->hw_rev < CC_HW_REV_712)
36 return;
37
38 val |= (status ? CC_FIPS_SYNC_MODULE_OK : CC_FIPS_SYNC_MODULE_ERROR);
39
40 cc_iowrite(drvdata, CC_REG(HOST_GPR0), val);
41}
42
43void cc_fips_fini(struct cc_drvdata *drvdata)
44{
45 struct cc_fips_handle *fips_h = drvdata->fips_handle;
46
47 if (drvdata->hw_rev < CC_HW_REV_712 || !fips_h)
48 return;
49
50 /* Kill tasklet */
51 tasklet_kill(&fips_h->tasklet);
52
53 kfree(fips_h);
54 drvdata->fips_handle = NULL;
55}
56
57void fips_handler(struct cc_drvdata *drvdata)
58{
59 struct cc_fips_handle *fips_handle_ptr = drvdata->fips_handle;
60
61 if (drvdata->hw_rev < CC_HW_REV_712)
62 return;
63
64 tasklet_schedule(&fips_handle_ptr->tasklet);
65}
66
67static inline void tee_fips_error(struct device *dev)
68{
69 if (fips_enabled)
70 panic("ccree: TEE reported cryptographic error in fips mode!\n");
71 else
72 dev_err(dev, "TEE reported error!\n");
73}
74
75/* Deferred service handler, run as interrupt-fired tasklet */
76static void fips_dsr(unsigned long devarg)
77{
78 struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
79 struct device *dev = drvdata_to_dev(drvdata);
80 u32 irq, state, val;
81
82 irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));
83
84 if (irq) {
85 state = cc_ioread(drvdata, CC_REG(GPR_HOST));
86
87 if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
88 tee_fips_error(dev);
89 }
90
91 /* After verifying that there is nothing to do,
92 * unmask AXI completion interrupt.
93 */
94 val = (cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
95 cc_iowrite(drvdata, CC_REG(HOST_IMR), val);
96}
97
98/* This function is called once at driver entry point. */
99int cc_fips_init(struct cc_drvdata *p_drvdata)
100{
101 struct cc_fips_handle *fips_h;
102 struct device *dev = drvdata_to_dev(p_drvdata);
103
104 if (p_drvdata->hw_rev < CC_HW_REV_712)
105 return 0;
106
107 fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);
108 if (!fips_h)
109 return -ENOMEM;
110
111 p_drvdata->fips_handle = fips_h;
112
113 dev_dbg(dev, "Initializing fips tasklet\n");
114 tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
115
116 if (!cc_get_tee_fips_status(p_drvdata))
117 tee_fips_error(dev);
118
119 return 0;
120}
diff --git a/drivers/crypto/ccree/cc_fips.h b/drivers/crypto/ccree/cc_fips.h
new file mode 100644
index 000000000000..645e096a7a82
--- /dev/null
+++ b/drivers/crypto/ccree/cc_fips.h
@@ -0,0 +1,36 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#ifndef __CC_FIPS_H__
5#define __CC_FIPS_H__
6
7#ifdef CONFIG_CRYPTO_FIPS
8
9enum cc_fips_status {
10 CC_FIPS_SYNC_MODULE_OK = 0x0,
11 CC_FIPS_SYNC_MODULE_ERROR = 0x1,
12 CC_FIPS_SYNC_REE_STATUS = 0x4,
13 CC_FIPS_SYNC_TEE_STATUS = 0x8,
14 CC_FIPS_SYNC_STATUS_RESERVE32B = S32_MAX
15};
16
17int cc_fips_init(struct cc_drvdata *p_drvdata);
18void cc_fips_fini(struct cc_drvdata *drvdata);
19void fips_handler(struct cc_drvdata *drvdata);
20void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);
21
22#else /* CONFIG_CRYPTO_FIPS */
23
24static inline int cc_fips_init(struct cc_drvdata *p_drvdata)
25{
26 return 0;
27}
28
29static inline void cc_fips_fini(struct cc_drvdata *drvdata) {}
30static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
31 bool ok) {}
32static inline void fips_handler(struct cc_drvdata *drvdata) {}
33
34#endif /* CONFIG_CRYPTO_FIPS */
35
36#endif /*__CC_FIPS_H__*/
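
The enum above defines the GPR handshake words exchanged between the REE driver and the TEE: cc_fips.c compares GPR_HOST against (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK) and answers by writing (CC_FIPS_SYNC_REE_STATUS | status) to HOST_GPR0. A small compile-time sketch of the concrete values (illustrative only, not part of the patch):

#include <linux/build_bug.h>

static inline void cc_fips_handshake_values(void)
{
	/* TEE -> REE: "self-tests passed", as checked by cc_get_tee_fips_status() */
	BUILD_BUG_ON((CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK) != 0x8);
	/* REE -> TEE: "REE module OK", as written by cc_set_ree_fips_status() */
	BUILD_BUG_ON((CC_FIPS_SYNC_REE_STATUS | CC_FIPS_SYNC_MODULE_OK) != 0x4);
}
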
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
new file mode 100644
index 000000000000..96ff777474d7
--- /dev/null
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -0,0 +1,2296 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <crypto/algapi.h>
7#include <crypto/hash.h>
8#include <crypto/md5.h>
9#include <crypto/internal/hash.h>
10
11#include "cc_driver.h"
12#include "cc_request_mgr.h"
13#include "cc_buffer_mgr.h"
14#include "cc_hash.h"
15#include "cc_sram_mgr.h"
16
17#define CC_MAX_HASH_SEQ_LEN 12
18#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
19
20struct cc_hash_handle {
21 cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/
22 cc_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
23 struct list_head hash_list;
24};
25
26static const u32 digest_len_init[] = {
27 0x00000040, 0x00000000, 0x00000000, 0x00000000 };
28static const u32 md5_init[] = {
29 SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
30static const u32 sha1_init[] = {
31 SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
32static const u32 sha224_init[] = {
33 SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
34 SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
35static const u32 sha256_init[] = {
36 SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
37 SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
38static const u32 digest_len_sha512_init[] = {
39 0x00000080, 0x00000000, 0x00000000, 0x00000000 };
40static u64 sha384_init[] = {
41 SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
42 SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
43static u64 sha512_init[] = {
44 SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
45 SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
46
47static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
48 unsigned int *seq_size);
49
50static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
51 unsigned int *seq_size);
52
53static const void *cc_larval_digest(struct device *dev, u32 mode);
54
55struct cc_hash_alg {
56 struct list_head entry;
57 int hash_mode;
58 int hw_mode;
59 int inter_digestsize;
60 struct cc_drvdata *drvdata;
61 struct ahash_alg ahash_alg;
62};
63
64struct hash_key_req_ctx {
65 u32 keylen;
66 dma_addr_t key_dma_addr;
67};
68
69/* hash per-session context */
70struct cc_hash_ctx {
71 struct cc_drvdata *drvdata;
72 /* Holds the original digest: the digest after "setkey" if HMAC,
73 * the initial digest if HASH.
74 */
75 u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
76 u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;
77
78 dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
79 dma_addr_t digest_buff_dma_addr;
80 /* used for hmac with a key larger than the mode block size */
81 struct hash_key_req_ctx key_params;
82 int hash_mode;
83 int hw_mode;
84 int inter_digestsize;
85 struct completion setkey_comp;
86 bool is_hmac;
87};
88
89static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
90 unsigned int flow_mode, struct cc_hw_desc desc[],
91 bool is_not_last_data, unsigned int *seq_size);
92
93static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
94{
95 if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
96 mode == DRV_HASH_SHA512) {
97 set_bytes_swap(desc, 1);
98 } else {
99 set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
100 }
101}
102
103static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
104 unsigned int digestsize)
105{
106 state->digest_result_dma_addr =
107 dma_map_single(dev, state->digest_result_buff,
108 digestsize, DMA_BIDIRECTIONAL);
109 if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
110 dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
111 digestsize);
112 return -ENOMEM;
113 }
114 dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
115 digestsize, state->digest_result_buff,
116 &state->digest_result_dma_addr);
117
118 return 0;
119}
120
121static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
122 struct cc_hash_ctx *ctx)
123{
124 bool is_hmac = ctx->is_hmac;
125
126 memset(state, 0, sizeof(*state));
127
128 if (is_hmac) {
129 if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
130 ctx->hw_mode != DRV_CIPHER_CMAC) {
131 dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
132 ctx->inter_digestsize,
133 DMA_BIDIRECTIONAL);
134
135 memcpy(state->digest_buff, ctx->digest_buff,
136 ctx->inter_digestsize);
137 if (ctx->hash_mode == DRV_HASH_SHA512 ||
138 ctx->hash_mode == DRV_HASH_SHA384)
139 memcpy(state->digest_bytes_len,
140 digest_len_sha512_init,
141 ctx->drvdata->hash_len_sz);
142 else
143 memcpy(state->digest_bytes_len, digest_len_init,
144 ctx->drvdata->hash_len_sz);
145 }
146
147 if (ctx->hash_mode != DRV_HASH_NULL) {
148 dma_sync_single_for_cpu(dev,
149 ctx->opad_tmp_keys_dma_addr,
150 ctx->inter_digestsize,
151 DMA_BIDIRECTIONAL);
152 memcpy(state->opad_digest_buff,
153 ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
154 }
155 } else { /*hash*/
156 /* Copy the initial digests if hash flow. */
157 const void *larval = cc_larval_digest(dev, ctx->hash_mode);
158
159 memcpy(state->digest_buff, larval, ctx->inter_digestsize);
160 }
161}
162
163static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
164 struct cc_hash_ctx *ctx)
165{
166 bool is_hmac = ctx->is_hmac;
167
168 state->digest_buff_dma_addr =
169 dma_map_single(dev, state->digest_buff,
170 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
171 if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
172 dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
173 ctx->inter_digestsize, state->digest_buff);
174 return -EINVAL;
175 }
176 dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
177 ctx->inter_digestsize, state->digest_buff,
178 &state->digest_buff_dma_addr);
179
180 if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
181 state->digest_bytes_len_dma_addr =
182 dma_map_single(dev, state->digest_bytes_len,
183 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
184 if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
185 dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
186 HASH_MAX_LEN_SIZE, state->digest_bytes_len);
187 goto unmap_digest_buf;
188 }
189 dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
190 HASH_MAX_LEN_SIZE, state->digest_bytes_len,
191 &state->digest_bytes_len_dma_addr);
192 }
193
194 if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
195 state->opad_digest_dma_addr =
196 dma_map_single(dev, state->opad_digest_buff,
197 ctx->inter_digestsize,
198 DMA_BIDIRECTIONAL);
199 if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
200 dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
201 ctx->inter_digestsize,
202 state->opad_digest_buff);
203 goto unmap_digest_len;
204 }
205 dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
206 ctx->inter_digestsize, state->opad_digest_buff,
207 &state->opad_digest_dma_addr);
208 }
209
210 return 0;
211
212unmap_digest_len:
213 if (state->digest_bytes_len_dma_addr) {
214 dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
215 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
216 state->digest_bytes_len_dma_addr = 0;
217 }
218unmap_digest_buf:
219 if (state->digest_buff_dma_addr) {
220 dma_unmap_single(dev, state->digest_buff_dma_addr,
221 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
222 state->digest_buff_dma_addr = 0;
223 }
224
225 return -EINVAL;
226}
227
228static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
229 struct cc_hash_ctx *ctx)
230{
231 if (state->digest_buff_dma_addr) {
232 dma_unmap_single(dev, state->digest_buff_dma_addr,
233 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
234 dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
235 &state->digest_buff_dma_addr);
236 state->digest_buff_dma_addr = 0;
237 }
238 if (state->digest_bytes_len_dma_addr) {
239 dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
240 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
241 dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
242 &state->digest_bytes_len_dma_addr);
243 state->digest_bytes_len_dma_addr = 0;
244 }
245 if (state->opad_digest_dma_addr) {
246 dma_unmap_single(dev, state->opad_digest_dma_addr,
247 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
248 dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
249 &state->opad_digest_dma_addr);
250 state->opad_digest_dma_addr = 0;
251 }
252}
253
254static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
255 unsigned int digestsize, u8 *result)
256{
257 if (state->digest_result_dma_addr) {
258 dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
259 DMA_BIDIRECTIONAL);
260 dev_dbg(dev, "unmap digest result buffer va (%pK) pa (%pad) len %u\n",
261 state->digest_result_buff,
262 &state->digest_result_dma_addr, digestsize);
263 memcpy(result, state->digest_result_buff, digestsize);
264 }
265 state->digest_result_dma_addr = 0;
266}
267
268static void cc_update_complete(struct device *dev, void *cc_req, int err)
269{
270 struct ahash_request *req = (struct ahash_request *)cc_req;
271 struct ahash_req_ctx *state = ahash_request_ctx(req);
272 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
273 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
274
275 dev_dbg(dev, "req=%pK\n", req);
276
277 cc_unmap_hash_request(dev, state, req->src, false);
278 cc_unmap_req(dev, state, ctx);
279 req->base.complete(&req->base, err);
280}
281
282static void cc_digest_complete(struct device *dev, void *cc_req, int err)
283{
284 struct ahash_request *req = (struct ahash_request *)cc_req;
285 struct ahash_req_ctx *state = ahash_request_ctx(req);
286 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
287 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
288 u32 digestsize = crypto_ahash_digestsize(tfm);
289
290 dev_dbg(dev, "req=%pK\n", req);
291
292 cc_unmap_hash_request(dev, state, req->src, false);
293 cc_unmap_result(dev, state, digestsize, req->result);
294 cc_unmap_req(dev, state, ctx);
295 req->base.complete(&req->base, err);
296}
297
298static void cc_hash_complete(struct device *dev, void *cc_req, int err)
299{
300 struct ahash_request *req = (struct ahash_request *)cc_req;
301 struct ahash_req_ctx *state = ahash_request_ctx(req);
302 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
303 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
304 u32 digestsize = crypto_ahash_digestsize(tfm);
305
306 dev_dbg(dev, "req=%pK\n", req);
307
308 cc_unmap_hash_request(dev, state, req->src, false);
309 cc_unmap_result(dev, state, digestsize, req->result);
310 cc_unmap_req(dev, state, ctx);
311 req->base.complete(&req->base, err);
312}
313
314static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
315 int idx)
316{
317 struct ahash_req_ctx *state = ahash_request_ctx(req);
318 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
319 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
320 u32 digestsize = crypto_ahash_digestsize(tfm);
321
322 /* Get final MAC result */
323 hw_desc_init(&desc[idx]);
324 set_cipher_mode(&desc[idx], ctx->hw_mode);
325 /* TODO */
326 set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
327 NS_BIT, 1);
328 set_queue_last_ind(ctx->drvdata, &desc[idx]);
329 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
330 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
331 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
332 cc_set_endianity(ctx->hash_mode, &desc[idx]);
333 idx++;
334
335 return idx;
336}
337
338static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
339 int idx)
340{
341 struct ahash_req_ctx *state = ahash_request_ctx(req);
342 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
343 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
344 u32 digestsize = crypto_ahash_digestsize(tfm);
345
346 /* store the hash digest result in the context */
347 hw_desc_init(&desc[idx]);
348 set_cipher_mode(&desc[idx], ctx->hw_mode);
349 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
350 NS_BIT, 0);
351 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
352 cc_set_endianity(ctx->hash_mode, &desc[idx]);
353 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
354 idx++;
355
356 /* Loading hash opad xor key state */
357 hw_desc_init(&desc[idx]);
358 set_cipher_mode(&desc[idx], ctx->hw_mode);
359 set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
360 ctx->inter_digestsize, NS_BIT);
361 set_flow_mode(&desc[idx], S_DIN_to_HASH);
362 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
363 idx++;
364
365 /* Load the hash current length */
366 hw_desc_init(&desc[idx]);
367 set_cipher_mode(&desc[idx], ctx->hw_mode);
368 set_din_sram(&desc[idx],
369 cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
370 ctx->drvdata->hash_len_sz);
371 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
372 set_flow_mode(&desc[idx], S_DIN_to_HASH);
373 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
374 idx++;
375
376 /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
377 hw_desc_init(&desc[idx]);
378 set_din_no_dma(&desc[idx], 0, 0xfffff0);
379 set_dout_no_dma(&desc[idx], 0, 0, 1);
380 idx++;
381
382 /* Perform HASH update */
383 hw_desc_init(&desc[idx]);
384 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
385 digestsize, NS_BIT);
386 set_flow_mode(&desc[idx], DIN_HASH);
387 idx++;
388
389 return idx;
390}
391
392static int cc_hash_digest(struct ahash_request *req)
393{
394 struct ahash_req_ctx *state = ahash_request_ctx(req);
395 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
396 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
397 u32 digestsize = crypto_ahash_digestsize(tfm);
398 struct scatterlist *src = req->src;
399 unsigned int nbytes = req->nbytes;
400 u8 *result = req->result;
401 struct device *dev = drvdata_to_dev(ctx->drvdata);
402 bool is_hmac = ctx->is_hmac;
403 struct cc_crypto_req cc_req = {};
404 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
405 cc_sram_addr_t larval_digest_addr =
406 cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
407 int idx = 0;
408 int rc = 0;
409 gfp_t flags = cc_gfp_flags(&req->base);
410
411 dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
412 nbytes);
413
414 cc_init_req(dev, state, ctx);
415
416 if (cc_map_req(dev, state, ctx)) {
417 dev_err(dev, "map_ahash_source() failed\n");
418 return -ENOMEM;
419 }
420
421 if (cc_map_result(dev, state, digestsize)) {
422 dev_err(dev, "map_ahash_digest() failed\n");
423 cc_unmap_req(dev, state, ctx);
424 return -ENOMEM;
425 }
426
427 if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
428 flags)) {
429 dev_err(dev, "map_ahash_request_final() failed\n");
430 cc_unmap_result(dev, state, digestsize, result);
431 cc_unmap_req(dev, state, ctx);
432 return -ENOMEM;
433 }
434
435 /* Setup request structure */
436 cc_req.user_cb = cc_digest_complete;
437 cc_req.user_arg = req;
438
439 /* If HMAC then load hash IPAD xor key, if HASH then load initial
440 * digest
441 */
442 hw_desc_init(&desc[idx]);
443 set_cipher_mode(&desc[idx], ctx->hw_mode);
444 if (is_hmac) {
445 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
446 ctx->inter_digestsize, NS_BIT);
447 } else {
448 set_din_sram(&desc[idx], larval_digest_addr,
449 ctx->inter_digestsize);
450 }
451 set_flow_mode(&desc[idx], S_DIN_to_HASH);
452 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
453 idx++;
454
455 /* Load the hash current length */
456 hw_desc_init(&desc[idx]);
457 set_cipher_mode(&desc[idx], ctx->hw_mode);
458
459 if (is_hmac) {
460 set_din_type(&desc[idx], DMA_DLLI,
461 state->digest_bytes_len_dma_addr,
462 ctx->drvdata->hash_len_sz, NS_BIT);
463 } else {
464 set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
465 if (nbytes)
466 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
467 else
468 set_cipher_do(&desc[idx], DO_PAD);
469 }
470 set_flow_mode(&desc[idx], S_DIN_to_HASH);
471 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
472 idx++;
473
474 cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
475
476 if (is_hmac) {
477 /* HW last hash block padding (aka. "DO_PAD") */
478 hw_desc_init(&desc[idx]);
479 set_cipher_mode(&desc[idx], ctx->hw_mode);
480 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
481 ctx->drvdata->hash_len_sz, NS_BIT, 0);
482 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
483 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
484 set_cipher_do(&desc[idx], DO_PAD);
485 idx++;
486
487 idx = cc_fin_hmac(desc, req, idx);
488 }
489
490 idx = cc_fin_result(desc, req, idx);
491
492 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
493 if (rc != -EINPROGRESS && rc != -EBUSY) {
494 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
495 cc_unmap_hash_request(dev, state, src, true);
496 cc_unmap_result(dev, state, digestsize, result);
497 cc_unmap_req(dev, state, ctx);
498 }
499 return rc;
500}
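
cc_hash_digest() above follows the async hash convention: -EINPROGRESS (queued) and -EBUSY (queued with backlog) are the expected in-flight returns, and the real status is delivered later through the completion callback set in cc_req. A sketch of how a caller typically flattens this into a synchronous wait, assuming req was prepared with crypto_req_done() as its callback (illustrative only, not part of the patch):

#include <crypto/hash.h>
#include <linux/crypto.h>

static int sync_digest_example(struct ahash_request *req,
			       struct crypto_wait *wait)
{
	/* crypto_wait_req() turns -EINPROGRESS/-EBUSY into a sleep until
	 * the driver's completion callback reports the final status.
	 */
	return crypto_wait_req(crypto_ahash_digest(req), wait);
}
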
501
502static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
503 struct ahash_req_ctx *state, unsigned int idx)
504{
505 /* Restore hash digest */
506 hw_desc_init(&desc[idx]);
507 set_cipher_mode(&desc[idx], ctx->hw_mode);
508 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
509 ctx->inter_digestsize, NS_BIT);
510 set_flow_mode(&desc[idx], S_DIN_to_HASH);
511 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
512 idx++;
513
514 /* Restore hash current length */
515 hw_desc_init(&desc[idx]);
516 set_cipher_mode(&desc[idx], ctx->hw_mode);
517 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
518 set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
519 ctx->drvdata->hash_len_sz, NS_BIT);
520 set_flow_mode(&desc[idx], S_DIN_to_HASH);
521 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
522 idx++;
523
524 cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
525
526 return idx;
527}
528
529static int cc_hash_update(struct ahash_request *req)
530{
531 struct ahash_req_ctx *state = ahash_request_ctx(req);
532 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
533 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
534 unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
535 struct scatterlist *src = req->src;
536 unsigned int nbytes = req->nbytes;
537 struct device *dev = drvdata_to_dev(ctx->drvdata);
538 struct cc_crypto_req cc_req = {};
539 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
540 u32 idx = 0;
541 int rc;
542 gfp_t flags = cc_gfp_flags(&req->base);
543
544 dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
545 "hmac" : "hash", nbytes);
546
547 if (nbytes == 0) {
548 /* no real updates required */
549 return 0;
550 }
551
552 rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
553 block_size, flags);
554 if (rc) {
555 if (rc == 1) {
556 dev_dbg(dev, " data size does not require HW update %x\n",
557 nbytes);
558 /* No hardware updates are required */
559 return 0;
560 }
561 dev_err(dev, "map_ahash_request_update() failed\n");
562 return -ENOMEM;
563 }
564
565 if (cc_map_req(dev, state, ctx)) {
566 dev_err(dev, "map_ahash_source() failed\n");
567 cc_unmap_hash_request(dev, state, src, true);
568 return -EINVAL;
569 }
570
571 /* Setup request structure */
572 cc_req.user_cb = cc_update_complete;
573 cc_req.user_arg = req;
574
575 idx = cc_restore_hash(desc, ctx, state, idx);
576
577 /* store the hash digest result in context */
578 hw_desc_init(&desc[idx]);
579 set_cipher_mode(&desc[idx], ctx->hw_mode);
580 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
581 ctx->inter_digestsize, NS_BIT, 0);
582 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
583 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
584 idx++;
585
586 /* store current hash length in context */
587 hw_desc_init(&desc[idx]);
588 set_cipher_mode(&desc[idx], ctx->hw_mode);
589 set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
590 ctx->drvdata->hash_len_sz, NS_BIT, 1);
591 set_queue_last_ind(ctx->drvdata, &desc[idx]);
592 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
593 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
594 idx++;
595
596 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
597 if (rc != -EINPROGRESS && rc != -EBUSY) {
598 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
599 cc_unmap_hash_request(dev, state, src, true);
600 cc_unmap_req(dev, state, ctx);
601 }
602 return rc;
603}
604
605static int cc_hash_finup(struct ahash_request *req)
606{
607 struct ahash_req_ctx *state = ahash_request_ctx(req);
608 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
609 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
610 u32 digestsize = crypto_ahash_digestsize(tfm);
611 struct scatterlist *src = req->src;
612 unsigned int nbytes = req->nbytes;
613 u8 *result = req->result;
614 struct device *dev = drvdata_to_dev(ctx->drvdata);
615 bool is_hmac = ctx->is_hmac;
616 struct cc_crypto_req cc_req = {};
617 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
618 unsigned int idx = 0;
619 int rc;
620 gfp_t flags = cc_gfp_flags(&req->base);
621
622 dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
623 nbytes);
624
625 if (cc_map_req(dev, state, ctx)) {
626 dev_err(dev, "map_ahash_source() failed\n");
627 return -EINVAL;
628 }
629
630 if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
631 flags)) {
632 dev_err(dev, "map_ahash_request_final() failed\n");
633 cc_unmap_req(dev, state, ctx);
634 return -ENOMEM;
635 }
636 if (cc_map_result(dev, state, digestsize)) {
637 dev_err(dev, "map_ahash_digest() failed\n");
638 cc_unmap_hash_request(dev, state, src, true);
639 cc_unmap_req(dev, state, ctx);
640 return -ENOMEM;
641 }
642
643 /* Setup request structure */
644 cc_req.user_cb = cc_hash_complete;
645 cc_req.user_arg = req;
646
647 idx = cc_restore_hash(desc, ctx, state, idx);
648
649 if (is_hmac)
650 idx = cc_fin_hmac(desc, req, idx);
651
652 idx = cc_fin_result(desc, req, idx);
653
654 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
655 if (rc != -EINPROGRESS && rc != -EBUSY) {
656 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
657 cc_unmap_hash_request(dev, state, src, true);
658 cc_unmap_result(dev, state, digestsize, result);
659 cc_unmap_req(dev, state, ctx);
660 }
661 return rc;
662}
663
664static int cc_hash_final(struct ahash_request *req)
665{
666 struct ahash_req_ctx *state = ahash_request_ctx(req);
667 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
668 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
669 u32 digestsize = crypto_ahash_digestsize(tfm);
670 struct scatterlist *src = req->src;
671 unsigned int nbytes = req->nbytes;
672 u8 *result = req->result;
673 struct device *dev = drvdata_to_dev(ctx->drvdata);
674 bool is_hmac = ctx->is_hmac;
675 struct cc_crypto_req cc_req = {};
676 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
677 unsigned int idx = 0;
678 int rc;
679 gfp_t flags = cc_gfp_flags(&req->base);
680
681 dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
682 nbytes);
683
684 if (cc_map_req(dev, state, ctx)) {
685 dev_err(dev, "map_ahash_source() failed\n");
686 return -EINVAL;
687 }
688
689 if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0,
690 flags)) {
691 dev_err(dev, "map_ahash_request_final() failed\n");
692 cc_unmap_req(dev, state, ctx);
693 return -ENOMEM;
694 }
695
696 if (cc_map_result(dev, state, digestsize)) {
697 dev_err(dev, "map_ahash_digest() failed\n");
698 cc_unmap_hash_request(dev, state, src, true);
699 cc_unmap_req(dev, state, ctx);
700 return -ENOMEM;
701 }
702
703 /* Setup request structure */
704 cc_req.user_cb = cc_hash_complete;
705 cc_req.user_arg = req;
706
707 idx = cc_restore_hash(desc, ctx, state, idx);
708
709 /* "DO-PAD" must be enabled only when writing current length to HW */
710 hw_desc_init(&desc[idx]);
711 set_cipher_do(&desc[idx], DO_PAD);
712 set_cipher_mode(&desc[idx], ctx->hw_mode);
713 set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
714 ctx->drvdata->hash_len_sz, NS_BIT, 0);
715 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
716 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
717 idx++;
718
719 if (is_hmac)
720 idx = cc_fin_hmac(desc, req, idx);
721
722 idx = cc_fin_result(desc, req, idx);
723
724 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
725 if (rc != -EINPROGRESS && rc != -EBUSY) {
726 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
727 cc_unmap_hash_request(dev, state, src, true);
728 cc_unmap_result(dev, state, digestsize, result);
729 cc_unmap_req(dev, state, ctx);
730 }
731 return rc;
732}
733
734static int cc_hash_init(struct ahash_request *req)
735{
736 struct ahash_req_ctx *state = ahash_request_ctx(req);
737 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
738 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
739 struct device *dev = drvdata_to_dev(ctx->drvdata);
740
741 dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
742
743 cc_init_req(dev, state, ctx);
744
745 return 0;
746}
747
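/*
 * HMAC key processing (RFC 2104): a key longer than the block size is first
 * hashed down to the digest size and zero-padded to a full block; a shorter
 * key is zero-padded directly.  The padded key is then XORed with the ipad
 * (0x36) and opad (0x5c) constants and run through the hash, and the two
 * resulting digests are cached in the transform context.
 */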
748static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
749 unsigned int keylen)
750{
751 unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
752 struct cc_crypto_req cc_req = {};
753 struct cc_hash_ctx *ctx = NULL;
754 int blocksize = 0;
755 int digestsize = 0;
756 int i, idx = 0, rc = 0;
757 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
758 cc_sram_addr_t larval_addr;
759 struct device *dev;
760
761 ctx = crypto_ahash_ctx(ahash);
762 dev = drvdata_to_dev(ctx->drvdata);
763 dev_dbg(dev, "start keylen: %d", keylen);
764
765 blocksize = crypto_tfm_alg_blocksize(&ahash->base);
766 digestsize = crypto_ahash_digestsize(ahash);
767
768 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
769
770	/* A zero keylen selects the plain HASH flow; any non-zero keylen
771	 * selects the HMAC flow.
772	 */
773 ctx->key_params.keylen = keylen;
774 ctx->key_params.key_dma_addr = 0;
775 ctx->is_hmac = true;
776
777 if (keylen) {
778 ctx->key_params.key_dma_addr =
779 dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
780 if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
781 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
782 key, keylen);
783 return -ENOMEM;
784 }
785 dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
786 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
787
788 if (keylen > blocksize) {
789 /* Load hash initial state */
790 hw_desc_init(&desc[idx]);
791 set_cipher_mode(&desc[idx], ctx->hw_mode);
792 set_din_sram(&desc[idx], larval_addr,
793 ctx->inter_digestsize);
794 set_flow_mode(&desc[idx], S_DIN_to_HASH);
795 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
796 idx++;
797
798			/* Load the hash current length */
799 hw_desc_init(&desc[idx]);
800 set_cipher_mode(&desc[idx], ctx->hw_mode);
801 set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
802 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
803 set_flow_mode(&desc[idx], S_DIN_to_HASH);
804 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
805 idx++;
806
807 hw_desc_init(&desc[idx]);
808 set_din_type(&desc[idx], DMA_DLLI,
809 ctx->key_params.key_dma_addr, keylen,
810 NS_BIT);
811 set_flow_mode(&desc[idx], DIN_HASH);
812 idx++;
813
814 /* Get hashed key */
815 hw_desc_init(&desc[idx]);
816 set_cipher_mode(&desc[idx], ctx->hw_mode);
817 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
818 digestsize, NS_BIT, 0);
819 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
820 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
821 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
822 cc_set_endianity(ctx->hash_mode, &desc[idx]);
823 idx++;
824
825 hw_desc_init(&desc[idx]);
826 set_din_const(&desc[idx], 0, (blocksize - digestsize));
827 set_flow_mode(&desc[idx], BYPASS);
828 set_dout_dlli(&desc[idx],
829 (ctx->opad_tmp_keys_dma_addr +
830 digestsize),
831 (blocksize - digestsize), NS_BIT, 0);
832 idx++;
833 } else {
834 hw_desc_init(&desc[idx]);
835 set_din_type(&desc[idx], DMA_DLLI,
836 ctx->key_params.key_dma_addr, keylen,
837 NS_BIT);
838 set_flow_mode(&desc[idx], BYPASS);
839 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
840 keylen, NS_BIT, 0);
841 idx++;
842
843 if ((blocksize - keylen)) {
844 hw_desc_init(&desc[idx]);
845 set_din_const(&desc[idx], 0,
846 (blocksize - keylen));
847 set_flow_mode(&desc[idx], BYPASS);
848 set_dout_dlli(&desc[idx],
849 (ctx->opad_tmp_keys_dma_addr +
850 keylen), (blocksize - keylen),
851 NS_BIT, 0);
852 idx++;
853 }
854 }
855 } else {
856 hw_desc_init(&desc[idx]);
857 set_din_const(&desc[idx], 0, blocksize);
858 set_flow_mode(&desc[idx], BYPASS);
859 set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
860 blocksize, NS_BIT, 0);
861 idx++;
862 }
863
864 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
865 if (rc) {
866 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
867 goto out;
868 }
869
870 /* calc derived HMAC key */
871 for (idx = 0, i = 0; i < 2; i++) {
872 /* Load hash initial state */
873 hw_desc_init(&desc[idx]);
874 set_cipher_mode(&desc[idx], ctx->hw_mode);
875 set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
876 set_flow_mode(&desc[idx], S_DIN_to_HASH);
877 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
878 idx++;
879
880		/* Load the hash current length */
881 hw_desc_init(&desc[idx]);
882 set_cipher_mode(&desc[idx], ctx->hw_mode);
883 set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
884 set_flow_mode(&desc[idx], S_DIN_to_HASH);
885 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
886 idx++;
887
888 /* Prepare ipad key */
889 hw_desc_init(&desc[idx]);
890 set_xor_val(&desc[idx], hmac_pad_const[i]);
891 set_cipher_mode(&desc[idx], ctx->hw_mode);
892 set_flow_mode(&desc[idx], S_DIN_to_HASH);
893 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
894 idx++;
895
896 /* Perform HASH update */
897 hw_desc_init(&desc[idx]);
898 set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
899 blocksize, NS_BIT);
900 set_cipher_mode(&desc[idx], ctx->hw_mode);
901 set_xor_active(&desc[idx]);
902 set_flow_mode(&desc[idx], DIN_HASH);
903 idx++;
904
905 /* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
906 * of the first HASH "update" state)
907 */
908 hw_desc_init(&desc[idx]);
909 set_cipher_mode(&desc[idx], ctx->hw_mode);
910 if (i > 0) /* Not first iteration */
911 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
912 ctx->inter_digestsize, NS_BIT, 0);
913 else /* First iteration */
914 set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
915 ctx->inter_digestsize, NS_BIT, 0);
916 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
917 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
918 idx++;
919 }
920
921 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
922
923out:
924 if (rc)
925 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
926
927 if (ctx->key_params.key_dma_addr) {
928 dma_unmap_single(dev, ctx->key_params.key_dma_addr,
929 ctx->key_params.keylen, DMA_TO_DEVICE);
930 dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
931 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
932 }
933 return rc;
934}
935
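/*
 * AES-XCBC-MAC key setup (RFC 3566): the K1/K2/K3 subkeys are derived by
 * ECB-encrypting the constant blocks 0x01..01, 0x02..02 and 0x03..03 with
 * the user key, and are stored at their respective offsets in the context's
 * opad_tmp_keys buffer.
 */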
936static int cc_xcbc_setkey(struct crypto_ahash *ahash,
937 const u8 *key, unsigned int keylen)
938{
939 struct cc_crypto_req cc_req = {};
940 struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
941 struct device *dev = drvdata_to_dev(ctx->drvdata);
942 int rc = 0;
943 unsigned int idx = 0;
944 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
945
946 dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
947
948 switch (keylen) {
949 case AES_KEYSIZE_128:
950 case AES_KEYSIZE_192:
951 case AES_KEYSIZE_256:
952 break;
953 default:
954 return -EINVAL;
955 }
956
957 ctx->key_params.keylen = keylen;
958
959 ctx->key_params.key_dma_addr =
960 dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
961 if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
962 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
963 key, keylen);
964 return -ENOMEM;
965 }
966 dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
967 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
968
969 ctx->is_hmac = true;
970 /* 1. Load the AES key */
971 hw_desc_init(&desc[idx]);
972 set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
973 keylen, NS_BIT);
974 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
975 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
976 set_key_size_aes(&desc[idx], keylen);
977 set_flow_mode(&desc[idx], S_DIN_to_AES);
978 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
979 idx++;
980
981 hw_desc_init(&desc[idx]);
982 set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
983 set_flow_mode(&desc[idx], DIN_AES_DOUT);
984 set_dout_dlli(&desc[idx],
985 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
986 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
987 idx++;
988
989 hw_desc_init(&desc[idx]);
990 set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
991 set_flow_mode(&desc[idx], DIN_AES_DOUT);
992 set_dout_dlli(&desc[idx],
993 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
994 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
995 idx++;
996
997 hw_desc_init(&desc[idx]);
998 set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
999 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1000 set_dout_dlli(&desc[idx],
1001 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
1002 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
1003 idx++;
1004
1005 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
1006
1007 if (rc)
1008 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
1009
1010 dma_unmap_single(dev, ctx->key_params.key_dma_addr,
1011 ctx->key_params.keylen, DMA_TO_DEVICE);
1012 dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
1013 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
1014
1015 return rc;
1016}
1017
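/*
 * cc_cmac_setkey() derives no subkeys in software; it only copies the user
 * key into the DMA-coherent opad_tmp_keys buffer (zero-padding 192-bit keys
 * up to the maximum AES key size) for the engine to use directly.
 */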
1018static int cc_cmac_setkey(struct crypto_ahash *ahash,
1019 const u8 *key, unsigned int keylen)
1020{
1021 struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1022 struct device *dev = drvdata_to_dev(ctx->drvdata);
1023
1024 dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
1025
1026 ctx->is_hmac = true;
1027
1028 switch (keylen) {
1029 case AES_KEYSIZE_128:
1030 case AES_KEYSIZE_192:
1031 case AES_KEYSIZE_256:
1032 break;
1033 default:
1034 return -EINVAL;
1035 }
1036
1037 ctx->key_params.keylen = keylen;
1038
1039 /* STAT_PHASE_1: Copy key to ctx */
1040
1041 dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
1042 keylen, DMA_TO_DEVICE);
1043
1044 memcpy(ctx->opad_tmp_keys_buff, key, keylen);
1045 if (keylen == 24) {
1046 memset(ctx->opad_tmp_keys_buff + 24, 0,
1047 CC_AES_KEY_SIZE_MAX - 24);
1048 }
1049
1050 dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
1051 keylen, DMA_TO_DEVICE);
1052
1053 ctx->key_params.keylen = keylen;
1054
1055 return 0;
1056}
1057
1058static void cc_free_ctx(struct cc_hash_ctx *ctx)
1059{
1060 struct device *dev = drvdata_to_dev(ctx->drvdata);
1061
1062 if (ctx->digest_buff_dma_addr) {
1063 dma_unmap_single(dev, ctx->digest_buff_dma_addr,
1064 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1065 dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
1066 &ctx->digest_buff_dma_addr);
1067 ctx->digest_buff_dma_addr = 0;
1068 }
1069 if (ctx->opad_tmp_keys_dma_addr) {
1070 dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
1071 sizeof(ctx->opad_tmp_keys_buff),
1072 DMA_BIDIRECTIONAL);
1073 dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
1074 &ctx->opad_tmp_keys_dma_addr);
1075 ctx->opad_tmp_keys_dma_addr = 0;
1076 }
1077
1078 ctx->key_params.keylen = 0;
1079}
1080
1081static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
1082{
1083 struct device *dev = drvdata_to_dev(ctx->drvdata);
1084
1085 ctx->key_params.keylen = 0;
1086
1087 ctx->digest_buff_dma_addr =
1088 dma_map_single(dev, (void *)ctx->digest_buff,
1089 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1090 if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
1091 dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
1092 sizeof(ctx->digest_buff), ctx->digest_buff);
1093 goto fail;
1094 }
1095 dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
1096 sizeof(ctx->digest_buff), ctx->digest_buff,
1097 &ctx->digest_buff_dma_addr);
1098
1099 ctx->opad_tmp_keys_dma_addr =
1100 dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
1101 sizeof(ctx->opad_tmp_keys_buff),
1102 DMA_BIDIRECTIONAL);
1103 if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
1104 dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
1105 sizeof(ctx->opad_tmp_keys_buff),
1106 ctx->opad_tmp_keys_buff);
1107 goto fail;
1108 }
1109 dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
1110 sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
1111 &ctx->opad_tmp_keys_dma_addr);
1112
1113 ctx->is_hmac = false;
1114 return 0;
1115
1116fail:
1117 cc_free_ctx(ctx);
1118 return -ENOMEM;
1119}
1120
1121static int cc_cra_init(struct crypto_tfm *tfm)
1122{
1123 struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1124 struct hash_alg_common *hash_alg_common =
1125 container_of(tfm->__crt_alg, struct hash_alg_common, base);
1126 struct ahash_alg *ahash_alg =
1127 container_of(hash_alg_common, struct ahash_alg, halg);
1128 struct cc_hash_alg *cc_alg =
1129 container_of(ahash_alg, struct cc_hash_alg, ahash_alg);
1130
1131 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1132 sizeof(struct ahash_req_ctx));
1133
1134 ctx->hash_mode = cc_alg->hash_mode;
1135 ctx->hw_mode = cc_alg->hw_mode;
1136 ctx->inter_digestsize = cc_alg->inter_digestsize;
1137 ctx->drvdata = cc_alg->drvdata;
1138
1139 return cc_alloc_ctx(ctx);
1140}
1141
1142static void cc_cra_exit(struct crypto_tfm *tfm)
1143{
1144 struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1145 struct device *dev = drvdata_to_dev(ctx->drvdata);
1146
1147	dev_dbg(dev, "cc_cra_exit\n");
1148 cc_free_ctx(ctx);
1149}
1150
1151static int cc_mac_update(struct ahash_request *req)
1152{
1153 struct ahash_req_ctx *state = ahash_request_ctx(req);
1154 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1155 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1156 struct device *dev = drvdata_to_dev(ctx->drvdata);
1157 unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1158 struct cc_crypto_req cc_req = {};
1159 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1160 int rc;
1161 u32 idx = 0;
1162 gfp_t flags = cc_gfp_flags(&req->base);
1163
1164 if (req->nbytes == 0) {
1165 /* no real updates required */
1166 return 0;
1167 }
1168
1169 state->xcbc_count++;
1170
1171 rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
1172 req->nbytes, block_size, flags);
1173 if (rc) {
1174 if (rc == 1) {
1175			dev_dbg(dev, " data size does not require HW update %x\n",
1176 req->nbytes);
1177 /* No hardware updates are required */
1178 return 0;
1179 }
1180 dev_err(dev, "map_ahash_request_update() failed\n");
1181 return -ENOMEM;
1182 }
1183
1184 if (cc_map_req(dev, state, ctx)) {
1185 dev_err(dev, "map_ahash_source() failed\n");
1186 return -EINVAL;
1187 }
1188
1189 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1190 cc_setup_xcbc(req, desc, &idx);
1191 else
1192 cc_setup_cmac(req, desc, &idx);
1193
1194 cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
1195
1196 /* store the hash digest result in context */
1197 hw_desc_init(&desc[idx]);
1198 set_cipher_mode(&desc[idx], ctx->hw_mode);
1199 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1200 ctx->inter_digestsize, NS_BIT, 1);
1201 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1202 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1203 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1204 idx++;
1205
1206 /* Setup request structure */
1207 cc_req.user_cb = (void *)cc_update_complete;
1208 cc_req.user_arg = (void *)req;
1209
1210 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1211 if (rc != -EINPROGRESS && rc != -EBUSY) {
1212 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1213 cc_unmap_hash_request(dev, state, req->src, true);
1214 cc_unmap_req(dev, state, ctx);
1215 }
1216 return rc;
1217}
1218
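/*
 * cc_mac_final() covers three cases: a zero-length message (CMAC size-0
 * mode), leftover bytes still buffered from previous updates, and fully
 * block-aligned input already consumed by updates.  In the last case the
 * stored MAC state is first ECB-decrypted with the key at the K1 offset to
 * recover block_state XOR M[n] before the final-block processing below.
 */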
1219static int cc_mac_final(struct ahash_request *req)
1220{
1221 struct ahash_req_ctx *state = ahash_request_ctx(req);
1222 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1223 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1224 struct device *dev = drvdata_to_dev(ctx->drvdata);
1225 struct cc_crypto_req cc_req = {};
1226 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1227 int idx = 0;
1228 int rc = 0;
1229 u32 key_size, key_len;
1230 u32 digestsize = crypto_ahash_digestsize(tfm);
1231 gfp_t flags = cc_gfp_flags(&req->base);
1232 u32 rem_cnt = *cc_hash_buf_cnt(state);
1233
1234 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1235 key_size = CC_AES_128_BIT_KEY_SIZE;
1236 key_len = CC_AES_128_BIT_KEY_SIZE;
1237 } else {
1238 key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
1239 ctx->key_params.keylen;
1240 key_len = ctx->key_params.keylen;
1241 }
1242
1243	dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);
1244
1245 if (cc_map_req(dev, state, ctx)) {
1246 dev_err(dev, "map_ahash_source() failed\n");
1247 return -EINVAL;
1248 }
1249
1250 if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1251 req->nbytes, 0, flags)) {
1252 dev_err(dev, "map_ahash_request_final() failed\n");
1253 cc_unmap_req(dev, state, ctx);
1254 return -ENOMEM;
1255 }
1256
1257 if (cc_map_result(dev, state, digestsize)) {
1258 dev_err(dev, "map_ahash_digest() failed\n");
1259 cc_unmap_hash_request(dev, state, req->src, true);
1260 cc_unmap_req(dev, state, ctx);
1261 return -ENOMEM;
1262 }
1263
1264 /* Setup request structure */
1265 cc_req.user_cb = (void *)cc_hash_complete;
1266 cc_req.user_arg = (void *)req;
1267
1268 if (state->xcbc_count && rem_cnt == 0) {
1269 /* Load key for ECB decryption */
1270 hw_desc_init(&desc[idx]);
1271 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1272 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
1273 set_din_type(&desc[idx], DMA_DLLI,
1274 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
1275 key_size, NS_BIT);
1276 set_key_size_aes(&desc[idx], key_len);
1277 set_flow_mode(&desc[idx], S_DIN_to_AES);
1278 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1279 idx++;
1280
1281 /* Initiate decryption of block state to previous
1282 * block_state-XOR-M[n]
1283 */
1284 hw_desc_init(&desc[idx]);
1285 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
1286 CC_AES_BLOCK_SIZE, NS_BIT);
1287 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1288 CC_AES_BLOCK_SIZE, NS_BIT, 0);
1289 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1290 idx++;
1291
1292 /* Memory Barrier: wait for axi write to complete */
1293 hw_desc_init(&desc[idx]);
1294 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1295 set_dout_no_dma(&desc[idx], 0, 0, 1);
1296 idx++;
1297 }
1298
1299 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1300 cc_setup_xcbc(req, desc, &idx);
1301 else
1302 cc_setup_cmac(req, desc, &idx);
1303
1304 if (state->xcbc_count == 0) {
1305 hw_desc_init(&desc[idx]);
1306 set_cipher_mode(&desc[idx], ctx->hw_mode);
1307 set_key_size_aes(&desc[idx], key_len);
1308 set_cmac_size0_mode(&desc[idx]);
1309 set_flow_mode(&desc[idx], S_DIN_to_AES);
1310 idx++;
1311 } else if (rem_cnt > 0) {
1312 cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1313 } else {
1314 hw_desc_init(&desc[idx]);
1315 set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
1316 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1317 idx++;
1318 }
1319
1320 /* Get final MAC result */
1321 hw_desc_init(&desc[idx]);
1322 /* TODO */
1323 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1324 digestsize, NS_BIT, 1);
1325 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1326 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1327 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1328 set_cipher_mode(&desc[idx], ctx->hw_mode);
1329 idx++;
1330
1331 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1332 if (rc != -EINPROGRESS && rc != -EBUSY) {
1333 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1334 cc_unmap_hash_request(dev, state, req->src, true);
1335 cc_unmap_result(dev, state, digestsize, req->result);
1336 cc_unmap_req(dev, state, ctx);
1337 }
1338 return rc;
1339}
1340
1341static int cc_mac_finup(struct ahash_request *req)
1342{
1343 struct ahash_req_ctx *state = ahash_request_ctx(req);
1344 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1345 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1346 struct device *dev = drvdata_to_dev(ctx->drvdata);
1347 struct cc_crypto_req cc_req = {};
1348 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1349 int idx = 0;
1350 int rc = 0;
1351 u32 key_len = 0;
1352 u32 digestsize = crypto_ahash_digestsize(tfm);
1353 gfp_t flags = cc_gfp_flags(&req->base);
1354
1355 dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
1356 if (state->xcbc_count > 0 && req->nbytes == 0) {
1357		dev_dbg(dev, "No data to update. Call to cc_mac_final\n");
1358 return cc_mac_final(req);
1359 }
1360
1361 if (cc_map_req(dev, state, ctx)) {
1362 dev_err(dev, "map_ahash_source() failed\n");
1363 return -EINVAL;
1364 }
1365
1366 if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1367 req->nbytes, 1, flags)) {
1368 dev_err(dev, "map_ahash_request_final() failed\n");
1369 cc_unmap_req(dev, state, ctx);
1370 return -ENOMEM;
1371 }
1372 if (cc_map_result(dev, state, digestsize)) {
1373 dev_err(dev, "map_ahash_digest() failed\n");
1374 cc_unmap_hash_request(dev, state, req->src, true);
1375 cc_unmap_req(dev, state, ctx);
1376 return -ENOMEM;
1377 }
1378
1379 /* Setup request structure */
1380 cc_req.user_cb = (void *)cc_hash_complete;
1381 cc_req.user_arg = (void *)req;
1382
1383 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1384 key_len = CC_AES_128_BIT_KEY_SIZE;
1385 cc_setup_xcbc(req, desc, &idx);
1386 } else {
1387 key_len = ctx->key_params.keylen;
1388 cc_setup_cmac(req, desc, &idx);
1389 }
1390
1391 if (req->nbytes == 0) {
1392 hw_desc_init(&desc[idx]);
1393 set_cipher_mode(&desc[idx], ctx->hw_mode);
1394 set_key_size_aes(&desc[idx], key_len);
1395 set_cmac_size0_mode(&desc[idx]);
1396 set_flow_mode(&desc[idx], S_DIN_to_AES);
1397 idx++;
1398 } else {
1399 cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1400 }
1401
1402 /* Get final MAC result */
1403 hw_desc_init(&desc[idx]);
1404 /* TODO */
1405 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1406 digestsize, NS_BIT, 1);
1407 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1408 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1409 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1410 set_cipher_mode(&desc[idx], ctx->hw_mode);
1411 idx++;
1412
1413 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1414 if (rc != -EINPROGRESS && rc != -EBUSY) {
1415 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1416 cc_unmap_hash_request(dev, state, req->src, true);
1417 cc_unmap_result(dev, state, digestsize, req->result);
1418 cc_unmap_req(dev, state, ctx);
1419 }
1420 return rc;
1421}
1422
1423static int cc_mac_digest(struct ahash_request *req)
1424{
1425 struct ahash_req_ctx *state = ahash_request_ctx(req);
1426 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1427 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1428 struct device *dev = drvdata_to_dev(ctx->drvdata);
1429 u32 digestsize = crypto_ahash_digestsize(tfm);
1430 struct cc_crypto_req cc_req = {};
1431 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1432 u32 key_len;
1433 unsigned int idx = 0;
1434 int rc;
1435 gfp_t flags = cc_gfp_flags(&req->base);
1436
1437	dev_dbg(dev, "===== mac-digest (%d) ====\n", req->nbytes);
1438
1439 cc_init_req(dev, state, ctx);
1440
1441 if (cc_map_req(dev, state, ctx)) {
1442 dev_err(dev, "map_ahash_source() failed\n");
1443 return -ENOMEM;
1444 }
1445 if (cc_map_result(dev, state, digestsize)) {
1446 dev_err(dev, "map_ahash_digest() failed\n");
1447 cc_unmap_req(dev, state, ctx);
1448 return -ENOMEM;
1449 }
1450
1451 if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1452 req->nbytes, 1, flags)) {
1453 dev_err(dev, "map_ahash_request_final() failed\n");
1454 cc_unmap_req(dev, state, ctx);
1455 return -ENOMEM;
1456 }
1457
1458 /* Setup request structure */
1459 cc_req.user_cb = (void *)cc_digest_complete;
1460 cc_req.user_arg = (void *)req;
1461
1462 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1463 key_len = CC_AES_128_BIT_KEY_SIZE;
1464 cc_setup_xcbc(req, desc, &idx);
1465 } else {
1466 key_len = ctx->key_params.keylen;
1467 cc_setup_cmac(req, desc, &idx);
1468 }
1469
1470 if (req->nbytes == 0) {
1471 hw_desc_init(&desc[idx]);
1472 set_cipher_mode(&desc[idx], ctx->hw_mode);
1473 set_key_size_aes(&desc[idx], key_len);
1474 set_cmac_size0_mode(&desc[idx]);
1475 set_flow_mode(&desc[idx], S_DIN_to_AES);
1476 idx++;
1477 } else {
1478 cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1479 }
1480
1481 /* Get final MAC result */
1482 hw_desc_init(&desc[idx]);
1483 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1484 CC_AES_BLOCK_SIZE, NS_BIT, 1);
1485 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1486 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1487 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1488 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1489 set_cipher_mode(&desc[idx], ctx->hw_mode);
1490 idx++;
1491
1492 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1493 if (rc != -EINPROGRESS && rc != -EBUSY) {
1494 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1495 cc_unmap_hash_request(dev, state, req->src, true);
1496 cc_unmap_result(dev, state, digestsize, req->result);
1497 cc_unmap_req(dev, state, ctx);
1498 }
1499 return rc;
1500}
1501
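/*
 * Exported state layout: a CC_EXPORT_MAGIC marker, the intermediate digest,
 * the running byte-length counter, the count of buffered bytes and finally
 * the buffered data itself.  cc_hash_import() below parses the same layout.
 */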
1502static int cc_hash_export(struct ahash_request *req, void *out)
1503{
1504 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1505 struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1506 struct ahash_req_ctx *state = ahash_request_ctx(req);
1507 u8 *curr_buff = cc_hash_buf(state);
1508 u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
1509 const u32 tmp = CC_EXPORT_MAGIC;
1510
1511 memcpy(out, &tmp, sizeof(u32));
1512 out += sizeof(u32);
1513
1514 memcpy(out, state->digest_buff, ctx->inter_digestsize);
1515 out += ctx->inter_digestsize;
1516
1517 memcpy(out, state->digest_bytes_len, ctx->drvdata->hash_len_sz);
1518 out += ctx->drvdata->hash_len_sz;
1519
1520 memcpy(out, &curr_buff_cnt, sizeof(u32));
1521 out += sizeof(u32);
1522
1523 memcpy(out, curr_buff, curr_buff_cnt);
1524
1525 return 0;
1526}
1527
1528static int cc_hash_import(struct ahash_request *req, const void *in)
1529{
1530 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1531 struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1532 struct device *dev = drvdata_to_dev(ctx->drvdata);
1533 struct ahash_req_ctx *state = ahash_request_ctx(req);
1534 u32 tmp;
1535
1536 memcpy(&tmp, in, sizeof(u32));
1537 if (tmp != CC_EXPORT_MAGIC)
1538 return -EINVAL;
1539 in += sizeof(u32);
1540
1541 cc_init_req(dev, state, ctx);
1542
1543 memcpy(state->digest_buff, in, ctx->inter_digestsize);
1544 in += ctx->inter_digestsize;
1545
1546 memcpy(state->digest_bytes_len, in, ctx->drvdata->hash_len_sz);
1547 in += ctx->drvdata->hash_len_sz;
1548
1549 /* Sanity check the data as much as possible */
1550 memcpy(&tmp, in, sizeof(u32));
1551 if (tmp > CC_MAX_HASH_BLCK_SIZE)
1552 return -EINVAL;
1553 in += sizeof(u32);
1554
1555 state->buf_cnt[0] = tmp;
1556 memcpy(state->buffers[0], in, tmp);
1557
1558 return 0;
1559}
1560
1561struct cc_hash_template {
1562 char name[CRYPTO_MAX_ALG_NAME];
1563 char driver_name[CRYPTO_MAX_ALG_NAME];
1564 char mac_name[CRYPTO_MAX_ALG_NAME];
1565 char mac_driver_name[CRYPTO_MAX_ALG_NAME];
1566 unsigned int blocksize;
1567 bool synchronize;
1568 struct ahash_alg template_ahash;
1569 int hash_mode;
1570 int hw_mode;
1571 int inter_digestsize;
1572 struct cc_drvdata *drvdata;
1573 u32 min_hw_rev;
1574};
1575
1576#define CC_STATE_SIZE(_x) \
1577 ((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
1578
1579/* hash descriptors */
1580static struct cc_hash_template driver_hash[] = {
1581	// Asynchronous hash template
1582 {
1583 .name = "sha1",
1584 .driver_name = "sha1-ccree",
1585 .mac_name = "hmac(sha1)",
1586 .mac_driver_name = "hmac-sha1-ccree",
1587 .blocksize = SHA1_BLOCK_SIZE,
1588 .synchronize = false,
1589 .template_ahash = {
1590 .init = cc_hash_init,
1591 .update = cc_hash_update,
1592 .final = cc_hash_final,
1593 .finup = cc_hash_finup,
1594 .digest = cc_hash_digest,
1595 .export = cc_hash_export,
1596 .import = cc_hash_import,
1597 .setkey = cc_hash_setkey,
1598 .halg = {
1599 .digestsize = SHA1_DIGEST_SIZE,
1600 .statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
1601 },
1602 },
1603 .hash_mode = DRV_HASH_SHA1,
1604 .hw_mode = DRV_HASH_HW_SHA1,
1605 .inter_digestsize = SHA1_DIGEST_SIZE,
1606 .min_hw_rev = CC_HW_REV_630,
1607 },
1608 {
1609 .name = "sha256",
1610 .driver_name = "sha256-ccree",
1611 .mac_name = "hmac(sha256)",
1612 .mac_driver_name = "hmac-sha256-ccree",
1613 .blocksize = SHA256_BLOCK_SIZE,
1614 .template_ahash = {
1615 .init = cc_hash_init,
1616 .update = cc_hash_update,
1617 .final = cc_hash_final,
1618 .finup = cc_hash_finup,
1619 .digest = cc_hash_digest,
1620 .export = cc_hash_export,
1621 .import = cc_hash_import,
1622 .setkey = cc_hash_setkey,
1623 .halg = {
1624 .digestsize = SHA256_DIGEST_SIZE,
1625 .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
1626 },
1627 },
1628 .hash_mode = DRV_HASH_SHA256,
1629 .hw_mode = DRV_HASH_HW_SHA256,
1630 .inter_digestsize = SHA256_DIGEST_SIZE,
1631 .min_hw_rev = CC_HW_REV_630,
1632 },
1633 {
1634 .name = "sha224",
1635 .driver_name = "sha224-ccree",
1636 .mac_name = "hmac(sha224)",
1637 .mac_driver_name = "hmac-sha224-ccree",
1638 .blocksize = SHA224_BLOCK_SIZE,
1639 .template_ahash = {
1640 .init = cc_hash_init,
1641 .update = cc_hash_update,
1642 .final = cc_hash_final,
1643 .finup = cc_hash_finup,
1644 .digest = cc_hash_digest,
1645 .export = cc_hash_export,
1646 .import = cc_hash_import,
1647 .setkey = cc_hash_setkey,
1648 .halg = {
1649 .digestsize = SHA224_DIGEST_SIZE,
1650 .statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
1651 },
1652 },
1653 .hash_mode = DRV_HASH_SHA224,
1654 .hw_mode = DRV_HASH_HW_SHA256,
1655 .inter_digestsize = SHA256_DIGEST_SIZE,
1656 .min_hw_rev = CC_HW_REV_630,
1657 },
1658 {
1659 .name = "sha384",
1660 .driver_name = "sha384-ccree",
1661 .mac_name = "hmac(sha384)",
1662 .mac_driver_name = "hmac-sha384-ccree",
1663 .blocksize = SHA384_BLOCK_SIZE,
1664 .template_ahash = {
1665 .init = cc_hash_init,
1666 .update = cc_hash_update,
1667 .final = cc_hash_final,
1668 .finup = cc_hash_finup,
1669 .digest = cc_hash_digest,
1670 .export = cc_hash_export,
1671 .import = cc_hash_import,
1672 .setkey = cc_hash_setkey,
1673 .halg = {
1674 .digestsize = SHA384_DIGEST_SIZE,
1675 .statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
1676 },
1677 },
1678 .hash_mode = DRV_HASH_SHA384,
1679 .hw_mode = DRV_HASH_HW_SHA512,
1680 .inter_digestsize = SHA512_DIGEST_SIZE,
1681 .min_hw_rev = CC_HW_REV_712,
1682 },
1683 {
1684 .name = "sha512",
1685 .driver_name = "sha512-ccree",
1686 .mac_name = "hmac(sha512)",
1687 .mac_driver_name = "hmac-sha512-ccree",
1688 .blocksize = SHA512_BLOCK_SIZE,
1689 .template_ahash = {
1690 .init = cc_hash_init,
1691 .update = cc_hash_update,
1692 .final = cc_hash_final,
1693 .finup = cc_hash_finup,
1694 .digest = cc_hash_digest,
1695 .export = cc_hash_export,
1696 .import = cc_hash_import,
1697 .setkey = cc_hash_setkey,
1698 .halg = {
1699 .digestsize = SHA512_DIGEST_SIZE,
1700 .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1701 },
1702 },
1703 .hash_mode = DRV_HASH_SHA512,
1704 .hw_mode = DRV_HASH_HW_SHA512,
1705 .inter_digestsize = SHA512_DIGEST_SIZE,
1706 .min_hw_rev = CC_HW_REV_712,
1707 },
1708 {
1709 .name = "md5",
1710 .driver_name = "md5-ccree",
1711 .mac_name = "hmac(md5)",
1712 .mac_driver_name = "hmac-md5-ccree",
1713 .blocksize = MD5_HMAC_BLOCK_SIZE,
1714 .template_ahash = {
1715 .init = cc_hash_init,
1716 .update = cc_hash_update,
1717 .final = cc_hash_final,
1718 .finup = cc_hash_finup,
1719 .digest = cc_hash_digest,
1720 .export = cc_hash_export,
1721 .import = cc_hash_import,
1722 .setkey = cc_hash_setkey,
1723 .halg = {
1724 .digestsize = MD5_DIGEST_SIZE,
1725 .statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
1726 },
1727 },
1728 .hash_mode = DRV_HASH_MD5,
1729 .hw_mode = DRV_HASH_HW_MD5,
1730 .inter_digestsize = MD5_DIGEST_SIZE,
1731 .min_hw_rev = CC_HW_REV_630,
1732 },
1733 {
1734 .mac_name = "xcbc(aes)",
1735 .mac_driver_name = "xcbc-aes-ccree",
1736 .blocksize = AES_BLOCK_SIZE,
1737 .template_ahash = {
1738 .init = cc_hash_init,
1739 .update = cc_mac_update,
1740 .final = cc_mac_final,
1741 .finup = cc_mac_finup,
1742 .digest = cc_mac_digest,
1743 .setkey = cc_xcbc_setkey,
1744 .export = cc_hash_export,
1745 .import = cc_hash_import,
1746 .halg = {
1747 .digestsize = AES_BLOCK_SIZE,
1748 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1749 },
1750 },
1751 .hash_mode = DRV_HASH_NULL,
1752 .hw_mode = DRV_CIPHER_XCBC_MAC,
1753 .inter_digestsize = AES_BLOCK_SIZE,
1754 .min_hw_rev = CC_HW_REV_630,
1755 },
1756 {
1757 .mac_name = "cmac(aes)",
1758 .mac_driver_name = "cmac-aes-ccree",
1759 .blocksize = AES_BLOCK_SIZE,
1760 .template_ahash = {
1761 .init = cc_hash_init,
1762 .update = cc_mac_update,
1763 .final = cc_mac_final,
1764 .finup = cc_mac_finup,
1765 .digest = cc_mac_digest,
1766 .setkey = cc_cmac_setkey,
1767 .export = cc_hash_export,
1768 .import = cc_hash_import,
1769 .halg = {
1770 .digestsize = AES_BLOCK_SIZE,
1771 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1772 },
1773 },
1774 .hash_mode = DRV_HASH_NULL,
1775 .hw_mode = DRV_CIPHER_CMAC,
1776 .inter_digestsize = AES_BLOCK_SIZE,
1777 .min_hw_rev = CC_HW_REV_630,
1778 },
1779};
1780
1781static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
1782 struct device *dev, bool keyed)
1783{
1784 struct cc_hash_alg *t_crypto_alg;
1785 struct crypto_alg *alg;
1786 struct ahash_alg *halg;
1787
1788 t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
1789 if (!t_crypto_alg)
1790 return ERR_PTR(-ENOMEM);
1791
1792 t_crypto_alg->ahash_alg = template->template_ahash;
1793 halg = &t_crypto_alg->ahash_alg;
1794 alg = &halg->halg.base;
1795
1796 if (keyed) {
1797 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1798 template->mac_name);
1799 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1800 template->mac_driver_name);
1801 } else {
1802 halg->setkey = NULL;
1803 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1804 template->name);
1805 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1806 template->driver_name);
1807 }
1808 alg->cra_module = THIS_MODULE;
1809 alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
1810 alg->cra_priority = CC_CRA_PRIO;
1811 alg->cra_blocksize = template->blocksize;
1812 alg->cra_alignmask = 0;
1813 alg->cra_exit = cc_cra_exit;
1814
1815 alg->cra_init = cc_cra_init;
1816 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
1817 CRYPTO_ALG_KERN_DRIVER_ONLY;
1818 alg->cra_type = &crypto_ahash_type;
1819
1820 t_crypto_alg->hash_mode = template->hash_mode;
1821 t_crypto_alg->hw_mode = template->hw_mode;
1822 t_crypto_alg->inter_digestsize = template->inter_digestsize;
1823
1824 return t_crypto_alg;
1825}
1826
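/*
 * Copies the constant tables into SRAM: the digest-length words first (plus
 * the SHA384/512 lengths on HW rev >= 712), followed by the larval digests
 * in MD5, SHA1, SHA224, SHA256 (and SHA384, SHA512) order.  This ordering
 * must match the offsets computed in cc_larval_digest_addr().
 */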
1827int cc_init_hash_sram(struct cc_drvdata *drvdata)
1828{
1829 struct cc_hash_handle *hash_handle = drvdata->hash_handle;
1830 cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
1831 unsigned int larval_seq_len = 0;
1832 struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
1833 bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
1834 int rc = 0;
1835
1836 /* Copy-to-sram digest-len */
1837 cc_set_sram_desc(digest_len_init, sram_buff_ofs,
1838 ARRAY_SIZE(digest_len_init), larval_seq,
1839 &larval_seq_len);
1840 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1841 if (rc)
1842 goto init_digest_const_err;
1843
1844 sram_buff_ofs += sizeof(digest_len_init);
1845 larval_seq_len = 0;
1846
1847 if (large_sha_supported) {
1848 /* Copy-to-sram digest-len for sha384/512 */
1849 cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs,
1850 ARRAY_SIZE(digest_len_sha512_init),
1851 larval_seq, &larval_seq_len);
1852 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1853 if (rc)
1854 goto init_digest_const_err;
1855
1856 sram_buff_ofs += sizeof(digest_len_sha512_init);
1857 larval_seq_len = 0;
1858 }
1859
1860 /* The initial digests offset */
1861 hash_handle->larval_digest_sram_addr = sram_buff_ofs;
1862
1863 /* Copy-to-sram initial SHA* digests */
1864 cc_set_sram_desc(md5_init, sram_buff_ofs, ARRAY_SIZE(md5_init),
1865 larval_seq, &larval_seq_len);
1866 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1867 if (rc)
1868 goto init_digest_const_err;
1869 sram_buff_ofs += sizeof(md5_init);
1870 larval_seq_len = 0;
1871
1872 cc_set_sram_desc(sha1_init, sram_buff_ofs,
1873 ARRAY_SIZE(sha1_init), larval_seq,
1874 &larval_seq_len);
1875 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1876 if (rc)
1877 goto init_digest_const_err;
1878 sram_buff_ofs += sizeof(sha1_init);
1879 larval_seq_len = 0;
1880
1881 cc_set_sram_desc(sha224_init, sram_buff_ofs,
1882 ARRAY_SIZE(sha224_init), larval_seq,
1883 &larval_seq_len);
1884 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1885 if (rc)
1886 goto init_digest_const_err;
1887 sram_buff_ofs += sizeof(sha224_init);
1888 larval_seq_len = 0;
1889
1890 cc_set_sram_desc(sha256_init, sram_buff_ofs,
1891 ARRAY_SIZE(sha256_init), larval_seq,
1892 &larval_seq_len);
1893 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1894 if (rc)
1895 goto init_digest_const_err;
1896 sram_buff_ofs += sizeof(sha256_init);
1897 larval_seq_len = 0;
1898
1899 if (large_sha_supported) {
1900 cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs,
1901 (ARRAY_SIZE(sha384_init) * 2), larval_seq,
1902 &larval_seq_len);
1903 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1904 if (rc)
1905 goto init_digest_const_err;
1906 sram_buff_ofs += sizeof(sha384_init);
1907 larval_seq_len = 0;
1908
1909 cc_set_sram_desc((u32 *)sha512_init, sram_buff_ofs,
1910 (ARRAY_SIZE(sha512_init) * 2), larval_seq,
1911 &larval_seq_len);
1912 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1913 if (rc)
1914 goto init_digest_const_err;
1915 }
1916
1917init_digest_const_err:
1918 return rc;
1919}
1920
1921static void __init cc_swap_dwords(u32 *buf, unsigned long size)
1922{
1923 int i;
1924 u32 tmp;
1925
1926 for (i = 0; i < size; i += 2) {
1927 tmp = buf[i];
1928 buf[i] = buf[i + 1];
1929 buf[i + 1] = tmp;
1930 }
1931}
1932
1933/*
1934 * Due to the way the HW works we need to swap every
1935 * double word in the SHA384 and SHA512 larval hashes
1936 */
1937void __init cc_hash_global_init(void)
1938{
1939 cc_swap_dwords((u32 *)&sha384_init, (ARRAY_SIZE(sha384_init) * 2));
1940 cc_swap_dwords((u32 *)&sha512_init, (ARRAY_SIZE(sha512_init) * 2));
1941}
1942
1943int cc_hash_alloc(struct cc_drvdata *drvdata)
1944{
1945 struct cc_hash_handle *hash_handle;
1946 cc_sram_addr_t sram_buff;
1947 u32 sram_size_to_alloc;
1948 struct device *dev = drvdata_to_dev(drvdata);
1949 int rc = 0;
1950 int alg;
1951
1952 hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
1953 if (!hash_handle)
1954 return -ENOMEM;
1955
1956 INIT_LIST_HEAD(&hash_handle->hash_list);
1957 drvdata->hash_handle = hash_handle;
1958
1959 sram_size_to_alloc = sizeof(digest_len_init) +
1960 sizeof(md5_init) +
1961 sizeof(sha1_init) +
1962 sizeof(sha224_init) +
1963 sizeof(sha256_init);
1964
1965 if (drvdata->hw_rev >= CC_HW_REV_712)
1966 sram_size_to_alloc += sizeof(digest_len_sha512_init) +
1967 sizeof(sha384_init) + sizeof(sha512_init);
1968
1969 sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
1970 if (sram_buff == NULL_SRAM_ADDR) {
1971 dev_err(dev, "SRAM pool exhausted\n");
1972 rc = -ENOMEM;
1973 goto fail;
1974 }
1975
1976 /* The initial digest-len offset */
1977 hash_handle->digest_len_sram_addr = sram_buff;
1978
1979	/* Must be set before the alg registration as it is being used there */
1980 rc = cc_init_hash_sram(drvdata);
1981 if (rc) {
1982 dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
1983 goto fail;
1984 }
1985
1986 /* ahash registration */
1987 for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
1988 struct cc_hash_alg *t_alg;
1989 int hw_mode = driver_hash[alg].hw_mode;
1990
1991 /* We either support both HASH and MAC or none */
1992 if (driver_hash[alg].min_hw_rev > drvdata->hw_rev)
1993 continue;
1994
1995 /* register hmac version */
1996 t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
1997 if (IS_ERR(t_alg)) {
1998 rc = PTR_ERR(t_alg);
1999 dev_err(dev, "%s alg allocation failed\n",
2000 driver_hash[alg].driver_name);
2001 goto fail;
2002 }
2003 t_alg->drvdata = drvdata;
2004
2005 rc = crypto_register_ahash(&t_alg->ahash_alg);
2006 if (rc) {
2007 dev_err(dev, "%s alg registration failed\n",
2008 driver_hash[alg].driver_name);
2009 kfree(t_alg);
2010 goto fail;
2011 } else {
2012 list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2013 }
2014
2015 if (hw_mode == DRV_CIPHER_XCBC_MAC ||
2016 hw_mode == DRV_CIPHER_CMAC)
2017 continue;
2018
2019 /* register hash version */
2020 t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
2021 if (IS_ERR(t_alg)) {
2022 rc = PTR_ERR(t_alg);
2023 dev_err(dev, "%s alg allocation failed\n",
2024 driver_hash[alg].driver_name);
2025 goto fail;
2026 }
2027 t_alg->drvdata = drvdata;
2028
2029 rc = crypto_register_ahash(&t_alg->ahash_alg);
2030 if (rc) {
2031 dev_err(dev, "%s alg registration failed\n",
2032 driver_hash[alg].driver_name);
2033 kfree(t_alg);
2034 goto fail;
2035 } else {
2036 list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2037 }
2038 }
2039
2040 return 0;
2041
2042fail:
2043 kfree(drvdata->hash_handle);
2044 drvdata->hash_handle = NULL;
2045 return rc;
2046}
2047
2048int cc_hash_free(struct cc_drvdata *drvdata)
2049{
2050 struct cc_hash_alg *t_hash_alg, *hash_n;
2051 struct cc_hash_handle *hash_handle = drvdata->hash_handle;
2052
2053 if (hash_handle) {
2054 list_for_each_entry_safe(t_hash_alg, hash_n,
2055 &hash_handle->hash_list, entry) {
2056 crypto_unregister_ahash(&t_hash_alg->ahash_alg);
2057 list_del(&t_hash_alg->entry);
2058 kfree(t_hash_alg);
2059 }
2060
2061 kfree(hash_handle);
2062 drvdata->hash_handle = NULL;
2063 }
2064 return 0;
2065}
2066
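/*
 * Loads the XCBC subkeys derived in cc_xcbc_setkey() - K1 as the AES key,
 * K2 and K3 as state registers - together with the running MAC value from
 * the request context, ahead of the data descriptors.
 */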
2067static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
2068 unsigned int *seq_size)
2069{
2070 unsigned int idx = *seq_size;
2071 struct ahash_req_ctx *state = ahash_request_ctx(areq);
2072 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2073 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2074
2075 /* Setup XCBC MAC K1 */
2076 hw_desc_init(&desc[idx]);
2077 set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2078 XCBC_MAC_K1_OFFSET),
2079 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2080 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2081 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2082 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2083 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2084 set_flow_mode(&desc[idx], S_DIN_to_AES);
2085 idx++;
2086
2087 /* Setup XCBC MAC K2 */
2088 hw_desc_init(&desc[idx]);
2089 set_din_type(&desc[idx], DMA_DLLI,
2090 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
2091 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2092 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
2093 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2094 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2095 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2096 set_flow_mode(&desc[idx], S_DIN_to_AES);
2097 idx++;
2098
2099 /* Setup XCBC MAC K3 */
2100 hw_desc_init(&desc[idx]);
2101 set_din_type(&desc[idx], DMA_DLLI,
2102 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
2103 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2104 set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
2105 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2106 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2107 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2108 set_flow_mode(&desc[idx], S_DIN_to_AES);
2109 idx++;
2110
2111 /* Loading MAC state */
2112 hw_desc_init(&desc[idx]);
2113 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2114 CC_AES_BLOCK_SIZE, NS_BIT);
2115 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2116 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2117 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2118 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2119 set_flow_mode(&desc[idx], S_DIN_to_AES);
2120 idx++;
2121 *seq_size = idx;
2122}
2123
2124static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
2125 unsigned int *seq_size)
2126{
2127 unsigned int idx = *seq_size;
2128 struct ahash_req_ctx *state = ahash_request_ctx(areq);
2129 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2130 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2131
2132 /* Setup CMAC Key */
2133 hw_desc_init(&desc[idx]);
2134 set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
2135 ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
2136 ctx->key_params.keylen), NS_BIT);
2137 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2138 set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2139 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2140 set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2141 set_flow_mode(&desc[idx], S_DIN_to_AES);
2142 idx++;
2143
2144 /* Load MAC state */
2145 hw_desc_init(&desc[idx]);
2146 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2147 CC_AES_BLOCK_SIZE, NS_BIT);
2148 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2149 set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2150 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2151 set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2152 set_flow_mode(&desc[idx], S_DIN_to_AES);
2153 idx++;
2154 *seq_size = idx;
2155}
2156
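/*
 * Emits the data-input descriptors for the current request: a single DLLI
 * descriptor when the mapped source is contiguous, or a BYPASS descriptor
 * that copies the MLLI table into SRAM followed by an MLLI-driven
 * processing descriptor when the data is scattered.  A NULL buffer type
 * emits nothing.
 */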
2157static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
2158 struct cc_hash_ctx *ctx, unsigned int flow_mode,
2159 struct cc_hw_desc desc[], bool is_not_last_data,
2160 unsigned int *seq_size)
2161{
2162 unsigned int idx = *seq_size;
2163 struct device *dev = drvdata_to_dev(ctx->drvdata);
2164
2165 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
2166 hw_desc_init(&desc[idx]);
2167 set_din_type(&desc[idx], DMA_DLLI,
2168 sg_dma_address(areq_ctx->curr_sg),
2169 areq_ctx->curr_sg->length, NS_BIT);
2170 set_flow_mode(&desc[idx], flow_mode);
2171 idx++;
2172 } else {
2173 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
2174 dev_dbg(dev, " NULL mode\n");
2175 /* nothing to build */
2176 return;
2177 }
2178 /* bypass */
2179 hw_desc_init(&desc[idx]);
2180 set_din_type(&desc[idx], DMA_DLLI,
2181 areq_ctx->mlli_params.mlli_dma_addr,
2182 areq_ctx->mlli_params.mlli_len, NS_BIT);
2183 set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
2184 areq_ctx->mlli_params.mlli_len);
2185 set_flow_mode(&desc[idx], BYPASS);
2186 idx++;
2187 /* process */
2188 hw_desc_init(&desc[idx]);
2189 set_din_type(&desc[idx], DMA_MLLI,
2190 ctx->drvdata->mlli_sram_addr,
2191 areq_ctx->mlli_nents, NS_BIT);
2192 set_flow_mode(&desc[idx], flow_mode);
2193 idx++;
2194 }
2195 if (is_not_last_data)
2196 set_din_not_last_indication(&desc[(idx - 1)]);
2197 /* return updated desc sequence size */
2198 *seq_size = idx;
2199}
2200
2201static const void *cc_larval_digest(struct device *dev, u32 mode)
2202{
2203 switch (mode) {
2204 case DRV_HASH_MD5:
2205 return md5_init;
2206 case DRV_HASH_SHA1:
2207 return sha1_init;
2208 case DRV_HASH_SHA224:
2209 return sha224_init;
2210 case DRV_HASH_SHA256:
2211 return sha256_init;
2212 case DRV_HASH_SHA384:
2213 return sha384_init;
2214 case DRV_HASH_SHA512:
2215 return sha512_init;
2216 default:
2217 dev_err(dev, "Invalid hash mode (%d)\n", mode);
2218 return md5_init;
2219 }
2220}
2221
2222/*!
2223 * Gets the address of the initial digest in SRAM
2224 * according to the given hash mode
2225 *
2226 * \param drvdata
2227 * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
2228 *
2229 * \return u32 The address of the initial digest in SRAM
2230 */
2231cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
2232{
2233 struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2234 struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2235 struct device *dev = drvdata_to_dev(_drvdata);
2236
2237 switch (mode) {
2238 case DRV_HASH_NULL:
2239 break; /*Ignore*/
2240 case DRV_HASH_MD5:
2241 return (hash_handle->larval_digest_sram_addr);
2242 case DRV_HASH_SHA1:
2243 return (hash_handle->larval_digest_sram_addr +
2244 sizeof(md5_init));
2245 case DRV_HASH_SHA224:
2246 return (hash_handle->larval_digest_sram_addr +
2247 sizeof(md5_init) +
2248 sizeof(sha1_init));
2249 case DRV_HASH_SHA256:
2250 return (hash_handle->larval_digest_sram_addr +
2251 sizeof(md5_init) +
2252 sizeof(sha1_init) +
2253 sizeof(sha224_init));
2254 case DRV_HASH_SHA384:
2255 return (hash_handle->larval_digest_sram_addr +
2256 sizeof(md5_init) +
2257 sizeof(sha1_init) +
2258 sizeof(sha224_init) +
2259 sizeof(sha256_init));
2260 case DRV_HASH_SHA512:
2261 return (hash_handle->larval_digest_sram_addr +
2262 sizeof(md5_init) +
2263 sizeof(sha1_init) +
2264 sizeof(sha224_init) +
2265 sizeof(sha256_init) +
2266 sizeof(sha384_init));
2267 default:
2268 dev_err(dev, "Invalid hash mode (%d)\n", mode);
2269 }
2270
2271	/* Return a valid (though incorrect) address to avoid a kernel crash */
2272 return hash_handle->larval_digest_sram_addr;
2273}
2274
2275cc_sram_addr_t
2276cc_digest_len_addr(void *drvdata, u32 mode)
2277{
2278 struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2279 struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2280 cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
2281
2282 switch (mode) {
2283 case DRV_HASH_SHA1:
2284 case DRV_HASH_SHA224:
2285 case DRV_HASH_SHA256:
2286 case DRV_HASH_MD5:
2287 return digest_len_addr;
2288#if (CC_DEV_SHA_MAX > 256)
2289 case DRV_HASH_SHA384:
2290 case DRV_HASH_SHA512:
2291 return digest_len_addr + sizeof(digest_len_init);
2292#endif
2293 default:
2294		return digest_len_addr; /* to avoid a kernel crash */
2295 }
2296}
diff --git a/drivers/crypto/ccree/cc_hash.h b/drivers/crypto/ccree/cc_hash.h
new file mode 100644
index 000000000000..2e5bf8b0bbb6
--- /dev/null
+++ b/drivers/crypto/ccree/cc_hash.h
@@ -0,0 +1,109 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4/* \file cc_hash.h
5 * ARM CryptoCell Hash Crypto API
6 */
7
8#ifndef __CC_HASH_H__
9#define __CC_HASH_H__
10
11#include "cc_buffer_mgr.h"
12
13#define HMAC_IPAD_CONST 0x36363636
14#define HMAC_OPAD_CONST 0x5C5C5C5C
15#define HASH_LEN_SIZE_712 16
16#define HASH_LEN_SIZE_630 8
17#define HASH_MAX_LEN_SIZE HASH_LEN_SIZE_712
18#define CC_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
19#define CC_MAX_HASH_BLCK_SIZE SHA512_BLOCK_SIZE
20
21#define XCBC_MAC_K1_OFFSET 0
22#define XCBC_MAC_K2_OFFSET 16
23#define XCBC_MAC_K3_OFFSET 32
24
25#define CC_EXPORT_MAGIC 0xC2EE1070U
26
27/* this struct was taken from drivers/crypto/nx/nx-aes-xcbc.c and it is used
28 * for xcbc/cmac statesize
29 */
30struct aeshash_state {
31 u8 state[AES_BLOCK_SIZE];
32 unsigned int count;
33 u8 buffer[AES_BLOCK_SIZE];
34};
35
36/* ahash state */
37struct ahash_req_ctx {
38 u8 buffers[2][CC_MAX_HASH_BLCK_SIZE] ____cacheline_aligned;
39 u8 digest_result_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
40 u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
41 u8 opad_digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
42 u8 digest_bytes_len[HASH_MAX_LEN_SIZE] ____cacheline_aligned;
43 struct async_gen_req_ctx gen_ctx ____cacheline_aligned;
44 enum cc_req_dma_buf_type data_dma_buf_type;
45 dma_addr_t opad_digest_dma_addr;
46 dma_addr_t digest_buff_dma_addr;
47 dma_addr_t digest_bytes_len_dma_addr;
48 dma_addr_t digest_result_dma_addr;
49 u32 buf_cnt[2];
50 u32 buff_index;
51	u32 xcbc_count; /* count xcbc update operations */
52 struct scatterlist buff_sg[2];
53 struct scatterlist *curr_sg;
54 u32 in_nents;
55 u32 mlli_nents;
56 struct mlli_params mlli_params;
57};
58
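/*
 * The request context keeps two staging buffers for partial blocks;
 * buff_index selects the active one.  The helpers below return the current
 * or the next buffer and its byte count.
 */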
59static inline u32 *cc_hash_buf_cnt(struct ahash_req_ctx *state)
60{
61 return &state->buf_cnt[state->buff_index];
62}
63
64static inline u8 *cc_hash_buf(struct ahash_req_ctx *state)
65{
66 return state->buffers[state->buff_index];
67}
68
69static inline u32 *cc_next_buf_cnt(struct ahash_req_ctx *state)
70{
71 return &state->buf_cnt[state->buff_index ^ 1];
72}
73
74static inline u8 *cc_next_buf(struct ahash_req_ctx *state)
75{
76 return state->buffers[state->buff_index ^ 1];
77}
78
79int cc_hash_alloc(struct cc_drvdata *drvdata);
80int cc_init_hash_sram(struct cc_drvdata *drvdata);
81int cc_hash_free(struct cc_drvdata *drvdata);
82
83/*!
84 * Gets the initial digest length
85 *
86 * \param drvdata
87 * \param mode The Hash mode. Supported modes:
88 * MD5/SHA1/SHA224/SHA256/SHA384/SHA512
89 *
90 * \return u32 returns the address of the initial digest length in SRAM
91 */
92cc_sram_addr_t
93cc_digest_len_addr(void *drvdata, u32 mode);
94
95/*!
96 * Gets the address of the initial digest in SRAM
97 * according to the given hash mode
98 *
99 * \param drvdata
100 * \param mode The Hash mode. Supported modes:
101 * MD5/SHA1/SHA224/SHA256/SHA384/SHA512
102 *
103 * \return u32 The address of the initial digest in SRAM
104 */
105cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode);
106
107void cc_hash_global_init(void);
108
109#endif /*__CC_HASH_H__*/
diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h
new file mode 100644
index 000000000000..f51001898ca1
--- /dev/null
+++ b/drivers/crypto/ccree/cc_host_regs.h
@@ -0,0 +1,145 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#ifndef __CC_HOST_H__
5#define __CC_HOST_H__
6
7// --------------------------------------
8// BLOCK: HOST_P
9// --------------------------------------
10#define CC_HOST_IRR_REG_OFFSET 0xA00UL
11#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 0x2UL
12#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 0x1UL
13#define CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL
14#define CC_HOST_IRR_AXI_ERR_INT_BIT_SIZE 0x1UL
15#define CC_HOST_IRR_GPR0_BIT_SHIFT 0xBUL
16#define CC_HOST_IRR_GPR0_BIT_SIZE 0x1UL
17#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT 0x13UL
18#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 0x1UL
19#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 0x17UL
20#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 0x1UL
21#define CC_HOST_SEP_SRAM_THRESHOLD_REG_OFFSET 0xA10UL
22#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SHIFT 0x0UL
23#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SIZE 0xCUL
24#define CC_HOST_IMR_REG_OFFSET 0xA04UL
25#define CC_HOST_IMR_NOT_USED_MASK_BIT_SHIFT 0x1UL
26#define CC_HOST_IMR_NOT_USED_MASK_BIT_SIZE 0x1UL
27#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 0x2UL
28#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE 0x1UL
29#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT 0x8UL
30#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SIZE 0x1UL
31#define CC_HOST_IMR_GPR0_BIT_SHIFT 0xBUL
32#define CC_HOST_IMR_GPR0_BIT_SIZE 0x1UL
33#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT 0x13UL
34#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 0x1UL
35#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 0x17UL
36#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 0x1UL
37#define CC_HOST_ICR_REG_OFFSET 0xA08UL
38#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 0x2UL
39#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 0x1UL
40#define CC_HOST_ICR_AXI_ERR_CLEAR_BIT_SHIFT 0x8UL
41#define CC_HOST_ICR_AXI_ERR_CLEAR_BIT_SIZE 0x1UL
42#define CC_HOST_ICR_GPR_INT_CLEAR_BIT_SHIFT 0xBUL
43#define CC_HOST_ICR_GPR_INT_CLEAR_BIT_SIZE 0x1UL
44#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SHIFT 0x13UL
45#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
46#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
47#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
48#define CC_HOST_SIGNATURE_REG_OFFSET 0xA24UL
49#define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL
50#define CC_HOST_SIGNATURE_VALUE_BIT_SIZE 0x20UL
51#define CC_HOST_BOOT_REG_OFFSET 0xA28UL
52#define CC_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SHIFT 0x0UL
53#define CC_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SIZE 0x1UL
54#define CC_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SHIFT 0x1UL
55#define CC_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SIZE 0x1UL
56#define CC_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SHIFT 0x2UL
57#define CC_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SIZE 0x1UL
58#define CC_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SHIFT 0x3UL
59#define CC_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SIZE 0x1UL
60#define CC_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SHIFT 0x5UL
61#define CC_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SIZE 0x1UL
62#define CC_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SHIFT 0x6UL
63#define CC_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SIZE 0x3UL
64#define CC_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SHIFT 0x9UL
65#define CC_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SIZE 0x1UL
66#define CC_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SHIFT 0xAUL
67#define CC_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SIZE 0x1UL
68#define CC_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SHIFT 0xBUL
69#define CC_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SIZE 0x1UL
70#define CC_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SHIFT 0xCUL
71#define CC_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SIZE 0x1UL
72#define CC_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SHIFT 0xDUL
73#define CC_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SIZE 0x1UL
74#define CC_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SHIFT 0xEUL
75#define CC_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SIZE 0x1UL
76#define CC_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SHIFT 0xFUL
77#define CC_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SIZE 0x1UL
78#define CC_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SHIFT 0x10UL
79#define CC_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SIZE 0x1UL
80#define CC_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SHIFT 0x11UL
81#define CC_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SIZE 0x1UL
82#define CC_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SHIFT 0x12UL
83#define CC_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SIZE 0x1UL
84#define CC_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SHIFT 0x13UL
85#define CC_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SIZE 0x1UL
86#define CC_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SHIFT 0x14UL
87#define CC_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SIZE 0x1UL
88#define CC_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SHIFT 0x15UL
89#define CC_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SIZE 0x1UL
90#define CC_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SHIFT 0x16UL
91#define CC_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SIZE 0x1UL
92#define CC_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SHIFT 0x17UL
93#define CC_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SIZE 0x1UL
94#define CC_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SHIFT 0x18UL
95#define CC_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SIZE 0x1UL
96#define CC_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SHIFT 0x19UL
97#define CC_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SIZE 0x1UL
98#define CC_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SHIFT 0x1AUL
99#define CC_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SIZE 0x1UL
100#define CC_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SHIFT 0x1BUL
101#define CC_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SIZE 0x1UL
102#define CC_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SHIFT 0x1CUL
103#define CC_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SIZE 0x1UL
104#define CC_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SHIFT 0x1DUL
105#define CC_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SIZE 0x1UL
106#define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SHIFT 0x1EUL
107#define CC_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SIZE 0x1UL
108#define CC_HOST_VERSION_REG_OFFSET 0xA40UL
109#define CC_HOST_VERSION_VALUE_BIT_SHIFT 0x0UL
110#define CC_HOST_VERSION_VALUE_BIT_SIZE 0x20UL
111#define CC_HOST_KFDE0_VALID_REG_OFFSET 0xA60UL
112#define CC_HOST_KFDE0_VALID_VALUE_BIT_SHIFT 0x0UL
113#define CC_HOST_KFDE0_VALID_VALUE_BIT_SIZE 0x1UL
114#define CC_HOST_KFDE1_VALID_REG_OFFSET 0xA64UL
115#define CC_HOST_KFDE1_VALID_VALUE_BIT_SHIFT 0x0UL
116#define CC_HOST_KFDE1_VALID_VALUE_BIT_SIZE 0x1UL
117#define CC_HOST_KFDE2_VALID_REG_OFFSET 0xA68UL
118#define CC_HOST_KFDE2_VALID_VALUE_BIT_SHIFT 0x0UL
119#define CC_HOST_KFDE2_VALID_VALUE_BIT_SIZE 0x1UL
120#define CC_HOST_KFDE3_VALID_REG_OFFSET 0xA6CUL
121#define CC_HOST_KFDE3_VALID_VALUE_BIT_SHIFT 0x0UL
122#define CC_HOST_KFDE3_VALID_VALUE_BIT_SIZE 0x1UL
123#define CC_HOST_GPR0_REG_OFFSET 0xA70UL
124#define CC_HOST_GPR0_VALUE_BIT_SHIFT 0x0UL
125#define CC_HOST_GPR0_VALUE_BIT_SIZE 0x20UL
126#define CC_GPR_HOST_REG_OFFSET 0xA74UL
127#define CC_GPR_HOST_VALUE_BIT_SHIFT 0x0UL
128#define CC_GPR_HOST_VALUE_BIT_SIZE 0x20UL
129#define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL
130#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
131#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
132// --------------------------------------
133// BLOCK: HOST_SRAM
134// --------------------------------------
135#define CC_SRAM_DATA_REG_OFFSET 0xF00UL
136#define CC_SRAM_DATA_VALUE_BIT_SHIFT 0x0UL
137#define CC_SRAM_DATA_VALUE_BIT_SIZE 0x20UL
138#define CC_SRAM_ADDR_REG_OFFSET 0xF04UL
139#define CC_SRAM_ADDR_VALUE_BIT_SHIFT 0x0UL
140#define CC_SRAM_ADDR_VALUE_BIT_SIZE 0xFUL
141#define CC_SRAM_DATA_READY_REG_OFFSET 0xF08UL
142#define CC_SRAM_DATA_READY_VALUE_BIT_SHIFT 0x0UL
143#define CC_SRAM_DATA_READY_VALUE_BIT_SIZE 0x1UL
144
145#endif //__CC_HOST_H__
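The HOST_P register map above is a flat list of offset/shift/size triplets: a field occupies bits [SHIFT + SIZE - 1 : SHIFT] of the 32-bit register at REG_OFFSET. A minimal sketch (not part of the driver) of how such a triplet is consumed, assuming only an ioremap()ed base for the host register block; the driver's own cc_ioread()/CC_REG() helpers, used later in this series, wrap the same access pattern:

#include <linux/io.h>
#include <linux/bitops.h>

/* Sketch only: read the SRAM size field out of the HOST_BOOT register. */
static inline u32 cc_host_boot_sram_size(void __iomem *cc_base)
{
	u32 boot = readl(cc_base + CC_HOST_BOOT_REG_OFFSET);

	/* The field sits in bits [SHIFT + SIZE - 1 : SHIFT] of the register */
	return (boot >> CC_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SHIFT) &
	       GENMASK(CC_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SIZE - 1, 0);
}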
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
new file mode 100644
index 000000000000..a091ae57f902
--- /dev/null
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -0,0 +1,576 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#ifndef __CC_HW_QUEUE_DEFS_H__
5#define __CC_HW_QUEUE_DEFS_H__
6
7#include <linux/types.h>
8
9#include "cc_kernel_regs.h"
10#include <linux/bitfield.h>
11
12/******************************************************************************
13 * DEFINITIONS
14 ******************************************************************************/
15
16#define HW_DESC_SIZE_WORDS 6
17/* Define max. available slots in HW queue */
18#define HW_QUEUE_SLOTS_MAX 15
19
20#define CC_REG_LOW(word, name) \
21 (CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SHIFT)
22
23#define CC_REG_HIGH(word, name) \
24 (CC_REG_LOW(word, name) + \
25 CC_DSCRPTR_QUEUE_WORD ## word ## _ ## name ## _BIT_SIZE - 1)
26
27#define CC_GENMASK(word, name) \
28 GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name))
29
30#define WORD0_VALUE CC_GENMASK(0, VALUE)
31#define WORD1_DIN_CONST_VALUE CC_GENMASK(1, DIN_CONST_VALUE)
32#define WORD1_DIN_DMA_MODE CC_GENMASK(1, DIN_DMA_MODE)
33#define WORD1_DIN_SIZE CC_GENMASK(1, DIN_SIZE)
34#define WORD1_NOT_LAST CC_GENMASK(1, NOT_LAST)
35#define WORD1_NS_BIT CC_GENMASK(1, NS_BIT)
36#define WORD2_VALUE CC_GENMASK(2, VALUE)
37#define WORD3_DOUT_DMA_MODE CC_GENMASK(3, DOUT_DMA_MODE)
38#define WORD3_DOUT_LAST_IND CC_GENMASK(3, DOUT_LAST_IND)
39#define WORD3_DOUT_SIZE CC_GENMASK(3, DOUT_SIZE)
40#define WORD3_HASH_XOR_BIT CC_GENMASK(3, HASH_XOR_BIT)
41#define WORD3_NS_BIT CC_GENMASK(3, NS_BIT)
42#define WORD3_QUEUE_LAST_IND CC_GENMASK(3, QUEUE_LAST_IND)
43#define WORD4_ACK_NEEDED CC_GENMASK(4, ACK_NEEDED)
44#define WORD4_AES_SEL_N_HASH CC_GENMASK(4, AES_SEL_N_HASH)
45#define WORD4_BYTES_SWAP CC_GENMASK(4, BYTES_SWAP)
46#define WORD4_CIPHER_CONF0 CC_GENMASK(4, CIPHER_CONF0)
47#define WORD4_CIPHER_CONF1 CC_GENMASK(4, CIPHER_CONF1)
48#define WORD4_CIPHER_CONF2 CC_GENMASK(4, CIPHER_CONF2)
49#define WORD4_CIPHER_DO CC_GENMASK(4, CIPHER_DO)
50#define WORD4_CIPHER_MODE CC_GENMASK(4, CIPHER_MODE)
51#define WORD4_CMAC_SIZE0 CC_GENMASK(4, CMAC_SIZE0)
52#define WORD4_DATA_FLOW_MODE CC_GENMASK(4, DATA_FLOW_MODE)
53#define WORD4_KEY_SIZE CC_GENMASK(4, KEY_SIZE)
54#define WORD4_SETUP_OPERATION CC_GENMASK(4, SETUP_OPERATION)
55#define WORD5_DIN_ADDR_HIGH CC_GENMASK(5, DIN_ADDR_HIGH)
56#define WORD5_DOUT_ADDR_HIGH CC_GENMASK(5, DOUT_ADDR_HIGH)
57
58/******************************************************************************
59 * TYPE DEFINITIONS
60 ******************************************************************************/
61
62struct cc_hw_desc {
63 union {
64 u32 word[HW_DESC_SIZE_WORDS];
65 u16 hword[HW_DESC_SIZE_WORDS * 2];
66 };
67};
68
69enum cc_axi_sec {
70 AXI_SECURE = 0,
71 AXI_NOT_SECURE = 1
72};
73
74enum cc_desc_direction {
75 DESC_DIRECTION_ILLEGAL = -1,
76 DESC_DIRECTION_ENCRYPT_ENCRYPT = 0,
77 DESC_DIRECTION_DECRYPT_DECRYPT = 1,
78 DESC_DIRECTION_DECRYPT_ENCRYPT = 3,
79 DESC_DIRECTION_END = S32_MAX,
80};
81
82enum cc_dma_mode {
83 DMA_MODE_NULL = -1,
84 NO_DMA = 0,
85 DMA_SRAM = 1,
86 DMA_DLLI = 2,
87 DMA_MLLI = 3,
88 DMA_MODE_END = S32_MAX,
89};
90
91enum cc_flow_mode {
92 FLOW_MODE_NULL = -1,
93 /* data flows */
94 BYPASS = 0,
95 DIN_AES_DOUT = 1,
96 AES_to_HASH = 2,
97 AES_and_HASH = 3,
98 DIN_DES_DOUT = 4,
99 DES_to_HASH = 5,
100 DES_and_HASH = 6,
101 DIN_HASH = 7,
102 DIN_HASH_and_BYPASS = 8,
103 AESMAC_and_BYPASS = 9,
104 AES_to_HASH_and_DOUT = 10,
105 DIN_RC4_DOUT = 11,
106 DES_to_HASH_and_DOUT = 12,
107 AES_to_AES_to_HASH_and_DOUT = 13,
108 AES_to_AES_to_HASH = 14,
109 AES_to_HASH_and_AES = 15,
110 DIN_AES_AESMAC = 17,
111 HASH_to_DOUT = 18,
112 /* setup flows */
113 S_DIN_to_AES = 32,
114 S_DIN_to_AES2 = 33,
115 S_DIN_to_DES = 34,
116 S_DIN_to_RC4 = 35,
117 S_DIN_to_HASH = 37,
118 S_AES_to_DOUT = 38,
119 S_AES2_to_DOUT = 39,
120 S_RC4_to_DOUT = 41,
121 S_DES_to_DOUT = 42,
122 S_HASH_to_DOUT = 43,
123 SET_FLOW_ID = 44,
124 FLOW_MODE_END = S32_MAX,
125};
126
127enum cc_setup_op {
128 SETUP_LOAD_NOP = 0,
129 SETUP_LOAD_STATE0 = 1,
130 SETUP_LOAD_STATE1 = 2,
131 SETUP_LOAD_STATE2 = 3,
132 SETUP_LOAD_KEY0 = 4,
133 SETUP_LOAD_XEX_KEY = 5,
134 SETUP_WRITE_STATE0 = 8,
135 SETUP_WRITE_STATE1 = 9,
136 SETUP_WRITE_STATE2 = 10,
137 SETUP_WRITE_STATE3 = 11,
138 SETUP_OP_END = S32_MAX,
139};
140
141enum cc_hash_conf_pad {
142 HASH_PADDING_DISABLED = 0,
143 HASH_PADDING_ENABLED = 1,
144 HASH_DIGEST_RESULT_LITTLE_ENDIAN = 2,
145 HASH_CONFIG1_PADDING_RESERVE32 = S32_MAX,
146};
147
148enum cc_aes_mac_selector {
149 AES_SK = 1,
150 AES_CMAC_INIT = 2,
151 AES_CMAC_SIZE0 = 3,
152 AES_MAC_END = S32_MAX,
153};
154
155#define HW_KEY_MASK_CIPHER_DO 0x3
156#define HW_KEY_SHIFT_CIPHER_CFG2 2
157
158/* HwCryptoKey[1:0] is mapped to cipher_do[1:0] */
159/* HwCryptoKey[3:2] is mapped to cipher_config2[1:0] */
160enum cc_hw_crypto_key {
161 USER_KEY = 0, /* 0x0000 */
162 ROOT_KEY = 1, /* 0x0001 */
163 PROVISIONING_KEY = 2, /* 0x0010 */ /* ==KCP */
164 SESSION_KEY = 3, /* 0x0011 */
165 RESERVED_KEY = 4, /* NA */
166 PLATFORM_KEY = 5, /* 0x0101 */
167 CUSTOMER_KEY = 6, /* 0x0110 */
168 KFDE0_KEY = 7, /* 0x0111 */
169 KFDE1_KEY = 9, /* 0x1001 */
170 KFDE2_KEY = 10, /* 0x1010 */
171 KFDE3_KEY = 11, /* 0x1011 */
172 END_OF_KEYS = S32_MAX,
173};
174
175enum cc_hw_aes_key_size {
176 AES_128_KEY = 0,
177 AES_192_KEY = 1,
178 AES_256_KEY = 2,
179 END_OF_AES_KEYS = S32_MAX,
180};
181
182enum cc_hash_cipher_pad {
183 DO_NOT_PAD = 0,
184 DO_PAD = 1,
185 HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
186};
187
188/*****************************/
189/* Descriptor packing macros */
190/*****************************/
191
192/*
193 * Init a HW descriptor struct
194 * @pdesc: pointer HW descriptor struct
195 */
196static inline void hw_desc_init(struct cc_hw_desc *pdesc)
197{
198 memset(pdesc, 0, sizeof(struct cc_hw_desc));
199}
200
201/*
202 * Indicates the end of the current HW descriptors flow and releases the HW engines.
203 *
204 * @pdesc: pointer HW descriptor struct
205 */
206static inline void set_queue_last_ind_bit(struct cc_hw_desc *pdesc)
207{
208 pdesc->word[3] |= FIELD_PREP(WORD3_QUEUE_LAST_IND, 1);
209}
210
211/*
212 * Set the DIN field of a HW descriptor
213 *
214 * @pdesc: pointer HW descriptor struct
215 * @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
216 * @addr: DIN address
217 * @size: Data size in bytes
218 * @axi_sec: AXI secure bit
219 */
220static inline void set_din_type(struct cc_hw_desc *pdesc,
221 enum cc_dma_mode dma_mode, dma_addr_t addr,
222 u32 size, enum cc_axi_sec axi_sec)
223{
224 pdesc->word[0] = (u32)addr;
225#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
226 pdesc->word[5] |= FIELD_PREP(WORD5_DIN_ADDR_HIGH, ((u16)(addr >> 32)));
227#endif
228 pdesc->word[1] |= FIELD_PREP(WORD1_DIN_DMA_MODE, dma_mode) |
229 FIELD_PREP(WORD1_DIN_SIZE, size) |
230 FIELD_PREP(WORD1_NS_BIT, axi_sec);
231}
232
233/*
234 * Set the DIN field of a HW descriptor to NO DMA mode.
235 * Used for NOP descriptor, register patches and other special modes.
236 *
237 * @pdesc: pointer HW descriptor struct
238 * @addr: DIN address
239 * @size: Data size in bytes
240 */
241static inline void set_din_no_dma(struct cc_hw_desc *pdesc, u32 addr, u32 size)
242{
243 pdesc->word[0] = addr;
244 pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size);
245}
246
247/*
248 * Set the DIN field of a HW descriptor to SRAM mode.
249 * Note: No need to check SRAM alignment since host requests do not use SRAM and
250 * the adaptor will enforce the alignment check.
251 *
252 * @pdesc: pointer HW descriptor struct
253 * @addr: DIN address
254 * @size: Data size in bytes
255 */
256static inline void set_din_sram(struct cc_hw_desc *pdesc, dma_addr_t addr,
257 u32 size)
258{
259 pdesc->word[0] = (u32)addr;
260 pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, size) |
261 FIELD_PREP(WORD1_DIN_DMA_MODE, DMA_SRAM);
262}
263
264/*
265 * Set the DIN field of a HW descriptor to CONST mode
266 *
267 * @pdesc: pointer HW descriptor struct
268 * @val: DIN const value
269 * @size: Data size in bytes
270 */
271static inline void set_din_const(struct cc_hw_desc *pdesc, u32 val, u32 size)
272{
273 pdesc->word[0] = val;
274 pdesc->word[1] |= FIELD_PREP(WORD1_DIN_CONST_VALUE, 1) |
275 FIELD_PREP(WORD1_DIN_DMA_MODE, DMA_SRAM) |
276 FIELD_PREP(WORD1_DIN_SIZE, size);
277}
278
279/*
280 * Set the DIN not last input data indicator
281 *
282 * @pdesc: pointer HW descriptor struct
283 */
284static inline void set_din_not_last_indication(struct cc_hw_desc *pdesc)
285{
286 pdesc->word[1] |= FIELD_PREP(WORD1_NOT_LAST, 1);
287}
288
289/*
290 * Set the DOUT field of a HW descriptor
291 *
292 * @pdesc: pointer HW descriptor struct
293 * @dma_mode: The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT
294 * @addr: DOUT address
295 * @size: Data size in bytes
296 * @axi_sec: AXI secure bit
297 */
298static inline void set_dout_type(struct cc_hw_desc *pdesc,
299 enum cc_dma_mode dma_mode, dma_addr_t addr,
300 u32 size, enum cc_axi_sec axi_sec)
301{
302 pdesc->word[2] = (u32)addr;
303#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
304 pdesc->word[5] |= FIELD_PREP(WORD5_DOUT_ADDR_HIGH, ((u16)(addr >> 32)));
305#endif
306 pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_DMA_MODE, dma_mode) |
307 FIELD_PREP(WORD3_DOUT_SIZE, size) |
308 FIELD_PREP(WORD3_NS_BIT, axi_sec);
309}
310
311/*
312 * Set the DOUT field of a HW descriptor to DLLI type
313 * The LAST INDICATION is provided by the user
314 *
315 * @pdesc pointer HW descriptor struct
316 * @addr: DOUT address
317 * @size: Data size in bytes
318 * @last_ind: The last indication bit
319 * @axi_sec: AXI secure bit
320 */
321static inline void set_dout_dlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
322 u32 size, enum cc_axi_sec axi_sec,
323 u32 last_ind)
324{
325 set_dout_type(pdesc, DMA_DLLI, addr, size, axi_sec);
326 pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind);
327}
328
329/*
330 * Set the DOUT field of a HW descriptor to MLLI type
331 * The LAST INDICATION is provided by the user
332 *
333 * @pdesc: pointer HW descriptor struct
334 * @addr: DOUT address
335 * @size: Data size in bytes
336 * @last_ind: The last indication bit
337 * @axi_sec: AXI secure bit
338 */
339static inline void set_dout_mlli(struct cc_hw_desc *pdesc, dma_addr_t addr,
340 u32 size, enum cc_axi_sec axi_sec,
341 bool last_ind)
342{
343 set_dout_type(pdesc, DMA_MLLI, addr, size, axi_sec);
344 pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_LAST_IND, last_ind);
345}
346
347/*
348 * Set the DOUT field of a HW descriptor to NO DMA mode.
349 * Used for NOP descriptor, register patches and other special modes.
350 *
351 * @pdesc: pointer HW descriptor struct
352 * @addr: DOUT address
353 * @size: Data size in bytes
354 * @write_enable: Enables a write operation to a register
355 */
356static inline void set_dout_no_dma(struct cc_hw_desc *pdesc, u32 addr,
357 u32 size, bool write_enable)
358{
359 pdesc->word[2] = addr;
360 pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_SIZE, size) |
361 FIELD_PREP(WORD3_DOUT_LAST_IND, write_enable);
362}
363
364/*
365 * Set the word for the XOR operation.
366 *
367 * @pdesc: pointer HW descriptor struct
368 * @val: xor data value
369 */
370static inline void set_xor_val(struct cc_hw_desc *pdesc, u32 val)
371{
372 pdesc->word[2] = val;
373}
374
375/*
376 * Sets the XOR indicator bit in the descriptor
377 *
378 * @pdesc: pointer HW descriptor struct
379 */
380static inline void set_xor_active(struct cc_hw_desc *pdesc)
381{
382 pdesc->word[3] |= FIELD_PREP(WORD3_HASH_XOR_BIT, 1);
383}
384
385/*
386 * Select the AES engine instead of HASH engine when setting up combined mode
387 * with AES XCBC MAC
388 *
389 * @pdesc: pointer HW descriptor struct
390 */
391static inline void set_aes_not_hash_mode(struct cc_hw_desc *pdesc)
392{
393 pdesc->word[4] |= FIELD_PREP(WORD4_AES_SEL_N_HASH, 1);
394}
395
396/*
397 * Set the DOUT field of a HW descriptor to SRAM mode
398 * Note: No need to check SRAM alignment since host requests do not use SRAM and
399 * the adaptor will enforce the alignment check.
400 *
401 * @pdesc: pointer HW descriptor struct
402 * @addr: DOUT address
403 * @size: Data size in bytes
404 */
405static inline void set_dout_sram(struct cc_hw_desc *pdesc, u32 addr, u32 size)
406{
407 pdesc->word[2] = addr;
408 pdesc->word[3] |= FIELD_PREP(WORD3_DOUT_DMA_MODE, DMA_SRAM) |
409 FIELD_PREP(WORD3_DOUT_SIZE, size);
410}
411
412/*
413 * Sets the data unit size for XEX mode in data_out_addr[15:0]
414 *
415 * @pdesc: pointer HW descriptor struct
416 * @size: data unit size for XEX mode
417 */
418static inline void set_xex_data_unit_size(struct cc_hw_desc *pdesc, u32 size)
419{
420 pdesc->word[2] = size;
421}
422
423/*
424 * Set the number of rounds for Multi2 in data_out_addr[15:0]
425 *
426 * @pdesc: pointer HW descriptor struct
427 * @num: number of rounds for Multi2
428 */
429static inline void set_multi2_num_rounds(struct cc_hw_desc *pdesc, u32 num)
430{
431 pdesc->word[2] = num;
432}
433
434/*
435 * Set the flow mode.
436 *
437 * @pdesc: pointer HW descriptor struct
438 * @mode: Any one of the modes defined in [CC7x-DESC]
439 */
440static inline void set_flow_mode(struct cc_hw_desc *pdesc,
441 enum cc_flow_mode mode)
442{
443 pdesc->word[4] |= FIELD_PREP(WORD4_DATA_FLOW_MODE, mode);
444}
445
446/*
447 * Set the cipher mode.
448 *
449 * @pdesc: pointer HW descriptor struct
450 * @mode: Any one of the modes defined in [CC7x-DESC]
451 */
452static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
453 enum drv_cipher_mode mode)
454{
455 pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode);
456}
457
458/*
459 * Set the cipher configuration fields.
460 *
461 * @pdesc: pointer HW descriptor struct
462 * @mode: Any one of the modes defined in [CC7x-DESC]
463 */
464static inline void set_cipher_config0(struct cc_hw_desc *pdesc,
465 enum drv_crypto_direction mode)
466{
467 pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode);
468}
469
470/*
471 * Set the cipher configuration fields.
472 *
473 * @pdesc: pointer HW descriptor struct
474 * @config: Any one of the modes defined in [CC7x-DESC]
475 */
476static inline void set_cipher_config1(struct cc_hw_desc *pdesc,
477 enum cc_hash_conf_pad config)
478{
479 pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF1, config);
480}
481
482/*
483 * Set HW key configuration fields.
484 *
485 * @pdesc: pointer HW descriptor struct
486 * @hw_key: The HW key slot as defined in enum cc_hw_crypto_key
487 */
488static inline void set_hw_crypto_key(struct cc_hw_desc *pdesc,
489 enum cc_hw_crypto_key hw_key)
490{
491 pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_DO,
492 (hw_key & HW_KEY_MASK_CIPHER_DO)) |
493 FIELD_PREP(WORD4_CIPHER_CONF2,
494 (hw_key >> HW_KEY_SHIFT_CIPHER_CFG2));
495}
496
497/*
498 * Set byte order of all setup-finalize descriptors.
499 *
500 * @pdesc: pointer HW descriptor struct
501 * @config: Any one of the modes defined in [CC7x-DESC]
502 */
503static inline void set_bytes_swap(struct cc_hw_desc *pdesc, bool config)
504{
505 pdesc->word[4] |= FIELD_PREP(WORD4_BYTES_SWAP, config);
506}
507
508/*
509 * Set CMAC_SIZE0 mode.
510 *
511 * @pdesc: pointer HW descriptor struct
512 */
513static inline void set_cmac_size0_mode(struct cc_hw_desc *pdesc)
514{
515 pdesc->word[4] |= FIELD_PREP(WORD4_CMAC_SIZE0, 1);
516}
517
518/*
519 * Set key size descriptor field.
520 *
521 * @pdesc: pointer HW descriptor struct
522 * @size: key size in bytes (NOT size code)
523 */
524static inline void set_key_size(struct cc_hw_desc *pdesc, u32 size)
525{
526 pdesc->word[4] |= FIELD_PREP(WORD4_KEY_SIZE, size);
527}
528
529/*
530 * Set AES key size.
531 *
532 * @pdesc: pointer HW descriptor struct
533 * @size: key size in bytes (NOT size code)
534 */
535static inline void set_key_size_aes(struct cc_hw_desc *pdesc, u32 size)
536{
537 set_key_size(pdesc, ((size >> 3) - 2));
538}
539
540/*
541 * Set DES key size.
542 *
543 * @pdesc: pointer HW descriptor struct
544 * @size: key size in bytes (NOT size code)
545 */
546static inline void set_key_size_des(struct cc_hw_desc *pdesc, u32 size)
547{
548 set_key_size(pdesc, ((size >> 3) - 1));
549}
550
551/*
552 * Set the descriptor setup mode
553 *
554 * @pdesc: pointer HW descriptor struct
555 * @mode: Any one of the setup modes defined in [CC7x-DESC]
556 */
557static inline void set_setup_mode(struct cc_hw_desc *pdesc,
558 enum cc_setup_op mode)
559{
560 pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, mode);
561}
562
563/*
564 * Set the descriptor cipher DO
565 *
566 * @pdesc: pointer HW descriptor struct
567 * @config: Any one of the cipher do defined in [CC7x-DESC]
568 */
569static inline void set_cipher_do(struct cc_hw_desc *pdesc,
570 enum cc_hash_cipher_pad config)
571{
572 pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_DO,
573 (config & HW_KEY_MASK_CIPHER_DO));
574}
575
576#endif /*__CC_HW_QUEUE_DEFS_H__*/
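As a usage illustration (not part of the driver), the packing helpers above compose into complete descriptors as sketched below. The cc_build_bypass_desc() wrapper and the DMA addresses are hypothetical; pushing a finished descriptor onto the HW queue belongs to the request manager, later in this series.

/* Sketch: one BYPASS descriptor that DMA-copies "len" bytes. */
static void cc_build_bypass_desc(struct cc_hw_desc *desc,
				 dma_addr_t src_dma, dma_addr_t dst_dma,
				 u32 len)
{
	hw_desc_init(desc);			/* zero all HW_DESC_SIZE_WORDS words */
	set_din_type(desc, DMA_DLLI, src_dma, len, AXI_NOT_SECURE);
	set_dout_dlli(desc, dst_dma, len, AXI_NOT_SECURE, 1);
	set_flow_mode(desc, BYPASS);		/* plain data move, no crypto engine */
	set_queue_last_ind_bit(desc);		/* last descriptor of this flow */
}

Each setter is just a FIELD_PREP() against one of the WORDn_* masks, so the calls may be made in any order and simply OR their fields into the six 32-bit words of struct cc_hw_desc.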
diff --git a/drivers/crypto/ccree/cc_ivgen.c b/drivers/crypto/ccree/cc_ivgen.c
new file mode 100644
index 000000000000..769458323394
--- /dev/null
+++ b/drivers/crypto/ccree/cc_ivgen.c
@@ -0,0 +1,279 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#include <crypto/ctr.h>
5#include "cc_driver.h"
6#include "cc_ivgen.h"
7#include "cc_request_mgr.h"
8#include "cc_sram_mgr.h"
9#include "cc_buffer_mgr.h"
10
11/* The max. size of pool *MUST* be <= SRAM total size */
12#define CC_IVPOOL_SIZE 1024
13/* The first 32B of the pool are dedicated to the
14 * next encryption "key" & "IV" used for pool regeneration
15 */
16#define CC_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128)
17#define CC_IVPOOL_GEN_SEQ_LEN 4
18
19/**
20 * struct cc_ivgen_ctx - IV pool generation context
21 * @pool: the start address of the iv-pool in internal RAM
22 * @ctr_key: address of the pool's encryption key material in internal RAM
23 * @ctr_iv: address of the pool's counter IV in internal RAM
24 * @next_iv_ofs: the offset to the next available IV in the pool
25 * @pool_meta: virt. address of the initial enc. key/IV
26 * @pool_meta_dma: phys. address of the initial enc. key/IV
27 */
28struct cc_ivgen_ctx {
29 cc_sram_addr_t pool;
30 cc_sram_addr_t ctr_key;
31 cc_sram_addr_t ctr_iv;
32 u32 next_iv_ofs;
33 u8 *pool_meta;
34 dma_addr_t pool_meta_dma;
35};
36
37/*!
38 * Generates CC_IVPOOL_SIZE random bytes by
39 * encrypting zeros using AES-128-CTR.
40 *
41 * \param ivgen iv-pool context
42 * \param iv_seq IN/OUT array to the descriptors sequence
43 * \param iv_seq_len IN/OUT pointer to the sequence length
44 */
45static int cc_gen_iv_pool(struct cc_ivgen_ctx *ivgen_ctx,
46 struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
47{
48 unsigned int idx = *iv_seq_len;
49
50 if ((*iv_seq_len + CC_IVPOOL_GEN_SEQ_LEN) > CC_IVPOOL_SEQ_LEN) {
51 /* The sequence will be longer than allowed */
52 return -EINVAL;
53 }
54 /* Setup key */
55 hw_desc_init(&iv_seq[idx]);
56 set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_key, AES_KEYSIZE_128);
57 set_setup_mode(&iv_seq[idx], SETUP_LOAD_KEY0);
58 set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
59 set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
60 set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
61 set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
62 idx++;
63
64 /* Setup cipher state */
65 hw_desc_init(&iv_seq[idx]);
66 set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_iv, CC_AES_IV_SIZE);
67 set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
68 set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
69 set_setup_mode(&iv_seq[idx], SETUP_LOAD_STATE1);
70 set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
71 set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
72 idx++;
73
74 /* Perform dummy encrypt to skip first block */
75 hw_desc_init(&iv_seq[idx]);
76 set_din_const(&iv_seq[idx], 0, CC_AES_IV_SIZE);
77 set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_AES_IV_SIZE);
78 set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
79 idx++;
80
81 /* Generate IV pool */
82 hw_desc_init(&iv_seq[idx]);
83 set_din_const(&iv_seq[idx], 0, CC_IVPOOL_SIZE);
84 set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_IVPOOL_SIZE);
85 set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
86 idx++;
87
88 *iv_seq_len = idx; /* Update sequence length */
89
90 /* queue ordering assures pool readiness */
91 ivgen_ctx->next_iv_ofs = CC_IVPOOL_META_SIZE;
92
93 return 0;
94}
95
96/*!
97 * Generates the initial pool in SRAM.
98 * This function should be invoked when resuming the driver.
99 *
100 * \param drvdata
101 *
102 * \return int Zero for success, negative value otherwise.
103 */
104int cc_init_iv_sram(struct cc_drvdata *drvdata)
105{
106 struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
107 struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
108 unsigned int iv_seq_len = 0;
109 int rc;
110
111 /* Generate initial enc. key/iv */
112 get_random_bytes(ivgen_ctx->pool_meta, CC_IVPOOL_META_SIZE);
113
114 /* The first 32B reserved for the enc. Key/IV */
115 ivgen_ctx->ctr_key = ivgen_ctx->pool;
116 ivgen_ctx->ctr_iv = ivgen_ctx->pool + AES_KEYSIZE_128;
117
118 /* Copy initial enc. key and IV to SRAM at a single descriptor */
119 hw_desc_init(&iv_seq[iv_seq_len]);
120 set_din_type(&iv_seq[iv_seq_len], DMA_DLLI, ivgen_ctx->pool_meta_dma,
121 CC_IVPOOL_META_SIZE, NS_BIT);
122 set_dout_sram(&iv_seq[iv_seq_len], ivgen_ctx->pool,
123 CC_IVPOOL_META_SIZE);
124 set_flow_mode(&iv_seq[iv_seq_len], BYPASS);
125 iv_seq_len++;
126
127 /* Generate initial pool */
128 rc = cc_gen_iv_pool(ivgen_ctx, iv_seq, &iv_seq_len);
129 if (rc)
130 return rc;
131
132 /* Fire-and-forget */
133 return send_request_init(drvdata, iv_seq, iv_seq_len);
134}
135
136/*!
137 * Free iv-pool and ivgen context.
138 *
139 * \param drvdata
140 */
141void cc_ivgen_fini(struct cc_drvdata *drvdata)
142{
143 struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
144 struct device *device = &drvdata->plat_dev->dev;
145
146 if (!ivgen_ctx)
147 return;
148
149 if (ivgen_ctx->pool_meta) {
150 memset(ivgen_ctx->pool_meta, 0, CC_IVPOOL_META_SIZE);
151 dma_free_coherent(device, CC_IVPOOL_META_SIZE,
152 ivgen_ctx->pool_meta,
153 ivgen_ctx->pool_meta_dma);
154 }
155
156 ivgen_ctx->pool = NULL_SRAM_ADDR;
157
158 /* release "this" context */
159 kfree(ivgen_ctx);
160}
161
162/*!
163 * Allocates iv-pool and maps resources.
164 * This function generates the first IV pool.
165 *
166 * \param drvdata Driver's private context
167 *
168 * \return int Zero for success, negative value otherwise.
169 */
170int cc_ivgen_init(struct cc_drvdata *drvdata)
171{
172 struct cc_ivgen_ctx *ivgen_ctx;
173 struct device *device = &drvdata->plat_dev->dev;
174 int rc;
175
176 /* Allocate "this" context */
177 ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
178 if (!ivgen_ctx)
179 return -ENOMEM;
180
181 /* Allocate pool's header for initial enc. key/IV */
182 ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
183 &ivgen_ctx->pool_meta_dma,
184 GFP_KERNEL);
185 if (!ivgen_ctx->pool_meta) {
186 dev_err(device, "Not enough memory to allocate DMA of pool_meta (%u B)\n",
187 CC_IVPOOL_META_SIZE);
188 rc = -ENOMEM;
189 goto out;
190 }
191 /* Allocate IV pool in SRAM */
192 ivgen_ctx->pool = cc_sram_alloc(drvdata, CC_IVPOOL_SIZE);
193 if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
194 dev_err(device, "SRAM pool exhausted\n");
195 rc = -ENOMEM;
196 goto out;
197 }
198
199 drvdata->ivgen_handle = ivgen_ctx;
200
201 return cc_init_iv_sram(drvdata);
202
203out:
204 cc_ivgen_fini(drvdata);
205 return rc;
206}
207
208/*!
209 * Acquires IVs from the iv-pool, one per output DMA address
210 *
211 * \param drvdata Driver private context
212 * \param iv_out_dma Array of physical IV out addresses
213 * \param iv_out_dma_len Length of iv_out_dma array (additional elements
214 * of iv_out_dma array are ignored)
215 * \param iv_out_size May be 8 or 16 bytes long
216 * \param iv_seq IN/OUT array to the descriptors sequence
217 * \param iv_seq_len IN/OUT pointer to the sequence length
218 *
219 * \return int Zero for success, negative value otherwise.
220 */
221int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
222 unsigned int iv_out_dma_len, unsigned int iv_out_size,
223 struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
224{
225 struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
226 unsigned int idx = *iv_seq_len;
227 struct device *dev = drvdata_to_dev(drvdata);
228 unsigned int t;
229
230 if (iv_out_size != CC_AES_IV_SIZE &&
231 iv_out_size != CTR_RFC3686_IV_SIZE) {
232 return -EINVAL;
233 }
234 if ((iv_out_dma_len + 1) > CC_IVPOOL_SEQ_LEN) {
235 /* The sequence will be longer than allowed */
236 return -EINVAL;
237 }
238
239 /* check that number of generated IV is limited to max dma address
240 * iv buffer size
241 */
242 if (iv_out_dma_len > CC_MAX_IVGEN_DMA_ADDRESSES) {
243 /* The sequence will be longer than allowed */
244 return -EINVAL;
245 }
246
247 for (t = 0; t < iv_out_dma_len; t++) {
248 /* Acquire IV from pool */
249 hw_desc_init(&iv_seq[idx]);
250 set_din_sram(&iv_seq[idx], (ivgen_ctx->pool +
251 ivgen_ctx->next_iv_ofs),
252 iv_out_size);
253 set_dout_dlli(&iv_seq[idx], iv_out_dma[t], iv_out_size,
254 NS_BIT, 0);
255 set_flow_mode(&iv_seq[idx], BYPASS);
256 idx++;
257 }
258
259 /* The bypass operation is followed by the crypto sequence, hence we
260 * must ensure the bypass write transaction completes via a memory barrier
261 */
262 hw_desc_init(&iv_seq[idx]);
263 set_din_no_dma(&iv_seq[idx], 0, 0xfffff0);
264 set_dout_no_dma(&iv_seq[idx], 0, 0, 1);
265 idx++;
266
267 *iv_seq_len = idx; /* update seq length */
268
269 /* Update iv index */
270 ivgen_ctx->next_iv_ofs += iv_out_size;
271
272 if ((CC_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) {
273 dev_dbg(dev, "Pool exhausted, regenerating iv-pool\n");
274 /* pool is drained - regenerate it! */
275 return cc_gen_iv_pool(ivgen_ctx, iv_seq, iv_seq_len);
276 }
277
278 return 0;
279}
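A quick capacity check, worked out from the constants above rather than stated in the source (assuming CC_AES_IV_SIZE is the 16-byte AES block size defined elsewhere in the driver): the pool is CC_IVPOOL_SIZE = 1024 bytes, of which the first CC_IVPOOL_META_SIZE = CC_AES_IV_SIZE + AES_KEYSIZE_128 = 16 + 16 = 32 bytes hold the next key/IV, so (1024 - 32) / 16 = 62 full 16-byte IVs can be handed out by cc_get_iv() before the remaining space drops below CC_AES_IV_SIZE and cc_gen_iv_pool() is appended to the caller's sequence to regenerate the pool.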
diff --git a/drivers/crypto/ccree/cc_ivgen.h b/drivers/crypto/ccree/cc_ivgen.h
new file mode 100644
index 000000000000..b6ac16903dda
--- /dev/null
+++ b/drivers/crypto/ccree/cc_ivgen.h
@@ -0,0 +1,55 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#ifndef __CC_IVGEN_H__
5#define __CC_IVGEN_H__
6
7#include "cc_hw_queue_defs.h"
8
9#define CC_IVPOOL_SEQ_LEN 8
10
11/*!
12 * Allocates iv-pool and maps resources.
13 * This function generates the first IV pool.
14 *
15 * \param drvdata Driver's private context
16 *
17 * \return int Zero for success, negative value otherwise.
18 */
19int cc_ivgen_init(struct cc_drvdata *drvdata);
20
21/*!
22 * Free iv-pool and ivgen context.
23 *
24 * \param drvdata
25 */
26void cc_ivgen_fini(struct cc_drvdata *drvdata);
27
28/*!
29 * Generates the initial pool in SRAM.
30 * This function should be invoked when resuming the driver.
31 *
32 * \param drvdata
33 *
34 * \return int Zero for success, negative value otherwise.
35 */
36int cc_init_iv_sram(struct cc_drvdata *drvdata);
37
38/*!
39 * Acquires IVs from the iv-pool, one per output DMA address
40 *
41 * \param drvdata Driver private context
42 * \param iv_out_dma Array of physical IV out addresses
43 * \param iv_out_dma_len Length of iv_out_dma array (additional elements of
44 * iv_out_dma array are ignored)
45 * \param iv_out_size May be 8 or 16 bytes long
46 * \param iv_seq IN/OUT array to the descriptors sequence
47 * \param iv_seq_len IN/OUT pointer to the sequence length
48 *
49 * \return int Zero for success, negative value otherwise.
50 */
51int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
52 unsigned int iv_out_dma_len, unsigned int iv_out_size,
53 struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len);
54
55#endif /*__CC_IVGEN_H__*/
diff --git a/drivers/crypto/ccree/cc_kernel_regs.h b/drivers/crypto/ccree/cc_kernel_regs.h
new file mode 100644
index 000000000000..8d7262a35156
--- /dev/null
+++ b/drivers/crypto/ccree/cc_kernel_regs.h
@@ -0,0 +1,168 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#ifndef __CC_CRYS_KERNEL_H__
5#define __CC_CRYS_KERNEL_H__
6
7// --------------------------------------
8// BLOCK: DSCRPTR
9// --------------------------------------
10#define CC_DSCRPTR_COMPLETION_COUNTER_REG_OFFSET 0xE00UL
11#define CC_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SHIFT 0x0UL
12#define CC_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SIZE 0x6UL
13#define CC_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SHIFT 0x6UL
14#define CC_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SIZE 0x1UL
15#define CC_DSCRPTR_SW_RESET_REG_OFFSET 0xE40UL
16#define CC_DSCRPTR_SW_RESET_VALUE_BIT_SHIFT 0x0UL
17#define CC_DSCRPTR_SW_RESET_VALUE_BIT_SIZE 0x1UL
18#define CC_DSCRPTR_QUEUE_SRAM_SIZE_REG_OFFSET 0xE60UL
19#define CC_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SHIFT 0x0UL
20#define CC_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SIZE 0xAUL
21#define CC_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SHIFT 0xAUL
22#define CC_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SIZE 0xCUL
23#define CC_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SHIFT 0x16UL
24#define CC_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SIZE 0x3UL
25#define CC_DSCRPTR_SINGLE_ADDR_EN_REG_OFFSET 0xE64UL
26#define CC_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SHIFT 0x0UL
27#define CC_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SIZE 0x1UL
28#define CC_DSCRPTR_MEASURE_CNTR_REG_OFFSET 0xE68UL
29#define CC_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SHIFT 0x0UL
30#define CC_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SIZE 0x20UL
31#define CC_DSCRPTR_QUEUE_WORD0_REG_OFFSET 0xE80UL
32#define CC_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SHIFT 0x0UL
33#define CC_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SIZE 0x20UL
34#define CC_DSCRPTR_QUEUE_WORD1_REG_OFFSET 0xE84UL
35#define CC_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SHIFT 0x0UL
36#define CC_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SIZE 0x2UL
37#define CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SHIFT 0x2UL
38#define CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE 0x18UL
39#define CC_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SHIFT 0x1AUL
40#define CC_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SIZE 0x1UL
41#define CC_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SHIFT 0x1BUL
42#define CC_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SIZE 0x1UL
43#define CC_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SHIFT 0x1CUL
44#define CC_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SIZE 0x1UL
45#define CC_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SHIFT 0x1DUL
46#define CC_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SIZE 0x1UL
47#define CC_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SHIFT 0x1EUL
48#define CC_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SIZE 0x2UL
49#define CC_DSCRPTR_QUEUE_WORD2_REG_OFFSET 0xE88UL
50#define CC_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SHIFT 0x0UL
51#define CC_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SIZE 0x20UL
52#define CC_DSCRPTR_QUEUE_WORD3_REG_OFFSET 0xE8CUL
53#define CC_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SHIFT 0x0UL
54#define CC_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SIZE 0x2UL
55#define CC_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SHIFT 0x2UL
56#define CC_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SIZE 0x18UL
57#define CC_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SHIFT 0x1AUL
58#define CC_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SIZE 0x1UL
59#define CC_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SHIFT 0x1BUL
60#define CC_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SIZE 0x1UL
61#define CC_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SHIFT 0x1DUL
62#define CC_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SIZE 0x1UL
63#define CC_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SHIFT 0x1EUL
64#define CC_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SIZE 0x1UL
65#define CC_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SHIFT 0x1FUL
66#define CC_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SIZE 0x1UL
67#define CC_DSCRPTR_QUEUE_WORD4_REG_OFFSET 0xE90UL
68#define CC_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SHIFT 0x0UL
69#define CC_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SIZE 0x6UL
70#define CC_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SHIFT 0x6UL
71#define CC_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SIZE 0x1UL
72#define CC_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SHIFT 0x7UL
73#define CC_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SIZE 0x1UL
74#define CC_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SHIFT 0x8UL
75#define CC_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SIZE 0x2UL
76#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SHIFT 0xAUL
77#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SIZE 0x4UL
78#define CC_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SHIFT 0xEUL
79#define CC_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SIZE 0x1UL
80#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SHIFT 0xFUL
81#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SIZE 0x2UL
82#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SHIFT 0x11UL
83#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SIZE 0x2UL
84#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SHIFT 0x13UL
85#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SIZE 0x1UL
86#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SHIFT 0x14UL
87#define CC_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SIZE 0x2UL
88#define CC_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SHIFT 0x16UL
89#define CC_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SIZE 0x2UL
90#define CC_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SHIFT 0x18UL
91#define CC_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SIZE 0x4UL
92#define CC_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SHIFT 0x1CUL
93#define CC_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SIZE 0x1UL
94#define CC_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SHIFT 0x1DUL
95#define CC_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SIZE 0x1UL
96#define CC_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SHIFT 0x1EUL
97#define CC_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SIZE 0x1UL
98#define CC_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SHIFT 0x1FUL
99#define CC_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SIZE 0x1UL
100#define CC_DSCRPTR_QUEUE_WORD5_REG_OFFSET 0xE94UL
101#define CC_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SHIFT 0x0UL
102#define CC_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SIZE 0x10UL
103#define CC_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SHIFT 0x10UL
104#define CC_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SIZE 0x10UL
105#define CC_DSCRPTR_QUEUE_WATERMARK_REG_OFFSET 0xE98UL
106#define CC_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SHIFT 0x0UL
107#define CC_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SIZE 0xAUL
108#define CC_DSCRPTR_QUEUE_CONTENT_REG_OFFSET 0xE9CUL
109#define CC_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SHIFT 0x0UL
110#define CC_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SIZE 0xAUL
111// --------------------------------------
112// BLOCK: AXI_P
113// --------------------------------------
114#define CC_AXIM_MON_INFLIGHT_REG_OFFSET 0xB00UL
115#define CC_AXIM_MON_INFLIGHT_VALUE_BIT_SHIFT 0x0UL
116#define CC_AXIM_MON_INFLIGHT_VALUE_BIT_SIZE 0x8UL
117#define CC_AXIM_MON_INFLIGHTLAST_REG_OFFSET 0xB40UL
118#define CC_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SHIFT 0x0UL
119#define CC_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SIZE 0x8UL
120#define CC_AXIM_MON_COMP_REG_OFFSET 0xB80UL
121#define CC_AXIM_MON_COMP8_REG_OFFSET 0xBA0UL
122#define CC_AXIM_MON_COMP_VALUE_BIT_SHIFT 0x0UL
123#define CC_AXIM_MON_COMP_VALUE_BIT_SIZE 0x10UL
124#define CC_AXIM_MON_ERR_REG_OFFSET 0xBC4UL
125#define CC_AXIM_MON_ERR_BRESP_BIT_SHIFT 0x0UL
126#define CC_AXIM_MON_ERR_BRESP_BIT_SIZE 0x2UL
127#define CC_AXIM_MON_ERR_BID_BIT_SHIFT 0x2UL
128#define CC_AXIM_MON_ERR_BID_BIT_SIZE 0x4UL
129#define CC_AXIM_MON_ERR_RRESP_BIT_SHIFT 0x10UL
130#define CC_AXIM_MON_ERR_RRESP_BIT_SIZE 0x2UL
131#define CC_AXIM_MON_ERR_RID_BIT_SHIFT 0x12UL
132#define CC_AXIM_MON_ERR_RID_BIT_SIZE 0x4UL
133#define CC_AXIM_CFG_REG_OFFSET 0xBE8UL
134#define CC_AXIM_CFG_BRESPMASK_BIT_SHIFT 0x4UL
135#define CC_AXIM_CFG_BRESPMASK_BIT_SIZE 0x1UL
136#define CC_AXIM_CFG_RRESPMASK_BIT_SHIFT 0x5UL
137#define CC_AXIM_CFG_RRESPMASK_BIT_SIZE 0x1UL
138#define CC_AXIM_CFG_INFLTMASK_BIT_SHIFT 0x6UL
139#define CC_AXIM_CFG_INFLTMASK_BIT_SIZE 0x1UL
140#define CC_AXIM_CFG_COMPMASK_BIT_SHIFT 0x7UL
141#define CC_AXIM_CFG_COMPMASK_BIT_SIZE 0x1UL
142#define CC_AXIM_ACE_CONST_REG_OFFSET 0xBECUL
143#define CC_AXIM_ACE_CONST_ARDOMAIN_BIT_SHIFT 0x0UL
144#define CC_AXIM_ACE_CONST_ARDOMAIN_BIT_SIZE 0x2UL
145#define CC_AXIM_ACE_CONST_AWDOMAIN_BIT_SHIFT 0x2UL
146#define CC_AXIM_ACE_CONST_AWDOMAIN_BIT_SIZE 0x2UL
147#define CC_AXIM_ACE_CONST_ARBAR_BIT_SHIFT 0x4UL
148#define CC_AXIM_ACE_CONST_ARBAR_BIT_SIZE 0x2UL
149#define CC_AXIM_ACE_CONST_AWBAR_BIT_SHIFT 0x6UL
150#define CC_AXIM_ACE_CONST_AWBAR_BIT_SIZE 0x2UL
151#define CC_AXIM_ACE_CONST_ARSNOOP_BIT_SHIFT 0x8UL
152#define CC_AXIM_ACE_CONST_ARSNOOP_BIT_SIZE 0x4UL
153#define CC_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SHIFT 0xCUL
154#define CC_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SIZE 0x3UL
155#define CC_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SHIFT 0xFUL
156#define CC_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SIZE 0x3UL
157#define CC_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SHIFT 0x12UL
158#define CC_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SIZE 0x7UL
159#define CC_AXIM_ACE_CONST_AWLEN_VAL_BIT_SHIFT 0x19UL
160#define CC_AXIM_ACE_CONST_AWLEN_VAL_BIT_SIZE 0x4UL
161#define CC_AXIM_CACHE_PARAMS_REG_OFFSET 0xBF0UL
162#define CC_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SHIFT 0x0UL
163#define CC_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SIZE 0x4UL
164#define CC_AXIM_CACHE_PARAMS_AWCACHE_BIT_SHIFT 0x4UL
165#define CC_AXIM_CACHE_PARAMS_AWCACHE_BIT_SIZE 0x4UL
166#define CC_AXIM_CACHE_PARAMS_ARCACHE_BIT_SHIFT 0x8UL
167#define CC_AXIM_CACHE_PARAMS_ARCACHE_BIT_SIZE 0x4UL
168#endif // __CC_CRYS_KERNEL_H__
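These DSCRPTR_QUEUE_WORDn shift/size pairs are what the CC_GENMASK() machinery in cc_hw_queue_defs.h token-pastes together; a worked expansion for one field:

/*
 * Illustration (not part of the driver): for the DIN_SIZE field of
 * descriptor word 1,
 *
 *   CC_REG_LOW(1, DIN_SIZE)  = CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SHIFT = 2
 *   CC_REG_HIGH(1, DIN_SIZE) = 2 + CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE - 1
 *                            = 2 + 0x18 - 1 = 25
 *   WORD1_DIN_SIZE           = CC_GENMASK(1, DIN_SIZE) = GENMASK(25, 2)
 *
 * so FIELD_PREP(WORD1_DIN_SIZE, size) places a 24-bit byte count into
 * bits [25:2] of descriptor word 1, matching the register layout above.
 */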
diff --git a/drivers/crypto/ccree/cc_lli_defs.h b/drivers/crypto/ccree/cc_lli_defs.h
new file mode 100644
index 000000000000..64b15ac9f1d3
--- /dev/null
+++ b/drivers/crypto/ccree/cc_lli_defs.h
@@ -0,0 +1,59 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#ifndef _CC_LLI_DEFS_H_
5#define _CC_LLI_DEFS_H_
6
7#include <linux/types.h>
8
9/* Max DLLI size
10 * AKA CC_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE
11 */
12#define DLLI_SIZE_BIT_SIZE 0x18
13
14#define CC_MAX_MLLI_ENTRY_SIZE 0xFFFF
15
16#define LLI_MAX_NUM_OF_DATA_ENTRIES 128
17#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4
18#define MLLI_TABLE_MIN_ALIGNMENT 4 /* 32 bit alignment */
19#define MAX_NUM_OF_BUFFERS_IN_MLLI 4
20#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES \
21 (2 * LLI_MAX_NUM_OF_DATA_ENTRIES + \
22 LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)
23
24/* Size of entry */
25#define LLI_ENTRY_WORD_SIZE 2
26#define LLI_ENTRY_BYTE_SIZE (LLI_ENTRY_WORD_SIZE * sizeof(u32))
27
28/* Word0[31:0] = ADDR[31:0] */
29#define LLI_WORD0_OFFSET 0
30#define LLI_LADDR_BIT_OFFSET 0
31#define LLI_LADDR_BIT_SIZE 32
32/* Word1[31:16] = ADDR[47:32]; Word1[15:0] = SIZE */
33#define LLI_WORD1_OFFSET 1
34#define LLI_SIZE_BIT_OFFSET 0
35#define LLI_SIZE_BIT_SIZE 16
36#define LLI_HADDR_BIT_OFFSET 16
37#define LLI_HADDR_BIT_SIZE 16
38
39#define LLI_SIZE_MASK GENMASK((LLI_SIZE_BIT_SIZE - 1), LLI_SIZE_BIT_OFFSET)
40#define LLI_HADDR_MASK GENMASK( \
41 (LLI_HADDR_BIT_OFFSET + LLI_HADDR_BIT_SIZE - 1),\
42 LLI_HADDR_BIT_OFFSET)
43
44static inline void cc_lli_set_addr(u32 *lli_p, dma_addr_t addr)
45{
46 lli_p[LLI_WORD0_OFFSET] = (addr & U32_MAX);
47#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
48 lli_p[LLI_WORD1_OFFSET] &= ~LLI_HADDR_MASK;
49 lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_HADDR_MASK, (addr >> 32));
50#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
51}
52
53static inline void cc_lli_set_size(u32 *lli_p, u16 size)
54{
55 lli_p[LLI_WORD1_OFFSET] &= ~LLI_SIZE_MASK;
56 lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_SIZE_MASK, size);
57}
58
59#endif /*_CC_LLI_DEFS_H_*/
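A short sketch (not from the driver) of how one table entry is filled with the helpers above; the MLLI table buffer is assumed to be zero-initialized by its allocator, and the high address word is only updated when CONFIG_ARCH_DMA_ADDR_T_64BIT is set:

/* Sketch: populate one LLI entry (two u32 words) for a DMA buffer. */
static void cc_lli_fill_entry(u32 *lli_p, dma_addr_t buf_dma, u16 len)
{
	/* ADDR[31:0] -> word 0; ADDR[47:32] -> word 1 bits [31:16] */
	cc_lli_set_addr(lli_p, buf_dma);
	/* Entry length -> word 1 bits [15:0]; u16 keeps it within CC_MAX_MLLI_ENTRY_SIZE */
	cc_lli_set_size(lli_p, len);
}

Advancing lli_p by LLI_ENTRY_WORD_SIZE between calls walks the table entry by entry.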
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
new file mode 100644
index 000000000000..d990f472e89f
--- /dev/null
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -0,0 +1,122 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#include <linux/kernel.h>
5#include <linux/interrupt.h>
6#include <linux/pm_runtime.h>
7#include "cc_driver.h"
8#include "cc_buffer_mgr.h"
9#include "cc_request_mgr.h"
10#include "cc_sram_mgr.h"
11#include "cc_ivgen.h"
12#include "cc_hash.h"
13#include "cc_pm.h"
14
15#define POWER_DOWN_ENABLE 0x01
16#define POWER_DOWN_DISABLE 0x00
17
18const struct dev_pm_ops ccree_pm = {
19 SET_RUNTIME_PM_OPS(cc_pm_suspend, cc_pm_resume, NULL)
20};
21
22int cc_pm_suspend(struct device *dev)
23{
24 struct cc_drvdata *drvdata = dev_get_drvdata(dev);
25 int rc;
26
27 dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
28 cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
29 rc = cc_suspend_req_queue(drvdata);
30 if (rc) {
31 dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
32 return rc;
33 }
34 fini_cc_regs(drvdata);
35 cc_clk_off(drvdata);
36 return 0;
37}
38
39int cc_pm_resume(struct device *dev)
40{
41 int rc;
42 struct cc_drvdata *drvdata = dev_get_drvdata(dev);
43
44 dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
45 cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
46
47 rc = cc_clk_on(drvdata);
48 if (rc) {
49 dev_err(dev, "failed getting clock back on. We're toast.\n");
50 return rc;
51 }
52
53 rc = init_cc_regs(drvdata, false);
54 if (rc) {
55 dev_err(dev, "init_cc_regs (%x)\n", rc);
56 return rc;
57 }
58
59 rc = cc_resume_req_queue(drvdata);
60 if (rc) {
61 dev_err(dev, "cc_resume_req_queue (%x)\n", rc);
62 return rc;
63 }
64
65 /* must be after resuming the queue as it uses the HW queue */
66 cc_init_hash_sram(drvdata);
67
68 cc_init_iv_sram(drvdata);
69 return 0;
70}
71
72int cc_pm_get(struct device *dev)
73{
74 int rc = 0;
75 struct cc_drvdata *drvdata = dev_get_drvdata(dev);
76
77 if (cc_req_queue_suspended(drvdata))
78 rc = pm_runtime_get_sync(dev);
79 else
80 pm_runtime_get_noresume(dev);
81
82 return rc;
83}
84
85int cc_pm_put_suspend(struct device *dev)
86{
87 int rc = 0;
88 struct cc_drvdata *drvdata = dev_get_drvdata(dev);
89
90 if (!cc_req_queue_suspended(drvdata)) {
91 pm_runtime_mark_last_busy(dev);
92 rc = pm_runtime_put_autosuspend(dev);
93 } else {
94 /* Something went wrong */
95 dev_err(dev, "request to suspend already suspended queue");
96 rc = -EBUSY;
97 }
98 return rc;
99}
100
101int cc_pm_init(struct cc_drvdata *drvdata)
102{
103 int rc = 0;
104 struct device *dev = drvdata_to_dev(drvdata);
105
106 /* must be before the enabling to avoid redundant suspending */
107 pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
108 pm_runtime_use_autosuspend(dev);
109 /* activate the PM module */
110 rc = pm_runtime_set_active(dev);
111 if (rc)
112 return rc;
113 /* enable the PM module*/
114 pm_runtime_enable(dev);
115
116 return rc;
117}
118
119void cc_pm_fini(struct cc_drvdata *drvdata)
120{
121 pm_runtime_disable(drvdata_to_dev(drvdata));
122}
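For context, a hedged sketch of the bracket pattern the rest of the driver is expected to follow around HW access; cc_do_work_powered() is a hypothetical caller, not a driver function:

static int cc_do_work_powered(struct device *dev)
{
	int rc;

	/* Resume the HW (and re-init the SRAM pools) if the queue was suspended */
	rc = cc_pm_get(dev);
	if (rc < 0)	/* pm_runtime_get_sync() may return 1 when already active */
		return rc;

	/* ... enqueue descriptors / touch registers here ... */

	/* Mark idle; autosuspend fires after CC_SUSPEND_TIMEOUT ms */
	return cc_pm_put_suspend(dev);
}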
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
new file mode 100644
index 000000000000..020a5403c58b
--- /dev/null
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -0,0 +1,56 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4/* \file cc_pm.h
5 */
6
7#ifndef __CC_POWER_MGR_H__
8#define __CC_POWER_MGR_H__
9
10#include "cc_driver.h"
11
12#define CC_SUSPEND_TIMEOUT 3000
13
14#if defined(CONFIG_PM)
15
16extern const struct dev_pm_ops ccree_pm;
17
18int cc_pm_init(struct cc_drvdata *drvdata);
19void cc_pm_fini(struct cc_drvdata *drvdata);
20int cc_pm_suspend(struct device *dev);
21int cc_pm_resume(struct device *dev);
22int cc_pm_get(struct device *dev);
23int cc_pm_put_suspend(struct device *dev);
24
25#else
26
27static inline int cc_pm_init(struct cc_drvdata *drvdata)
28{
29 return 0;
30}
31
32static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
33
34static inline int cc_pm_suspend(struct device *dev)
35{
36 return 0;
37}
38
39static inline int cc_pm_resume(struct device *dev)
40{
41 return 0;
42}
43
44static inline int cc_pm_get(struct device *dev)
45{
46 return 0;
47}
48
49static inline int cc_pm_put_suspend(struct device *dev)
50{
51 return 0;
52}
53
54#endif
55
56#endif /*__CC_POWER_MGR_H__*/
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
new file mode 100644
index 000000000000..83a8aaae61c7
--- /dev/null
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -0,0 +1,711 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#include <linux/kernel.h>
5#include "cc_driver.h"
6#include "cc_buffer_mgr.h"
7#include "cc_request_mgr.h"
8#include "cc_ivgen.h"
9#include "cc_pm.h"
10
11#define CC_MAX_POLL_ITER 10
12/* The highest descriptor count in use */
13#define CC_MAX_DESC_SEQ_LEN 23
14
15struct cc_req_mgr_handle {
16 /* Request manager resources */
17 unsigned int hw_queue_size; /* HW capability */
18 unsigned int min_free_hw_slots;
19 unsigned int max_used_sw_slots;
20 struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
21 u32 req_queue_head;
22 u32 req_queue_tail;
23 u32 axi_completed;
24 u32 q_free_slots;
25 /* This lock protects access to HW registers
26 * that must be accessed by a single request at a time
27 */
28 spinlock_t hw_lock;
29 struct cc_hw_desc compl_desc;
30 u8 *dummy_comp_buff;
31 dma_addr_t dummy_comp_buff_dma;
32
33 /* backlog queue */
34 struct list_head backlog;
35 unsigned int bl_len;
36 spinlock_t bl_lock; /* protect backlog queue */
37
38#ifdef COMP_IN_WQ
39 struct workqueue_struct *workq;
40 struct delayed_work compwork;
41#else
42 struct tasklet_struct comptask;
43#endif
44 bool is_runtime_suspended;
45};
46
47struct cc_bl_item {
48 struct cc_crypto_req creq;
49 struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
50 unsigned int len;
51 struct list_head list;
52 bool notif;
53};
54
55static void comp_handler(unsigned long devarg);
56#ifdef COMP_IN_WQ
57static void comp_work_handler(struct work_struct *work);
58#endif
59
60void cc_req_mgr_fini(struct cc_drvdata *drvdata)
61{
62 struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
63 struct device *dev = drvdata_to_dev(drvdata);
64
65 if (!req_mgr_h)
66 return; /* Not allocated */
67
68 if (req_mgr_h->dummy_comp_buff_dma) {
69 dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
70 req_mgr_h->dummy_comp_buff_dma);
71 }
72
73 dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
74 req_mgr_h->min_free_hw_slots));
75 dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
76
77#ifdef COMP_IN_WQ
78 flush_workqueue(req_mgr_h->workq);
79 destroy_workqueue(req_mgr_h->workq);
80#else
81 /* Kill tasklet */
82 tasklet_kill(&req_mgr_h->comptask);
83#endif
84 kzfree(req_mgr_h);
85 drvdata->request_mgr_handle = NULL;
86}
87
88int cc_req_mgr_init(struct cc_drvdata *drvdata)
89{
90 struct cc_req_mgr_handle *req_mgr_h;
91 struct device *dev = drvdata_to_dev(drvdata);
92 int rc = 0;
93
94 req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
95 if (!req_mgr_h) {
96 rc = -ENOMEM;
97 goto req_mgr_init_err;
98 }
99
100 drvdata->request_mgr_handle = req_mgr_h;
101
102 spin_lock_init(&req_mgr_h->hw_lock);
103 spin_lock_init(&req_mgr_h->bl_lock);
104 INIT_LIST_HEAD(&req_mgr_h->backlog);
105
106#ifdef COMP_IN_WQ
107 dev_dbg(dev, "Initializing completion workqueue\n");
108 req_mgr_h->workq = create_singlethread_workqueue("ccree");
109 if (!req_mgr_h->workq) {
110 dev_err(dev, "Failed creating work queue\n");
111 rc = -ENOMEM;
112 goto req_mgr_init_err;
113 }
114 INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
115#else
116 dev_dbg(dev, "Initializing completion tasklet\n");
117 tasklet_init(&req_mgr_h->comptask, comp_handler,
118 (unsigned long)drvdata);
119#endif
120 req_mgr_h->hw_queue_size = cc_ioread(drvdata,
121 CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
122 dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
123 if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
124 dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
125 req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
126 rc = -ENOMEM;
127 goto req_mgr_init_err;
128 }
129 req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
130 req_mgr_h->max_used_sw_slots = 0;
131
132 /* Allocate DMA word for "dummy" completion descriptor use */
133 req_mgr_h->dummy_comp_buff =
134 dma_alloc_coherent(dev, sizeof(u32),
135 &req_mgr_h->dummy_comp_buff_dma,
136 GFP_KERNEL);
137 if (!req_mgr_h->dummy_comp_buff) {
138 dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
139 sizeof(u32));
140 rc = -ENOMEM;
141 goto req_mgr_init_err;
142 }
143
144 /* Init. "dummy" completion descriptor */
145 hw_desc_init(&req_mgr_h->compl_desc);
146 set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
147 set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
148 sizeof(u32), NS_BIT, 1);
149 set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
150 set_queue_last_ind(drvdata, &req_mgr_h->compl_desc);
151
152 return 0;
153
154req_mgr_init_err:
155 cc_req_mgr_fini(drvdata);
156 return rc;
157}
158
159static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
160 unsigned int seq_len)
161{
162 int i, w;
163 void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
164 struct device *dev = drvdata_to_dev(drvdata);
165
166 /*
167 * We do indeed write all 6 command words to the same
168 * register. The HW supports this.
169 */
170
171 for (i = 0; i < seq_len; i++) {
172 for (w = 0; w <= 5; w++)
173 writel_relaxed(seq[i].word[w], reg);
174
175 if (cc_dump_desc)
176 dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
177 i, seq[i].word[0], seq[i].word[1],
178 seq[i].word[2], seq[i].word[3],
179 seq[i].word[4], seq[i].word[5]);
180 }
181}
182
183/*!
184 * Completion will take place if and only if user requested completion
185 * by cc_send_sync_request().
186 *
187 * \param dev
188 * \param dx_compl_h The completion event to signal
189 */
190static void request_mgr_complete(struct device *dev, void *dx_compl_h,
191 int dummy)
192{
193 struct completion *this_compl = dx_compl_h;
194
195 complete(this_compl);
196}
197
198static int cc_queues_status(struct cc_drvdata *drvdata,
199 struct cc_req_mgr_handle *req_mgr_h,
200 unsigned int total_seq_len)
201{
202 unsigned long poll_queue;
203 struct device *dev = drvdata_to_dev(drvdata);
204
205 /* SW queue is checked only once as it will not
206 * be changed during the poll because the spinlock_bh
207 * is held by the thread
208 */
209 if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
210 req_mgr_h->req_queue_tail) {
211 dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
212 req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
213 return -ENOSPC;
214 }
215
216 if (req_mgr_h->q_free_slots >= total_seq_len)
217 return 0;
218
219 /* Wait for space in HW queue. Poll constant num of iterations. */
220 for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER ; poll_queue++) {
221 req_mgr_h->q_free_slots =
222 cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
223 if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
224 req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
225
226 if (req_mgr_h->q_free_slots >= total_seq_len) {
227 /* If there is enough room, return */
228 return 0;
229 }
230
231 dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
232 req_mgr_h->q_free_slots, total_seq_len);
233 }
234 /* No room in the HW queue; try again later */
235 dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
236 req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
237 req_mgr_h->q_free_slots, total_seq_len);
238 return -ENOSPC;
239}
240
241/*!
242 * Enqueue caller request to crypto hardware.
243 * Must be called with the HW lock held and PM running
244 *
245 * \param drvdata
246 * \param cc_req The request to enqueue
247 * \param desc The crypto sequence
248 * \param len The crypto sequence length
249 * \param add_comp If "true": add an artificial dout DMA to mark completion
250 *
251 * \return int Returns -EINPROGRESS or error code
252 */
253static int cc_do_send_request(struct cc_drvdata *drvdata,
254 struct cc_crypto_req *cc_req,
255 struct cc_hw_desc *desc, unsigned int len,
256 bool add_comp, bool ivgen)
257{
258 struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
259 unsigned int used_sw_slots;
260 unsigned int iv_seq_len = 0;
261 unsigned int total_seq_len = len; /*initial sequence length*/
262 struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
263 struct device *dev = drvdata_to_dev(drvdata);
264 int rc;
265
266 if (ivgen) {
267 dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
268 cc_req->ivgen_dma_addr_len,
269 &cc_req->ivgen_dma_addr[0],
270 &cc_req->ivgen_dma_addr[1],
271 &cc_req->ivgen_dma_addr[2],
272 cc_req->ivgen_size);
273
274 /* Acquire IV from pool */
275 rc = cc_get_iv(drvdata, cc_req->ivgen_dma_addr,
276 cc_req->ivgen_dma_addr_len,
277 cc_req->ivgen_size, iv_seq, &iv_seq_len);
278
279 if (rc) {
280 dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
281 return rc;
282 }
283
284 total_seq_len += iv_seq_len;
285 }
286
287 used_sw_slots = ((req_mgr_h->req_queue_head -
288 req_mgr_h->req_queue_tail) &
289 (MAX_REQUEST_QUEUE_SIZE - 1));
290 if (used_sw_slots > req_mgr_h->max_used_sw_slots)
291 req_mgr_h->max_used_sw_slots = used_sw_slots;
292
293	/* Enqueue request - must be called with the HW lock held */
294 req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
295 req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
296 (MAX_REQUEST_QUEUE_SIZE - 1);
297 /* TODO: Use circ_buf.h ? */
298
299 dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
300
301 /*
302	 * We are about to push a command to the HW via the command registers
303	 * that may reference host memory. We need to issue a memory barrier
304	 * to make sure there are no outstanding memory writes.
305 */
306 wmb();
307
308 /* STAT_PHASE_4: Push sequence */
309 if (ivgen)
310 enqueue_seq(drvdata, iv_seq, iv_seq_len);
311
312 enqueue_seq(drvdata, desc, len);
313
314 if (add_comp) {
315 enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
316 total_seq_len++;
317 }
318
319 if (req_mgr_h->q_free_slots < total_seq_len) {
320		/* This situation should never occur. It may indicate a problem
321		 * with resuming power. Set the free slot count to 0 and hope
322		 * for the best.
323 */
324 dev_err(dev, "HW free slot count mismatch.");
325 req_mgr_h->q_free_slots = 0;
326 } else {
327 /* Update the free slots in HW queue */
328 req_mgr_h->q_free_slots -= total_seq_len;
329 }
330
331 /* Operation still in process */
332 return -EINPROGRESS;
333}
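
/*
 * Editor's note: a minimal standalone sketch (not part of the driver) of the
 * index arithmetic used by the SW request queue above. MAX_REQUEST_QUEUE_SIZE
 * is a power of two, so "(x + 1) & (SIZE - 1)" wraps an index and
 * "(head - tail) & (SIZE - 1)" gives the number of used slots; one slot is
 * always left empty so that "full" can be told apart from "empty"
 * (head == tail). The DEMO_* names below are illustrative only.
 */
#define DEMO_QUEUE_SIZE 8	/* must be a power of two */

static bool demo_queue_full(unsigned int head, unsigned int tail)
{
	return ((head + 1) & (DEMO_QUEUE_SIZE - 1)) == tail;
}

static unsigned int demo_queue_used(unsigned int head, unsigned int tail)
{
	return (head - tail) & (DEMO_QUEUE_SIZE - 1);
}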
334
335static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
336 struct cc_bl_item *bli)
337{
338 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
339
340 spin_lock_bh(&mgr->bl_lock);
341 list_add_tail(&bli->list, &mgr->backlog);
342 ++mgr->bl_len;
343 spin_unlock_bh(&mgr->bl_lock);
344 tasklet_schedule(&mgr->comptask);
345}
346
347static void cc_proc_backlog(struct cc_drvdata *drvdata)
348{
349 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
350 struct cc_bl_item *bli;
351 struct cc_crypto_req *creq;
352 struct crypto_async_request *req;
353 bool ivgen;
354 unsigned int total_len;
355 struct device *dev = drvdata_to_dev(drvdata);
356 int rc;
357
358 spin_lock(&mgr->bl_lock);
359
360 while (mgr->bl_len) {
361 bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
362 spin_unlock(&mgr->bl_lock);
363
364 creq = &bli->creq;
365 req = (struct crypto_async_request *)creq->user_arg;
366
367 /*
368		 * Notify the request that it is moving out of the backlog,
369		 * but only if we haven't done so already.
370 */
371 if (!bli->notif) {
372 req->complete(req, -EINPROGRESS);
373 bli->notif = true;
374 }
375
376 ivgen = !!creq->ivgen_dma_addr_len;
377 total_len = bli->len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
378
379 spin_lock(&mgr->hw_lock);
380
381 rc = cc_queues_status(drvdata, mgr, total_len);
382 if (rc) {
383 /*
384			 * There is still no room in the FIFO for
385 * this request. Bail out. We'll return here
386 * on the next completion irq.
387 */
388 spin_unlock(&mgr->hw_lock);
389 return;
390 }
391
392 rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
393 bli->len, false, ivgen);
394
395 spin_unlock(&mgr->hw_lock);
396
397 if (rc != -EINPROGRESS) {
398 cc_pm_put_suspend(dev);
399 creq->user_cb(dev, req, rc);
400 }
401
402 /* Remove ourselves from the backlog list */
403 spin_lock(&mgr->bl_lock);
404 list_del(&bli->list);
405 --mgr->bl_len;
406 }
407
408 spin_unlock(&mgr->bl_lock);
409}
410
411int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
412 struct cc_hw_desc *desc, unsigned int len,
413 struct crypto_async_request *req)
414{
415 int rc;
416 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
417 bool ivgen = !!cc_req->ivgen_dma_addr_len;
418 unsigned int total_len = len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
419 struct device *dev = drvdata_to_dev(drvdata);
420 bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
421 gfp_t flags = cc_gfp_flags(req);
422 struct cc_bl_item *bli;
423
424 rc = cc_pm_get(dev);
425 if (rc) {
426 dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
427 return rc;
428 }
429
430 spin_lock_bh(&mgr->hw_lock);
431 rc = cc_queues_status(drvdata, mgr, total_len);
432
433#ifdef CC_DEBUG_FORCE_BACKLOG
434 if (backlog_ok)
435 rc = -ENOSPC;
436#endif /* CC_DEBUG_FORCE_BACKLOG */
437
438 if (rc == -ENOSPC && backlog_ok) {
439 spin_unlock_bh(&mgr->hw_lock);
440
441 bli = kmalloc(sizeof(*bli), flags);
442 if (!bli) {
443 cc_pm_put_suspend(dev);
444 return -ENOMEM;
445 }
446
447 memcpy(&bli->creq, cc_req, sizeof(*cc_req));
448 memcpy(&bli->desc, desc, len * sizeof(*desc));
449 bli->len = len;
450 bli->notif = false;
451 cc_enqueue_backlog(drvdata, bli);
452 return -EBUSY;
453 }
454
455 if (!rc)
456 rc = cc_do_send_request(drvdata, cc_req, desc, len, false,
457 ivgen);
458
459 spin_unlock_bh(&mgr->hw_lock);
460 return rc;
461}
462
463int cc_send_sync_request(struct cc_drvdata *drvdata,
464 struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
465 unsigned int len)
466{
467 int rc;
468 struct device *dev = drvdata_to_dev(drvdata);
469 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
470
471 init_completion(&cc_req->seq_compl);
472 cc_req->user_cb = request_mgr_complete;
473 cc_req->user_arg = &cc_req->seq_compl;
474
475 rc = cc_pm_get(dev);
476 if (rc) {
477 dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
478 return rc;
479 }
480
481 while (true) {
482 spin_lock_bh(&mgr->hw_lock);
483 rc = cc_queues_status(drvdata, mgr, len + 1);
484
485 if (!rc)
486 break;
487
488 spin_unlock_bh(&mgr->hw_lock);
489 if (rc != -EAGAIN) {
490 cc_pm_put_suspend(dev);
491 return rc;
492 }
493 wait_for_completion_interruptible(&drvdata->hw_queue_avail);
494 reinit_completion(&drvdata->hw_queue_avail);
495 }
496
497 rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false);
498 spin_unlock_bh(&mgr->hw_lock);
499
500 if (rc != -EINPROGRESS) {
501 cc_pm_put_suspend(dev);
502 return rc;
503 }
504
505 wait_for_completion(&cc_req->seq_compl);
506 return 0;
507}
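
/*
 * Editor's note: a hedged usage sketch of the synchronous path above; it is
 * not part of the driver. The caller only zero-initializes the cc_crypto_req
 * and supplies a descriptor sequence; cc_send_sync_request() wires up the
 * completion itself (see request_mgr_complete()) and returns only once the
 * HW has executed the sequence. build_demo_seq() is a hypothetical helper
 * assumed to fill in the descriptors.
 */
static void build_demo_seq(struct cc_hw_desc *desc, unsigned int *len);

static int demo_sync_send(struct cc_drvdata *drvdata)
{
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[2];
	unsigned int len = 0;

	build_demo_seq(desc, &len);	/* hypothetical: build the HW sequence */

	/* Blocks until the sequence completes on the HW; returns 0 on success. */
	return cc_send_sync_request(drvdata, &cc_req, desc, len);
}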
508
509/*!
510 * Enqueue the caller's request to the crypto hardware during the init
511 * process. Assume this function is not called in the middle of a flow,
512 * since we set the QUEUE_LAST_IND flag in the last descriptor.
513 *
514 * \param drvdata
515 * \param desc The crypto sequence
516 * \param len The crypto sequence length
517 *
518 * \return int Returns "0" upon success
519 */
520int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
521 unsigned int len)
522{
523 struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
524 unsigned int total_seq_len = len; /*initial sequence length*/
525 int rc = 0;
526
527	/* Wait for space in the HW and SW FIFOs. Poll for up to FIFO_TIMEOUT.
528 */
529 rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
530 if (rc)
531 return rc;
532
533 set_queue_last_ind(drvdata, &desc[(len - 1)]);
534
535 /*
536	 * We are about to push a command to the HW via the command registers
537	 * that may reference host memory. We need to issue a memory barrier
538	 * to make sure there are no outstanding memory writes.
539 */
540 wmb();
541 enqueue_seq(drvdata, desc, len);
542
543 /* Update the free slots in HW queue */
544 req_mgr_h->q_free_slots =
545 cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
546
547 return 0;
548}
549
550void complete_request(struct cc_drvdata *drvdata)
551{
552 struct cc_req_mgr_handle *request_mgr_handle =
553 drvdata->request_mgr_handle;
554
555 complete(&drvdata->hw_queue_avail);
556#ifdef COMP_IN_WQ
557 queue_delayed_work(request_mgr_handle->workq,
558 &request_mgr_handle->compwork, 0);
559#else
560 tasklet_schedule(&request_mgr_handle->comptask);
561#endif
562}
563
564#ifdef COMP_IN_WQ
565static void comp_work_handler(struct work_struct *work)
566{
567 struct cc_drvdata *drvdata =
568 container_of(work, struct cc_drvdata, compwork.work);
569
570 comp_handler((unsigned long)drvdata);
571}
572#endif
573
574static void proc_completions(struct cc_drvdata *drvdata)
575{
576 struct cc_crypto_req *cc_req;
577 struct device *dev = drvdata_to_dev(drvdata);
578 struct cc_req_mgr_handle *request_mgr_handle =
579 drvdata->request_mgr_handle;
580 unsigned int *tail = &request_mgr_handle->req_queue_tail;
581 unsigned int *head = &request_mgr_handle->req_queue_head;
582
583 while (request_mgr_handle->axi_completed) {
584 request_mgr_handle->axi_completed--;
585
586 /* Dequeue request */
587 if (*head == *tail) {
588 /* We are supposed to handle a completion but our
589 * queue is empty. This is not normal. Return and
590 * hope for the best.
591 */
592 dev_err(dev, "Request queue is empty head == tail %u\n",
593 *head);
594 break;
595 }
596
597 cc_req = &request_mgr_handle->req_queue[*tail];
598
599 if (cc_req->user_cb)
600 cc_req->user_cb(dev, cc_req->user_arg, 0);
601 *tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
602 dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
603 dev_dbg(dev, "Request completed. axi_completed=%d\n",
604 request_mgr_handle->axi_completed);
605 cc_pm_put_suspend(dev);
606 }
607}
608
609static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
610{
611 return FIELD_GET(AXIM_MON_COMP_VALUE,
612 cc_ioread(drvdata, drvdata->axim_mon_offset));
613}
614
615/* Deferred service handler, run as interrupt-fired tasklet */
616static void comp_handler(unsigned long devarg)
617{
618 struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
619 struct cc_req_mgr_handle *request_mgr_handle =
620 drvdata->request_mgr_handle;
621
622 u32 irq;
623
624 irq = (drvdata->irq & CC_COMP_IRQ_MASK);
625
626 if (irq & CC_COMP_IRQ_MASK) {
627		/* To avoid the interrupt firing again as we unmask it,
628		 * we clear it now.
629 */
630 cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);
631
632 /* Avoid race with above clear: Test completion counter
633 * once more
634 */
635 request_mgr_handle->axi_completed +=
636 cc_axi_comp_count(drvdata);
637
638 while (request_mgr_handle->axi_completed) {
639 do {
640 proc_completions(drvdata);
641 /* At this point (after proc_completions()),
642 * request_mgr_handle->axi_completed is 0.
643 */
644 request_mgr_handle->axi_completed =
645 cc_axi_comp_count(drvdata);
646 } while (request_mgr_handle->axi_completed > 0);
647
648 cc_iowrite(drvdata, CC_REG(HOST_ICR),
649 CC_COMP_IRQ_MASK);
650
651 request_mgr_handle->axi_completed +=
652 cc_axi_comp_count(drvdata);
653 }
654 }
655	/* After verifying that there is nothing to do,
656	 * unmask the AXI completion interrupt.
657 */
658 cc_iowrite(drvdata, CC_REG(HOST_IMR),
659 cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
660
661 cc_proc_backlog(drvdata);
662}
663
664/*
665 * resume the queue configuration - no need to take the lock as this happens
666 * inside the spin lock protection
667 */
668#if defined(CONFIG_PM)
669int cc_resume_req_queue(struct cc_drvdata *drvdata)
670{
671 struct cc_req_mgr_handle *request_mgr_handle =
672 drvdata->request_mgr_handle;
673
674 spin_lock_bh(&request_mgr_handle->hw_lock);
675 request_mgr_handle->is_runtime_suspended = false;
676 spin_unlock_bh(&request_mgr_handle->hw_lock);
677
678 return 0;
679}
680
681/*
682 * Suspend the queue configuration. Since it is used for runtime suspend
683 * only, just verify that the queue can be suspended.
684 */
685int cc_suspend_req_queue(struct cc_drvdata *drvdata)
686{
687 struct cc_req_mgr_handle *request_mgr_handle =
688 drvdata->request_mgr_handle;
689
690 /* lock the send_request */
691 spin_lock_bh(&request_mgr_handle->hw_lock);
692 if (request_mgr_handle->req_queue_head !=
693 request_mgr_handle->req_queue_tail) {
694 spin_unlock_bh(&request_mgr_handle->hw_lock);
695 return -EBUSY;
696 }
697 request_mgr_handle->is_runtime_suspended = true;
698 spin_unlock_bh(&request_mgr_handle->hw_lock);
699
700 return 0;
701}
702
703bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
704{
705 struct cc_req_mgr_handle *request_mgr_handle =
706 drvdata->request_mgr_handle;
707
708 return request_mgr_handle->is_runtime_suspended;
709}
710
711#endif
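
/*
 * Editor's note: a hedged sketch, not part of the driver, of how the PM
 * helpers above are expected to be used from runtime-PM callbacks: suspend
 * is refused with -EBUSY while requests are still queued, and resume simply
 * clears the suspended flag. The demo_* names are illustrative; the driver's
 * real runtime-PM code is not shown in this hunk.
 */
static int demo_runtime_suspend(struct cc_drvdata *drvdata)
{
	int rc = cc_suspend_req_queue(drvdata);

	if (rc)
		return rc;	/* -EBUSY: requests still in flight */

	/* ... power the engine down here ... */
	return 0;
}

static int demo_runtime_resume(struct cc_drvdata *drvdata)
{
	/* ... power the engine back up here ... */
	return cc_resume_req_queue(drvdata);
}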
diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h
new file mode 100644
index 000000000000..573cb97af085
--- /dev/null
+++ b/drivers/crypto/ccree/cc_request_mgr.h
@@ -0,0 +1,51 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4/* \file cc_request_mgr.h
5 * Request Manager
6 */
7
8#ifndef __REQUEST_MGR_H__
9#define __REQUEST_MGR_H__
10
11#include "cc_hw_queue_defs.h"
12
13int cc_req_mgr_init(struct cc_drvdata *drvdata);
14
15/*!
16 * Enqueue caller request to crypto hardware.
17 *
18 * \param drvdata
19 * \param cc_req The request to enqueue
20 * \param desc The crypto sequence
21 * \param len The crypto sequence length
22 * \param req The async request backing this call; its
23 *	       CRYPTO_TFM_REQ_MAY_BACKLOG flag decides whether the request
24 *	       may be parked on the backlog when the queues are full.
25 *
26 * \return int Returns -EINPROGRESS, -EBUSY (if backlogged) or an error code
27 */
28int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
29 struct cc_hw_desc *desc, unsigned int len,
30 struct crypto_async_request *req);
31
32int cc_send_sync_request(struct cc_drvdata *drvdata,
33 struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
34 unsigned int len);
35
36int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
37 unsigned int len);
38
39void complete_request(struct cc_drvdata *drvdata);
40
41void cc_req_mgr_fini(struct cc_drvdata *drvdata);
42
43#if defined(CONFIG_PM)
44int cc_resume_req_queue(struct cc_drvdata *drvdata);
45
46int cc_suspend_req_queue(struct cc_drvdata *drvdata);
47
48bool cc_req_queue_suspended(struct cc_drvdata *drvdata);
49#endif
50
51#endif /*__REQUEST_MGR_H__*/
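
/*
 * Editor's note: a hedged sketch, not part of the driver, of the asynchronous
 * path declared above. cc_send_request() returns -EINPROGRESS once the
 * sequence is in the HW queue and -EBUSY when it was parked on the backlog
 * (only possible if the caller set CRYPTO_TFM_REQ_MAY_BACKLOG); in either
 * case user_cb fires later from the completion tasklet. demo_complete() and
 * build_demo_seq() are illustrative names only.
 */
static void build_demo_seq(struct cc_hw_desc *desc, unsigned int *len);

static void demo_complete(struct device *dev, void *req, int err)
{
	/* req is the crypto_async_request handed to cc_send_request() */
	dev_dbg(dev, "demo request finished: err=%d\n", err);
}

static int demo_async_send(struct cc_drvdata *drvdata,
			   struct crypto_async_request *base)
{
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[2];
	unsigned int len = 0;

	build_demo_seq(desc, &len);	/* hypothetical: build the HW sequence */

	cc_req.user_cb = demo_complete;
	cc_req.user_arg = base;

	/* Expect -EINPROGRESS (queued) or -EBUSY (backlogged) on success. */
	return cc_send_request(drvdata, &cc_req, desc, len, base);
}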
diff --git a/drivers/crypto/ccree/cc_sram_mgr.c b/drivers/crypto/ccree/cc_sram_mgr.c
new file mode 100644
index 000000000000..c8c276f6dee9
--- /dev/null
+++ b/drivers/crypto/ccree/cc_sram_mgr.c
@@ -0,0 +1,120 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#include "cc_driver.h"
5#include "cc_sram_mgr.h"
6
7/**
8 * struct cc_sram_ctx - Internal RAM context manager
9 * @sram_free_offset: the offset to the non-allocated area
10 */
11struct cc_sram_ctx {
12 cc_sram_addr_t sram_free_offset;
13};
14
15/**
16 * cc_sram_mgr_fini() - Cleanup SRAM pool.
17 *
18 * @drvdata: Associated device driver context
19 */
20void cc_sram_mgr_fini(struct cc_drvdata *drvdata)
21{
22 /* Free "this" context */
23 kfree(drvdata->sram_mgr_handle);
24}
25
26/**
27 * cc_sram_mgr_init() - Initializes SRAM pool.
28 * The pool starts right at the beginning of SRAM.
29 * Returns zero for success, negative value otherwise.
30 *
31 * @drvdata: Associated device driver context
32 */
33int cc_sram_mgr_init(struct cc_drvdata *drvdata)
34{
35 struct cc_sram_ctx *ctx;
36 dma_addr_t start = 0;
37 struct device *dev = drvdata_to_dev(drvdata);
38
39 if (drvdata->hw_rev < CC_HW_REV_712) {
40 /* Pool starts after ROM bytes */
41 start = (dma_addr_t)cc_ioread(drvdata,
42 CC_REG(HOST_SEP_SRAM_THRESHOLD));
43
44 if ((start & 0x3) != 0) {
45 dev_err(dev, "Invalid SRAM offset %pad\n", &start);
46 return -EINVAL;
47 }
48 }
49
50 /* Allocate "this" context */
51 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
52
53 if (!ctx)
54 return -ENOMEM;
55
56 ctx->sram_free_offset = start;
57 drvdata->sram_mgr_handle = ctx;
58
59 return 0;
60}
61
62/*!
63 * Allocate a buffer from the SRAM pool.
64 * Note: The caller is responsible for freeing the LAST allocated buffer.
65 * This function does not take care of any fragmentation that may occur
66 * due to the order of calls to alloc/free.
67 *
68 * \param drvdata
69 * \param size The requested bytes to allocate
70 */
71cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size)
72{
73 struct cc_sram_ctx *smgr_ctx = drvdata->sram_mgr_handle;
74 struct device *dev = drvdata_to_dev(drvdata);
75 cc_sram_addr_t p;
76
77 if ((size & 0x3)) {
78		dev_err(dev, "Requested buffer size (%u) is not a multiple of 4",
79 size);
80 return NULL_SRAM_ADDR;
81 }
82 if (size > (CC_CC_SRAM_SIZE - smgr_ctx->sram_free_offset)) {
83 dev_err(dev, "Not enough space to allocate %u B (at offset %llu)\n",
84 size, smgr_ctx->sram_free_offset);
85 return NULL_SRAM_ADDR;
86 }
87
88 p = smgr_ctx->sram_free_offset;
89 smgr_ctx->sram_free_offset += size;
90 dev_dbg(dev, "Allocated %u B @ %u\n", size, (unsigned int)p);
91 return p;
92}
93
94/**
95 * cc_set_sram_desc() - Create a const-descriptor sequence that sets the
96 *	values in the given array into SRAM.
97 * Note: each const value can't exceed word size.
98 *
99 * @src: A pointer to the array of words to set as consts.
100 * @dst: The target SRAM buffer to set into
101 * @nelement: The number of words in the "src" array
102 * @seq: A pointer to the given IN/OUT descriptor sequence
103 * @seq_len: A pointer to the given IN/OUT sequence length
104 */
105void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
106 unsigned int nelement, struct cc_hw_desc *seq,
107 unsigned int *seq_len)
108{
109 u32 i;
110 unsigned int idx = *seq_len;
111
112 for (i = 0; i < nelement; i++, idx++) {
113 hw_desc_init(&seq[idx]);
114 set_din_const(&seq[idx], src[i], sizeof(u32));
115 set_dout_sram(&seq[idx], dst + (i * sizeof(u32)), sizeof(u32));
116 set_flow_mode(&seq[idx], BYPASS);
117 }
118
119 *seq_len = idx;
120}
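
/*
 * Editor's note: a hedged sketch, not part of the driver, that ties the two
 * helpers above together: reserve a region with cc_sram_alloc(), build
 * BYPASS descriptors that write constants into it with cc_set_sram_desc(),
 * and push the sequence with send_request_init() from cc_request_mgr.h.
 * The table contents and the four-word size are illustrative assumptions.
 */
static int demo_init_sram_table(struct cc_drvdata *drvdata)
{
	static const u32 table[4] = { 0x0, 0x1, 0x2, 0x3 };
	struct cc_hw_desc seq[ARRAY_SIZE(table)];
	unsigned int seq_len = 0;
	cc_sram_addr_t addr;

	addr = cc_sram_alloc(drvdata, sizeof(table));
	if (addr == NULL_SRAM_ADDR)
		return -ENOMEM;

	/* One BYPASS descriptor per word is appended to seq[]. */
	cc_set_sram_desc(table, addr, ARRAY_SIZE(table), seq, &seq_len);

	return send_request_init(drvdata, seq, seq_len);
}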
diff --git a/drivers/crypto/ccree/cc_sram_mgr.h b/drivers/crypto/ccree/cc_sram_mgr.h
new file mode 100644
index 000000000000..d48649fb3323
--- /dev/null
+++ b/drivers/crypto/ccree/cc_sram_mgr.h
@@ -0,0 +1,65 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
3
4#ifndef __CC_SRAM_MGR_H__
5#define __CC_SRAM_MGR_H__
6
7#ifndef CC_CC_SRAM_SIZE
8#define CC_CC_SRAM_SIZE 4096
9#endif
10
11struct cc_drvdata;
12
13/**
14 * Address (offset) within CC internal SRAM
15 */
16
17typedef u64 cc_sram_addr_t;
18
19#define NULL_SRAM_ADDR ((cc_sram_addr_t)-1)
20
21/*!
22 * Initializes SRAM pool.
23 * The first X bytes of SRAM are reserved for ROM usage; hence, the pool
24 * starts right after those X bytes.
25 *
26 * \param drvdata
27 *
28 * \return int Zero for success, negative value otherwise.
29 */
30int cc_sram_mgr_init(struct cc_drvdata *drvdata);
31
32/*!
33 * Uninitializes the SRAM pool.
34 *
35 * \param drvdata
36 */
37void cc_sram_mgr_fini(struct cc_drvdata *drvdata);
38
39/*!
40 * Allocate a buffer from the SRAM pool.
41 * Note: The caller is responsible for freeing the LAST allocated buffer.
42 * This function does not take care of any fragmentation that may occur
43 * due to the order of calls to alloc/free.
44 *
45 * \param drvdata
46 * \param size The requested bytes to allocate
47 */
48cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size);
49
50/**
51 * cc_set_sram_desc() - Create a const-descriptor sequence that sets the
52 *	values in the given array into SRAM.
53 * Note: each const value can't exceed word size.
54 *
55 * @src: A pointer to the array of words to set as consts.
56 * @dst: The target SRAM buffer to set into
57 * @nelement: The number of words in the "src" array
58 * @seq: A pointer to the given IN/OUT descriptor sequence
59 * @seq_len: A pointer to the given IN/OUT sequence length
60 */
61void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
62 unsigned int nelement, struct cc_hw_desc *seq,
63 unsigned int *seq_len);
64
65#endif /*__CC_SRAM_MGR_H__*/
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 34a02d690548..59fe6631e73e 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -131,6 +131,11 @@ static inline int is_ofld_imm(const struct sk_buff *skb)
131 return (skb->len <= SGE_MAX_WR_LEN); 131 return (skb->len <= SGE_MAX_WR_LEN);
132} 132}
133 133
134static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
135{
136 memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
137}
138
134static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen, 139static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
135 unsigned int entlen, 140 unsigned int entlen,
136 unsigned int skip) 141 unsigned int skip)
@@ -160,41 +165,6 @@ static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
160 return nents; 165 return nents;
161} 166}
162 167
163static inline void chcr_handle_ahash_resp(struct ahash_request *req,
164 unsigned char *input,
165 int err)
166{
167 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
168 int digestsize, updated_digestsize;
169 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
170 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
171
172 if (input == NULL)
173 goto out;
174 digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
175 if (reqctx->is_sg_map)
176 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
177 if (reqctx->dma_addr)
178 dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->dma_addr,
179 reqctx->dma_len, DMA_TO_DEVICE);
180 reqctx->dma_addr = 0;
181 updated_digestsize = digestsize;
182 if (digestsize == SHA224_DIGEST_SIZE)
183 updated_digestsize = SHA256_DIGEST_SIZE;
184 else if (digestsize == SHA384_DIGEST_SIZE)
185 updated_digestsize = SHA512_DIGEST_SIZE;
186 if (reqctx->result == 1) {
187 reqctx->result = 0;
188 memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
189 digestsize);
190 } else {
191 memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
192 updated_digestsize);
193 }
194out:
195 req->base.complete(&req->base, err);
196}
197
198static inline int get_aead_subtype(struct crypto_aead *aead) 168static inline int get_aead_subtype(struct crypto_aead *aead)
199{ 169{
200 struct aead_alg *alg = crypto_aead_alg(aead); 170 struct aead_alg *alg = crypto_aead_alg(aead);
@@ -247,34 +217,6 @@ static inline void chcr_handle_aead_resp(struct aead_request *req,
247 req->base.complete(&req->base, err); 217 req->base.complete(&req->base, err);
248} 218}
249 219
250/*
251 * chcr_handle_resp - Unmap the DMA buffers associated with the request
252 * @req: crypto request
253 */
254int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
255 int err)
256{
257 struct crypto_tfm *tfm = req->tfm;
258 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
259 struct adapter *adap = padap(ctx->dev);
260
261 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
262 case CRYPTO_ALG_TYPE_AEAD:
263 chcr_handle_aead_resp(aead_request_cast(req), input, err);
264 break;
265
266 case CRYPTO_ALG_TYPE_ABLKCIPHER:
267 err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
268 input, err);
269 break;
270
271 case CRYPTO_ALG_TYPE_AHASH:
272 chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
273 }
274 atomic_inc(&adap->chcr_stats.complete);
275 return err;
276}
277
278static void get_aes_decrypt_key(unsigned char *dec_key, 220static void get_aes_decrypt_key(unsigned char *dec_key,
279 const unsigned char *key, 221 const unsigned char *key,
280 unsigned int keylength) 222 unsigned int keylength)
@@ -563,7 +505,6 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
563 505
564 if (!len) 506 if (!len)
565 return; 507 return;
566
567 while (sg && skip) { 508 while (sg && skip) {
568 if (sg_dma_len(sg) <= skip) { 509 if (sg_dma_len(sg) <= skip) {
569 skip -= sg_dma_len(sg); 510 skip -= sg_dma_len(sg);
@@ -653,6 +594,35 @@ static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
653 } 594 }
654 return 0; 595 return 0;
655} 596}
597
598static int chcr_hash_ent_in_wr(struct scatterlist *src,
599 unsigned int minsg,
600 unsigned int space,
601 unsigned int srcskip)
602{
603 int srclen = 0;
604 int srcsg = minsg;
605 int soffset = 0, sless;
606
607 if (sg_dma_len(src) == srcskip) {
608 src = sg_next(src);
609 srcskip = 0;
610 }
611 while (src && space > (sgl_ent_len[srcsg + 1])) {
612 sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
613 CHCR_SRC_SG_SIZE);
614 srclen += sless;
615 soffset += sless;
616 srcsg++;
617 if (sg_dma_len(src) == (soffset + srcskip)) {
618 src = sg_next(src);
619 soffset = 0;
620 srcskip = 0;
621 }
622 }
623 return srclen;
624}
625
656static int chcr_sg_ent_in_wr(struct scatterlist *src, 626static int chcr_sg_ent_in_wr(struct scatterlist *src,
657 struct scatterlist *dst, 627 struct scatterlist *dst,
658 unsigned int minsg, 628 unsigned int minsg,
@@ -662,7 +632,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
662{ 632{
663 int srclen = 0, dstlen = 0; 633 int srclen = 0, dstlen = 0;
664 int srcsg = minsg, dstsg = minsg; 634 int srcsg = minsg, dstsg = minsg;
665 int offset = 0, less; 635 int offset = 0, soffset = 0, less, sless = 0;
666 636
667 if (sg_dma_len(src) == srcskip) { 637 if (sg_dma_len(src) == srcskip) {
668 src = sg_next(src); 638 src = sg_next(src);
@@ -676,7 +646,9 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
676 646
677 while (src && dst && 647 while (src && dst &&
678 space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) { 648 space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
679 srclen += (sg_dma_len(src) - srcskip); 649 sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
650 CHCR_SRC_SG_SIZE);
651 srclen += sless;
680 srcsg++; 652 srcsg++;
681 offset = 0; 653 offset = 0;
682 while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) && 654 while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
@@ -687,15 +659,20 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
687 dstskip, CHCR_DST_SG_SIZE); 659 dstskip, CHCR_DST_SG_SIZE);
688 dstlen += less; 660 dstlen += less;
689 offset += less; 661 offset += less;
690 if (offset == sg_dma_len(dst)) { 662 if ((offset + dstskip) == sg_dma_len(dst)) {
691 dst = sg_next(dst); 663 dst = sg_next(dst);
692 offset = 0; 664 offset = 0;
693 } 665 }
694 dstsg++; 666 dstsg++;
695 dstskip = 0; 667 dstskip = 0;
696 } 668 }
697 src = sg_next(src); 669 soffset += sless;
698 srcskip = 0; 670 if ((soffset + srcskip) == sg_dma_len(src)) {
671 src = sg_next(src);
672 srcskip = 0;
673 soffset = 0;
674 }
675
699 } 676 }
700 return min(srclen, dstlen); 677 return min(srclen, dstlen);
701} 678}
@@ -784,14 +761,14 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
784 nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE, 761 nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
785 reqctx->dst_ofst); 762 reqctx->dst_ofst);
786 dst_size = get_space_for_phys_dsgl(nents + 1); 763 dst_size = get_space_for_phys_dsgl(nents + 1);
787 kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16); 764 kctx_len = roundup(ablkctx->enckey_len, 16);
788 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); 765 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
789 nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes, 766 nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
790 CHCR_SRC_SG_SIZE, reqctx->src_ofst); 767 CHCR_SRC_SG_SIZE, reqctx->src_ofst);
791 temp = reqctx->imm ? (DIV_ROUND_UP((IV + wrparam->req->nbytes), 16) 768 temp = reqctx->imm ? roundup(IV + wrparam->req->nbytes, 16) :
792 * 16) : (sgl_len(nents + MIN_CIPHER_SG) * 8); 769 (sgl_len(nents + MIN_CIPHER_SG) * 8);
793 transhdr_len += temp; 770 transhdr_len += temp;
794 transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16; 771 transhdr_len = roundup(transhdr_len, 16);
795 skb = alloc_skb(SGE_MAX_WR_LEN, flags); 772 skb = alloc_skb(SGE_MAX_WR_LEN, flags);
796 if (!skb) { 773 if (!skb) {
797 error = -ENOMEM; 774 error = -ENOMEM;
@@ -847,6 +824,13 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
847 transhdr_len, temp, 824 transhdr_len, temp,
848 ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC); 825 ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
849 reqctx->skb = skb; 826 reqctx->skb = skb;
827
828 if (reqctx->op && (ablkctx->ciph_mode ==
829 CHCR_SCMD_CIPHER_MODE_AES_CBC))
830 sg_pcopy_to_buffer(wrparam->req->src,
831 sg_nents(wrparam->req->src), wrparam->req->info, 16,
832 reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
833
850 return skb; 834 return skb;
851err: 835err:
852 return ERR_PTR(error); 836 return ERR_PTR(error);
@@ -1070,9 +1054,8 @@ static int chcr_update_cipher_iv(struct ablkcipher_request *req,
1070 ret = chcr_update_tweak(req, iv, 0); 1054 ret = chcr_update_tweak(req, iv, 0);
1071 else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { 1055 else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1072 if (reqctx->op) 1056 if (reqctx->op)
1073 sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv, 1057 /*Updated before sending last WR*/
1074 16, 1058 memcpy(iv, req->info, AES_BLOCK_SIZE);
1075 reqctx->processed - AES_BLOCK_SIZE);
1076 else 1059 else
1077 memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE); 1060 memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1078 } 1061 }
@@ -1100,11 +1083,8 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
1100 else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) 1083 else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1101 ret = chcr_update_tweak(req, iv, 1); 1084 ret = chcr_update_tweak(req, iv, 1);
1102 else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { 1085 else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1103 if (reqctx->op) 1086 /*Already updated for Decrypt*/
1104 sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv, 1087 if (!reqctx->op)
1105 16,
1106 reqctx->processed - AES_BLOCK_SIZE);
1107 else
1108 memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE); 1088 memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1109 1089
1110 } 1090 }
@@ -1143,12 +1123,12 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
1143 } 1123 }
1144 if (!reqctx->imm) { 1124 if (!reqctx->imm) {
1145 bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1, 1125 bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
1146 SPACE_LEFT(ablkctx->enckey_len), 1126 CIP_SPACE_LEFT(ablkctx->enckey_len),
1147 reqctx->src_ofst, reqctx->dst_ofst); 1127 reqctx->src_ofst, reqctx->dst_ofst);
1148 if ((bytes + reqctx->processed) >= req->nbytes) 1128 if ((bytes + reqctx->processed) >= req->nbytes)
1149 bytes = req->nbytes - reqctx->processed; 1129 bytes = req->nbytes - reqctx->processed;
1150 else 1130 else
1151 bytes = ROUND_16(bytes); 1131 bytes = rounddown(bytes, 16);
1152 } else { 1132 } else {
1153		/*CTR mode counter overflow*/ 1133		/*CTR mode counter overflow*/
1154 bytes = req->nbytes - reqctx->processed; 1134 bytes = req->nbytes - reqctx->processed;
@@ -1234,7 +1214,7 @@ static int process_cipher(struct ablkcipher_request *req,
1234 CHCR_DST_SG_SIZE, 0); 1214 CHCR_DST_SG_SIZE, 0);
1235 dnents += 1; // IV 1215 dnents += 1; // IV
1236 phys_dsgl = get_space_for_phys_dsgl(dnents); 1216 phys_dsgl = get_space_for_phys_dsgl(dnents);
1237 kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16); 1217 kctx_len = roundup(ablkctx->enckey_len, 16);
1238 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); 1218 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1239 reqctx->imm = (transhdr_len + IV + req->nbytes) <= 1219 reqctx->imm = (transhdr_len + IV + req->nbytes) <=
1240 SGE_MAX_WR_LEN; 1220 SGE_MAX_WR_LEN;
@@ -1247,12 +1227,12 @@ static int process_cipher(struct ablkcipher_request *req,
1247 if (!reqctx->imm) { 1227 if (!reqctx->imm) {
1248 bytes = chcr_sg_ent_in_wr(req->src, req->dst, 1228 bytes = chcr_sg_ent_in_wr(req->src, req->dst,
1249 MIN_CIPHER_SG, 1229 MIN_CIPHER_SG,
1250 SPACE_LEFT(ablkctx->enckey_len), 1230 CIP_SPACE_LEFT(ablkctx->enckey_len),
1251 0, 0); 1231 0, 0);
1252 if ((bytes + reqctx->processed) >= req->nbytes) 1232 if ((bytes + reqctx->processed) >= req->nbytes)
1253 bytes = req->nbytes - reqctx->processed; 1233 bytes = req->nbytes - reqctx->processed;
1254 else 1234 else
1255 bytes = ROUND_16(bytes); 1235 bytes = rounddown(bytes, 16);
1256 } else { 1236 } else {
1257 bytes = req->nbytes; 1237 bytes = req->nbytes;
1258 } 1238 }
@@ -1282,7 +1262,7 @@ static int process_cipher(struct ablkcipher_request *req,
1282 req->src, 1262 req->src,
1283 req->dst, 1263 req->dst,
1284 req->nbytes, 1264 req->nbytes,
1285 req->info, 1265 reqctx->iv,
1286 op_type); 1266 op_type);
1287 goto error; 1267 goto error;
1288 } 1268 }
@@ -1503,35 +1483,24 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
1503 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm)); 1483 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1504 struct chcr_wr *chcr_req; 1484 struct chcr_wr *chcr_req;
1505 struct ulptx_sgl *ulptx; 1485 struct ulptx_sgl *ulptx;
1506 unsigned int nents = 0, transhdr_len, iopad_alignment = 0; 1486 unsigned int nents = 0, transhdr_len;
1507 unsigned int digestsize = crypto_ahash_digestsize(tfm); 1487 unsigned int temp = 0;
1508 unsigned int kctx_len = 0, temp = 0;
1509 u8 hash_size_in_response = 0;
1510 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : 1488 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1511 GFP_ATOMIC; 1489 GFP_ATOMIC;
1512 struct adapter *adap = padap(h_ctx(tfm)->dev); 1490 struct adapter *adap = padap(h_ctx(tfm)->dev);
1513 int error = 0; 1491 int error = 0;
1514 1492
1515 iopad_alignment = KEYCTX_ALIGN_PAD(digestsize); 1493 transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1516 kctx_len = param->alg_prm.result_size + iopad_alignment; 1494 req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1517 if (param->opad_needed) 1495 param->sg_len) <= SGE_MAX_WR_LEN;
1518 kctx_len += param->alg_prm.result_size + iopad_alignment; 1496 nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1519 1497 CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1520 if (req_ctx->result)
1521 hash_size_in_response = digestsize;
1522 else
1523 hash_size_in_response = param->alg_prm.result_size;
1524 transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
1525 req_ctx->imm = (transhdr_len + param->bfr_len + param->sg_len) <=
1526 SGE_MAX_WR_LEN;
1527 nents = sg_nents_xlen(req->src, param->sg_len, CHCR_SRC_SG_SIZE, 0);
1528 nents += param->bfr_len ? 1 : 0; 1498 nents += param->bfr_len ? 1 : 0;
1529 transhdr_len += req_ctx->imm ? (DIV_ROUND_UP((param->bfr_len + 1499 transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1530 param->sg_len), 16) * 16) : 1500 param->sg_len, 16) : (sgl_len(nents) * 8);
1531 (sgl_len(nents) * 8); 1501 transhdr_len = roundup(transhdr_len, 16);
1532 transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
1533 1502
1534 skb = alloc_skb(SGE_MAX_WR_LEN, flags); 1503 skb = alloc_skb(transhdr_len, flags);
1535 if (!skb) 1504 if (!skb)
1536 return ERR_PTR(-ENOMEM); 1505 return ERR_PTR(-ENOMEM);
1537 chcr_req = __skb_put_zero(skb, transhdr_len); 1506 chcr_req = __skb_put_zero(skb, transhdr_len);
@@ -1563,33 +1532,33 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
1563 chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY, 1532 chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1564 param->alg_prm.mk_size, 0, 1533 param->alg_prm.mk_size, 0,
1565 param->opad_needed, 1534 param->opad_needed,
1566 ((kctx_len + 1535 ((param->kctx_len +
1567 sizeof(chcr_req->key_ctx)) >> 4)); 1536 sizeof(chcr_req->key_ctx)) >> 4));
1568 chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1); 1537 chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1569 ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + kctx_len + 1538 ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1570 DUMMY_BYTES); 1539 DUMMY_BYTES);
1571 if (param->bfr_len != 0) { 1540 if (param->bfr_len != 0) {
1572 req_ctx->dma_addr = dma_map_single(&u_ctx->lldi.pdev->dev, 1541 req_ctx->hctx_wr.dma_addr =
1573 req_ctx->reqbfr, param->bfr_len, 1542 dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1574 DMA_TO_DEVICE); 1543 param->bfr_len, DMA_TO_DEVICE);
1575 if (dma_mapping_error(&u_ctx->lldi.pdev->dev, 1544 if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
1576 req_ctx->dma_addr)) { 1545 req_ctx->hctx_wr. dma_addr)) {
1577 error = -ENOMEM; 1546 error = -ENOMEM;
1578 goto err; 1547 goto err;
1579 } 1548 }
1580 req_ctx->dma_len = param->bfr_len; 1549 req_ctx->hctx_wr.dma_len = param->bfr_len;
1581 } else { 1550 } else {
1582 req_ctx->dma_addr = 0; 1551 req_ctx->hctx_wr.dma_addr = 0;
1583 } 1552 }
1584 chcr_add_hash_src_ent(req, ulptx, param); 1553 chcr_add_hash_src_ent(req, ulptx, param);
1585	/* Request up to max wr size */ 1554	/* Request up to max wr size */
1586 temp = kctx_len + DUMMY_BYTES + (req_ctx->imm ? (param->sg_len 1555 temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1587 + param->bfr_len) : 0); 1556 (param->sg_len + param->bfr_len) : 0);
1588 atomic_inc(&adap->chcr_stats.digest_rqst); 1557 atomic_inc(&adap->chcr_stats.digest_rqst);
1589 create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->imm, 1558 create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1590 hash_size_in_response, transhdr_len, 1559 param->hash_size, transhdr_len,
1591 temp, 0); 1560 temp, 0);
1592 req_ctx->skb = skb; 1561 req_ctx->hctx_wr.skb = skb;
1593 return skb; 1562 return skb;
1594err: 1563err:
1595 kfree_skb(skb); 1564 kfree_skb(skb);
@@ -1608,7 +1577,6 @@ static int chcr_ahash_update(struct ahash_request *req)
1608 int error; 1577 int error;
1609 1578
1610 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); 1579 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1611
1612 u_ctx = ULD_CTX(h_ctx(rtfm)); 1580 u_ctx = ULD_CTX(h_ctx(rtfm));
1613 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 1581 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1614 h_ctx(rtfm)->tx_qidx))) { 1582 h_ctx(rtfm)->tx_qidx))) {
@@ -1625,17 +1593,26 @@ static int chcr_ahash_update(struct ahash_request *req)
1625 req_ctx->reqlen += nbytes; 1593 req_ctx->reqlen += nbytes;
1626 return 0; 1594 return 0;
1627 } 1595 }
1596 chcr_init_hctx_per_wr(req_ctx);
1628 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); 1597 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1629 if (error) 1598 if (error)
1630 return -ENOMEM; 1599 return -ENOMEM;
1600 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1601 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1602 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1603 HASH_SPACE_LEFT(params.kctx_len), 0);
1604 if (params.sg_len > req->nbytes)
1605 params.sg_len = req->nbytes;
1606 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1607 req_ctx->reqlen;
1631 params.opad_needed = 0; 1608 params.opad_needed = 0;
1632 params.more = 1; 1609 params.more = 1;
1633 params.last = 0; 1610 params.last = 0;
1634 params.sg_len = nbytes - req_ctx->reqlen;
1635 params.bfr_len = req_ctx->reqlen; 1611 params.bfr_len = req_ctx->reqlen;
1636 params.scmd1 = 0; 1612 params.scmd1 = 0;
1637 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm)); 1613 req_ctx->hctx_wr.srcsg = req->src;
1638 req_ctx->result = 0; 1614
1615 params.hash_size = params.alg_prm.result_size;
1639 req_ctx->data_len += params.sg_len + params.bfr_len; 1616 req_ctx->data_len += params.sg_len + params.bfr_len;
1640 skb = create_hash_wr(req, &params); 1617 skb = create_hash_wr(req, &params);
1641 if (IS_ERR(skb)) { 1618 if (IS_ERR(skb)) {
@@ -1643,6 +1620,7 @@ static int chcr_ahash_update(struct ahash_request *req)
1643 goto unmap; 1620 goto unmap;
1644 } 1621 }
1645 1622
1623 req_ctx->hctx_wr.processed += params.sg_len;
1646 if (remainder) { 1624 if (remainder) {
1647 /* Swap buffers */ 1625 /* Swap buffers */
1648 swap(req_ctx->reqbfr, req_ctx->skbfr); 1626 swap(req_ctx->reqbfr, req_ctx->skbfr);
@@ -1680,16 +1658,27 @@ static int chcr_ahash_final(struct ahash_request *req)
1680 struct uld_ctx *u_ctx = NULL; 1658 struct uld_ctx *u_ctx = NULL;
1681 u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); 1659 u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1682 1660
1661 chcr_init_hctx_per_wr(req_ctx);
1683 u_ctx = ULD_CTX(h_ctx(rtfm)); 1662 u_ctx = ULD_CTX(h_ctx(rtfm));
1684 if (is_hmac(crypto_ahash_tfm(rtfm))) 1663 if (is_hmac(crypto_ahash_tfm(rtfm)))
1685 params.opad_needed = 1; 1664 params.opad_needed = 1;
1686 else 1665 else
1687 params.opad_needed = 0; 1666 params.opad_needed = 0;
1688 params.sg_len = 0; 1667 params.sg_len = 0;
1668 req_ctx->hctx_wr.isfinal = 1;
1689 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm)); 1669 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1690 req_ctx->result = 1; 1670 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1671 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1672 params.opad_needed = 1;
1673 params.kctx_len *= 2;
1674 } else {
1675 params.opad_needed = 0;
1676 }
1677
1678 req_ctx->hctx_wr.result = 1;
1691 params.bfr_len = req_ctx->reqlen; 1679 params.bfr_len = req_ctx->reqlen;
1692 req_ctx->data_len += params.bfr_len + params.sg_len; 1680 req_ctx->data_len += params.bfr_len + params.sg_len;
1681 req_ctx->hctx_wr.srcsg = req->src;
1693 if (req_ctx->reqlen == 0) { 1682 if (req_ctx->reqlen == 0) {
1694 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len); 1683 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1695 params.last = 0; 1684 params.last = 0;
@@ -1702,10 +1691,11 @@ static int chcr_ahash_final(struct ahash_request *req)
1702 params.last = 1; 1691 params.last = 1;
1703 params.more = 0; 1692 params.more = 0;
1704 } 1693 }
1694 params.hash_size = crypto_ahash_digestsize(rtfm);
1705 skb = create_hash_wr(req, &params); 1695 skb = create_hash_wr(req, &params);
1706 if (IS_ERR(skb)) 1696 if (IS_ERR(skb))
1707 return PTR_ERR(skb); 1697 return PTR_ERR(skb);
1708 1698 req_ctx->reqlen = 0;
1709 skb->dev = u_ctx->lldi.ports[0]; 1699 skb->dev = u_ctx->lldi.ports[0];
1710 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); 1700 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1711 chcr_send_wr(skb); 1701 chcr_send_wr(skb);
@@ -1730,37 +1720,59 @@ static int chcr_ahash_finup(struct ahash_request *req)
1730 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 1720 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1731 return -EBUSY; 1721 return -EBUSY;
1732 } 1722 }
1723 chcr_init_hctx_per_wr(req_ctx);
1724 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1725 if (error)
1726 return -ENOMEM;
1733 1727
1734 if (is_hmac(crypto_ahash_tfm(rtfm))) 1728 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1729 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1730 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1731 params.kctx_len *= 2;
1735 params.opad_needed = 1; 1732 params.opad_needed = 1;
1736 else 1733 } else {
1737 params.opad_needed = 0; 1734 params.opad_needed = 0;
1735 }
1738 1736
1739 params.sg_len = req->nbytes; 1737 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1738 HASH_SPACE_LEFT(params.kctx_len), 0);
1739 if (params.sg_len < req->nbytes) {
1740 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1741 params.kctx_len /= 2;
1742 params.opad_needed = 0;
1743 }
1744 params.last = 0;
1745 params.more = 1;
1746 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1747 - req_ctx->reqlen;
1748 params.hash_size = params.alg_prm.result_size;
1749 params.scmd1 = 0;
1750 } else {
1751 params.last = 1;
1752 params.more = 0;
1753 params.sg_len = req->nbytes;
1754 params.hash_size = crypto_ahash_digestsize(rtfm);
1755 params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1756 params.sg_len;
1757 }
1740 params.bfr_len = req_ctx->reqlen; 1758 params.bfr_len = req_ctx->reqlen;
1741 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1742 req_ctx->data_len += params.bfr_len + params.sg_len; 1759 req_ctx->data_len += params.bfr_len + params.sg_len;
1743 req_ctx->result = 1; 1760 req_ctx->hctx_wr.result = 1;
1761 req_ctx->hctx_wr.srcsg = req->src;
1744 if ((req_ctx->reqlen + req->nbytes) == 0) { 1762 if ((req_ctx->reqlen + req->nbytes) == 0) {
1745 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len); 1763 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1746 params.last = 0; 1764 params.last = 0;
1747 params.more = 1; 1765 params.more = 1;
1748 params.scmd1 = 0; 1766 params.scmd1 = 0;
1749 params.bfr_len = bs; 1767 params.bfr_len = bs;
1750 } else {
1751 params.scmd1 = req_ctx->data_len;
1752 params.last = 1;
1753 params.more = 0;
1754 } 1768 }
1755 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1756 if (error)
1757 return -ENOMEM;
1758
1759 skb = create_hash_wr(req, &params); 1769 skb = create_hash_wr(req, &params);
1760 if (IS_ERR(skb)) { 1770 if (IS_ERR(skb)) {
1761 error = PTR_ERR(skb); 1771 error = PTR_ERR(skb);
1762 goto unmap; 1772 goto unmap;
1763 } 1773 }
1774 req_ctx->reqlen = 0;
1775 req_ctx->hctx_wr.processed += params.sg_len;
1764 skb->dev = u_ctx->lldi.ports[0]; 1776 skb->dev = u_ctx->lldi.ports[0];
1765 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); 1777 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1766 chcr_send_wr(skb); 1778 chcr_send_wr(skb);
@@ -1791,21 +1803,42 @@ static int chcr_ahash_digest(struct ahash_request *req)
1791 return -EBUSY; 1803 return -EBUSY;
1792 } 1804 }
1793 1805
1794 if (is_hmac(crypto_ahash_tfm(rtfm))) 1806 chcr_init_hctx_per_wr(req_ctx);
1795 params.opad_needed = 1;
1796 else
1797 params.opad_needed = 0;
1798 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); 1807 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1799 if (error) 1808 if (error)
1800 return -ENOMEM; 1809 return -ENOMEM;
1801 1810
1802 params.last = 0;
1803 params.more = 0;
1804 params.sg_len = req->nbytes;
1805 params.bfr_len = 0;
1806 params.scmd1 = 0;
1807 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm)); 1811 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1808 req_ctx->result = 1; 1812 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1813 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1814 params.kctx_len *= 2;
1815 params.opad_needed = 1;
1816 } else {
1817 params.opad_needed = 0;
1818 }
1819 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1820 HASH_SPACE_LEFT(params.kctx_len), 0);
1821 if (params.sg_len < req->nbytes) {
1822 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1823 params.kctx_len /= 2;
1824 params.opad_needed = 0;
1825 }
1826 params.last = 0;
1827 params.more = 1;
1828 params.scmd1 = 0;
1829 params.sg_len = rounddown(params.sg_len, bs);
1830 params.hash_size = params.alg_prm.result_size;
1831 } else {
1832 params.sg_len = req->nbytes;
1833 params.hash_size = crypto_ahash_digestsize(rtfm);
1834 params.last = 1;
1835 params.more = 0;
1836 params.scmd1 = req->nbytes + req_ctx->data_len;
1837
1838 }
1839 params.bfr_len = 0;
1840 req_ctx->hctx_wr.result = 1;
1841 req_ctx->hctx_wr.srcsg = req->src;
1809 req_ctx->data_len += params.bfr_len + params.sg_len; 1842 req_ctx->data_len += params.bfr_len + params.sg_len;
1810 1843
1811 if (req->nbytes == 0) { 1844 if (req->nbytes == 0) {
@@ -1819,6 +1852,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
1819 error = PTR_ERR(skb); 1852 error = PTR_ERR(skb);
1820 goto unmap; 1853 goto unmap;
1821 } 1854 }
1855 req_ctx->hctx_wr.processed += params.sg_len;
1822 skb->dev = u_ctx->lldi.ports[0]; 1856 skb->dev = u_ctx->lldi.ports[0];
1823 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); 1857 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1824 chcr_send_wr(skb); 1858 chcr_send_wr(skb);
@@ -1828,6 +1862,151 @@ unmap:
1828 return error; 1862 return error;
1829} 1863}
1830 1864
1865static int chcr_ahash_continue(struct ahash_request *req)
1866{
1867 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1868 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1869 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1870 struct uld_ctx *u_ctx = NULL;
1871 struct sk_buff *skb;
1872 struct hash_wr_param params;
1873 u8 bs;
1874 int error;
1875
1876 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1877 u_ctx = ULD_CTX(h_ctx(rtfm));
1878 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1879 h_ctx(rtfm)->tx_qidx))) {
1880 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1881 return -EBUSY;
1882 }
1883 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1884 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1885 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1886 params.kctx_len *= 2;
1887 params.opad_needed = 1;
1888 } else {
1889 params.opad_needed = 0;
1890 }
1891 params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
1892 HASH_SPACE_LEFT(params.kctx_len),
1893 hctx_wr->src_ofst);
1894 if ((params.sg_len + hctx_wr->processed) > req->nbytes)
1895 params.sg_len = req->nbytes - hctx_wr->processed;
1896 if (!hctx_wr->result ||
1897 ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
1898 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1899 params.kctx_len /= 2;
1900 params.opad_needed = 0;
1901 }
1902 params.last = 0;
1903 params.more = 1;
1904 params.sg_len = rounddown(params.sg_len, bs);
1905 params.hash_size = params.alg_prm.result_size;
1906 params.scmd1 = 0;
1907 } else {
1908 params.last = 1;
1909 params.more = 0;
1910 params.hash_size = crypto_ahash_digestsize(rtfm);
1911 params.scmd1 = reqctx->data_len + params.sg_len;
1912 }
1913 params.bfr_len = 0;
1914 reqctx->data_len += params.sg_len;
1915 skb = create_hash_wr(req, &params);
1916 if (IS_ERR(skb)) {
1917 error = PTR_ERR(skb);
1918 goto err;
1919 }
1920 hctx_wr->processed += params.sg_len;
1921 skb->dev = u_ctx->lldi.ports[0];
1922 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1923 chcr_send_wr(skb);
1924 return 0;
1925err:
1926 return error;
1927}
1928
1929static inline void chcr_handle_ahash_resp(struct ahash_request *req,
1930 unsigned char *input,
1931 int err)
1932{
1933 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1934 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1935 int digestsize, updated_digestsize;
1936 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1937 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1938
1939 if (input == NULL)
1940 goto out;
1941 digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
1942 updated_digestsize = digestsize;
1943 if (digestsize == SHA224_DIGEST_SIZE)
1944 updated_digestsize = SHA256_DIGEST_SIZE;
1945 else if (digestsize == SHA384_DIGEST_SIZE)
1946 updated_digestsize = SHA512_DIGEST_SIZE;
1947
1948 if (hctx_wr->dma_addr) {
1949 dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
1950 hctx_wr->dma_len, DMA_TO_DEVICE);
1951 hctx_wr->dma_addr = 0;
1952 }
1953 if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
1954 req->nbytes)) {
1955 if (hctx_wr->result == 1) {
1956 hctx_wr->result = 0;
1957 memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
1958 digestsize);
1959 } else {
1960 memcpy(reqctx->partial_hash,
1961 input + sizeof(struct cpl_fw6_pld),
1962 updated_digestsize);
1963
1964 }
1965 goto unmap;
1966 }
1967 memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
1968 updated_digestsize);
1969
1970 err = chcr_ahash_continue(req);
1971 if (err)
1972 goto unmap;
1973 return;
1974unmap:
1975 if (hctx_wr->is_sg_map)
1976 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1977
1978
1979out:
1980 req->base.complete(&req->base, err);
1981}
1982
1983/*
1984 * chcr_handle_resp - Unmap the DMA buffers associated with the request
1985 * @req: crypto request
1986 */
1987int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
1988 int err)
1989{
1990 struct crypto_tfm *tfm = req->tfm;
1991 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1992 struct adapter *adap = padap(ctx->dev);
1993
1994 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
1995 case CRYPTO_ALG_TYPE_AEAD:
1996 chcr_handle_aead_resp(aead_request_cast(req), input, err);
1997 break;
1998
1999 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2000 err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
2001 input, err);
2002 break;
2003
2004 case CRYPTO_ALG_TYPE_AHASH:
2005 chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2006 }
2007 atomic_inc(&adap->chcr_stats.complete);
2008 return err;
2009}
1831static int chcr_ahash_export(struct ahash_request *areq, void *out) 2010static int chcr_ahash_export(struct ahash_request *areq, void *out)
1832{ 2011{
1833 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 2012 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
@@ -1835,11 +2014,10 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
1835 2014
1836 state->reqlen = req_ctx->reqlen; 2015 state->reqlen = req_ctx->reqlen;
1837 state->data_len = req_ctx->data_len; 2016 state->data_len = req_ctx->data_len;
1838 state->is_sg_map = 0;
1839 state->result = 0;
1840 memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen); 2017 memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
1841 memcpy(state->partial_hash, req_ctx->partial_hash, 2018 memcpy(state->partial_hash, req_ctx->partial_hash,
1842 CHCR_HASH_MAX_DIGEST_SIZE); 2019 CHCR_HASH_MAX_DIGEST_SIZE);
2020 chcr_init_hctx_per_wr(state);
1843 return 0; 2021 return 0;
1844} 2022}
1845 2023
@@ -1852,11 +2030,10 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
1852 req_ctx->data_len = state->data_len; 2030 req_ctx->data_len = state->data_len;
1853 req_ctx->reqbfr = req_ctx->bfr1; 2031 req_ctx->reqbfr = req_ctx->bfr1;
1854 req_ctx->skbfr = req_ctx->bfr2; 2032 req_ctx->skbfr = req_ctx->bfr2;
1855 req_ctx->is_sg_map = 0;
1856 req_ctx->result = 0;
1857 memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128); 2033 memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
1858 memcpy(req_ctx->partial_hash, state->partial_hash, 2034 memcpy(req_ctx->partial_hash, state->partial_hash,
1859 CHCR_HASH_MAX_DIGEST_SIZE); 2035 CHCR_HASH_MAX_DIGEST_SIZE);
2036 chcr_init_hctx_per_wr(req_ctx);
1860 return 0; 2037 return 0;
1861} 2038}
1862 2039
@@ -1953,10 +2130,8 @@ static int chcr_sha_init(struct ahash_request *areq)
1953 req_ctx->reqlen = 0; 2130 req_ctx->reqlen = 0;
1954 req_ctx->reqbfr = req_ctx->bfr1; 2131 req_ctx->reqbfr = req_ctx->bfr1;
1955 req_ctx->skbfr = req_ctx->bfr2; 2132 req_ctx->skbfr = req_ctx->bfr2;
1956 req_ctx->skb = NULL;
1957 req_ctx->result = 0;
1958 req_ctx->is_sg_map = 0;
1959 copy_hash_init_values(req_ctx->partial_hash, digestsize); 2133 copy_hash_init_values(req_ctx->partial_hash, digestsize);
2134
1960 return 0; 2135 return 0;
1961} 2136}
1962 2137
@@ -2124,11 +2299,11 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
2124 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); 2299 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2125 reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) < 2300 reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
2126 SGE_MAX_WR_LEN; 2301 SGE_MAX_WR_LEN;
2127 temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen), 16) 2302 temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
2128 * 16) : (sgl_len(reqctx->src_nents + reqctx->aad_nents 2303 : (sgl_len(reqctx->src_nents + reqctx->aad_nents
2129 + MIN_GCM_SG) * 8); 2304 + MIN_GCM_SG) * 8);
2130 transhdr_len += temp; 2305 transhdr_len += temp;
2131 transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16; 2306 transhdr_len = roundup(transhdr_len, 16);
2132 2307
2133 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, 2308 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2134 transhdr_len, op_type)) { 2309 transhdr_len, op_type)) {
@@ -2187,9 +2362,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
2187 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey, 2362 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2188 aeadctx->enckey_len); 2363 aeadctx->enckey_len);
2189 2364
2190 memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 2365 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2191 4), actx->h_iopad, kctx_len - 2366 actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2192 (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
2193 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || 2367 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2194 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { 2368 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2195 memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE); 2369 memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
@@ -2398,22 +2572,26 @@ void chcr_add_hash_src_ent(struct ahash_request *req,
2398 struct ulptx_walk ulp_walk; 2572 struct ulptx_walk ulp_walk;
2399 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); 2573 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2400 2574
2401 if (reqctx->imm) { 2575 if (reqctx->hctx_wr.imm) {
2402 u8 *buf = (u8 *)ulptx; 2576 u8 *buf = (u8 *)ulptx;
2403 2577
2404 if (param->bfr_len) { 2578 if (param->bfr_len) {
2405 memcpy(buf, reqctx->reqbfr, param->bfr_len); 2579 memcpy(buf, reqctx->reqbfr, param->bfr_len);
2406 buf += param->bfr_len; 2580 buf += param->bfr_len;
2407 } 2581 }
2408 sg_pcopy_to_buffer(req->src, sg_nents(req->src), 2582
2409 buf, param->sg_len, 0); 2583 sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2584 sg_nents(reqctx->hctx_wr.srcsg), buf,
2585 param->sg_len, 0);
2410 } else { 2586 } else {
2411 ulptx_walk_init(&ulp_walk, ulptx); 2587 ulptx_walk_init(&ulp_walk, ulptx);
2412 if (param->bfr_len) 2588 if (param->bfr_len)
2413 ulptx_walk_add_page(&ulp_walk, param->bfr_len, 2589 ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2414 &reqctx->dma_addr); 2590 &reqctx->hctx_wr.dma_addr);
2415 ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len, 2591 ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2416 0); 2592 param->sg_len, reqctx->hctx_wr.src_ofst);
2593 reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2594 reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2417 ulptx_walk_end(&ulp_walk); 2595 ulptx_walk_end(&ulp_walk);
2418 } 2596 }
2419} 2597}
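
chcr_add_hash_src_ent() now records where the ULPTX walk stopped (hctx_wr.srcsg from ulp_walk.last_sg and hctx_wr.src_ofst from ulp_walk.last_sg_len), so the next work request can continue from that point rather than restarting at req->src. The sketch below is not the driver's code; it only illustrates the bookkeeping with a plain array of buffers, where consume() stands in for the walk and the saved (index, offset) pair plays the role of (srcsg, src_ofst).

/* Illustrative only: resuming a partial walk over a buffer list from a
 * saved (index, offset) cursor, the way the driver resumes from
 * (hctx_wr.srcsg, hctx_wr.src_ofst) on the next work request.
 */
#include <stddef.h>
#include <stdio.h>

struct buf { const char *data; size_t len; };
struct cursor { size_t idx; size_t ofst; };

/* Consume up to budget bytes starting at *cur, advancing the cursor. */
static size_t consume(const struct buf *bufs, size_t nbufs,
		      struct cursor *cur, size_t budget)
{
	size_t done = 0;

	while (budget && cur->idx < nbufs) {
		size_t avail = bufs[cur->idx].len - cur->ofst;
		size_t take = avail < budget ? avail : budget;

		done += take;
		budget -= take;
		cur->ofst += take;
		if (cur->ofst == bufs[cur->idx].len) {
			cur->idx++;
			cur->ofst = 0;
		}
	}
	return done;
}

int main(void)
{
	struct buf bufs[] = { { "0123456789", 10 }, { "abcdef", 6 } };
	struct cursor cur = { 0, 0 };

	printf("first WR:  %zu bytes\n", consume(bufs, 2, &cur, 7));
	printf("second WR: %zu bytes\n", consume(bufs, 2, &cur, 16));
	return 0;
}
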
@@ -2430,7 +2608,7 @@ int chcr_hash_dma_map(struct device *dev,
2430 DMA_TO_DEVICE); 2608 DMA_TO_DEVICE);
2431 if (!error) 2609 if (!error)
2432 return -ENOMEM; 2610 return -ENOMEM;
2433 req_ctx->is_sg_map = 1; 2611 req_ctx->hctx_wr.is_sg_map = 1;
2434 return 0; 2612 return 0;
2435} 2613}
2436 2614
@@ -2444,7 +2622,7 @@ void chcr_hash_dma_unmap(struct device *dev,
2444 2622
2445 dma_unmap_sg(dev, req->src, sg_nents(req->src), 2623 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2446 DMA_TO_DEVICE); 2624 DMA_TO_DEVICE);
2447 req_ctx->is_sg_map = 0; 2625 req_ctx->hctx_wr.is_sg_map = 0;
2448 2626
2449} 2627}
2450 2628
@@ -2636,10 +2814,10 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2636 0, dst_size); 2814 0, dst_size);
2637} 2815}
2638 2816
2639int aead_ccm_validate_input(unsigned short op_type, 2817static int aead_ccm_validate_input(unsigned short op_type,
2640 struct aead_request *req, 2818 struct aead_request *req,
2641 struct chcr_aead_ctx *aeadctx, 2819 struct chcr_aead_ctx *aeadctx,
2642 unsigned int sub_type) 2820 unsigned int sub_type)
2643{ 2821{
2644 if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) { 2822 if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2645 if (crypto_ccm_check_iv(req->iv)) { 2823 if (crypto_ccm_check_iv(req->iv)) {
@@ -2696,16 +2874,16 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2696 CHCR_DST_SG_SIZE, req->assoclen); 2874 CHCR_DST_SG_SIZE, req->assoclen);
2697 dnents += MIN_CCM_SG; // For IV and B0 2875 dnents += MIN_CCM_SG; // For IV and B0
2698 dst_size = get_space_for_phys_dsgl(dnents); 2876 dst_size = get_space_for_phys_dsgl(dnents);
2699 kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2; 2877 kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
2700 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); 2878 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2701 reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen + 2879 reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
2702 reqctx->b0_len) <= SGE_MAX_WR_LEN; 2880 reqctx->b0_len) <= SGE_MAX_WR_LEN;
2703 temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen + 2881 temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
2704 reqctx->b0_len), 16) * 16) : 2882 reqctx->b0_len, 16) :
2705 (sgl_len(reqctx->src_nents + reqctx->aad_nents + 2883 (sgl_len(reqctx->src_nents + reqctx->aad_nents +
2706 MIN_CCM_SG) * 8); 2884 MIN_CCM_SG) * 8);
2707 transhdr_len += temp; 2885 transhdr_len += temp;
2708 transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16; 2886 transhdr_len = roundup(transhdr_len, 16);
2709 2887
2710 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE - 2888 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2711 reqctx->b0_len, transhdr_len, op_type)) { 2889 reqctx->b0_len, transhdr_len, op_type)) {
@@ -2727,8 +2905,8 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2727 2905
2728 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; 2906 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2729 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); 2907 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2730 memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) * 2908 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2731 16), aeadctx->key, aeadctx->enckey_len); 2909 aeadctx->key, aeadctx->enckey_len);
2732 2910
2733 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); 2911 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2734 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); 2912 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
@@ -2798,16 +2976,15 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
2798 CHCR_DST_SG_SIZE, req->assoclen); 2976 CHCR_DST_SG_SIZE, req->assoclen);
2799 dnents += MIN_GCM_SG; // For IV 2977 dnents += MIN_GCM_SG; // For IV
2800 dst_size = get_space_for_phys_dsgl(dnents); 2978 dst_size = get_space_for_phys_dsgl(dnents);
2801 kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) + 2979 kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
2802 AEAD_H_SIZE;
2803 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); 2980 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2804 reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <= 2981 reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
2805 SGE_MAX_WR_LEN; 2982 SGE_MAX_WR_LEN;
2806 temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + 2983 temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
2807 req->cryptlen), 16) * 16) : (sgl_len(reqctx->src_nents + 2984 (sgl_len(reqctx->src_nents +
2808 reqctx->aad_nents + MIN_GCM_SG) * 8); 2985 reqctx->aad_nents + MIN_GCM_SG) * 8);
2809 transhdr_len += temp; 2986 transhdr_len += temp;
2810 transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16; 2987 transhdr_len = roundup(transhdr_len, 16);
2811 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, 2988 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2812 transhdr_len, op_type)) { 2989 transhdr_len, op_type)) {
2813 atomic_inc(&adap->chcr_stats.fallback); 2990 atomic_inc(&adap->chcr_stats.fallback);
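
All three AEAD work-request builders in this file (authenc, CCM, GCM) follow the same sizing pattern: if header plus payload fits under SGE_MAX_WR_LEN, the data is carried as immediate data padded to a 16-byte boundary; otherwise only a gather list is accounted for inside the work request. The sketch below restates that decision in isolation. MAX_WR_PAYLOAD and sgl_entry_bytes() are invented stand-ins, not the driver's constants or its sgl_len() formula.

/* Illustrative sizing decision: carry the payload as immediate data when
 * it fits in the work request, otherwise account only for a gather list.
 */
#include <stdio.h>

#define MAX_WR_PAYLOAD 512U
#define ROUNDUP(x, y)  ((((x) + (y) - 1) / (y)) * (y))

/* Hypothetical per-entry cost of a hardware gather-list entry. */
static unsigned int sgl_entry_bytes(unsigned int nents)
{
	return nents * 16;
}

static unsigned int wr_extra_len(unsigned int hdr_len, unsigned int payload,
				 unsigned int nents, int *imm)
{
	*imm = (hdr_len + payload) <= MAX_WR_PAYLOAD;

	/* Immediate data is padded to 16 bytes; otherwise only the
	 * gather list occupies space in the work request.
	 */
	return *imm ? ROUNDUP(payload, 16) : sgl_entry_bytes(nents);
}

int main(void)
{
	int imm;
	unsigned int extra;

	extra = wr_extra_len(64, 200, 4, &imm);
	printf("small payload:  imm=%d extra=%u\n", imm, extra);
	extra = wr_extra_len(64, 4000, 4, &imm);
	printf("large payload:  imm=%d extra=%u\n", imm, extra);
	return 0;
}
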
@@ -2846,8 +3023,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
2846 0, 0, dst_size); 3023 0, 0, dst_size);
2847 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; 3024 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2848 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); 3025 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2849 memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) * 3026 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2850 16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE); 3027 GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
2851 3028
2852 /* prepare a 16 byte iv */ 3029 /* prepare a 16 byte iv */
2853 /* S A L T | IV | 0x00000001 */ 3030 /* S A L T | IV | 0x00000001 */
@@ -3067,11 +3244,10 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3067 unsigned char ck_size, mk_size; 3244 unsigned char ck_size, mk_size;
3068 int key_ctx_size = 0; 3245 int key_ctx_size = 0;
3069 3246
3070 key_ctx_size = sizeof(struct _key_ctx) + 3247 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3071 ((DIV_ROUND_UP(keylen, 16)) << 4) * 2;
3072 if (keylen == AES_KEYSIZE_128) { 3248 if (keylen == AES_KEYSIZE_128) {
3073 mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3074 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; 3249 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3250 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3075 } else if (keylen == AES_KEYSIZE_192) { 3251 } else if (keylen == AES_KEYSIZE_192) {
3076 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; 3252 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3077 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192; 3253 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
@@ -3178,10 +3354,9 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3178 3354
3179 memcpy(aeadctx->key, key, keylen); 3355 memcpy(aeadctx->key, key, keylen);
3180 aeadctx->enckey_len = keylen; 3356 aeadctx->enckey_len = keylen;
3181 key_ctx_size = sizeof(struct _key_ctx) + 3357 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3182 ((DIV_ROUND_UP(keylen, 16)) << 4) +
3183 AEAD_H_SIZE; 3358 AEAD_H_SIZE;
3184 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, 3359 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3185 CHCR_KEYCTX_MAC_KEY_SIZE_128, 3360 CHCR_KEYCTX_MAC_KEY_SIZE_128,
3186 0, 0, 3361 0, 0,
3187 key_ctx_size >> 4); 3362 key_ctx_size >> 4);
@@ -3281,6 +3456,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3281 if (IS_ERR(base_hash)) { 3456 if (IS_ERR(base_hash)) {
3282 pr_err("chcr : Base driver cannot be loaded\n"); 3457 pr_err("chcr : Base driver cannot be loaded\n");
3283 aeadctx->enckey_len = 0; 3458 aeadctx->enckey_len = 0;
3459 memzero_explicit(&keys, sizeof(keys));
3284 return -EINVAL; 3460 return -EINVAL;
3285 } 3461 }
3286 { 3462 {
@@ -3325,17 +3501,19 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3325 chcr_change_order(actx->h_iopad, param.result_size); 3501 chcr_change_order(actx->h_iopad, param.result_size);
3326 chcr_change_order(o_ptr, param.result_size); 3502 chcr_change_order(o_ptr, param.result_size);
3327 key_ctx_len = sizeof(struct _key_ctx) + 3503 key_ctx_len = sizeof(struct _key_ctx) +
3328 ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) + 3504 roundup(keys.enckeylen, 16) +
3329 (param.result_size + align) * 2; 3505 (param.result_size + align) * 2;
3330 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size, 3506 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3331 0, 1, key_ctx_len >> 4); 3507 0, 1, key_ctx_len >> 4);
3332 actx->auth_mode = param.auth_mode; 3508 actx->auth_mode = param.auth_mode;
3333 chcr_free_shash(base_hash); 3509 chcr_free_shash(base_hash);
3334 3510
3511 memzero_explicit(&keys, sizeof(keys));
3335 return 0; 3512 return 0;
3336 } 3513 }
3337out: 3514out:
3338 aeadctx->enckey_len = 0; 3515 aeadctx->enckey_len = 0;
3516 memzero_explicit(&keys, sizeof(keys));
3339 if (!IS_ERR(base_hash)) 3517 if (!IS_ERR(base_hash))
3340 chcr_free_shash(base_hash); 3518 chcr_free_shash(base_hash);
3341 return -EINVAL; 3519 return -EINVAL;
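
Several AEAD setkey paths in these hunks now clear the parsed key structure with memzero_explicit() on every exit path, success and failure alike, because that stack-resident structure holds pointers into (and the lengths of) the caller's key material. A plain memset() at the end of a function can legally be dropped by the compiler as a dead store; memzero_explicit() is defined so the store survives. A standalone sketch of the userspace equivalent, using a volatile function pointer to the same effect (the key-splitting in setkey() is purely illustrative):

/* Userspace illustration of memzero_explicit(): wipe stack-held secrets
 * through a volatile pointer so the store cannot be optimised away.
 */
#include <stdio.h>
#include <string.h>

static void *(*const volatile memset_v)(void *, int, size_t) = memset;

static void memzero_explicit_u(void *p, size_t n)
{
	memset_v(p, 0, n);
}

struct parsed_keys {
	const unsigned char *authkey;
	unsigned int authkeylen;
	const unsigned char *enckey;
	unsigned int enckeylen;
};

static int setkey(const unsigned char *key, unsigned int keylen)
{
	struct parsed_keys keys = {
		.authkey = key,			/* split is illustrative */
		.authkeylen = keylen / 2,
		.enckey = key + keylen / 2,
		.enckeylen = keylen - keylen / 2,
	};
	int ret = keys.enckeylen ? 0 : -1;

	/* ... program the hardware key context here ... */

	/* Wipe on every exit path, success or failure. */
	memzero_explicit_u(&keys, sizeof(keys));
	return ret;
}

int main(void)
{
	unsigned char key[32] = { 0 };

	printf("setkey: %d\n", setkey(key, sizeof(key)));
	return 0;
}
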
@@ -3393,15 +3571,16 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3393 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key, 3571 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3394 aeadctx->enckey_len << 3); 3572 aeadctx->enckey_len << 3);
3395 } 3573 }
3396 key_ctx_len = sizeof(struct _key_ctx) 3574 key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3397 + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
3398 3575
3399 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0, 3576 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3400 0, key_ctx_len >> 4); 3577 0, key_ctx_len >> 4);
3401 actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP; 3578 actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3579 memzero_explicit(&keys, sizeof(keys));
3402 return 0; 3580 return 0;
3403out: 3581out:
3404 aeadctx->enckey_len = 0; 3582 aeadctx->enckey_len = 0;
3583 memzero_explicit(&keys, sizeof(keys));
3405 return -EINVAL; 3584 return -EINVAL;
3406} 3585}
3407 3586
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index f263cd42a84f..dba3dff1e209 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -258,15 +258,16 @@
258#define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\ 258#define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\
259 ULP_TX_SC_MORE_V((immdatalen))) 259 ULP_TX_SC_MORE_V((immdatalen)))
260#define MAX_NK 8 260#define MAX_NK 8
261#define ROUND_16(bytes) ((bytes) & 0xFFFFFFF0)
262#define MAX_DSGL_ENT 32 261#define MAX_DSGL_ENT 32
263#define MIN_CIPHER_SG 1 /* IV */ 262#define MIN_CIPHER_SG 1 /* IV */
264#define MIN_AUTH_SG 1 /* IV */ 263#define MIN_AUTH_SG 1 /* IV */
265#define MIN_GCM_SG 1 /* IV */ 264#define MIN_GCM_SG 1 /* IV */
266#define MIN_DIGEST_SG 1 /*Partial Buffer*/ 265#define MIN_DIGEST_SG 1 /*Partial Buffer*/
267#define MIN_CCM_SG 2 /*IV+B0*/ 266#define MIN_CCM_SG 2 /*IV+B0*/
268#define SPACE_LEFT(len) \ 267#define CIP_SPACE_LEFT(len) \
269 ((SGE_MAX_WR_LEN - WR_MIN_LEN - (len))) 268 ((SGE_MAX_WR_LEN - CIP_WR_MIN_LEN - (len)))
269#define HASH_SPACE_LEFT(len) \
270 ((SGE_MAX_WR_LEN - HASH_WR_MIN_LEN - (len)))
270 271
271struct algo_param { 272struct algo_param {
272 unsigned int auth_mode; 273 unsigned int auth_mode;
@@ -275,12 +276,14 @@ struct algo_param {
275}; 276};
276 277
277struct hash_wr_param { 278struct hash_wr_param {
279 struct algo_param alg_prm;
278 unsigned int opad_needed; 280 unsigned int opad_needed;
279 unsigned int more; 281 unsigned int more;
280 unsigned int last; 282 unsigned int last;
281 struct algo_param alg_prm; 283 unsigned int kctx_len;
282 unsigned int sg_len; 284 unsigned int sg_len;
283 unsigned int bfr_len; 285 unsigned int bfr_len;
286 unsigned int hash_size;
284 u64 scmd1; 287 u64 scmd1;
285}; 288};
286 289
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 77056a90c8e1..1a20424e18c6 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -54,10 +54,14 @@
54#define MAC_ERROR_BIT 0 54#define MAC_ERROR_BIT 0
55#define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1) 55#define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1)
56#define MAX_SALT 4 56#define MAX_SALT 4
57#define WR_MIN_LEN (sizeof(struct chcr_wr) + \ 57#define CIP_WR_MIN_LEN (sizeof(struct chcr_wr) + \
58 sizeof(struct cpl_rx_phys_dsgl) + \ 58 sizeof(struct cpl_rx_phys_dsgl) + \
59 sizeof(struct ulptx_sgl)) 59 sizeof(struct ulptx_sgl))
60 60
61#define HASH_WR_MIN_LEN (sizeof(struct chcr_wr) + \
62 DUMMY_BYTES + \
63 sizeof(struct ulptx_sgl))
64
61#define padap(dev) pci_get_drvdata(dev->u_ctx->lldi.pdev) 65#define padap(dev) pci_get_drvdata(dev->u_ctx->lldi.pdev)
62 66
63struct uld_ctx; 67struct uld_ctx;
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 7daf0a17a7d2..c8e8972af283 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -258,21 +258,32 @@ struct chcr_context {
258 struct __crypto_ctx crypto_ctx[0]; 258 struct __crypto_ctx crypto_ctx[0];
259}; 259};
260 260
261struct chcr_ahash_req_ctx { 261struct chcr_hctx_per_wr {
262 struct scatterlist *srcsg;
263 struct sk_buff *skb;
264 dma_addr_t dma_addr;
265 u32 dma_len;
266 unsigned int src_ofst;
267 unsigned int processed;
262 u32 result; 268 u32 result;
263 u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128]; 269 u8 is_sg_map;
264 u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128]; 270 u8 imm;
271 /*Final callback called. Driver cannot rely on nbytes to decide
272 * final call
273 */
274 u8 isfinal;
275};
276
277struct chcr_ahash_req_ctx {
278 struct chcr_hctx_per_wr hctx_wr;
265 u8 *reqbfr; 279 u8 *reqbfr;
266 u8 *skbfr; 280 u8 *skbfr;
267 dma_addr_t dma_addr; 281 /* SKB which is being sent to the hardware for processing */
268 u32 dma_len; 282 u64 data_len; /* Data len till time */
269 u8 reqlen; 283 u8 reqlen;
270 u8 imm;
271 u8 is_sg_map;
272 u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE]; 284 u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
273 u64 data_len; /* Data len till time */ 285 u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128];
274 /* SKB which is being sent to the hardware for processing */ 286 u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
275 struct sk_buff *skb;
276}; 287};
277 288
278struct chcr_blkcipher_req_ctx { 289struct chcr_blkcipher_req_ctx {
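
The new struct chcr_hctx_per_wr groups everything specific to one hash work request (the source cursor, DMA state, bytes already handed to hardware, the final-callback marker), while buffers that live for the whole ahash request stay in chcr_ahash_req_ctx. The loop below is not the driver's send path; it only sketches, under that reading, how per-WR bookkeeping lets one large request be carved into several hardware submissions.

/* Illustrative only: carving one large hash request into several
 * fixed-size work requests, tracking progress in a per-WR context.
 */
#include <stdio.h>

#define WR_MAX_PAYLOAD 1024U

struct hctx_per_wr {
	unsigned int processed;	/* bytes handed to hardware so far */
	unsigned char isfinal;	/* last WR of the request issued   */
};

static void issue_wr(unsigned int ofst, unsigned int len, int final)
{
	printf("WR: offset=%u len=%u final=%d\n", ofst, len, final);
}

static void hash_request(unsigned int total_len)
{
	struct hctx_per_wr wr = { 0, 0 };

	while (!wr.isfinal) {
		unsigned int remain = total_len - wr.processed;
		unsigned int chunk = remain > WR_MAX_PAYLOAD ?
				     WR_MAX_PAYLOAD : remain;

		wr.isfinal = (chunk == remain);
		issue_wr(wr.processed, chunk, wr.isfinal);
		wr.processed += chunk;
	}
}

int main(void)
{
	hash_request(2500);	/* three WRs: 1024 + 1024 + 452 */
	return 0;
}
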
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index db1e241104ed..8e0aa3f175c9 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -360,8 +360,7 @@ inline void *copy_cpltx_pktxt(struct sk_buff *skb,
360 360
361 cpl = (struct cpl_tx_pkt_core *)pos; 361 cpl = (struct cpl_tx_pkt_core *)pos;
362 362
363 if (skb->ip_summed == CHECKSUM_PARTIAL) 363 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
364 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
365 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) | 364 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
366 TXPKT_PF_V(adap->pf); 365 TXPKT_PF_V(adap->pf);
367 if (skb_vlan_tag_present(skb)) { 366 if (skb_vlan_tag_present(skb)) {
@@ -475,7 +474,7 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
475 wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1); 474 wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1);
476 475
477 /* Sub-command */ 476 /* Sub-command */
478 wr->req.sc_imm.cmd_more = FILL_CMD_MORE(immdatalen); 477 wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen);
479 wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + 478 wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
480 sizeof(wr->req.key_ctx) + 479 sizeof(wr->req.key_ctx) +
481 kctx_len + 480 kctx_len +
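
The chcr_ipsec hunk changes the FILL_CMD_MORE() argument from the raw immediate-data length to its logical negation. Without the hardware documentation that is only a plausible reading, but the general hazard it points at is real: a macro that packs a single-bit "more" indication (here via ULP_TX_SC_MORE_V()) should be fed a normalised 0/1 value, not a byte count, or the extra bits land in neighbouring fields. A generic, standalone illustration with an invented field layout:

/* Invented field layout: bit 23 is a one-bit MORE flag, bits 24..31 carry
 * an opcode. Passing a raw length instead of 0/1 corrupts the opcode.
 */
#include <stdint.h>
#include <stdio.h>

#define MORE_SHIFT  23
#define MORE_V(x)   ((uint32_t)(x) << MORE_SHIFT)
#define OPCODE_V(x) ((uint32_t)(x) << 24)

int main(void)
{
	unsigned int immdatalen = 48;	/* a byte count, not a flag */

	uint32_t bad  = OPCODE_V(0x5) | MORE_V(immdatalen);
	uint32_t good = OPCODE_V(0x5) | MORE_V(!!immdatalen);

	printf("bad  = 0x%08x (opcode bits clobbered)\n", (unsigned)bad);
	printf("good = 0x%08x\n", (unsigned)good);
	return 0;
}
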
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 225e74a7f724..d4a81be0d7d2 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -235,7 +235,7 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
235 /* Configure DMA tx control */ 235 /* Configure DMA tx control */
236 val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS); 236 val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
237 val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS); 237 val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
238 val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUG; 238 val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
239 writel(val, 239 writel(val,
240 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG); 240 EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
241 241
@@ -332,7 +332,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
332 val = EIP197_HIA_DSE_CFG_DIS_DEBUG; 332 val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
333 val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8); 333 val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
334 val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS); 334 val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
335 val |= EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE; 335 val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
336 /* FIXME: instability issues can occur for EIP97 but disabling it impact 336 /* FIXME: instability issues can occur for EIP97 but disabling it impact
337 * performances. 337 * performances.
338 */ 338 */
@@ -354,7 +354,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
354 val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY; 354 val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
355 val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC; 355 val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
356 val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1; 356 val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
357 val |= EIP197_ALG_SHA2; 357 val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
358 writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN); 358 writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);
359 359
360 /* Command Descriptor Rings prepare */ 360 /* Command Descriptor Rings prepare */
@@ -432,20 +432,18 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
432} 432}
433 433
434/* Called with ring's lock taken */ 434/* Called with ring's lock taken */
435static int safexcel_try_push_requests(struct safexcel_crypto_priv *priv, 435static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
436 int ring, int reqs) 436 int ring)
437{ 437{
438 int coal = min_t(int, reqs, EIP197_MAX_BATCH_SZ); 438 int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);
439 439
440 if (!coal) 440 if (!coal)
441 return 0; 441 return;
442 442
443 /* Configure when we want an interrupt */ 443 /* Configure when we want an interrupt */
444 writel(EIP197_HIA_RDR_THRESH_PKT_MODE | 444 writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
445 EIP197_HIA_RDR_THRESH_PROC_PKT(coal), 445 EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
446 EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH); 446 EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
447
448 return coal;
449} 447}
450 448
451void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring) 449void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
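
safexcel_try_push_requests() now derives the interrupt-coalescing threshold from a single in-flight counter, priv->ring[ring].requests, instead of a value threaded through by its callers, and clamps it to EIP197_MAX_BATCH_SZ. The plain-C model below mirrors only that bookkeeping (submit adds, completion subtracts, the busy flag follows the counter); the register write, locking and the driver's names are simplified or omitted.

/* Model of the reworked bookkeeping: one in-flight counter per ring drives
 * both the coalescing threshold and the busy flag.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_BATCH_SZ 64

struct ring_state {
	int requests;	/* requests currently in the engine */
	bool busy;
};

static void program_threshold(struct ring_state *r)
{
	int coal = r->requests < MAX_BATCH_SZ ? r->requests : MAX_BATCH_SZ;

	if (!coal)
		return;		/* nothing in flight: leave it alone */
	printf("threshold <- %d\n", coal);
}

static void on_submit(struct ring_state *r, int nreq)
{
	r->requests += nreq;
	if (!r->busy) {
		program_threshold(r);
		r->busy = true;
	}
}

static void on_complete(struct ring_state *r, int handled)
{
	r->requests -= handled;
	program_threshold(r);
	if (!r->requests)
		r->busy = false;
}

int main(void)
{
	struct ring_state ring = { 0, false };

	on_submit(&ring, 10);
	on_complete(&ring, 4);
	on_complete(&ring, 6);
	printf("busy=%d requests=%d\n", ring.busy, ring.requests);
	return 0;
}
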
@@ -490,6 +488,15 @@ handle_req:
490 if (backlog) 488 if (backlog)
491 backlog->complete(backlog, -EINPROGRESS); 489 backlog->complete(backlog, -EINPROGRESS);
492 490
491 /* In case the send() helper did not issue any command to push
492 * to the engine because the input data was cached, continue to
493 * dequeue other requests as this is valid and not an error.
494 */
495 if (!commands && !results) {
496 kfree(request);
497 continue;
498 }
499
493 spin_lock_bh(&priv->ring[ring].egress_lock); 500 spin_lock_bh(&priv->ring[ring].egress_lock);
494 list_add_tail(&request->list, &priv->ring[ring].list); 501 list_add_tail(&request->list, &priv->ring[ring].list);
495 spin_unlock_bh(&priv->ring[ring].egress_lock); 502 spin_unlock_bh(&priv->ring[ring].egress_lock);
@@ -512,14 +519,13 @@ finalize:
512 519
513 spin_lock_bh(&priv->ring[ring].egress_lock); 520 spin_lock_bh(&priv->ring[ring].egress_lock);
514 521
522 priv->ring[ring].requests += nreq;
523
515 if (!priv->ring[ring].busy) { 524 if (!priv->ring[ring].busy) {
516 nreq -= safexcel_try_push_requests(priv, ring, nreq); 525 safexcel_try_push_requests(priv, ring);
517 if (nreq) 526 priv->ring[ring].busy = true;
518 priv->ring[ring].busy = true;
519 } 527 }
520 528
521 priv->ring[ring].requests_left += nreq;
522
523 spin_unlock_bh(&priv->ring[ring].egress_lock); 529 spin_unlock_bh(&priv->ring[ring].egress_lock);
524 530
525 /* let the RDR know we have pending descriptors */ 531 /* let the RDR know we have pending descriptors */
@@ -531,25 +537,6 @@ finalize:
531 EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT); 537 EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
532} 538}
533 539
534void safexcel_free_context(struct safexcel_crypto_priv *priv,
535 struct crypto_async_request *req,
536 int result_sz)
537{
538 struct safexcel_context *ctx = crypto_tfm_ctx(req->tfm);
539
540 if (ctx->result_dma)
541 dma_unmap_single(priv->dev, ctx->result_dma, result_sz,
542 DMA_FROM_DEVICE);
543
544 if (ctx->cache) {
545 dma_unmap_single(priv->dev, ctx->cache_dma, ctx->cache_sz,
546 DMA_TO_DEVICE);
547 kfree(ctx->cache);
548 ctx->cache = NULL;
549 ctx->cache_sz = 0;
550 }
551}
552
553void safexcel_complete(struct safexcel_crypto_priv *priv, int ring) 540void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
554{ 541{
555 struct safexcel_command_desc *cdesc; 542 struct safexcel_command_desc *cdesc;
@@ -623,7 +610,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
623{ 610{
624 struct safexcel_request *sreq; 611 struct safexcel_request *sreq;
625 struct safexcel_context *ctx; 612 struct safexcel_context *ctx;
626 int ret, i, nreq, ndesc, tot_descs, done; 613 int ret, i, nreq, ndesc, tot_descs, handled = 0;
627 bool should_complete; 614 bool should_complete;
628 615
629handle_results: 616handle_results:
@@ -659,6 +646,7 @@ handle_results:
659 646
660 kfree(sreq); 647 kfree(sreq);
661 tot_descs += ndesc; 648 tot_descs += ndesc;
649 handled++;
662 } 650 }
663 651
664acknowledge: 652acknowledge:
@@ -677,11 +665,10 @@ acknowledge:
677requests_left: 665requests_left:
678 spin_lock_bh(&priv->ring[ring].egress_lock); 666 spin_lock_bh(&priv->ring[ring].egress_lock);
679 667
680 done = safexcel_try_push_requests(priv, ring, 668 priv->ring[ring].requests -= handled;
681 priv->ring[ring].requests_left); 669 safexcel_try_push_requests(priv, ring);
682 670
683 priv->ring[ring].requests_left -= done; 671 if (!priv->ring[ring].requests)
684 if (!done && !priv->ring[ring].requests_left)
685 priv->ring[ring].busy = false; 672 priv->ring[ring].busy = false;
686 673
687 spin_unlock_bh(&priv->ring[ring].egress_lock); 674 spin_unlock_bh(&priv->ring[ring].egress_lock);
@@ -781,6 +768,8 @@ static struct safexcel_alg_template *safexcel_algs[] = {
781 &safexcel_alg_sha224, 768 &safexcel_alg_sha224,
782 &safexcel_alg_sha256, 769 &safexcel_alg_sha256,
783 &safexcel_alg_hmac_sha1, 770 &safexcel_alg_hmac_sha1,
771 &safexcel_alg_hmac_sha224,
772 &safexcel_alg_hmac_sha256,
784}; 773};
785 774
786static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) 775static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
@@ -894,29 +883,44 @@ static int safexcel_probe(struct platform_device *pdev)
894 return PTR_ERR(priv->base); 883 return PTR_ERR(priv->base);
895 } 884 }
896 885
897 priv->clk = of_clk_get(dev->of_node, 0); 886 priv->clk = devm_clk_get(&pdev->dev, NULL);
898 if (!IS_ERR(priv->clk)) { 887 ret = PTR_ERR_OR_ZERO(priv->clk);
888 /* The clock isn't mandatory */
889 if (ret != -ENOENT) {
890 if (ret)
891 return ret;
892
899 ret = clk_prepare_enable(priv->clk); 893 ret = clk_prepare_enable(priv->clk);
900 if (ret) { 894 if (ret) {
901 dev_err(dev, "unable to enable clk (%d)\n", ret); 895 dev_err(dev, "unable to enable clk (%d)\n", ret);
902 return ret; 896 return ret;
903 } 897 }
904 } else { 898 }
905 /* The clock isn't mandatory */ 899
906 if (PTR_ERR(priv->clk) == -EPROBE_DEFER) 900 priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
907 return -EPROBE_DEFER; 901 ret = PTR_ERR_OR_ZERO(priv->reg_clk);
902 /* The clock isn't mandatory */
903 if (ret != -ENOENT) {
904 if (ret)
905 goto err_core_clk;
906
907 ret = clk_prepare_enable(priv->reg_clk);
908 if (ret) {
909 dev_err(dev, "unable to enable reg clk (%d)\n", ret);
910 goto err_core_clk;
911 }
908 } 912 }
909 913
910 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 914 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
911 if (ret) 915 if (ret)
912 goto err_clk; 916 goto err_reg_clk;
913 917
914 priv->context_pool = dmam_pool_create("safexcel-context", dev, 918 priv->context_pool = dmam_pool_create("safexcel-context", dev,
915 sizeof(struct safexcel_context_record), 919 sizeof(struct safexcel_context_record),
916 1, 0); 920 1, 0);
917 if (!priv->context_pool) { 921 if (!priv->context_pool) {
918 ret = -ENOMEM; 922 ret = -ENOMEM;
919 goto err_clk; 923 goto err_reg_clk;
920 } 924 }
921 925
922 safexcel_configure(priv); 926 safexcel_configure(priv);
@@ -931,12 +935,12 @@ static int safexcel_probe(struct platform_device *pdev)
931 &priv->ring[i].cdr, 935 &priv->ring[i].cdr,
932 &priv->ring[i].rdr); 936 &priv->ring[i].rdr);
933 if (ret) 937 if (ret)
934 goto err_clk; 938 goto err_reg_clk;
935 939
936 ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL); 940 ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
937 if (!ring_irq) { 941 if (!ring_irq) {
938 ret = -ENOMEM; 942 ret = -ENOMEM;
939 goto err_clk; 943 goto err_reg_clk;
940 } 944 }
941 945
942 ring_irq->priv = priv; 946 ring_irq->priv = priv;
@@ -948,7 +952,7 @@ static int safexcel_probe(struct platform_device *pdev)
948 ring_irq); 952 ring_irq);
949 if (irq < 0) { 953 if (irq < 0) {
950 ret = irq; 954 ret = irq;
951 goto err_clk; 955 goto err_reg_clk;
952 } 956 }
953 957
954 priv->ring[i].work_data.priv = priv; 958 priv->ring[i].work_data.priv = priv;
@@ -959,10 +963,10 @@ static int safexcel_probe(struct platform_device *pdev)
959 priv->ring[i].workqueue = create_singlethread_workqueue(wq_name); 963 priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
960 if (!priv->ring[i].workqueue) { 964 if (!priv->ring[i].workqueue) {
961 ret = -ENOMEM; 965 ret = -ENOMEM;
962 goto err_clk; 966 goto err_reg_clk;
963 } 967 }
964 968
965 priv->ring[i].requests_left = 0; 969 priv->ring[i].requests = 0;
966 priv->ring[i].busy = false; 970 priv->ring[i].busy = false;
967 971
968 crypto_init_queue(&priv->ring[i].queue, 972 crypto_init_queue(&priv->ring[i].queue,
@@ -980,18 +984,20 @@ static int safexcel_probe(struct platform_device *pdev)
980 ret = safexcel_hw_init(priv); 984 ret = safexcel_hw_init(priv);
981 if (ret) { 985 if (ret) {
982 dev_err(dev, "EIP h/w init failed (%d)\n", ret); 986 dev_err(dev, "EIP h/w init failed (%d)\n", ret);
983 goto err_clk; 987 goto err_reg_clk;
984 } 988 }
985 989
986 ret = safexcel_register_algorithms(priv); 990 ret = safexcel_register_algorithms(priv);
987 if (ret) { 991 if (ret) {
988 dev_err(dev, "Failed to register algorithms (%d)\n", ret); 992 dev_err(dev, "Failed to register algorithms (%d)\n", ret);
989 goto err_clk; 993 goto err_reg_clk;
990 } 994 }
991 995
992 return 0; 996 return 0;
993 997
994err_clk: 998err_reg_clk:
999 clk_disable_unprepare(priv->reg_clk);
1000err_core_clk:
995 clk_disable_unprepare(priv->clk); 1001 clk_disable_unprepare(priv->clk);
996 return ret; 1002 return ret;
997} 1003}
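
The probe path above switches from of_clk_get() to devm_clk_get() and adds an optional register clock. The optional-clock pattern it uses is: treat -ENOENT as "no clock described in DT" and carry on, propagate every other error (including -EPROBE_DEFER), and unwind already-enabled clocks in reverse order on failure. Below is a condensed kernel-style sketch of just that pattern, not the driver's full probe(); error-label wiring is left to the caller.

/* Kernel-style sketch of the optional-clock pattern: -ENOENT means the
 * clock is simply not provided and is not an error.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int get_optional_clk(struct device *dev, const char *id,
			    struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, id);
	int ret = PTR_ERR_OR_ZERO(clk);

	if (ret == -ENOENT) {		/* clock not provided: fine */
		*out = NULL;
		return 0;
	}
	if (ret)			/* includes -EPROBE_DEFER */
		return ret;

	ret = clk_prepare_enable(clk);
	if (ret) {
		dev_err(dev, "unable to enable clk %s (%d)\n",
			id ? id : "core", ret);
		return ret;
	}

	*out = clk;
	return 0;
}
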
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 4e219c21608b..b470a849721f 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -135,7 +135,7 @@
135 135
136/* EIP197_HIA_xDR_DMA_CFG */ 136/* EIP197_HIA_xDR_DMA_CFG */
137#define EIP197_HIA_xDR_WR_RES_BUF BIT(22) 137#define EIP197_HIA_xDR_WR_RES_BUF BIT(22)
138#define EIP197_HIA_xDR_WR_CTRL_BUG BIT(23) 138#define EIP197_HIA_xDR_WR_CTRL_BUF BIT(23)
139#define EIP197_HIA_xDR_WR_OWN_BUF BIT(24) 139#define EIP197_HIA_xDR_WR_OWN_BUF BIT(24)
140#define EIP197_HIA_xDR_CFG_WR_CACHE(n) (((n) & 0x7) << 25) 140#define EIP197_HIA_xDR_CFG_WR_CACHE(n) (((n) & 0x7) << 25)
141#define EIP197_HIA_xDR_CFG_RD_CACHE(n) (((n) & 0x7) << 29) 141#define EIP197_HIA_xDR_CFG_RD_CACHE(n) (((n) & 0x7) << 29)
@@ -179,7 +179,7 @@
179#define EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(n) ((n) << 0) 179#define EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(n) ((n) << 0)
180#define EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(n) (((n) & 0x7) << 4) 180#define EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(n) (((n) & 0x7) << 4)
181#define EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(n) ((n) << 8) 181#define EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(n) ((n) << 8)
182#define EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE GENMASK(15, 14) 182#define EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE GENMASK(15, 14)
183#define EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(n) ((n) << 16) 183#define EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(n) ((n) << 16)
184#define EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(n) (((n) & 0x7) << 20) 184#define EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(n) (((n) & 0x7) << 20)
185#define EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(n) ((n) << 24) 185#define EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(n) ((n) << 24)
@@ -525,6 +525,7 @@ struct safexcel_crypto_priv {
525 void __iomem *base; 525 void __iomem *base;
526 struct device *dev; 526 struct device *dev;
527 struct clk *clk; 527 struct clk *clk;
528 struct clk *reg_clk;
528 struct safexcel_config config; 529 struct safexcel_config config;
529 530
530 enum safexcel_eip_version version; 531 enum safexcel_eip_version version;
@@ -551,10 +552,8 @@ struct safexcel_crypto_priv {
551 struct crypto_queue queue; 552 struct crypto_queue queue;
552 spinlock_t queue_lock; 553 spinlock_t queue_lock;
553 554
554 /* Number of requests in the engine that needs the threshold 555 /* Number of requests in the engine. */
555 * interrupt to be set up. 556 int requests;
556 */
557 int requests_left;
558 557
559 /* The ring is currently handling at least one request */ 558 /* The ring is currently handling at least one request */
560 bool busy; 559 bool busy;
@@ -580,12 +579,6 @@ struct safexcel_context {
580 int ring; 579 int ring;
581 bool needs_inv; 580 bool needs_inv;
582 bool exit_inv; 581 bool exit_inv;
583
584 /* Used for ahash requests */
585 dma_addr_t result_dma;
586 void *cache;
587 dma_addr_t cache_dma;
588 unsigned int cache_sz;
589}; 582};
590 583
591/* 584/*
@@ -609,9 +602,6 @@ struct safexcel_inv_result {
609 602
610void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring); 603void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
611void safexcel_complete(struct safexcel_crypto_priv *priv, int ring); 604void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
612void safexcel_free_context(struct safexcel_crypto_priv *priv,
613 struct crypto_async_request *req,
614 int result_sz);
615int safexcel_invalidate_cache(struct crypto_async_request *async, 605int safexcel_invalidate_cache(struct crypto_async_request *async,
616 struct safexcel_crypto_priv *priv, 606 struct safexcel_crypto_priv *priv,
617 dma_addr_t ctxr_dma, int ring, 607 dma_addr_t ctxr_dma, int ring,
@@ -643,5 +633,7 @@ extern struct safexcel_alg_template safexcel_alg_sha1;
643extern struct safexcel_alg_template safexcel_alg_sha224; 633extern struct safexcel_alg_template safexcel_alg_sha224;
644extern struct safexcel_alg_template safexcel_alg_sha256; 634extern struct safexcel_alg_template safexcel_alg_sha256;
645extern struct safexcel_alg_template safexcel_alg_hmac_sha1; 635extern struct safexcel_alg_template safexcel_alg_hmac_sha1;
636extern struct safexcel_alg_template safexcel_alg_hmac_sha224;
637extern struct safexcel_alg_template safexcel_alg_hmac_sha256;
646 638
647#endif 639#endif
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 63a8768ed2ae..bafb60505fab 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -58,7 +58,8 @@ static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
58 58
59 token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; 59 token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
60 token[0].packet_length = length; 60 token[0].packet_length = length;
61 token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET; 61 token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET |
62 EIP197_TOKEN_STAT_LAST_HASH;
62 token[0].instructions = EIP197_TOKEN_INS_LAST | 63 token[0].instructions = EIP197_TOKEN_INS_LAST |
63 EIP197_TOKEN_INS_TYPE_CRYTO | 64 EIP197_TOKEN_INS_TYPE_CRYTO |
64 EIP197_TOKEN_INS_TYPE_OUTPUT; 65 EIP197_TOKEN_INS_TYPE_OUTPUT;
@@ -456,7 +457,7 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
456 queue_work(priv->ring[ring].workqueue, 457 queue_work(priv->ring[ring].workqueue,
457 &priv->ring[ring].work_data.work); 458 &priv->ring[ring].work_data.work);
458 459
459 wait_for_completion_interruptible(&result.completion); 460 wait_for_completion(&result.completion);
460 461
461 if (result.error) { 462 if (result.error) {
462 dev_warn(priv->dev, 463 dev_warn(priv->dev,
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 122a2a58e98f..317b9e480312 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -21,10 +21,9 @@ struct safexcel_ahash_ctx {
21 struct safexcel_crypto_priv *priv; 21 struct safexcel_crypto_priv *priv;
22 22
23 u32 alg; 23 u32 alg;
24 u32 digest;
25 24
26 u32 ipad[SHA1_DIGEST_SIZE / sizeof(u32)]; 25 u32 ipad[SHA256_DIGEST_SIZE / sizeof(u32)];
27 u32 opad[SHA1_DIGEST_SIZE / sizeof(u32)]; 26 u32 opad[SHA256_DIGEST_SIZE / sizeof(u32)];
28}; 27};
29 28
30struct safexcel_ahash_req { 29struct safexcel_ahash_req {
@@ -34,6 +33,9 @@ struct safexcel_ahash_req {
34 bool needs_inv; 33 bool needs_inv;
35 34
36 int nents; 35 int nents;
36 dma_addr_t result_dma;
37
38 u32 digest;
37 39
38 u8 state_sz; /* expected sate size, only set once */ 40 u8 state_sz; /* expected sate size, only set once */
39 u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32)); 41 u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
@@ -42,6 +44,9 @@ struct safexcel_ahash_req {
42 u64 processed; 44 u64 processed;
43 45
44 u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); 46 u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
47 dma_addr_t cache_dma;
48 unsigned int cache_sz;
49
45 u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); 50 u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
46}; 51};
47 52
@@ -49,6 +54,8 @@ struct safexcel_ahash_export_state {
49 u64 len; 54 u64 len;
50 u64 processed; 55 u64 processed;
51 56
57 u32 digest;
58
52 u32 state[SHA256_DIGEST_SIZE / sizeof(u32)]; 59 u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
53 u8 cache[SHA256_BLOCK_SIZE]; 60 u8 cache[SHA256_BLOCK_SIZE];
54}; 61};
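
These hunks move the digest type and the result/cache DMA state from safexcel_ahash_ctx (one per transform) into safexcel_ahash_req (one per request), and mirror the digest type in the export state. The point is that one transform can have several requests in flight, so anything that differs per request, in particular live DMA mappings, must not sit in the shared tfm context. A reduced sketch of the split, with field lists trimmed and DMA handles modelled as integers so the example stands alone:

/* Shared, per-transform data vs. per-request data. */
#include <stdint.h>
#include <stdio.h>

struct ahash_tfm_ctx {		/* one per transform, shared by requests */
	uint32_t alg;
	uint32_t ipad[8];
	uint32_t opad[8];
};

struct ahash_req_ctx {		/* one per request, safe to use concurrently */
	uint32_t digest;	/* precomputed vs. HMAC, per request     */
	uint64_t result_dma;	/* live DMA mappings are per request too */
	uint64_t cache_dma;
	unsigned int cache_sz;
};

int main(void)
{
	struct ahash_tfm_ctx tfm = { 0 };
	struct ahash_req_ctx reqs[2] = { { 0 }, { 0 } };

	/* Two requests on the same tfm no longer overwrite each other's
	 * DMA state.
	 */
	reqs[0].result_dma = 0x1000;
	reqs[1].result_dma = 0x2000;
	printf("alg=%u r0=%llx r1=%llx\n", tfm.alg,
	       (unsigned long long)reqs[0].result_dma,
	       (unsigned long long)reqs[1].result_dma);
	return 0;
}
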
@@ -82,9 +89,9 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
82 89
83 cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT; 90 cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
84 cdesc->control_data.control0 |= ctx->alg; 91 cdesc->control_data.control0 |= ctx->alg;
85 cdesc->control_data.control0 |= ctx->digest; 92 cdesc->control_data.control0 |= req->digest;
86 93
87 if (ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) { 94 if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
88 if (req->processed) { 95 if (req->processed) {
89 if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1) 96 if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
90 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6); 97 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
@@ -112,12 +119,12 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
112 if (req->finish) 119 if (req->finish)
113 ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize); 120 ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
114 } 121 }
115 } else if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) { 122 } else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
116 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(10); 123 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));
117 124
118 memcpy(ctx->base.ctxr->data, ctx->ipad, digestsize); 125 memcpy(ctx->base.ctxr->data, ctx->ipad, req->state_sz);
119 memcpy(ctx->base.ctxr->data + digestsize / sizeof(u32), 126 memcpy(ctx->base.ctxr->data + req->state_sz / sizeof(u32),
120 ctx->opad, digestsize); 127 ctx->opad, req->state_sz);
121 } 128 }
122} 129}
123 130
@@ -149,16 +156,26 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
149 safexcel_complete(priv, ring); 156 safexcel_complete(priv, ring);
150 spin_unlock_bh(&priv->ring[ring].egress_lock); 157 spin_unlock_bh(&priv->ring[ring].egress_lock);
151 158
152 if (sreq->finish)
153 memcpy(areq->result, sreq->state,
154 crypto_ahash_digestsize(ahash));
155
156 if (sreq->nents) { 159 if (sreq->nents) {
157 dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE); 160 dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
158 sreq->nents = 0; 161 sreq->nents = 0;
159 } 162 }
160 163
161 safexcel_free_context(priv, async, sreq->state_sz); 164 if (sreq->result_dma) {
165 dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
166 DMA_FROM_DEVICE);
167 sreq->result_dma = 0;
168 }
169
170 if (sreq->cache_dma) {
171 dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
172 DMA_TO_DEVICE);
173 sreq->cache_dma = 0;
174 }
175
176 if (sreq->finish)
177 memcpy(areq->result, sreq->state,
178 crypto_ahash_digestsize(ahash));
162 179
163 cache_len = sreq->len - sreq->processed; 180 cache_len = sreq->len - sreq->processed;
164 if (cache_len) 181 if (cache_len)
@@ -184,7 +201,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
184 int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0; 201 int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
185 202
186 queued = len = req->len - req->processed; 203 queued = len = req->len - req->processed;
187 if (queued < crypto_ahash_blocksize(ahash)) 204 if (queued <= crypto_ahash_blocksize(ahash))
188 cache_len = queued; 205 cache_len = queued;
189 else 206 else
190 cache_len = queued - areq->nbytes; 207 cache_len = queued - areq->nbytes;
@@ -198,7 +215,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
198 /* If this is not the last request and the queued data 215 /* If this is not the last request and the queued data
199 * is a multiple of a block, cache the last one for now. 216 * is a multiple of a block, cache the last one for now.
200 */ 217 */
201 extra = queued - crypto_ahash_blocksize(ahash); 218 extra = crypto_ahash_blocksize(ahash);
202 219
203 if (extra) { 220 if (extra) {
204 sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), 221 sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
@@ -220,24 +237,17 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
220 237
221 /* Add a command descriptor for the cached data, if any */ 238 /* Add a command descriptor for the cached data, if any */
222 if (cache_len) { 239 if (cache_len) {
223 ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async)); 240 req->cache_dma = dma_map_single(priv->dev, req->cache,
224 if (!ctx->base.cache) { 241 cache_len, DMA_TO_DEVICE);
225 ret = -ENOMEM; 242 if (dma_mapping_error(priv->dev, req->cache_dma)) {
226 goto unlock; 243 spin_unlock_bh(&priv->ring[ring].egress_lock);
227 } 244 return -EINVAL;
228 memcpy(ctx->base.cache, req->cache, cache_len);
229 ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache,
230 cache_len, DMA_TO_DEVICE);
231 if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) {
232 ret = -EINVAL;
233 goto free_cache;
234 } 245 }
235 246
236 ctx->base.cache_sz = cache_len; 247 req->cache_sz = cache_len;
237 first_cdesc = safexcel_add_cdesc(priv, ring, 1, 248 first_cdesc = safexcel_add_cdesc(priv, ring, 1,
238 (cache_len == len), 249 (cache_len == len),
239 ctx->base.cache_dma, 250 req->cache_dma, cache_len, len,
240 cache_len, len,
241 ctx->base.ctxr_dma); 251 ctx->base.ctxr_dma);
242 if (IS_ERR(first_cdesc)) { 252 if (IS_ERR(first_cdesc)) {
243 ret = PTR_ERR(first_cdesc); 253 ret = PTR_ERR(first_cdesc);
@@ -271,7 +281,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
271 sglen, len, ctx->base.ctxr_dma); 281 sglen, len, ctx->base.ctxr_dma);
272 if (IS_ERR(cdesc)) { 282 if (IS_ERR(cdesc)) {
273 ret = PTR_ERR(cdesc); 283 ret = PTR_ERR(cdesc);
274 goto cdesc_rollback; 284 goto unmap_sg;
275 } 285 }
276 n_cdesc++; 286 n_cdesc++;
277 287
@@ -291,19 +301,19 @@ send_command:
291 /* Add the token */ 301 /* Add the token */
292 safexcel_hash_token(first_cdesc, len, req->state_sz); 302 safexcel_hash_token(first_cdesc, len, req->state_sz);
293 303
294 ctx->base.result_dma = dma_map_single(priv->dev, req->state, 304 req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
295 req->state_sz, DMA_FROM_DEVICE); 305 DMA_FROM_DEVICE);
296 if (dma_mapping_error(priv->dev, ctx->base.result_dma)) { 306 if (dma_mapping_error(priv->dev, req->result_dma)) {
297 ret = -EINVAL; 307 ret = -EINVAL;
298 goto cdesc_rollback; 308 goto unmap_sg;
299 } 309 }
300 310
301 /* Add a result descriptor */ 311 /* Add a result descriptor */
302 rdesc = safexcel_add_rdesc(priv, ring, 1, 1, ctx->base.result_dma, 312 rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
303 req->state_sz); 313 req->state_sz);
304 if (IS_ERR(rdesc)) { 314 if (IS_ERR(rdesc)) {
305 ret = PTR_ERR(rdesc); 315 ret = PTR_ERR(rdesc);
306 goto cdesc_rollback; 316 goto unmap_result;
307 } 317 }
308 318
309 spin_unlock_bh(&priv->ring[ring].egress_lock); 319 spin_unlock_bh(&priv->ring[ring].egress_lock);
@@ -315,20 +325,21 @@ send_command:
315 *results = 1; 325 *results = 1;
316 return 0; 326 return 0;
317 327
328unmap_result:
329 dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
330 DMA_FROM_DEVICE);
331unmap_sg:
332 dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
318cdesc_rollback: 333cdesc_rollback:
319 for (i = 0; i < n_cdesc; i++) 334 for (i = 0; i < n_cdesc; i++)
320 safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr); 335 safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
321unmap_cache: 336unmap_cache:
322 if (ctx->base.cache_dma) { 337 if (req->cache_dma) {
323 dma_unmap_single(priv->dev, ctx->base.cache_dma, 338 dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
324 ctx->base.cache_sz, DMA_TO_DEVICE); 339 DMA_TO_DEVICE);
325 ctx->base.cache_sz = 0; 340 req->cache_sz = 0;
326 } 341 }
327free_cache:
328 kfree(ctx->base.cache);
329 ctx->base.cache = NULL;
330 342
331unlock:
332 spin_unlock_bh(&priv->ring[ring].egress_lock); 343 spin_unlock_bh(&priv->ring[ring].egress_lock);
333 return ret; 344 return ret;
334} 345}
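
The reworked error path in safexcel_ahash_send_req() is a standard goto-based unwind: each resource acquired later jumps to a label placed earlier in the cleanup list (unmap_result, then unmap_sg, then cdesc_rollback, then unmap_cache), so a failure at any step releases exactly what has been set up so far. A self-contained skeleton of that shape, with the DMA mappings and descriptors replaced by stubs and the acquisition order slightly simplified:

/* Skeleton of ordered goto-based cleanup: later acquisitions unwind first. */
#include <stdio.h>

static int acquire(const char *what, int fail)
{
	if (fail) {
		printf("failed to acquire %s\n", what);
		return -1;
	}
	printf("acquired %s\n", what);
	return 0;
}

static void release(const char *what)
{
	printf("released %s\n", what);
}

static int send_req(int fail_at)
{
	int ret;

	ret = acquire("cache mapping", fail_at == 1);
	if (ret)
		goto out;
	ret = acquire("command descriptors", fail_at == 2);
	if (ret)
		goto unmap_cache;
	ret = acquire("source sg mapping", fail_at == 3);
	if (ret)
		goto rollback_cdesc;
	ret = acquire("result mapping", fail_at == 4);
	if (ret)
		goto unmap_sg;

	return 0;	/* success: the completion handler unmaps later */

unmap_sg:
	release("source sg mapping");
rollback_cdesc:
	release("command descriptors");
unmap_cache:
	release("cache mapping");
out:
	return ret;
}

int main(void)
{
	return send_req(4) ? 1 : 0;
}
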
@@ -493,7 +504,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
493 queue_work(priv->ring[ring].workqueue, 504 queue_work(priv->ring[ring].workqueue,
494 &priv->ring[ring].work_data.work); 505 &priv->ring[ring].work_data.work);
495 506
496 wait_for_completion_interruptible(&result.completion); 507 wait_for_completion(&result.completion);
497 508
498 if (result.error) { 509 if (result.error) {
499 dev_warn(priv->dev, "hash: completion error (%d)\n", 510 dev_warn(priv->dev, "hash: completion error (%d)\n",
@@ -550,7 +561,7 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
550 if (ctx->base.ctxr) { 561 if (ctx->base.ctxr) {
551 if (priv->version == EIP197 && 562 if (priv->version == EIP197 &&
552 !ctx->base.needs_inv && req->processed && 563 !ctx->base.needs_inv && req->processed &&
553 ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) 564 req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
554 /* We're still setting needs_inv here, even though it is 565 /* We're still setting needs_inv here, even though it is
555 * cleared right away, because the needs_inv flag can be 566 * cleared right away, because the needs_inv flag can be
556 * set in other functions and we want to keep the same 567 * set in other functions and we want to keep the same
@@ -585,7 +596,6 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
585 596
586static int safexcel_ahash_update(struct ahash_request *areq) 597static int safexcel_ahash_update(struct ahash_request *areq)
587{ 598{
588 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
589 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 599 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
590 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); 600 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
591 601
@@ -601,7 +611,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
601 * We're not doing partial updates when performing an hmac request. 611 * We're not doing partial updates when performing an hmac request.
602 * Everything will be handled by the final() call. 612 * Everything will be handled by the final() call.
603 */ 613 */
604 if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) 614 if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
605 return 0; 615 return 0;
606 616
607 if (req->hmac) 617 if (req->hmac)
@@ -660,6 +670,8 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
660 export->len = req->len; 670 export->len = req->len;
661 export->processed = req->processed; 671 export->processed = req->processed;
662 672
673 export->digest = req->digest;
674
663 memcpy(export->state, req->state, req->state_sz); 675 memcpy(export->state, req->state, req->state_sz);
664 memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash)); 676 memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
665 677
@@ -680,6 +692,8 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
680 req->len = export->len; 692 req->len = export->len;
681 req->processed = export->processed; 693 req->processed = export->processed;
682 694
695 req->digest = export->digest;
696
683 memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash)); 697 memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
684 memcpy(req->state, export->state, req->state_sz); 698 memcpy(req->state, export->state, req->state_sz);
685 699
@@ -716,7 +730,7 @@ static int safexcel_sha1_init(struct ahash_request *areq)
716 req->state[4] = SHA1_H4; 730 req->state[4] = SHA1_H4;
717 731
718 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1; 732 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
719 ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; 733 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
720 req->state_sz = SHA1_DIGEST_SIZE; 734 req->state_sz = SHA1_DIGEST_SIZE;
721 735
722 return 0; 736 return 0;
@@ -783,10 +797,10 @@ struct safexcel_alg_template safexcel_alg_sha1 = {
783 797
784static int safexcel_hmac_sha1_init(struct ahash_request *areq) 798static int safexcel_hmac_sha1_init(struct ahash_request *areq)
785{ 799{
786 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); 800 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
787 801
788 safexcel_sha1_init(areq); 802 safexcel_sha1_init(areq);
789 ctx->digest = CONTEXT_CONTROL_DIGEST_HMAC; 803 req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
790 return 0; 804 return 0;
791} 805}
792 806
@@ -839,7 +853,7 @@ static int safexcel_hmac_init_pad(struct ahash_request *areq,
839 init_completion(&result.completion); 853 init_completion(&result.completion);
840 854
841 ret = crypto_ahash_digest(areq); 855 ret = crypto_ahash_digest(areq);
842 if (ret == -EINPROGRESS) { 856 if (ret == -EINPROGRESS || ret == -EBUSY) {
843 wait_for_completion_interruptible(&result.completion); 857 wait_for_completion_interruptible(&result.completion);
844 ret = result.error; 858 ret = result.error;
845 } 859 }
@@ -949,20 +963,21 @@ free_ahash:
949 return ret; 963 return ret;
950} 964}
951 965
952static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, 966static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
953 unsigned int keylen) 967 unsigned int keylen, const char *alg,
968 unsigned int state_sz)
954{ 969{
955 struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); 970 struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
956 struct safexcel_crypto_priv *priv = ctx->priv; 971 struct safexcel_crypto_priv *priv = ctx->priv;
957 struct safexcel_ahash_export_state istate, ostate; 972 struct safexcel_ahash_export_state istate, ostate;
958 int ret, i; 973 int ret, i;
959 974
960 ret = safexcel_hmac_setkey("safexcel-sha1", key, keylen, &istate, &ostate); 975 ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
961 if (ret) 976 if (ret)
962 return ret; 977 return ret;
963 978
964 if (priv->version == EIP197 && ctx->base.ctxr) { 979 if (priv->version == EIP197 && ctx->base.ctxr) {
965 for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) { 980 for (i = 0; i < state_sz / sizeof(u32); i++) {
966 if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || 981 if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
967 ctx->opad[i] != le32_to_cpu(ostate.state[i])) { 982 ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
968 ctx->base.needs_inv = true; 983 ctx->base.needs_inv = true;
@@ -971,12 +986,19 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
971 } 986 }
972 } 987 }
973 988
974 memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE); 989 memcpy(ctx->ipad, &istate.state, state_sz);
975 memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE); 990 memcpy(ctx->opad, &ostate.state, state_sz);
976 991
977 return 0; 992 return 0;
978} 993}
979 994
995static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
996 unsigned int keylen)
997{
998 return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha1",
999 SHA1_DIGEST_SIZE);
1000}
1001
980struct safexcel_alg_template safexcel_alg_hmac_sha1 = { 1002struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
981 .type = SAFEXCEL_ALG_TYPE_AHASH, 1003 .type = SAFEXCEL_ALG_TYPE_AHASH,
982 .alg.ahash = { 1004 .alg.ahash = {
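
safexcel_hmac_sha1_setkey() is reduced to a thin wrapper around safexcel_hmac_alg_setkey(), which takes the fallback driver name and the state size as parameters so the sha224/sha256 variants added later in this file can share the ipad/opad precomputation and the needs-invalidation check. The wrapper shape in isolation (the driver names match the patch, everything else is stubbed for a standalone build):

/* Shape of the shared setkey helper: per-algorithm wrappers only supply
 * the fallback driver name and the digest/state size.
 */
#include <stdio.h>

#define SHA1_DIGEST_SIZE   20
#define SHA256_DIGEST_SIZE 32

static int hmac_alg_setkey(const unsigned char *key, unsigned int keylen,
			   const char *alg, unsigned int state_sz)
{
	(void)key;
	/* 1. run the fallback "alg" over ipad/opad to get the precomputed
	 *    state (state_sz bytes each side)
	 * 2. if a hardware context already exists and the state changed,
	 *    mark it for invalidation
	 */
	printf("setkey via %s, %u-byte state, %u-byte key\n",
	       alg, state_sz, keylen);
	return 0;
}

static int hmac_sha1_setkey(const unsigned char *key, unsigned int keylen)
{
	return hmac_alg_setkey(key, keylen, "safexcel-sha1",
			       SHA1_DIGEST_SIZE);
}

static int hmac_sha256_setkey(const unsigned char *key, unsigned int keylen)
{
	return hmac_alg_setkey(key, keylen, "safexcel-sha256",
			       SHA256_DIGEST_SIZE);
}

int main(void)
{
	unsigned char key[16] = { 0 };

	hmac_sha1_setkey(key, sizeof(key));
	hmac_sha256_setkey(key, sizeof(key));
	return 0;
}
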
@@ -1024,7 +1046,7 @@ static int safexcel_sha256_init(struct ahash_request *areq)
1024 req->state[7] = SHA256_H7; 1046 req->state[7] = SHA256_H7;
1025 1047
1026 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256; 1048 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
1027 ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; 1049 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1028 req->state_sz = SHA256_DIGEST_SIZE; 1050 req->state_sz = SHA256_DIGEST_SIZE;
1029 1051
1030 return 0; 1052 return 0;
@@ -1086,7 +1108,7 @@ static int safexcel_sha224_init(struct ahash_request *areq)
1086 req->state[7] = SHA224_H7; 1108 req->state[7] = SHA224_H7;
1087 1109
1088 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224; 1110 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
1089 ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; 1111 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1090 req->state_sz = SHA256_DIGEST_SIZE; 1112 req->state_sz = SHA256_DIGEST_SIZE;
1091 1113
1092 return 0; 1114 return 0;
@@ -1130,3 +1152,115 @@ struct safexcel_alg_template safexcel_alg_sha224 = {
1130 }, 1152 },
1131 }, 1153 },
1132}; 1154};
1155
1156static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key,
1157 unsigned int keylen)
1158{
1159 return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha224",
1160 SHA256_DIGEST_SIZE);
1161}
1162
1163static int safexcel_hmac_sha224_init(struct ahash_request *areq)
1164{
1165 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1166
1167 safexcel_sha224_init(areq);
1168 req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
1169 return 0;
1170}
1171
1172static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
1173{
1174 int ret = safexcel_hmac_sha224_init(areq);
1175
1176 if (ret)
1177 return ret;
1178
1179 return safexcel_ahash_finup(areq);
1180}
1181
1182struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
1183 .type = SAFEXCEL_ALG_TYPE_AHASH,
1184 .alg.ahash = {
1185 .init = safexcel_hmac_sha224_init,
1186 .update = safexcel_ahash_update,
1187 .final = safexcel_ahash_final,
1188 .finup = safexcel_ahash_finup,
1189 .digest = safexcel_hmac_sha224_digest,
1190 .setkey = safexcel_hmac_sha224_setkey,
1191 .export = safexcel_ahash_export,
1192 .import = safexcel_ahash_import,
1193 .halg = {
1194 .digestsize = SHA224_DIGEST_SIZE,
1195 .statesize = sizeof(struct safexcel_ahash_export_state),
1196 .base = {
1197 .cra_name = "hmac(sha224)",
1198 .cra_driver_name = "safexcel-hmac-sha224",
1199 .cra_priority = 300,
1200 .cra_flags = CRYPTO_ALG_ASYNC |
1201 CRYPTO_ALG_KERN_DRIVER_ONLY,
1202 .cra_blocksize = SHA224_BLOCK_SIZE,
1203 .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1204 .cra_init = safexcel_ahash_cra_init,
1205 .cra_exit = safexcel_ahash_cra_exit,
1206 .cra_module = THIS_MODULE,
1207 },
1208 },
1209 },
1210};
1211
1212static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
1213 unsigned int keylen)
1214{
1215 return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha256",
1216 SHA256_DIGEST_SIZE);
1217}
1218
1219static int safexcel_hmac_sha256_init(struct ahash_request *areq)
1220{
1221 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1222
1223 safexcel_sha256_init(areq);
1224 req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
1225 return 0;
1226}
1227
1228static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
1229{
1230 int ret = safexcel_hmac_sha256_init(areq);
1231
1232 if (ret)
1233 return ret;
1234
1235 return safexcel_ahash_finup(areq);
1236}
1237
1238struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
1239 .type = SAFEXCEL_ALG_TYPE_AHASH,
1240 .alg.ahash = {
1241 .init = safexcel_hmac_sha256_init,
1242 .update = safexcel_ahash_update,
1243 .final = safexcel_ahash_final,
1244 .finup = safexcel_ahash_finup,
1245 .digest = safexcel_hmac_sha256_digest,
1246 .setkey = safexcel_hmac_sha256_setkey,
1247 .export = safexcel_ahash_export,
1248 .import = safexcel_ahash_import,
1249 .halg = {
1250 .digestsize = SHA256_DIGEST_SIZE,
1251 .statesize = sizeof(struct safexcel_ahash_export_state),
1252 .base = {
1253 .cra_name = "hmac(sha256)",
1254 .cra_driver_name = "safexcel-hmac-sha256",
1255 .cra_priority = 300,
1256 .cra_flags = CRYPTO_ALG_ASYNC |
1257 CRYPTO_ALG_KERN_DRIVER_ONLY,
1258 .cra_blocksize = SHA256_BLOCK_SIZE,
1259 .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1260 .cra_init = safexcel_ahash_cra_init,
1261 .cra_exit = safexcel_ahash_cra_exit,
1262 .cra_module = THIS_MODULE,
1263 },
1264 },
1265 },
1266};
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 717a26607bdb..27f7dad2d45d 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1167,9 +1167,11 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
1167 ctx->authkey_len = keys.authkeylen; 1167 ctx->authkey_len = keys.authkeylen;
1168 ctx->enckey_len = keys.enckeylen; 1168 ctx->enckey_len = keys.enckeylen;
1169 1169
1170 memzero_explicit(&keys, sizeof(keys));
1170 return aead_setup(tfm, crypto_aead_authsize(tfm)); 1171 return aead_setup(tfm, crypto_aead_authsize(tfm));
1171badkey: 1172badkey:
1172 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 1173 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1174 memzero_explicit(&keys, sizeof(keys));
1173 return -EINVAL; 1175 return -EINVAL;
1174} 1176}
1175 1177
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index aca2373fa1de..f81fa4a3e66b 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -25,7 +25,6 @@
25#include <linux/scatterlist.h> 25#include <linux/scatterlist.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */
29#include <linux/clk.h> 28#include <linux/clk.h>
30#include <linux/of.h> 29#include <linux/of.h>
31#include <linux/of_platform.h> 30#include <linux/of_platform.h>
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 764be3e6933c..a10c418d4e5c 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -759,6 +759,16 @@ static int dcp_sha_digest(struct ahash_request *req)
759 return dcp_sha_finup(req); 759 return dcp_sha_finup(req);
760} 760}
761 761
762static int dcp_sha_noimport(struct ahash_request *req, const void *in)
763{
764 return -ENOSYS;
765}
766
767static int dcp_sha_noexport(struct ahash_request *req, void *out)
768{
769 return -ENOSYS;
770}
771
762static int dcp_sha_cra_init(struct crypto_tfm *tfm) 772static int dcp_sha_cra_init(struct crypto_tfm *tfm)
763{ 773{
764 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 774 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
@@ -829,6 +839,8 @@ static struct ahash_alg dcp_sha1_alg = {
829 .final = dcp_sha_final, 839 .final = dcp_sha_final,
830 .finup = dcp_sha_finup, 840 .finup = dcp_sha_finup,
831 .digest = dcp_sha_digest, 841 .digest = dcp_sha_digest,
842 .import = dcp_sha_noimport,
843 .export = dcp_sha_noexport,
832 .halg = { 844 .halg = {
833 .digestsize = SHA1_DIGEST_SIZE, 845 .digestsize = SHA1_DIGEST_SIZE,
834 .base = { 846 .base = {
@@ -853,6 +865,8 @@ static struct ahash_alg dcp_sha256_alg = {
853 .final = dcp_sha_final, 865 .final = dcp_sha_final,
854 .finup = dcp_sha_finup, 866 .finup = dcp_sha_finup,
855 .digest = dcp_sha_digest, 867 .digest = dcp_sha_digest,
868 .import = dcp_sha_noimport,
869 .export = dcp_sha_noexport,
856 .halg = { 870 .halg = {
857 .digestsize = SHA256_DIGEST_SIZE, 871 .digestsize = SHA256_DIGEST_SIZE,
858 .base = { 872 .base = {
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 662e709812cc..80e9c842aad4 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -359,6 +359,16 @@ static int n2_hash_async_finup(struct ahash_request *req)
359 return crypto_ahash_finup(&rctx->fallback_req); 359 return crypto_ahash_finup(&rctx->fallback_req);
360} 360}
361 361
362static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
363{
364 return -ENOSYS;
365}
366
367static int n2_hash_async_noexport(struct ahash_request *req, void *out)
368{
369 return -ENOSYS;
370}
371
362static int n2_hash_cra_init(struct crypto_tfm *tfm) 372static int n2_hash_cra_init(struct crypto_tfm *tfm)
363{ 373{
364 const char *fallback_driver_name = crypto_tfm_alg_name(tfm); 374 const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
@@ -1467,6 +1477,8 @@ static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
1467 ahash->final = n2_hash_async_final; 1477 ahash->final = n2_hash_async_final;
1468 ahash->finup = n2_hash_async_finup; 1478 ahash->finup = n2_hash_async_finup;
1469 ahash->digest = n2_hash_async_digest; 1479 ahash->digest = n2_hash_async_digest;
1480 ahash->export = n2_hash_async_noexport;
1481 ahash->import = n2_hash_async_noimport;
1470 1482
1471 halg = &ahash->halg; 1483 halg = &ahash->halg;
1472 halg->digestsize = tmpl->digest_size; 1484 halg->digestsize = tmpl->digest_size;
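The mxs-dcp and n2 changes above give those ahash implementations explicit import/export callbacks that simply return -ENOSYS instead of leaving the hooks unset. A minimal caller-side sketch, not from this series and with a hypothetical example_try_export(), of what that means for code that tries to snapshot partial hash state:

#include <crypto/hash.h>
#include <linux/slab.h>

static int example_try_export(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	void *state;
	int ret;

	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	ret = crypto_ahash_export(req, state);
	if (ret == -ENOSYS)
		pr_info("driver cannot export hash state, rehash from scratch\n");

	kfree(state);
	return ret;
}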
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
index bf52cd1d7fca..66869976cfa2 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -1105,10 +1105,9 @@ static int __init nx842_pseries_init(void)
1105 1105
1106 RCU_INIT_POINTER(devdata, NULL); 1106 RCU_INIT_POINTER(devdata, NULL);
1107 new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL); 1107 new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
1108 if (!new_devdata) { 1108 if (!new_devdata)
1109 pr_err("Could not allocate memory for device data\n");
1110 return -ENOMEM; 1109 return -ENOMEM;
1111 } 1110
1112 RCU_INIT_POINTER(devdata, new_devdata); 1111 RCU_INIT_POINTER(devdata, new_devdata);
1113 1112
1114 ret = vio_register_driver(&nx842_vio_driver); 1113 ret = vio_register_driver(&nx842_vio_driver);
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index fbec0a2e76dd..9019f6b67986 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -47,6 +47,8 @@
47static LIST_HEAD(dev_list); 47static LIST_HEAD(dev_list);
48static DEFINE_SPINLOCK(list_lock); 48static DEFINE_SPINLOCK(list_lock);
49 49
50static int aes_fallback_sz = 200;
51
50#ifdef DEBUG 52#ifdef DEBUG
51#define omap_aes_read(dd, offset) \ 53#define omap_aes_read(dd, offset) \
52({ \ 54({ \
@@ -388,7 +390,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
388 390
389 pr_debug("err: %d\n", err); 391 pr_debug("err: %d\n", err);
390 392
391 crypto_finalize_cipher_request(dd->engine, req, err); 393 crypto_finalize_ablkcipher_request(dd->engine, req, err);
392 394
393 pm_runtime_mark_last_busy(dd->dev); 395 pm_runtime_mark_last_busy(dd->dev);
394 pm_runtime_put_autosuspend(dd->dev); 396 pm_runtime_put_autosuspend(dd->dev);
@@ -408,14 +410,15 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
408 struct ablkcipher_request *req) 410 struct ablkcipher_request *req)
409{ 411{
410 if (req) 412 if (req)
411 return crypto_transfer_cipher_request_to_engine(dd->engine, req); 413 return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
412 414
413 return 0; 415 return 0;
414} 416}
415 417
416static int omap_aes_prepare_req(struct crypto_engine *engine, 418static int omap_aes_prepare_req(struct crypto_engine *engine,
417 struct ablkcipher_request *req) 419 void *areq)
418{ 420{
421 struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
419 struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx( 422 struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
420 crypto_ablkcipher_reqtfm(req)); 423 crypto_ablkcipher_reqtfm(req));
421 struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req); 424 struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
@@ -468,8 +471,9 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
468} 471}
469 472
470static int omap_aes_crypt_req(struct crypto_engine *engine, 473static int omap_aes_crypt_req(struct crypto_engine *engine,
471 struct ablkcipher_request *req) 474 void *areq)
472{ 475{
476 struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
473 struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req); 477 struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
474 struct omap_aes_dev *dd = rctx->dd; 478 struct omap_aes_dev *dd = rctx->dd;
475 479
@@ -517,7 +521,7 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
517 !!(mode & FLAGS_ENCRYPT), 521 !!(mode & FLAGS_ENCRYPT),
518 !!(mode & FLAGS_CBC)); 522 !!(mode & FLAGS_CBC));
519 523
520 if (req->nbytes < 200) { 524 if (req->nbytes < aes_fallback_sz) {
521 SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); 525 SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
522 526
523 skcipher_request_set_tfm(subreq, ctx->fallback); 527 skcipher_request_set_tfm(subreq, ctx->fallback);
@@ -601,6 +605,11 @@ static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
601 return omap_aes_crypt(req, FLAGS_CTR); 605 return omap_aes_crypt(req, FLAGS_CTR);
602} 606}
603 607
608static int omap_aes_prepare_req(struct crypto_engine *engine,
609 void *req);
610static int omap_aes_crypt_req(struct crypto_engine *engine,
611 void *req);
612
604static int omap_aes_cra_init(struct crypto_tfm *tfm) 613static int omap_aes_cra_init(struct crypto_tfm *tfm)
605{ 614{
606 const char *name = crypto_tfm_alg_name(tfm); 615 const char *name = crypto_tfm_alg_name(tfm);
@@ -616,6 +625,10 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm)
616 625
617 tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx); 626 tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);
618 627
628 ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
629 ctx->enginectx.op.unprepare_request = NULL;
630 ctx->enginectx.op.do_one_request = omap_aes_crypt_req;
631
619 return 0; 632 return 0;
620} 633}
621 634
@@ -1029,6 +1042,87 @@ err:
1029 return err; 1042 return err;
1030} 1043}
1031 1044
1045static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
1046 char *buf)
1047{
1048 return sprintf(buf, "%d\n", aes_fallback_sz);
1049}
1050
1051static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
1052 const char *buf, size_t size)
1053{
1054 ssize_t status;
1055 long value;
1056
1057 status = kstrtol(buf, 0, &value);
1058 if (status)
1059 return status;
1060
1061 	/* HW accelerator only works with buffers >= 9 */
1062 if (value < 9) {
1063 dev_err(dev, "minimum fallback size 9\n");
1064 return -EINVAL;
1065 }
1066
1067 aes_fallback_sz = value;
1068
1069 return size;
1070}
1071
1072static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
1073 char *buf)
1074{
1075 struct omap_aes_dev *dd = dev_get_drvdata(dev);
1076
1077 return sprintf(buf, "%d\n", dd->engine->queue.max_qlen);
1078}
1079
1080static ssize_t queue_len_store(struct device *dev,
1081 struct device_attribute *attr, const char *buf,
1082 size_t size)
1083{
1084 struct omap_aes_dev *dd;
1085 ssize_t status;
1086 long value;
1087 unsigned long flags;
1088
1089 status = kstrtol(buf, 0, &value);
1090 if (status)
1091 return status;
1092
1093 if (value < 1)
1094 return -EINVAL;
1095
1096 /*
1097 	 * Changing the queue size on the fly is safe; if the size becomes
1098 	 * smaller than the current size, the queue simply stops accepting new
1099 	 * entries until it has shrunk enough.
1100 */
1101 spin_lock_bh(&list_lock);
1102 list_for_each_entry(dd, &dev_list, list) {
1103 spin_lock_irqsave(&dd->lock, flags);
1104 dd->engine->queue.max_qlen = value;
1105 dd->aead_queue.base.max_qlen = value;
1106 spin_unlock_irqrestore(&dd->lock, flags);
1107 }
1108 spin_unlock_bh(&list_lock);
1109
1110 return size;
1111}
1112
1113static DEVICE_ATTR_RW(queue_len);
1114static DEVICE_ATTR_RW(fallback);
1115
1116static struct attribute *omap_aes_attrs[] = {
1117 &dev_attr_queue_len.attr,
1118 &dev_attr_fallback.attr,
1119 NULL,
1120};
1121
1122static struct attribute_group omap_aes_attr_group = {
1123 .attrs = omap_aes_attrs,
1124};
1125
1032static int omap_aes_probe(struct platform_device *pdev) 1126static int omap_aes_probe(struct platform_device *pdev)
1033{ 1127{
1034 struct device *dev = &pdev->dev; 1128 struct device *dev = &pdev->dev;
@@ -1119,8 +1213,6 @@ static int omap_aes_probe(struct platform_device *pdev)
1119 goto err_engine; 1213 goto err_engine;
1120 } 1214 }
1121 1215
1122 dd->engine->prepare_cipher_request = omap_aes_prepare_req;
1123 dd->engine->cipher_one_request = omap_aes_crypt_req;
1124 err = crypto_engine_start(dd->engine); 1216 err = crypto_engine_start(dd->engine);
1125 if (err) 1217 if (err)
1126 goto err_engine; 1218 goto err_engine;
@@ -1159,6 +1251,12 @@ static int omap_aes_probe(struct platform_device *pdev)
1159 } 1251 }
1160 } 1252 }
1161 1253
1254 err = sysfs_create_group(&dev->kobj, &omap_aes_attr_group);
1255 if (err) {
1256 dev_err(dev, "could not create sysfs device attrs\n");
1257 goto err_aead_algs;
1258 }
1259
1162 return 0; 1260 return 0;
1163err_aead_algs: 1261err_aead_algs:
1164 for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) { 1262 for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
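The omap-aes changes above export two runtime tunables, fallback and queue_len, through sysfs. A generic sketch of the mechanism, using a hypothetical "example" attribute rather than the driver's own: DEVICE_ATTR_RW(name) expects name_show()/name_store() and emits dev_attr_name, which is collected into an attribute_group and registered against the device's kobject (here with sysfs_create_group(), as the probe path above does).

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static int example_val = 200;	/* hypothetical tunable */

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", example_val);
}

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t size)
{
	long value;
	int ret;

	ret = kstrtol(buf, 0, &value);
	if (ret)
		return ret;

	example_val = value;	/* range checking would go here */
	return size;
}

static DEVICE_ATTR_RW(example);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};

static const struct attribute_group example_attr_group = {
	.attrs = example_attrs,
};

/* in probe(): err = sysfs_create_group(&dev->kobj, &example_attr_group); */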
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 8906342e2b9a..fc3b46a85809 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -13,6 +13,8 @@
13#ifndef __OMAP_AES_H__ 13#ifndef __OMAP_AES_H__
14#define __OMAP_AES_H__ 14#define __OMAP_AES_H__
15 15
16#include <crypto/engine.h>
17
16#define DST_MAXBURST 4 18#define DST_MAXBURST 4
17#define DMA_MIN (DST_MAXBURST * sizeof(u32)) 19#define DMA_MIN (DST_MAXBURST * sizeof(u32))
18 20
@@ -95,6 +97,7 @@ struct omap_aes_gcm_result {
95}; 97};
96 98
97struct omap_aes_ctx { 99struct omap_aes_ctx {
100 struct crypto_engine_ctx enginectx;
98 int keylen; 101 int keylen;
99 u32 key[AES_KEYSIZE_256 / sizeof(u32)]; 102 u32 key[AES_KEYSIZE_256 / sizeof(u32)];
100 u8 nonce[4]; 103 u8 nonce[4];
diff --git a/drivers/crypto/omap-crypto.c b/drivers/crypto/omap-crypto.c
index 23e37779317e..2c42e4b4a6e9 100644
--- a/drivers/crypto/omap-crypto.c
+++ b/drivers/crypto/omap-crypto.c
@@ -104,6 +104,10 @@ static int omap_crypto_check_sg(struct scatterlist *sg, int total, int bs,
104 return OMAP_CRYPTO_NOT_ALIGNED; 104 return OMAP_CRYPTO_NOT_ALIGNED;
105 if (!IS_ALIGNED(sg->length, bs)) 105 if (!IS_ALIGNED(sg->length, bs))
106 return OMAP_CRYPTO_NOT_ALIGNED; 106 return OMAP_CRYPTO_NOT_ALIGNED;
107#ifdef CONFIG_ZONE_DMA
108 if (page_zonenum(sg_page(sg)) != ZONE_DMA)
109 return OMAP_CRYPTO_NOT_ALIGNED;
110#endif
107 111
108 len += sg->length; 112 len += sg->length;
109 sg = sg_next(sg); 113 sg = sg_next(sg);
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index ebc5c0f11f03..eb95b0d7f184 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -86,6 +86,7 @@
86#define FLAGS_OUT_DATA_ST_SHIFT 10 86#define FLAGS_OUT_DATA_ST_SHIFT 10
87 87
88struct omap_des_ctx { 88struct omap_des_ctx {
89 struct crypto_engine_ctx enginectx;
89 struct omap_des_dev *dd; 90 struct omap_des_dev *dd;
90 91
91 int keylen; 92 int keylen;
@@ -498,7 +499,7 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err)
498 499
499 pr_debug("err: %d\n", err); 500 pr_debug("err: %d\n", err);
500 501
501 crypto_finalize_cipher_request(dd->engine, req, err); 502 crypto_finalize_ablkcipher_request(dd->engine, req, err);
502 503
503 pm_runtime_mark_last_busy(dd->dev); 504 pm_runtime_mark_last_busy(dd->dev);
504 pm_runtime_put_autosuspend(dd->dev); 505 pm_runtime_put_autosuspend(dd->dev);
@@ -520,14 +521,15 @@ static int omap_des_handle_queue(struct omap_des_dev *dd,
520 struct ablkcipher_request *req) 521 struct ablkcipher_request *req)
521{ 522{
522 if (req) 523 if (req)
523 return crypto_transfer_cipher_request_to_engine(dd->engine, req); 524 return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);
524 525
525 return 0; 526 return 0;
526} 527}
527 528
528static int omap_des_prepare_req(struct crypto_engine *engine, 529static int omap_des_prepare_req(struct crypto_engine *engine,
529 struct ablkcipher_request *req) 530 void *areq)
530{ 531{
532 struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
531 struct omap_des_ctx *ctx = crypto_ablkcipher_ctx( 533 struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
532 crypto_ablkcipher_reqtfm(req)); 534 crypto_ablkcipher_reqtfm(req));
533 struct omap_des_dev *dd = omap_des_find_dev(ctx); 535 struct omap_des_dev *dd = omap_des_find_dev(ctx);
@@ -582,8 +584,9 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
582} 584}
583 585
584static int omap_des_crypt_req(struct crypto_engine *engine, 586static int omap_des_crypt_req(struct crypto_engine *engine,
585 struct ablkcipher_request *req) 587 void *areq)
586{ 588{
589 struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
587 struct omap_des_ctx *ctx = crypto_ablkcipher_ctx( 590 struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
588 crypto_ablkcipher_reqtfm(req)); 591 crypto_ablkcipher_reqtfm(req));
589 struct omap_des_dev *dd = omap_des_find_dev(ctx); 592 struct omap_des_dev *dd = omap_des_find_dev(ctx);
@@ -695,12 +698,23 @@ static int omap_des_cbc_decrypt(struct ablkcipher_request *req)
695 return omap_des_crypt(req, FLAGS_CBC); 698 return omap_des_crypt(req, FLAGS_CBC);
696} 699}
697 700
701static int omap_des_prepare_req(struct crypto_engine *engine,
702 void *areq);
703static int omap_des_crypt_req(struct crypto_engine *engine,
704 void *areq);
705
698static int omap_des_cra_init(struct crypto_tfm *tfm) 706static int omap_des_cra_init(struct crypto_tfm *tfm)
699{ 707{
708 struct omap_des_ctx *ctx = crypto_tfm_ctx(tfm);
709
700 pr_debug("enter\n"); 710 pr_debug("enter\n");
701 711
702 tfm->crt_ablkcipher.reqsize = sizeof(struct omap_des_reqctx); 712 tfm->crt_ablkcipher.reqsize = sizeof(struct omap_des_reqctx);
703 713
714 ctx->enginectx.op.prepare_request = omap_des_prepare_req;
715 ctx->enginectx.op.unprepare_request = NULL;
716 ctx->enginectx.op.do_one_request = omap_des_crypt_req;
717
704 return 0; 718 return 0;
705} 719}
706 720
@@ -1046,8 +1060,6 @@ static int omap_des_probe(struct platform_device *pdev)
1046 goto err_engine; 1060 goto err_engine;
1047 } 1061 }
1048 1062
1049 dd->engine->prepare_cipher_request = omap_des_prepare_req;
1050 dd->engine->cipher_one_request = omap_des_crypt_req;
1051 err = crypto_engine_start(dd->engine); 1063 err = crypto_engine_start(dd->engine);
1052 if (err) 1064 if (err)
1053 goto err_engine; 1065 goto err_engine;
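The omap-des conversion above (like the omap-aes one before it) moves from the engine's fixed cipher_one_request/prepare_cipher_request hooks to per-transform callbacks stored in a struct crypto_engine_ctx, with the request handed over as a pointer to its base crypto_async_request. A minimal sketch of that binding, with hypothetical example_* names; the engine context is placed first so the engine can reach it through crypto_tfm_ctx():

#include <crypto/engine.h>
#include <linux/crypto.h>

struct example_ctx {
	struct crypto_engine_ctx enginectx;	/* first member, found via crypto_tfm_ctx() */
	/* driver-specific state would follow */
};

static int example_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct ablkcipher_request *req =
		container_of(areq, struct ablkcipher_request, base);

	/*
	 * Program the hardware for "req" here; when the operation finishes,
	 * call crypto_finalize_ablkcipher_request(engine, req, err).
	 */
	return 0;
}

static int example_cra_init(struct crypto_tfm *tfm)
{
	struct example_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->enginectx.op.prepare_request = NULL;	/* optional hook */
	ctx->enginectx.op.unprepare_request = NULL;
	ctx->enginectx.op.do_one_request = example_do_one_request;
	return 0;
}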
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 86b89ace836f..ad02aa63b519 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -229,6 +229,7 @@ struct omap_sham_dev {
229 u8 xmit_buf[BUFLEN] OMAP_ALIGNED; 229 u8 xmit_buf[BUFLEN] OMAP_ALIGNED;
230 230
231 unsigned long flags; 231 unsigned long flags;
232 int fallback_sz;
232 struct crypto_queue queue; 233 struct crypto_queue queue;
233 struct ahash_request *req; 234 struct ahash_request *req;
234 235
@@ -759,6 +760,13 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
759 while (nbytes > 0 && sg_tmp) { 760 while (nbytes > 0 && sg_tmp) {
760 n++; 761 n++;
761 762
763#ifdef CONFIG_ZONE_DMA
764 if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
765 aligned = false;
766 break;
767 }
768#endif
769
762 if (offset < sg_tmp->length) { 770 if (offset < sg_tmp->length) {
763 if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) { 771 if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
764 aligned = false; 772 aligned = false;
@@ -809,9 +817,6 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
809 bool final = rctx->flags & BIT(FLAGS_FINUP); 817 bool final = rctx->flags & BIT(FLAGS_FINUP);
810 int xmit_len, hash_later; 818 int xmit_len, hash_later;
811 819
812 if (!req)
813 return 0;
814
815 bs = get_block_size(rctx); 820 bs = get_block_size(rctx);
816 821
817 if (update) 822 if (update)
@@ -1002,7 +1007,7 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
1002 ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0); 1007 ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
1003 1008
1004 if (ctx->total < get_block_size(ctx) || 1009 if (ctx->total < get_block_size(ctx) ||
1005 ctx->total < OMAP_SHA_DMA_THRESHOLD) 1010 ctx->total < dd->fallback_sz)
1006 ctx->flags |= BIT(FLAGS_CPU); 1011 ctx->flags |= BIT(FLAGS_CPU);
1007 1012
1008 if (ctx->flags & BIT(FLAGS_CPU)) 1013 if (ctx->flags & BIT(FLAGS_CPU))
@@ -1258,11 +1263,11 @@ static int omap_sham_final(struct ahash_request *req)
1258 /* 1263 /*
1259 * OMAP HW accel works only with buffers >= 9. 1264 * OMAP HW accel works only with buffers >= 9.
1260 * HMAC is always >= 9 because ipad == block size. 1265 * HMAC is always >= 9 because ipad == block size.
1261 * If buffersize is less than DMA_THRESHOLD, we use fallback 1266 * If buffersize is less than fallback_sz, we use fallback
1262 * SW encoding, as using DMA + HW in this case doesn't provide 1267 * SW encoding, as using DMA + HW in this case doesn't provide
1263 * any benefit. 1268 * any benefit.
1264 */ 1269 */
1265 if (!ctx->digcnt && ctx->bufcnt < OMAP_SHA_DMA_THRESHOLD) 1270 if (!ctx->digcnt && ctx->bufcnt < ctx->dd->fallback_sz)
1266 return omap_sham_final_shash(req); 1271 return omap_sham_final_shash(req);
1267 else if (ctx->bufcnt) 1272 else if (ctx->bufcnt)
1268 return omap_sham_enqueue(req, OP_FINAL); 1273 return omap_sham_enqueue(req, OP_FINAL);
@@ -1761,7 +1766,7 @@ static void omap_sham_done_task(unsigned long data)
1761 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) { 1766 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
1762 /* hash or semi-hash ready */ 1767 /* hash or semi-hash ready */
1763 clear_bit(FLAGS_DMA_READY, &dd->flags); 1768 clear_bit(FLAGS_DMA_READY, &dd->flags);
1764 goto finish; 1769 goto finish;
1765 } 1770 }
1766 } 1771 }
1767 1772
@@ -2013,6 +2018,85 @@ err:
2013 return err; 2018 return err;
2014} 2019}
2015 2020
2021static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
2022 char *buf)
2023{
2024 struct omap_sham_dev *dd = dev_get_drvdata(dev);
2025
2026 return sprintf(buf, "%d\n", dd->fallback_sz);
2027}
2028
2029static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
2030 const char *buf, size_t size)
2031{
2032 struct omap_sham_dev *dd = dev_get_drvdata(dev);
2033 ssize_t status;
2034 long value;
2035
2036 status = kstrtol(buf, 0, &value);
2037 if (status)
2038 return status;
2039
2040 	/* HW accelerator only works with buffers >= 9 */
2041 if (value < 9) {
2042 dev_err(dev, "minimum fallback size 9\n");
2043 return -EINVAL;
2044 }
2045
2046 dd->fallback_sz = value;
2047
2048 return size;
2049}
2050
2051static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
2052 char *buf)
2053{
2054 struct omap_sham_dev *dd = dev_get_drvdata(dev);
2055
2056 return sprintf(buf, "%d\n", dd->queue.max_qlen);
2057}
2058
2059static ssize_t queue_len_store(struct device *dev,
2060 struct device_attribute *attr, const char *buf,
2061 size_t size)
2062{
2063 struct omap_sham_dev *dd = dev_get_drvdata(dev);
2064 ssize_t status;
2065 long value;
2066 unsigned long flags;
2067
2068 status = kstrtol(buf, 0, &value);
2069 if (status)
2070 return status;
2071
2072 if (value < 1)
2073 return -EINVAL;
2074
2075 /*
2076 	 * Changing the queue size on the fly is safe; if the size becomes
2077 	 * smaller than the current size, the queue simply stops accepting new
2078 	 * entries until it has shrunk enough.
2079 */
2080 spin_lock_irqsave(&dd->lock, flags);
2081 dd->queue.max_qlen = value;
2082 spin_unlock_irqrestore(&dd->lock, flags);
2083
2084 return size;
2085}
2086
2087static DEVICE_ATTR_RW(queue_len);
2088static DEVICE_ATTR_RW(fallback);
2089
2090static struct attribute *omap_sham_attrs[] = {
2091 &dev_attr_queue_len.attr,
2092 &dev_attr_fallback.attr,
2093 NULL,
2094};
2095
2096static struct attribute_group omap_sham_attr_group = {
2097 .attrs = omap_sham_attrs,
2098};
2099
2016static int omap_sham_probe(struct platform_device *pdev) 2100static int omap_sham_probe(struct platform_device *pdev)
2017{ 2101{
2018 struct omap_sham_dev *dd; 2102 struct omap_sham_dev *dd;
@@ -2074,6 +2158,8 @@ static int omap_sham_probe(struct platform_device *pdev)
2074 pm_runtime_use_autosuspend(dev); 2158 pm_runtime_use_autosuspend(dev);
2075 pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY); 2159 pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
2076 2160
2161 dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;
2162
2077 pm_runtime_enable(dev); 2163 pm_runtime_enable(dev);
2078 pm_runtime_irq_safe(dev); 2164 pm_runtime_irq_safe(dev);
2079 2165
@@ -2111,6 +2197,12 @@ static int omap_sham_probe(struct platform_device *pdev)
2111 } 2197 }
2112 } 2198 }
2113 2199
2200 err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
2201 if (err) {
2202 dev_err(dev, "could not create sysfs device attrs\n");
2203 goto err_algs;
2204 }
2205
2114 return 0; 2206 return 0;
2115 2207
2116err_algs: 2208err_algs:
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 4ef52c9d72fc..a4df966adbf6 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -499,10 +499,12 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
499 memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen); 499 memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
500 ctx->hash_key_len = keys.authkeylen; 500 ctx->hash_key_len = keys.authkeylen;
501 501
502 memzero_explicit(&keys, sizeof(keys));
502 return 0; 503 return 0;
503 504
504badkey: 505badkey:
505 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 506 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
507 memzero_explicit(&keys, sizeof(keys));
506 return -EINVAL; 508 return -EINVAL;
507} 509}
508 510
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index baffae817259..1138e41d6805 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -546,11 +546,14 @@ static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
546 if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode)) 546 if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
547 goto error; 547 goto error;
548 548
549 memzero_explicit(&keys, sizeof(keys));
549 return 0; 550 return 0;
550bad_key: 551bad_key:
551 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 552 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
553 memzero_explicit(&keys, sizeof(keys));
552 return -EINVAL; 554 return -EINVAL;
553error: 555error:
556 memzero_explicit(&keys, sizeof(keys));
554 return -EFAULT; 557 return -EFAULT;
555} 558}
556 559
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 13c52d6bf630..320e7854b4ee 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -969,7 +969,8 @@ unmap_src:
969 return ret; 969 return ret;
970} 970}
971 971
972int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value, size_t vlen) 972static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
973 size_t vlen)
973{ 974{
974 struct qat_crypto_instance *inst = ctx->inst; 975 struct qat_crypto_instance *inst = ctx->inst;
975 struct device *dev = &GET_DEV(inst->accel_dev); 976 struct device *dev = &GET_DEV(inst->accel_dev);
@@ -1000,7 +1001,8 @@ err:
1000 return ret; 1001 return ret;
1001} 1002}
1002 1003
1003int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, size_t vlen) 1004static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
1005 size_t vlen)
1004{ 1006{
1005 struct qat_crypto_instance *inst = ctx->inst; 1007 struct qat_crypto_instance *inst = ctx->inst;
1006 struct device *dev = &GET_DEV(inst->accel_dev); 1008 struct device *dev = &GET_DEV(inst->accel_dev);
@@ -1024,7 +1026,8 @@ int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
1024 return 0; 1026 return 0;
1025} 1027}
1026 1028
1027int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value, size_t vlen) 1029static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
1030 size_t vlen)
1028{ 1031{
1029 struct qat_crypto_instance *inst = ctx->inst; 1032 struct qat_crypto_instance *inst = ctx->inst;
1030 struct device *dev = &GET_DEV(inst->accel_dev); 1033 struct device *dev = &GET_DEV(inst->accel_dev);
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 5d64c08b7f47..bf7163042569 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -404,29 +404,31 @@ static const struct of_device_id s5p_sss_dt_match[] = {
404}; 404};
405MODULE_DEVICE_TABLE(of, s5p_sss_dt_match); 405MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
406 406
407static inline struct samsung_aes_variant *find_s5p_sss_version 407static inline const struct samsung_aes_variant *find_s5p_sss_version
408 (struct platform_device *pdev) 408 (const struct platform_device *pdev)
409{ 409{
410 if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) { 410 if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
411 const struct of_device_id *match; 411 const struct of_device_id *match;
412 412
413 match = of_match_node(s5p_sss_dt_match, 413 match = of_match_node(s5p_sss_dt_match,
414 pdev->dev.of_node); 414 pdev->dev.of_node);
415 return (struct samsung_aes_variant *)match->data; 415 return (const struct samsung_aes_variant *)match->data;
416 } 416 }
417 return (struct samsung_aes_variant *) 417 return (const struct samsung_aes_variant *)
418 platform_get_device_id(pdev)->driver_data; 418 platform_get_device_id(pdev)->driver_data;
419} 419}
420 420
421static struct s5p_aes_dev *s5p_dev; 421static struct s5p_aes_dev *s5p_dev;
422 422
423static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg) 423static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
424 const struct scatterlist *sg)
424{ 425{
425 SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg)); 426 SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
426 SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg)); 427 SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
427} 428}
428 429
429static void s5p_set_dma_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg) 430static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
431 const struct scatterlist *sg)
430{ 432{
431 SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg)); 433 SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
432 SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg)); 434 SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
@@ -619,7 +621,7 @@ static inline void s5p_hash_write(struct s5p_aes_dev *dd,
619 * @sg: scatterlist ready to DMA transmit 621 * @sg: scatterlist ready to DMA transmit
620 */ 622 */
621static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev, 623static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
622 struct scatterlist *sg) 624 const struct scatterlist *sg)
623{ 625{
624 dev->hash_sg_cnt--; 626 dev->hash_sg_cnt--;
625 SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg)); 627 SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
@@ -792,9 +794,9 @@ static void s5p_hash_read_msg(struct ahash_request *req)
792 * @ctx: request context 794 * @ctx: request context
793 */ 795 */
794static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd, 796static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
795 struct s5p_hash_reqctx *ctx) 797 const struct s5p_hash_reqctx *ctx)
796{ 798{
797 u32 *hash = (u32 *)ctx->digest; 799 const u32 *hash = (const u32 *)ctx->digest;
798 unsigned int i; 800 unsigned int i;
799 801
800 for (i = 0; i < ctx->nregs; i++) 802 for (i = 0; i < ctx->nregs; i++)
@@ -818,7 +820,7 @@ static void s5p_hash_write_iv(struct ahash_request *req)
818 */ 820 */
819static void s5p_hash_copy_result(struct ahash_request *req) 821static void s5p_hash_copy_result(struct ahash_request *req)
820{ 822{
821 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); 823 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
822 824
823 if (!req->result) 825 if (!req->result)
824 return; 826 return;
@@ -1210,9 +1212,6 @@ static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
1210 int xmit_len, hash_later, nbytes; 1212 int xmit_len, hash_later, nbytes;
1211 int ret; 1213 int ret;
1212 1214
1213 if (!req)
1214 return 0;
1215
1216 if (update) 1215 if (update)
1217 nbytes = req->nbytes; 1216 nbytes = req->nbytes;
1218 else 1217 else
@@ -1293,7 +1292,7 @@ static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
1293 */ 1292 */
1294static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd) 1293static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
1295{ 1294{
1296 struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req); 1295 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
1297 1296
1298 dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE); 1297 dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
1299 clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags); 1298 clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
@@ -1720,7 +1719,7 @@ static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
1720 */ 1719 */
1721static int s5p_hash_export(struct ahash_request *req, void *out) 1720static int s5p_hash_export(struct ahash_request *req, void *out)
1722{ 1721{
1723 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); 1722 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1724 1723
1725 memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt); 1724 memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);
1726 1725
@@ -1834,7 +1833,8 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
1834}; 1833};
1835 1834
1836static void s5p_set_aes(struct s5p_aes_dev *dev, 1835static void s5p_set_aes(struct s5p_aes_dev *dev,
1837 uint8_t *key, uint8_t *iv, unsigned int keylen) 1836 const uint8_t *key, const uint8_t *iv,
1837 unsigned int keylen)
1838{ 1838{
1839 void __iomem *keystart; 1839 void __iomem *keystart;
1840 1840
@@ -2153,7 +2153,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
2153{ 2153{
2154 struct device *dev = &pdev->dev; 2154 struct device *dev = &pdev->dev;
2155 int i, j, err = -ENODEV; 2155 int i, j, err = -ENODEV;
2156 struct samsung_aes_variant *variant; 2156 const struct samsung_aes_variant *variant;
2157 struct s5p_aes_dev *pdata; 2157 struct s5p_aes_dev *pdata;
2158 struct resource *res; 2158 struct resource *res;
2159 unsigned int hash_i; 2159 unsigned int hash_i;
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 08e7bdcaa6e3..0f2245e1af2b 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1397,11 +1397,9 @@ static int sahara_probe(struct platform_device *pdev)
1397 int err; 1397 int err;
1398 int i; 1398 int i;
1399 1399
1400 dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL); 1400 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1401 if (dev == NULL) { 1401 if (!dev)
1402 dev_err(&pdev->dev, "unable to alloc data struct.\n");
1403 return -ENOMEM; 1402 return -ENOMEM;
1404 }
1405 1403
1406 dev->device = &pdev->dev; 1404 dev->device = &pdev->dev;
1407 platform_set_drvdata(pdev, dev); 1405 platform_set_drvdata(pdev, dev);
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 4a06a7a665ee..c5d3efc54a4f 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -17,6 +17,7 @@
17#include <crypto/des.h> 17#include <crypto/des.h>
18#include <crypto/engine.h> 18#include <crypto/engine.h>
19#include <crypto/scatterwalk.h> 19#include <crypto/scatterwalk.h>
20#include <crypto/internal/aead.h>
20 21
21#define DRIVER_NAME "stm32-cryp" 22#define DRIVER_NAME "stm32-cryp"
22 23
@@ -29,8 +30,12 @@
29#define FLG_ECB BIT(4) 30#define FLG_ECB BIT(4)
30#define FLG_CBC BIT(5) 31#define FLG_CBC BIT(5)
31#define FLG_CTR BIT(6) 32#define FLG_CTR BIT(6)
33#define FLG_GCM BIT(7)
34#define FLG_CCM BIT(8)
32/* Mode mask = bits [15..0] */ 35/* Mode mask = bits [15..0] */
33#define FLG_MODE_MASK GENMASK(15, 0) 36#define FLG_MODE_MASK GENMASK(15, 0)
37/* Bit [31..16] status */
38#define FLG_CCM_PADDED_WA BIT(16)
34 39
35/* Registers */ 40/* Registers */
36#define CRYP_CR 0x00000000 41#define CRYP_CR 0x00000000
@@ -53,6 +58,8 @@
53#define CRYP_IV0RR 0x00000044 58#define CRYP_IV0RR 0x00000044
54#define CRYP_IV1LR 0x00000048 59#define CRYP_IV1LR 0x00000048
55#define CRYP_IV1RR 0x0000004C 60#define CRYP_IV1RR 0x0000004C
61#define CRYP_CSGCMCCM0R 0x00000050
62#define CRYP_CSGCM0R 0x00000070
56 63
57/* Registers values */ 64/* Registers values */
58#define CR_DEC_NOT_ENC 0x00000004 65#define CR_DEC_NOT_ENC 0x00000004
@@ -64,6 +71,8 @@
64#define CR_AES_CBC 0x00000028 71#define CR_AES_CBC 0x00000028
65#define CR_AES_CTR 0x00000030 72#define CR_AES_CTR 0x00000030
66#define CR_AES_KP 0x00000038 73#define CR_AES_KP 0x00000038
74#define CR_AES_GCM 0x00080000
75#define CR_AES_CCM 0x00080008
67#define CR_AES_UNKNOWN 0xFFFFFFFF 76#define CR_AES_UNKNOWN 0xFFFFFFFF
68#define CR_ALGO_MASK 0x00080038 77#define CR_ALGO_MASK 0x00080038
69#define CR_DATA32 0x00000000 78#define CR_DATA32 0x00000000
@@ -75,6 +84,12 @@
75#define CR_KEY256 0x00000200 84#define CR_KEY256 0x00000200
76#define CR_FFLUSH 0x00004000 85#define CR_FFLUSH 0x00004000
77#define CR_CRYPEN 0x00008000 86#define CR_CRYPEN 0x00008000
87#define CR_PH_INIT 0x00000000
88#define CR_PH_HEADER 0x00010000
89#define CR_PH_PAYLOAD 0x00020000
90#define CR_PH_FINAL 0x00030000
91#define CR_PH_MASK 0x00030000
92#define CR_NBPBL_SHIFT 20
78 93
79#define SR_BUSY 0x00000010 94#define SR_BUSY 0x00000010
80#define SR_OFNE 0x00000004 95#define SR_OFNE 0x00000004
@@ -87,10 +102,17 @@
87 102
88/* Misc */ 103/* Misc */
89#define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32)) 104#define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32))
105#define GCM_CTR_INIT 2
90#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset) 106#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset)
91#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset) 107#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset)
92 108
109struct stm32_cryp_caps {
110 bool swap_final;
111 bool padding_wa;
112};
113
93struct stm32_cryp_ctx { 114struct stm32_cryp_ctx {
115 struct crypto_engine_ctx enginectx;
94 struct stm32_cryp *cryp; 116 struct stm32_cryp *cryp;
95 int keylen; 117 int keylen;
96 u32 key[AES_KEYSIZE_256 / sizeof(u32)]; 118 u32 key[AES_KEYSIZE_256 / sizeof(u32)];
@@ -108,13 +130,16 @@ struct stm32_cryp {
108 struct clk *clk; 130 struct clk *clk;
109 unsigned long flags; 131 unsigned long flags;
110 u32 irq_status; 132 u32 irq_status;
133 const struct stm32_cryp_caps *caps;
111 struct stm32_cryp_ctx *ctx; 134 struct stm32_cryp_ctx *ctx;
112 135
113 struct crypto_engine *engine; 136 struct crypto_engine *engine;
114 137
115 struct mutex lock; /* protects req */ 138 struct mutex lock; /* protects req / areq */
116 struct ablkcipher_request *req; 139 struct ablkcipher_request *req;
140 struct aead_request *areq;
117 141
142 size_t authsize;
118 size_t hw_blocksize; 143 size_t hw_blocksize;
119 144
120 size_t total_in; 145 size_t total_in;
@@ -137,6 +162,7 @@ struct stm32_cryp {
137 struct scatter_walk out_walk; 162 struct scatter_walk out_walk;
138 163
139 u32 last_ctr[4]; 164 u32 last_ctr[4];
165 u32 gcm_ctr;
140}; 166};
141 167
142struct stm32_cryp_list { 168struct stm32_cryp_list {
@@ -179,6 +205,16 @@ static inline bool is_ctr(struct stm32_cryp *cryp)
179 return cryp->flags & FLG_CTR; 205 return cryp->flags & FLG_CTR;
180} 206}
181 207
208static inline bool is_gcm(struct stm32_cryp *cryp)
209{
210 return cryp->flags & FLG_GCM;
211}
212
213static inline bool is_ccm(struct stm32_cryp *cryp)
214{
215 return cryp->flags & FLG_CCM;
216}
217
182static inline bool is_encrypt(struct stm32_cryp *cryp) 218static inline bool is_encrypt(struct stm32_cryp *cryp)
183{ 219{
184 return cryp->flags & FLG_ENCRYPT; 220 return cryp->flags & FLG_ENCRYPT;
@@ -207,6 +243,24 @@ static inline int stm32_cryp_wait_busy(struct stm32_cryp *cryp)
207 !(status & SR_BUSY), 10, 100000); 243 !(status & SR_BUSY), 10, 100000);
208} 244}
209 245
246static inline int stm32_cryp_wait_enable(struct stm32_cryp *cryp)
247{
248 u32 status;
249
250 return readl_relaxed_poll_timeout(cryp->regs + CRYP_CR, status,
251 !(status & CR_CRYPEN), 10, 100000);
252}
253
254static inline int stm32_cryp_wait_output(struct stm32_cryp *cryp)
255{
256 u32 status;
257
258 return readl_relaxed_poll_timeout(cryp->regs + CRYP_SR, status,
259 status & SR_OFNE, 10, 100000);
260}
261
262static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp);
263
210static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx) 264static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx)
211{ 265{
212 struct stm32_cryp *tmp, *cryp = NULL; 266 struct stm32_cryp *tmp, *cryp = NULL;
@@ -365,6 +419,12 @@ static u32 stm32_cryp_get_hw_mode(struct stm32_cryp *cryp)
365 if (is_aes(cryp) && is_ctr(cryp)) 419 if (is_aes(cryp) && is_ctr(cryp))
366 return CR_AES_CTR; 420 return CR_AES_CTR;
367 421
422 if (is_aes(cryp) && is_gcm(cryp))
423 return CR_AES_GCM;
424
425 if (is_aes(cryp) && is_ccm(cryp))
426 return CR_AES_CCM;
427
368 if (is_des(cryp) && is_ecb(cryp)) 428 if (is_des(cryp) && is_ecb(cryp))
369 return CR_DES_ECB; 429 return CR_DES_ECB;
370 430
@@ -381,6 +441,79 @@ static u32 stm32_cryp_get_hw_mode(struct stm32_cryp *cryp)
381 return CR_AES_UNKNOWN; 441 return CR_AES_UNKNOWN;
382} 442}
383 443
444static unsigned int stm32_cryp_get_input_text_len(struct stm32_cryp *cryp)
445{
446 return is_encrypt(cryp) ? cryp->areq->cryptlen :
447 cryp->areq->cryptlen - cryp->authsize;
448}
449
450static int stm32_cryp_gcm_init(struct stm32_cryp *cryp, u32 cfg)
451{
452 int ret;
453 u32 iv[4];
454
455 /* Phase 1 : init */
456 memcpy(iv, cryp->areq->iv, 12);
457 iv[3] = cpu_to_be32(GCM_CTR_INIT);
458 cryp->gcm_ctr = GCM_CTR_INIT;
459 stm32_cryp_hw_write_iv(cryp, iv);
460
461 stm32_cryp_write(cryp, CRYP_CR, cfg | CR_PH_INIT | CR_CRYPEN);
462
463 /* Wait for end of processing */
464 ret = stm32_cryp_wait_enable(cryp);
465 if (ret)
466 dev_err(cryp->dev, "Timeout (gcm init)\n");
467
468 return ret;
469}
470
471static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg)
472{
473 int ret;
474 u8 iv[AES_BLOCK_SIZE], b0[AES_BLOCK_SIZE];
475 u32 *d;
476 unsigned int i, textlen;
477
478 /* Phase 1 : init. Firstly set the CTR value to 1 (not 0) */
479 memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE);
480 memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1);
481 iv[AES_BLOCK_SIZE - 1] = 1;
482 stm32_cryp_hw_write_iv(cryp, (u32 *)iv);
483
484 /* Build B0 */
485 memcpy(b0, iv, AES_BLOCK_SIZE);
486
487 b0[0] |= (8 * ((cryp->authsize - 2) / 2));
488
489 if (cryp->areq->assoclen)
490 b0[0] |= 0x40;
491
492 textlen = stm32_cryp_get_input_text_len(cryp);
493
494 b0[AES_BLOCK_SIZE - 2] = textlen >> 8;
495 b0[AES_BLOCK_SIZE - 1] = textlen & 0xFF;
496
497 /* Enable HW */
498 stm32_cryp_write(cryp, CRYP_CR, cfg | CR_PH_INIT | CR_CRYPEN);
499
500 /* Write B0 */
501 d = (u32 *)b0;
502
503 for (i = 0; i < AES_BLOCK_32; i++) {
504 if (!cryp->caps->padding_wa)
505 *d = cpu_to_be32(*d);
506 stm32_cryp_write(cryp, CRYP_DIN, *d++);
507 }
508
509 /* Wait for end of processing */
510 ret = stm32_cryp_wait_enable(cryp);
511 if (ret)
512 dev_err(cryp->dev, "Timeout (ccm init)\n");
513
514 return ret;
515}
516
384static int stm32_cryp_hw_init(struct stm32_cryp *cryp) 517static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
385{ 518{
386 int ret; 519 int ret;
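Worked example for the B0 block built in stm32_cryp_ccm_init() above, which follows the CCM B0 flags layout (RFC 3610): bit 6 = Adata present, bits 5..3 = (M - 2)/2 where M is the tag length, bits 2..0 = L - 1 carried over from iv[0]. With authsize = 16, a non-zero assoclen and iv[0] = 3 (i.e. a 4-byte length field), the flags byte becomes 0x03 | (8 * ((16 - 2) / 2)) | 0x40 = 0x03 | 0x38 | 0x40 = 0x7b; for textlen = 256 the trailing length bytes written by the code are b0[14] = 0x01 and b0[15] = 0x00, with the remaining length bytes already zeroed by the memset above.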
@@ -436,6 +569,29 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
436 stm32_cryp_write(cryp, CRYP_CR, cfg); 569 stm32_cryp_write(cryp, CRYP_CR, cfg);
437 570
438 switch (hw_mode) { 571 switch (hw_mode) {
572 case CR_AES_GCM:
573 case CR_AES_CCM:
574 /* Phase 1 : init */
575 if (hw_mode == CR_AES_CCM)
576 ret = stm32_cryp_ccm_init(cryp, cfg);
577 else
578 ret = stm32_cryp_gcm_init(cryp, cfg);
579
580 if (ret)
581 return ret;
582
583 /* Phase 2 : header (authenticated data) */
584 if (cryp->areq->assoclen) {
585 cfg |= CR_PH_HEADER;
586 } else if (stm32_cryp_get_input_text_len(cryp)) {
587 cfg |= CR_PH_PAYLOAD;
588 stm32_cryp_write(cryp, CRYP_CR, cfg);
589 } else {
590 cfg |= CR_PH_INIT;
591 }
592
593 break;
594
439 case CR_DES_CBC: 595 case CR_DES_CBC:
440 case CR_TDES_CBC: 596 case CR_TDES_CBC:
441 case CR_AES_CBC: 597 case CR_AES_CBC:
@@ -452,12 +608,16 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
452 608
453 stm32_cryp_write(cryp, CRYP_CR, cfg); 609 stm32_cryp_write(cryp, CRYP_CR, cfg);
454 610
611 cryp->flags &= ~FLG_CCM_PADDED_WA;
612
455 return 0; 613 return 0;
456} 614}
457 615
458static void stm32_cryp_finish_req(struct stm32_cryp *cryp) 616static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
459{ 617{
460 int err = 0; 618 if (!err && (is_gcm(cryp) || is_ccm(cryp)))
619 /* Phase 4 : output tag */
620 err = stm32_cryp_read_auth_tag(cryp);
461 621
462 if (cryp->sgs_copied) { 622 if (cryp->sgs_copied) {
463 void *buf_in, *buf_out; 623 void *buf_in, *buf_out;
@@ -478,8 +638,14 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp)
478 free_pages((unsigned long)buf_out, pages); 638 free_pages((unsigned long)buf_out, pages);
479 } 639 }
480 640
481 crypto_finalize_cipher_request(cryp->engine, cryp->req, err); 641 if (is_gcm(cryp) || is_ccm(cryp)) {
482 cryp->req = NULL; 642 crypto_finalize_aead_request(cryp->engine, cryp->areq, err);
643 cryp->areq = NULL;
644 } else {
645 crypto_finalize_ablkcipher_request(cryp->engine, cryp->req,
646 err);
647 cryp->req = NULL;
648 }
483 649
484 memset(cryp->ctx->key, 0, cryp->ctx->keylen); 650 memset(cryp->ctx->key, 0, cryp->ctx->keylen);
485 651
@@ -494,10 +660,36 @@ static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
494 return 0; 660 return 0;
495} 661}
496 662
663static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq);
664static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
665 void *areq);
666
497static int stm32_cryp_cra_init(struct crypto_tfm *tfm) 667static int stm32_cryp_cra_init(struct crypto_tfm *tfm)
498{ 668{
669 struct stm32_cryp_ctx *ctx = crypto_tfm_ctx(tfm);
670
499 tfm->crt_ablkcipher.reqsize = sizeof(struct stm32_cryp_reqctx); 671 tfm->crt_ablkcipher.reqsize = sizeof(struct stm32_cryp_reqctx);
500 672
673 ctx->enginectx.op.do_one_request = stm32_cryp_cipher_one_req;
674 ctx->enginectx.op.prepare_request = stm32_cryp_prepare_cipher_req;
675 ctx->enginectx.op.unprepare_request = NULL;
676 return 0;
677}
678
679static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq);
680static int stm32_cryp_prepare_aead_req(struct crypto_engine *engine,
681 void *areq);
682
683static int stm32_cryp_aes_aead_init(struct crypto_aead *tfm)
684{
685 struct stm32_cryp_ctx *ctx = crypto_aead_ctx(tfm);
686
687 tfm->reqsize = sizeof(struct stm32_cryp_reqctx);
688
689 ctx->enginectx.op.do_one_request = stm32_cryp_aead_one_req;
690 ctx->enginectx.op.prepare_request = stm32_cryp_prepare_aead_req;
691 ctx->enginectx.op.unprepare_request = NULL;
692
501 return 0; 693 return 0;
502} 694}
503 695
@@ -513,7 +705,21 @@ static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode)
513 705
514 rctx->mode = mode; 706 rctx->mode = mode;
515 707
516 return crypto_transfer_cipher_request_to_engine(cryp->engine, req); 708 return crypto_transfer_ablkcipher_request_to_engine(cryp->engine, req);
709}
710
711static int stm32_cryp_aead_crypt(struct aead_request *req, unsigned long mode)
712{
713 struct stm32_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
714 struct stm32_cryp_reqctx *rctx = aead_request_ctx(req);
715 struct stm32_cryp *cryp = stm32_cryp_find_dev(ctx);
716
717 if (!cryp)
718 return -ENODEV;
719
720 rctx->mode = mode;
721
722 return crypto_transfer_aead_request_to_engine(cryp->engine, req);
517} 723}
518 724
519static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 725static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -555,6 +761,46 @@ static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
555 return stm32_cryp_setkey(tfm, key, keylen); 761 return stm32_cryp_setkey(tfm, key, keylen);
556} 762}
557 763
764static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
765 unsigned int keylen)
766{
767 struct stm32_cryp_ctx *ctx = crypto_aead_ctx(tfm);
768
769 if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
770 keylen != AES_KEYSIZE_256)
771 return -EINVAL;
772
773 memcpy(ctx->key, key, keylen);
774 ctx->keylen = keylen;
775
776 return 0;
777}
778
779static int stm32_cryp_aes_gcm_setauthsize(struct crypto_aead *tfm,
780 unsigned int authsize)
781{
782 return authsize == AES_BLOCK_SIZE ? 0 : -EINVAL;
783}
784
785static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm,
786 unsigned int authsize)
787{
788 switch (authsize) {
789 case 4:
790 case 6:
791 case 8:
792 case 10:
793 case 12:
794 case 14:
795 case 16:
796 break;
797 default:
798 return -EINVAL;
799 }
800
801 return 0;
802}
803
558static int stm32_cryp_aes_ecb_encrypt(struct ablkcipher_request *req) 804static int stm32_cryp_aes_ecb_encrypt(struct ablkcipher_request *req)
559{ 805{
560 return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT); 806 return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT);
@@ -585,6 +831,26 @@ static int stm32_cryp_aes_ctr_decrypt(struct ablkcipher_request *req)
585 return stm32_cryp_crypt(req, FLG_AES | FLG_CTR); 831 return stm32_cryp_crypt(req, FLG_AES | FLG_CTR);
586} 832}
587 833
834static int stm32_cryp_aes_gcm_encrypt(struct aead_request *req)
835{
836 return stm32_cryp_aead_crypt(req, FLG_AES | FLG_GCM | FLG_ENCRYPT);
837}
838
839static int stm32_cryp_aes_gcm_decrypt(struct aead_request *req)
840{
841 return stm32_cryp_aead_crypt(req, FLG_AES | FLG_GCM);
842}
843
844static int stm32_cryp_aes_ccm_encrypt(struct aead_request *req)
845{
846 return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM | FLG_ENCRYPT);
847}
848
849static int stm32_cryp_aes_ccm_decrypt(struct aead_request *req)
850{
851 return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM);
852}
853
588static int stm32_cryp_des_ecb_encrypt(struct ablkcipher_request *req) 854static int stm32_cryp_des_ecb_encrypt(struct ablkcipher_request *req)
589{ 855{
590 return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT); 856 return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT);
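With the stm32 GCM/CCM entry points above registered as ordinary AEAD algorithms, kernel users reach them through the generic AEAD API. A self-contained usage sketch, not part of the patch, with made-up key/nonce/payload sizes; depending on priorities the allocation may just as well resolve to a software gcm(aes):

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_gcm_encrypt(void)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 key[16] = { 0 };	/* example 128-bit key */
	u8 iv[12] = { 0 };	/* example 96-bit GCM nonce */
	u8 *buf;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	buf = kzalloc(64 + 16, GFP_KERNEL);	/* payload + 16-byte tag */
	if (!buf) {
		ret = -ENOMEM;
		goto free_tfm;
	}

	ret = crypto_aead_setkey(tfm, key, sizeof(key));
	if (ret)
		goto free_buf;
	ret = crypto_aead_setauthsize(tfm, 16);
	if (ret)
		goto free_buf;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_buf;
	}

	sg_init_one(&sg, buf, 64 + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, 0);			/* no associated data */
	aead_request_set_crypt(req, &sg, &sg, 64, iv);	/* in-place, 64-byte payload */

	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);
	/* on success, buf[0..63] holds the ciphertext and buf[64..79] the tag */

	aead_request_free(req);
free_buf:
	kfree(buf);
free_tfm:
	crypto_free_aead(tfm);
	return ret;
}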
@@ -625,18 +891,19 @@ static int stm32_cryp_tdes_cbc_decrypt(struct ablkcipher_request *req)
625 return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC); 891 return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC);
626} 892}
627 893
628static int stm32_cryp_prepare_req(struct crypto_engine *engine, 894static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
629 struct ablkcipher_request *req) 895 struct aead_request *areq)
630{ 896{
631 struct stm32_cryp_ctx *ctx; 897 struct stm32_cryp_ctx *ctx;
632 struct stm32_cryp *cryp; 898 struct stm32_cryp *cryp;
633 struct stm32_cryp_reqctx *rctx; 899 struct stm32_cryp_reqctx *rctx;
634 int ret; 900 int ret;
635 901
636 if (!req) 902 if (!req && !areq)
637 return -EINVAL; 903 return -EINVAL;
638 904
639 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); 905 ctx = req ? crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)) :
906 crypto_aead_ctx(crypto_aead_reqtfm(areq));
640 907
641 cryp = ctx->cryp; 908 cryp = ctx->cryp;
642 909
@@ -645,7 +912,7 @@ static int stm32_cryp_prepare_req(struct crypto_engine *engine,
645 912
646 mutex_lock(&cryp->lock); 913 mutex_lock(&cryp->lock);
647 914
648 rctx = ablkcipher_request_ctx(req); 915 rctx = req ? ablkcipher_request_ctx(req) : aead_request_ctx(areq);
649 rctx->mode &= FLG_MODE_MASK; 916 rctx->mode &= FLG_MODE_MASK;
650 917
651 ctx->cryp = cryp; 918 ctx->cryp = cryp;
@@ -654,15 +921,48 @@ static int stm32_cryp_prepare_req(struct crypto_engine *engine,
654 cryp->hw_blocksize = is_aes(cryp) ? AES_BLOCK_SIZE : DES_BLOCK_SIZE; 921 cryp->hw_blocksize = is_aes(cryp) ? AES_BLOCK_SIZE : DES_BLOCK_SIZE;
655 cryp->ctx = ctx; 922 cryp->ctx = ctx;
656 923
657 cryp->req = req; 924 if (req) {
658 cryp->total_in = req->nbytes; 925 cryp->req = req;
659 cryp->total_out = cryp->total_in; 926 cryp->total_in = req->nbytes;
927 cryp->total_out = cryp->total_in;
928 } else {
929 /*
930 * Length of input and output data:
931 * Encryption case:
932 * INPUT = AssocData || PlainText
933 * <- assoclen -> <- cryptlen ->
934 * <------- total_in ----------->
935 *
936 * OUTPUT = AssocData || CipherText || AuthTag
937 * <- assoclen -> <- cryptlen -> <- authsize ->
938 * <---------------- total_out ----------------->
939 *
940 * Decryption case:
941 * INPUT = AssocData || CipherText || AuthTag
942 * <- assoclen -> <--------- cryptlen --------->
943 * <- authsize ->
944 * <---------------- total_in ------------------>
945 *
946 * OUTPUT = AssocData || PlainText
947 	 *          <- assoclen ->  <- cryptlen - authsize ->
948 * <---------- total_out ----------------->
949 */
950 cryp->areq = areq;
951 cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
952 cryp->total_in = areq->assoclen + areq->cryptlen;
953 if (is_encrypt(cryp))
954 /* Append auth tag to output */
955 cryp->total_out = cryp->total_in + cryp->authsize;
956 else
957 /* No auth tag in output */
958 cryp->total_out = cryp->total_in - cryp->authsize;
959 }
660 960
661 cryp->total_in_save = cryp->total_in; 961 cryp->total_in_save = cryp->total_in;
662 cryp->total_out_save = cryp->total_out; 962 cryp->total_out_save = cryp->total_out;
663 963
664 cryp->in_sg = req->src; 964 cryp->in_sg = req ? req->src : areq->src;
665 cryp->out_sg = req->dst; 965 cryp->out_sg = req ? req->dst : areq->dst;
666 cryp->out_sg_save = cryp->out_sg; 966 cryp->out_sg_save = cryp->out_sg;
667 967
668 cryp->in_sg_len = sg_nents_for_len(cryp->in_sg, cryp->total_in); 968 cryp->in_sg_len = sg_nents_for_len(cryp->in_sg, cryp->total_in);
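As a concrete instance of the length bookkeeping described in the comment above: a GCM encryption request with assoclen = 16, cryptlen = 64 and a 16-byte tag gives total_in = 16 + 64 = 80 and total_out = 80 + 16 = 96, while the matching decryption request carries the tag inside cryptlen (assoclen = 16, cryptlen = 80), so total_in = 96 and total_out = 96 - 16 = 80.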
@@ -686,6 +986,12 @@ static int stm32_cryp_prepare_req(struct crypto_engine *engine,
686 scatterwalk_start(&cryp->in_walk, cryp->in_sg); 986 scatterwalk_start(&cryp->in_walk, cryp->in_sg);
687 scatterwalk_start(&cryp->out_walk, cryp->out_sg); 987 scatterwalk_start(&cryp->out_walk, cryp->out_sg);
688 988
989 if (is_gcm(cryp) || is_ccm(cryp)) {
990 /* In output, jump after assoc data */
991 scatterwalk_advance(&cryp->out_walk, cryp->areq->assoclen);
992 cryp->total_out -= cryp->areq->assoclen;
993 }
994
689 ret = stm32_cryp_hw_init(cryp); 995 ret = stm32_cryp_hw_init(cryp);
690out: 996out:
691 if (ret) 997 if (ret)
@@ -695,14 +1001,20 @@ out:
695} 1001}
696 1002
697static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine, 1003static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
698 struct ablkcipher_request *req) 1004 void *areq)
699{ 1005{
700 return stm32_cryp_prepare_req(engine, req); 1006 struct ablkcipher_request *req = container_of(areq,
1007 struct ablkcipher_request,
1008 base);
1009
1010 return stm32_cryp_prepare_req(req, NULL);
701} 1011}
702 1012
703static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, 1013static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq)
704 struct ablkcipher_request *req)
705{ 1014{
1015 struct ablkcipher_request *req = container_of(areq,
1016 struct ablkcipher_request,
1017 base);
706 struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx( 1018 struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
707 crypto_ablkcipher_reqtfm(req)); 1019 crypto_ablkcipher_reqtfm(req));
708 struct stm32_cryp *cryp = ctx->cryp; 1020 struct stm32_cryp *cryp = ctx->cryp;
@@ -713,6 +1025,34 @@ static int stm32_cryp_cipher_one_req(struct crypto_engine *engine,
713 return stm32_cryp_cpu_start(cryp); 1025 return stm32_cryp_cpu_start(cryp);
714} 1026}
715 1027
1028static int stm32_cryp_prepare_aead_req(struct crypto_engine *engine, void *areq)
1029{
1030 struct aead_request *req = container_of(areq, struct aead_request,
1031 base);
1032
1033 return stm32_cryp_prepare_req(NULL, req);
1034}
1035
1036static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq)
1037{
1038 struct aead_request *req = container_of(areq, struct aead_request,
1039 base);
1040 struct stm32_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
1041 struct stm32_cryp *cryp = ctx->cryp;
1042
1043 if (!cryp)
1044 return -ENODEV;
1045
1046 if (unlikely(!cryp->areq->assoclen &&
1047 !stm32_cryp_get_input_text_len(cryp))) {
1048 /* No input data to process: get tag and finish */
1049 stm32_cryp_finish_req(cryp, 0);
1050 return 0;
1051 }
1052
1053 return stm32_cryp_cpu_start(cryp);
1054}
1055
716static u32 *stm32_cryp_next_out(struct stm32_cryp *cryp, u32 *dst, 1056static u32 *stm32_cryp_next_out(struct stm32_cryp *cryp, u32 *dst,
717 unsigned int n) 1057 unsigned int n)
718{ 1058{
@@ -745,6 +1085,111 @@ static u32 *stm32_cryp_next_in(struct stm32_cryp *cryp, u32 *src,
745 return (u32 *)((u8 *)src + n); 1085 return (u32 *)((u8 *)src + n);
746} 1086}
747 1087
1088static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
1089{
1090 u32 cfg, size_bit, *dst, d32;
1091 u8 *d8;
1092 unsigned int i, j;
1093 int ret = 0;
1094
1095 /* Update Config */
1096 cfg = stm32_cryp_read(cryp, CRYP_CR);
1097
1098 cfg &= ~CR_PH_MASK;
1099 cfg |= CR_PH_FINAL;
1100 cfg &= ~CR_DEC_NOT_ENC;
1101 cfg |= CR_CRYPEN;
1102
1103 stm32_cryp_write(cryp, CRYP_CR, cfg);
1104
1105 if (is_gcm(cryp)) {
1106 /* GCM: write aad and payload size (in bits) */
1107 size_bit = cryp->areq->assoclen * 8;
1108 if (cryp->caps->swap_final)
1109 size_bit = cpu_to_be32(size_bit);
1110
1111 stm32_cryp_write(cryp, CRYP_DIN, 0);
1112 stm32_cryp_write(cryp, CRYP_DIN, size_bit);
1113
1114 size_bit = is_encrypt(cryp) ? cryp->areq->cryptlen :
1115 cryp->areq->cryptlen - AES_BLOCK_SIZE;
1116 size_bit *= 8;
1117 if (cryp->caps->swap_final)
1118 size_bit = cpu_to_be32(size_bit);
1119
1120 stm32_cryp_write(cryp, CRYP_DIN, 0);
1121 stm32_cryp_write(cryp, CRYP_DIN, size_bit);
1122 } else {
1123 /* CCM: write CTR0 */
1124 u8 iv[AES_BLOCK_SIZE];
1125 u32 *iv32 = (u32 *)iv;
1126
1127 memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE);
1128 memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1);
1129
1130 for (i = 0; i < AES_BLOCK_32; i++) {
1131 if (!cryp->caps->padding_wa)
1132 *iv32 = cpu_to_be32(*iv32);
1133 stm32_cryp_write(cryp, CRYP_DIN, *iv32++);
1134 }
1135 }
1136
1137 /* Wait for output data */
1138 ret = stm32_cryp_wait_output(cryp);
1139 if (ret) {
1140 dev_err(cryp->dev, "Timeout (read tag)\n");
1141 return ret;
1142 }
1143
1144 if (is_encrypt(cryp)) {
1145 /* Get and write tag */
1146 dst = sg_virt(cryp->out_sg) + _walked_out;
1147
1148 for (i = 0; i < AES_BLOCK_32; i++) {
1149 if (cryp->total_out >= sizeof(u32)) {
1150 /* Read a full u32 */
1151 *dst = stm32_cryp_read(cryp, CRYP_DOUT);
1152
1153 dst = stm32_cryp_next_out(cryp, dst,
1154 sizeof(u32));
1155 cryp->total_out -= sizeof(u32);
1156 } else if (!cryp->total_out) {
1157 /* Empty fifo out (data from input padding) */
1158 stm32_cryp_read(cryp, CRYP_DOUT);
1159 } else {
1160 /* Read less than an u32 */
1161 d32 = stm32_cryp_read(cryp, CRYP_DOUT);
1162 d8 = (u8 *)&d32;
1163
1164 for (j = 0; j < cryp->total_out; j++) {
1165 *((u8 *)dst) = *(d8++);
1166 dst = stm32_cryp_next_out(cryp, dst, 1);
1167 }
1168 cryp->total_out = 0;
1169 }
1170 }
1171 } else {
1172 /* Get and check tag */
1173 u32 in_tag[AES_BLOCK_32], out_tag[AES_BLOCK_32];
1174
1175 scatterwalk_map_and_copy(in_tag, cryp->in_sg,
1176 cryp->total_in_save - cryp->authsize,
1177 cryp->authsize, 0);
1178
1179 for (i = 0; i < AES_BLOCK_32; i++)
1180 out_tag[i] = stm32_cryp_read(cryp, CRYP_DOUT);
1181
1182 if (crypto_memneq(in_tag, out_tag, cryp->authsize))
1183 ret = -EBADMSG;
1184 }
1185
1186 /* Disable cryp */
1187 cfg &= ~CR_CRYPEN;
1188 stm32_cryp_write(cryp, CRYP_CR, cfg);
1189
1190 return ret;
1191}
1192
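For reference, the two "write 0 then size_bit" pairs above build the standard GCM final block, len(A) || len(C), as two 64-bit big-endian bit counts; the swap_final capability decides whether each 32-bit word must be byte-swapped before it is pushed into the FIFO. A hypothetical helper showing just the layout (byte lengths assumed below 2^29, so the high words are zero):

static void gcm_final_len_block(u32 assoclen, u32 cryptlen, __be32 out[4])
{
	out[0] = 0;				/* high 32 bits of len(A) in bits */
	out[1] = cpu_to_be32(assoclen * 8);	/* low 32 bits of len(A)  */
	out[2] = 0;				/* high 32 bits of len(C) in bits */
	out[3] = cpu_to_be32(cryptlen * 8);	/* low 32 bits of len(C)  */
}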
748static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp) 1193static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp)
749{ 1194{
750 u32 cr; 1195 u32 cr;
@@ -777,17 +1222,24 @@ static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
777 unsigned int i, j; 1222 unsigned int i, j;
778 u32 d32, *dst; 1223 u32 d32, *dst;
779 u8 *d8; 1224 u8 *d8;
1225 size_t tag_size;
1226
 1227 /* Do not read the tag now (if any) */
1228 if (is_encrypt(cryp) && (is_gcm(cryp) || is_ccm(cryp)))
1229 tag_size = cryp->authsize;
1230 else
1231 tag_size = 0;
780 1232
781 dst = sg_virt(cryp->out_sg) + _walked_out; 1233 dst = sg_virt(cryp->out_sg) + _walked_out;
782 1234
783 for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) { 1235 for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
784 if (likely(cryp->total_out >= sizeof(u32))) { 1236 if (likely(cryp->total_out - tag_size >= sizeof(u32))) {
785 /* Read a full u32 */ 1237 /* Read a full u32 */
786 *dst = stm32_cryp_read(cryp, CRYP_DOUT); 1238 *dst = stm32_cryp_read(cryp, CRYP_DOUT);
787 1239
788 dst = stm32_cryp_next_out(cryp, dst, sizeof(u32)); 1240 dst = stm32_cryp_next_out(cryp, dst, sizeof(u32));
789 cryp->total_out -= sizeof(u32); 1241 cryp->total_out -= sizeof(u32);
790 } else if (!cryp->total_out) { 1242 } else if (cryp->total_out == tag_size) {
791 /* Empty fifo out (data from input padding) */ 1243 /* Empty fifo out (data from input padding) */
792 d32 = stm32_cryp_read(cryp, CRYP_DOUT); 1244 d32 = stm32_cryp_read(cryp, CRYP_DOUT);
793 } else { 1245 } else {
@@ -795,15 +1247,15 @@ static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
795 d32 = stm32_cryp_read(cryp, CRYP_DOUT); 1247 d32 = stm32_cryp_read(cryp, CRYP_DOUT);
796 d8 = (u8 *)&d32; 1248 d8 = (u8 *)&d32;
797 1249
798 for (j = 0; j < cryp->total_out; j++) { 1250 for (j = 0; j < cryp->total_out - tag_size; j++) {
799 *((u8 *)dst) = *(d8++); 1251 *((u8 *)dst) = *(d8++);
800 dst = stm32_cryp_next_out(cryp, dst, 1); 1252 dst = stm32_cryp_next_out(cryp, dst, 1);
801 } 1253 }
802 cryp->total_out = 0; 1254 cryp->total_out = tag_size;
803 } 1255 }
804 } 1256 }
805 1257
806 return !cryp->total_out || !cryp->total_in; 1258 return !(cryp->total_out - tag_size) || !cryp->total_in;
807} 1259}
808 1260
809static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp) 1261static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
@@ -811,33 +1263,219 @@ static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
811 unsigned int i, j; 1263 unsigned int i, j;
812 u32 *src; 1264 u32 *src;
813 u8 d8[4]; 1265 u8 d8[4];
1266 size_t tag_size;
1267
 1268 /* Do not write the tag (if any) */
1269 if (is_decrypt(cryp) && (is_gcm(cryp) || is_ccm(cryp)))
1270 tag_size = cryp->authsize;
1271 else
1272 tag_size = 0;
814 1273
815 src = sg_virt(cryp->in_sg) + _walked_in; 1274 src = sg_virt(cryp->in_sg) + _walked_in;
816 1275
817 for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) { 1276 for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
818 if (likely(cryp->total_in >= sizeof(u32))) { 1277 if (likely(cryp->total_in - tag_size >= sizeof(u32))) {
819 /* Write a full u32 */ 1278 /* Write a full u32 */
820 stm32_cryp_write(cryp, CRYP_DIN, *src); 1279 stm32_cryp_write(cryp, CRYP_DIN, *src);
821 1280
822 src = stm32_cryp_next_in(cryp, src, sizeof(u32)); 1281 src = stm32_cryp_next_in(cryp, src, sizeof(u32));
823 cryp->total_in -= sizeof(u32); 1282 cryp->total_in -= sizeof(u32);
824 } else if (!cryp->total_in) { 1283 } else if (cryp->total_in == tag_size) {
825 /* Write padding data */ 1284 /* Write padding data */
826 stm32_cryp_write(cryp, CRYP_DIN, 0); 1285 stm32_cryp_write(cryp, CRYP_DIN, 0);
827 } else { 1286 } else {
828 /* Write less than an u32 */ 1287 /* Write less than an u32 */
829 memset(d8, 0, sizeof(u32)); 1288 memset(d8, 0, sizeof(u32));
830 for (j = 0; j < cryp->total_in; j++) { 1289 for (j = 0; j < cryp->total_in - tag_size; j++) {
831 d8[j] = *((u8 *)src); 1290 d8[j] = *((u8 *)src);
832 src = stm32_cryp_next_in(cryp, src, 1); 1291 src = stm32_cryp_next_in(cryp, src, 1);
833 } 1292 }
834 1293
835 stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8); 1294 stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
836 cryp->total_in = 0; 1295 cryp->total_in = tag_size;
837 } 1296 }
838 } 1297 }
839} 1298}
840 1299
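A small clarification for the FIFO loops above and below: AES_BLOCK_32 is the AES block size counted in 32-bit words. Its definition sits outside these hunks; it is presumably along the lines of:

#define AES_BLOCK_32	(AES_BLOCK_SIZE / sizeof(u32))	/* 16 bytes -> 4 words */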
1300static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
1301{
1302 int err;
1303 u32 cfg, tmp[AES_BLOCK_32];
1304 size_t total_in_ori = cryp->total_in;
1305 struct scatterlist *out_sg_ori = cryp->out_sg;
1306 unsigned int i;
1307
1308 /* 'Special workaround' procedure described in the datasheet */
1309
1310 /* a) disable ip */
1311 stm32_cryp_write(cryp, CRYP_IMSCR, 0);
1312 cfg = stm32_cryp_read(cryp, CRYP_CR);
1313 cfg &= ~CR_CRYPEN;
1314 stm32_cryp_write(cryp, CRYP_CR, cfg);
1315
1316 /* b) Update IV1R */
1317 stm32_cryp_write(cryp, CRYP_IV1RR, cryp->gcm_ctr - 2);
1318
1319 /* c) change mode to CTR */
1320 cfg &= ~CR_ALGO_MASK;
1321 cfg |= CR_AES_CTR;
1322 stm32_cryp_write(cryp, CRYP_CR, cfg);
1323
1324 /* a) enable IP */
1325 cfg |= CR_CRYPEN;
1326 stm32_cryp_write(cryp, CRYP_CR, cfg);
1327
1328 /* b) pad and write the last block */
1329 stm32_cryp_irq_write_block(cryp);
1330 cryp->total_in = total_in_ori;
1331 err = stm32_cryp_wait_output(cryp);
1332 if (err) {
1333 dev_err(cryp->dev, "Timeout (write gcm header)\n");
1334 return stm32_cryp_finish_req(cryp, err);
1335 }
1336
1337 /* c) get and store encrypted data */
1338 stm32_cryp_irq_read_data(cryp);
1339 scatterwalk_map_and_copy(tmp, out_sg_ori,
1340 cryp->total_in_save - total_in_ori,
1341 total_in_ori, 0);
1342
1343 /* d) change mode back to AES GCM */
1344 cfg &= ~CR_ALGO_MASK;
1345 cfg |= CR_AES_GCM;
1346 stm32_cryp_write(cryp, CRYP_CR, cfg);
1347
1348 /* e) change phase to Final */
1349 cfg &= ~CR_PH_MASK;
1350 cfg |= CR_PH_FINAL;
1351 stm32_cryp_write(cryp, CRYP_CR, cfg);
1352
1353 /* f) write padded data */
1354 for (i = 0; i < AES_BLOCK_32; i++) {
1355 if (cryp->total_in)
1356 stm32_cryp_write(cryp, CRYP_DIN, tmp[i]);
1357 else
1358 stm32_cryp_write(cryp, CRYP_DIN, 0);
1359
1360 cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in);
1361 }
1362
1363 /* g) Empty fifo out */
1364 err = stm32_cryp_wait_output(cryp);
1365 if (err) {
1366 dev_err(cryp->dev, "Timeout (write gcm header)\n");
1367 return stm32_cryp_finish_req(cryp, err);
1368 }
1369
1370 for (i = 0; i < AES_BLOCK_32; i++)
1371 stm32_cryp_read(cryp, CRYP_DOUT);
1372
 1373 /* h) run the normal Final phase */
1374 stm32_cryp_finish_req(cryp, 0);
1375}
1376
1377static void stm32_cryp_irq_set_npblb(struct stm32_cryp *cryp)
1378{
1379 u32 cfg, payload_bytes;
1380
 1381 /* disable ip, set NPBLB and re-enable ip */
1382 cfg = stm32_cryp_read(cryp, CRYP_CR);
1383 cfg &= ~CR_CRYPEN;
1384 stm32_cryp_write(cryp, CRYP_CR, cfg);
1385
1386 payload_bytes = is_decrypt(cryp) ? cryp->total_in - cryp->authsize :
1387 cryp->total_in;
1388 cfg |= (cryp->hw_blocksize - payload_bytes) << CR_NBPBL_SHIFT;
1389 cfg |= CR_CRYPEN;
1390 stm32_cryp_write(cryp, CRYP_CR, cfg);
1391}
1392
1393static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
1394{
1395 int err = 0;
1396 u32 cfg, iv1tmp;
1397 u32 cstmp1[AES_BLOCK_32], cstmp2[AES_BLOCK_32], tmp[AES_BLOCK_32];
1398 size_t last_total_out, total_in_ori = cryp->total_in;
1399 struct scatterlist *out_sg_ori = cryp->out_sg;
1400 unsigned int i;
1401
1402 /* 'Special workaround' procedure described in the datasheet */
1403 cryp->flags |= FLG_CCM_PADDED_WA;
1404
1405 /* a) disable ip */
1406 stm32_cryp_write(cryp, CRYP_IMSCR, 0);
1407
1408 cfg = stm32_cryp_read(cryp, CRYP_CR);
1409 cfg &= ~CR_CRYPEN;
1410 stm32_cryp_write(cryp, CRYP_CR, cfg);
1411
1412 /* b) get IV1 from CRYP_CSGCMCCM7 */
1413 iv1tmp = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + 7 * 4);
1414
1415 /* c) Load CRYP_CSGCMCCMxR */
1416 for (i = 0; i < ARRAY_SIZE(cstmp1); i++)
1417 cstmp1[i] = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + i * 4);
1418
1419 /* d) Write IV1R */
1420 stm32_cryp_write(cryp, CRYP_IV1RR, iv1tmp);
1421
1422 /* e) change mode to CTR */
1423 cfg &= ~CR_ALGO_MASK;
1424 cfg |= CR_AES_CTR;
1425 stm32_cryp_write(cryp, CRYP_CR, cfg);
1426
1427 /* a) enable IP */
1428 cfg |= CR_CRYPEN;
1429 stm32_cryp_write(cryp, CRYP_CR, cfg);
1430
1431 /* b) pad and write the last block */
1432 stm32_cryp_irq_write_block(cryp);
1433 cryp->total_in = total_in_ori;
1434 err = stm32_cryp_wait_output(cryp);
1435 if (err) {
 1436 dev_err(cryp->dev, "Timeout (write ccm padded data)\n");
1437 return stm32_cryp_finish_req(cryp, err);
1438 }
1439
1440 /* c) get and store decrypted data */
1441 last_total_out = cryp->total_out;
1442 stm32_cryp_irq_read_data(cryp);
1443
1444 memset(tmp, 0, sizeof(tmp));
1445 scatterwalk_map_and_copy(tmp, out_sg_ori,
1446 cryp->total_out_save - last_total_out,
1447 last_total_out, 0);
1448
1449 /* d) Load again CRYP_CSGCMCCMxR */
1450 for (i = 0; i < ARRAY_SIZE(cstmp2); i++)
1451 cstmp2[i] = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + i * 4);
1452
1453 /* e) change mode back to AES CCM */
1454 cfg &= ~CR_ALGO_MASK;
1455 cfg |= CR_AES_CCM;
1456 stm32_cryp_write(cryp, CRYP_CR, cfg);
1457
1458 /* f) change phase to header */
1459 cfg &= ~CR_PH_MASK;
1460 cfg |= CR_PH_HEADER;
1461 stm32_cryp_write(cryp, CRYP_CR, cfg);
1462
1463 /* g) XOR and write padded data */
1464 for (i = 0; i < ARRAY_SIZE(tmp); i++) {
1465 tmp[i] ^= cstmp1[i];
1466 tmp[i] ^= cstmp2[i];
1467 stm32_cryp_write(cryp, CRYP_DIN, tmp[i]);
1468 }
1469
1470 /* h) wait for completion */
1471 err = stm32_cryp_wait_busy(cryp);
1472 if (err)
 1473 dev_err(cryp->dev, "Timeout (write ccm padded data)\n");
1474
 1475 /* i) run the normal Final phase */
1476 stm32_cryp_finish_req(cryp, err);
1477}
1478
841static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp) 1479static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
842{ 1480{
843 if (unlikely(!cryp->total_in)) { 1481 if (unlikely(!cryp->total_in)) {
@@ -845,28 +1483,220 @@ static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
845 return; 1483 return;
846 } 1484 }
847 1485
1486 if (unlikely(cryp->total_in < AES_BLOCK_SIZE &&
1487 (stm32_cryp_get_hw_mode(cryp) == CR_AES_GCM) &&
1488 is_encrypt(cryp))) {
1489 /* Padding for AES GCM encryption */
1490 if (cryp->caps->padding_wa)
1491 /* Special case 1 */
1492 return stm32_cryp_irq_write_gcm_padded_data(cryp);
1493
 1494 /* Setting padding bytes (NPBLB) */
1495 stm32_cryp_irq_set_npblb(cryp);
1496 }
1497
1498 if (unlikely((cryp->total_in - cryp->authsize < AES_BLOCK_SIZE) &&
1499 (stm32_cryp_get_hw_mode(cryp) == CR_AES_CCM) &&
1500 is_decrypt(cryp))) {
1501 /* Padding for AES CCM decryption */
1502 if (cryp->caps->padding_wa)
1503 /* Special case 2 */
1504 return stm32_cryp_irq_write_ccm_padded_data(cryp);
1505
 1506 /* Setting padding bytes (NPBLB) */
1507 stm32_cryp_irq_set_npblb(cryp);
1508 }
1509
848 if (is_aes(cryp) && is_ctr(cryp)) 1510 if (is_aes(cryp) && is_ctr(cryp))
849 stm32_cryp_check_ctr_counter(cryp); 1511 stm32_cryp_check_ctr_counter(cryp);
850 1512
851 stm32_cryp_irq_write_block(cryp); 1513 stm32_cryp_irq_write_block(cryp);
852} 1514}
853 1515
1516static void stm32_cryp_irq_write_gcm_header(struct stm32_cryp *cryp)
1517{
1518 int err;
1519 unsigned int i, j;
1520 u32 cfg, *src;
1521
1522 src = sg_virt(cryp->in_sg) + _walked_in;
1523
1524 for (i = 0; i < AES_BLOCK_32; i++) {
1525 stm32_cryp_write(cryp, CRYP_DIN, *src);
1526
1527 src = stm32_cryp_next_in(cryp, src, sizeof(u32));
1528 cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in);
1529
1530 /* Check if whole header written */
1531 if ((cryp->total_in_save - cryp->total_in) ==
1532 cryp->areq->assoclen) {
1533 /* Write padding if needed */
1534 for (j = i + 1; j < AES_BLOCK_32; j++)
1535 stm32_cryp_write(cryp, CRYP_DIN, 0);
1536
1537 /* Wait for completion */
1538 err = stm32_cryp_wait_busy(cryp);
1539 if (err) {
1540 dev_err(cryp->dev, "Timeout (gcm header)\n");
1541 return stm32_cryp_finish_req(cryp, err);
1542 }
1543
1544 if (stm32_cryp_get_input_text_len(cryp)) {
1545 /* Phase 3 : payload */
1546 cfg = stm32_cryp_read(cryp, CRYP_CR);
1547 cfg &= ~CR_CRYPEN;
1548 stm32_cryp_write(cryp, CRYP_CR, cfg);
1549
1550 cfg &= ~CR_PH_MASK;
1551 cfg |= CR_PH_PAYLOAD;
1552 cfg |= CR_CRYPEN;
1553 stm32_cryp_write(cryp, CRYP_CR, cfg);
1554 } else {
1555 /* Phase 4 : tag */
1556 stm32_cryp_write(cryp, CRYP_IMSCR, 0);
1557 stm32_cryp_finish_req(cryp, 0);
1558 }
1559
1560 break;
1561 }
1562
1563 if (!cryp->total_in)
1564 break;
1565 }
1566}
1567
1568static void stm32_cryp_irq_write_ccm_header(struct stm32_cryp *cryp)
1569{
1570 int err;
1571 unsigned int i = 0, j, k;
1572 u32 alen, cfg, *src;
1573 u8 d8[4];
1574
1575 src = sg_virt(cryp->in_sg) + _walked_in;
1576 alen = cryp->areq->assoclen;
1577
1578 if (!_walked_in) {
1579 if (cryp->areq->assoclen <= 65280) {
1580 /* Write first u32 of B1 */
1581 d8[0] = (alen >> 8) & 0xFF;
1582 d8[1] = alen & 0xFF;
1583 d8[2] = *((u8 *)src);
1584 src = stm32_cryp_next_in(cryp, src, 1);
1585 d8[3] = *((u8 *)src);
1586 src = stm32_cryp_next_in(cryp, src, 1);
1587
1588 stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
1589 i++;
1590
1591 cryp->total_in -= min_t(size_t, 2, cryp->total_in);
1592 } else {
1593 /* Build the two first u32 of B1 */
1594 d8[0] = 0xFF;
1595 d8[1] = 0xFE;
1596 d8[2] = alen & 0xFF000000;
1597 d8[3] = alen & 0x00FF0000;
1598
1599 stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
1600 i++;
1601
1602 d8[0] = alen & 0x0000FF00;
1603 d8[1] = alen & 0x000000FF;
1604 d8[2] = *((u8 *)src);
1605 src = stm32_cryp_next_in(cryp, src, 1);
1606 d8[3] = *((u8 *)src);
1607 src = stm32_cryp_next_in(cryp, src, 1);
1608
1609 stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
1610 i++;
1611
1612 cryp->total_in -= min_t(size_t, 2, cryp->total_in);
1613 }
1614 }
1615
1616 /* Write next u32 */
1617 for (; i < AES_BLOCK_32; i++) {
1618 /* Build an u32 */
1619 memset(d8, 0, sizeof(u32));
1620 for (k = 0; k < sizeof(u32); k++) {
1621 d8[k] = *((u8 *)src);
1622 src = stm32_cryp_next_in(cryp, src, 1);
1623
1624 cryp->total_in -= min_t(size_t, 1, cryp->total_in);
1625 if ((cryp->total_in_save - cryp->total_in) == alen)
1626 break;
1627 }
1628
1629 stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
1630
1631 if ((cryp->total_in_save - cryp->total_in) == alen) {
1632 /* Write padding if needed */
1633 for (j = i + 1; j < AES_BLOCK_32; j++)
1634 stm32_cryp_write(cryp, CRYP_DIN, 0);
1635
1636 /* Wait for completion */
1637 err = stm32_cryp_wait_busy(cryp);
1638 if (err) {
1639 dev_err(cryp->dev, "Timeout (ccm header)\n");
1640 return stm32_cryp_finish_req(cryp, err);
1641 }
1642
1643 if (stm32_cryp_get_input_text_len(cryp)) {
1644 /* Phase 3 : payload */
1645 cfg = stm32_cryp_read(cryp, CRYP_CR);
1646 cfg &= ~CR_CRYPEN;
1647 stm32_cryp_write(cryp, CRYP_CR, cfg);
1648
1649 cfg &= ~CR_PH_MASK;
1650 cfg |= CR_PH_PAYLOAD;
1651 cfg |= CR_CRYPEN;
1652 stm32_cryp_write(cryp, CRYP_CR, cfg);
1653 } else {
1654 /* Phase 4 : tag */
1655 stm32_cryp_write(cryp, CRYP_IMSCR, 0);
1656 stm32_cryp_finish_req(cryp, 0);
1657 }
1658
1659 break;
1660 }
1661 }
1662}
1663
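The length prefix built into B1 above follows the CCM (RFC 3610) associated-data encoding: lengths below 2^16 - 2^8 take two bytes, larger ones use the 0xff 0xfe marker followed by a four-byte big-endian length, and the associated data itself starts immediately after. A hypothetical helper showing that encoding in isolation:

static unsigned int ccm_encode_assoclen(u8 *out, u32 alen)
{
	if (alen < 65280) {		/* 2^16 - 2^8 */
		out[0] = (alen >> 8) & 0xff;
		out[1] = alen & 0xff;
		return 2;
	}

	out[0] = 0xff;
	out[1] = 0xfe;
	out[2] = (alen >> 24) & 0xff;
	out[3] = (alen >> 16) & 0xff;
	out[4] = (alen >> 8) & 0xff;
	out[5] = alen & 0xff;
	return 6;
}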
854static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg) 1664static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
855{ 1665{
856 struct stm32_cryp *cryp = arg; 1666 struct stm32_cryp *cryp = arg;
1667 u32 ph;
857 1668
858 if (cryp->irq_status & MISR_OUT) 1669 if (cryp->irq_status & MISR_OUT)
859 /* Output FIFO IRQ: read data */ 1670 /* Output FIFO IRQ: read data */
860 if (unlikely(stm32_cryp_irq_read_data(cryp))) { 1671 if (unlikely(stm32_cryp_irq_read_data(cryp))) {
861 /* All bytes processed, finish */ 1672 /* All bytes processed, finish */
862 stm32_cryp_write(cryp, CRYP_IMSCR, 0); 1673 stm32_cryp_write(cryp, CRYP_IMSCR, 0);
863 stm32_cryp_finish_req(cryp); 1674 stm32_cryp_finish_req(cryp, 0);
864 return IRQ_HANDLED; 1675 return IRQ_HANDLED;
865 } 1676 }
866 1677
867 if (cryp->irq_status & MISR_IN) { 1678 if (cryp->irq_status & MISR_IN) {
868 /* Input FIFO IRQ: write data */ 1679 if (is_gcm(cryp)) {
869 stm32_cryp_irq_write_data(cryp); 1680 ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK;
1681 if (unlikely(ph == CR_PH_HEADER))
1682 /* Write Header */
1683 stm32_cryp_irq_write_gcm_header(cryp);
1684 else
1685 /* Input FIFO IRQ: write data */
1686 stm32_cryp_irq_write_data(cryp);
1687 cryp->gcm_ctr++;
1688 } else if (is_ccm(cryp)) {
1689 ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK;
1690 if (unlikely(ph == CR_PH_HEADER))
1691 /* Write Header */
1692 stm32_cryp_irq_write_ccm_header(cryp);
1693 else
1694 /* Input FIFO IRQ: write data */
1695 stm32_cryp_irq_write_data(cryp);
1696 } else {
1697 /* Input FIFO IRQ: write data */
1698 stm32_cryp_irq_write_data(cryp);
1699 }
870 } 1700 }
871 1701
872 return IRQ_HANDLED; 1702 return IRQ_HANDLED;
@@ -1028,8 +1858,62 @@ static struct crypto_alg crypto_algs[] = {
1028}, 1858},
1029}; 1859};
1030 1860
1861static struct aead_alg aead_algs[] = {
1862{
1863 .setkey = stm32_cryp_aes_aead_setkey,
1864 .setauthsize = stm32_cryp_aes_gcm_setauthsize,
1865 .encrypt = stm32_cryp_aes_gcm_encrypt,
1866 .decrypt = stm32_cryp_aes_gcm_decrypt,
1867 .init = stm32_cryp_aes_aead_init,
1868 .ivsize = 12,
1869 .maxauthsize = AES_BLOCK_SIZE,
1870
1871 .base = {
1872 .cra_name = "gcm(aes)",
1873 .cra_driver_name = "stm32-gcm-aes",
1874 .cra_priority = 200,
1875 .cra_flags = CRYPTO_ALG_ASYNC,
1876 .cra_blocksize = 1,
1877 .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
1878 .cra_alignmask = 0xf,
1879 .cra_module = THIS_MODULE,
1880 },
1881},
1882{
1883 .setkey = stm32_cryp_aes_aead_setkey,
1884 .setauthsize = stm32_cryp_aes_ccm_setauthsize,
1885 .encrypt = stm32_cryp_aes_ccm_encrypt,
1886 .decrypt = stm32_cryp_aes_ccm_decrypt,
1887 .init = stm32_cryp_aes_aead_init,
1888 .ivsize = AES_BLOCK_SIZE,
1889 .maxauthsize = AES_BLOCK_SIZE,
1890
1891 .base = {
1892 .cra_name = "ccm(aes)",
1893 .cra_driver_name = "stm32-ccm-aes",
1894 .cra_priority = 200,
1895 .cra_flags = CRYPTO_ALG_ASYNC,
1896 .cra_blocksize = 1,
1897 .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
1898 .cra_alignmask = 0xf,
1899 .cra_module = THIS_MODULE,
1900 },
1901},
1902};
1903
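A usage illustration (not part of the patch): once these entries are registered, any kernel user can request the transform through the regular AEAD API; whether this driver is picked depends on its cra_priority relative to other gcm(aes) providers.

static struct crypto_aead *example_get_gcm_aes(const u8 *key,
					       unsigned int keylen)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, keylen);
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, 16);
	if (ret) {
		crypto_free_aead(tfm);
		return ERR_PTR(ret);
	}

	return tfm;
}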
1904static const struct stm32_cryp_caps f7_data = {
1905 .swap_final = true,
1906 .padding_wa = true,
1907};
1908
1909static const struct stm32_cryp_caps mp1_data = {
1910 .swap_final = false,
1911 .padding_wa = false,
1912};
1913
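The capability structure referenced by these two tables is defined outside this hunk; its assumed shape, based on how the fields are used above, is:

struct stm32_cryp_caps {
	bool swap_final;	/* byte-swap the final GCM length block */
	bool padding_wa;	/* hardware needs the CTR-based padding workaround */
};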
1031static const struct of_device_id stm32_dt_ids[] = { 1914static const struct of_device_id stm32_dt_ids[] = {
1032 { .compatible = "st,stm32f756-cryp", }, 1915 { .compatible = "st,stm32f756-cryp", .data = &f7_data},
1916 { .compatible = "st,stm32mp1-cryp", .data = &mp1_data},
1033 {}, 1917 {},
1034}; 1918};
1035MODULE_DEVICE_TABLE(of, stm32_dt_ids); 1919MODULE_DEVICE_TABLE(of, stm32_dt_ids);
@@ -1046,6 +1930,10 @@ static int stm32_cryp_probe(struct platform_device *pdev)
1046 if (!cryp) 1930 if (!cryp)
1047 return -ENOMEM; 1931 return -ENOMEM;
1048 1932
1933 cryp->caps = of_device_get_match_data(dev);
1934 if (!cryp->caps)
1935 return -ENODEV;
1936
1049 cryp->dev = dev; 1937 cryp->dev = dev;
1050 1938
1051 mutex_init(&cryp->lock); 1939 mutex_init(&cryp->lock);
@@ -1102,9 +1990,6 @@ static int stm32_cryp_probe(struct platform_device *pdev)
1102 goto err_engine1; 1990 goto err_engine1;
1103 } 1991 }
1104 1992
1105 cryp->engine->prepare_cipher_request = stm32_cryp_prepare_cipher_req;
1106 cryp->engine->cipher_one_request = stm32_cryp_cipher_one_req;
1107
1108 ret = crypto_engine_start(cryp->engine); 1993 ret = crypto_engine_start(cryp->engine);
1109 if (ret) { 1994 if (ret) {
1110 dev_err(dev, "Could not start crypto engine\n"); 1995 dev_err(dev, "Could not start crypto engine\n");
@@ -1117,10 +2002,16 @@ static int stm32_cryp_probe(struct platform_device *pdev)
1117 goto err_algs; 2002 goto err_algs;
1118 } 2003 }
1119 2004
2005 ret = crypto_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
2006 if (ret)
2007 goto err_aead_algs;
2008
1120 dev_info(dev, "Initialized\n"); 2009 dev_info(dev, "Initialized\n");
1121 2010
1122 return 0; 2011 return 0;
1123 2012
2013err_aead_algs:
2014 crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
1124err_algs: 2015err_algs:
1125err_engine2: 2016err_engine2:
1126 crypto_engine_exit(cryp->engine); 2017 crypto_engine_exit(cryp->engine);
@@ -1141,6 +2032,7 @@ static int stm32_cryp_remove(struct platform_device *pdev)
1141 if (!cryp) 2032 if (!cryp)
1142 return -ENODEV; 2033 return -ENODEV;
1143 2034
2035 crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
1144 crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs)); 2036 crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
1145 2037
1146 crypto_engine_exit(cryp->engine); 2038 crypto_engine_exit(cryp->engine);
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 4ca4a264a833..981e45692695 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -122,6 +122,7 @@ enum stm32_hash_data_format {
122#define HASH_DMA_THRESHOLD 50 122#define HASH_DMA_THRESHOLD 50
123 123
124struct stm32_hash_ctx { 124struct stm32_hash_ctx {
125 struct crypto_engine_ctx enginectx;
125 struct stm32_hash_dev *hdev; 126 struct stm32_hash_dev *hdev;
126 unsigned long flags; 127 unsigned long flags;
127 128
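A note on the new enginectx member (sketch, not part of the patch): the crypto engine retrieves these per-transform callbacks by treating the tfm context as a struct crypto_engine_ctx, so enginectx must remain the first field of the context, as in:

struct example_tfm_ctx {
	struct crypto_engine_ctx enginectx;	/* must stay first */
	/* driver-private state follows */
	unsigned long flags;
};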
@@ -626,7 +627,7 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
626 writesl(hdev->io_base + HASH_DIN, buffer, 627 writesl(hdev->io_base + HASH_DIN, buffer,
627 DIV_ROUND_UP(ncp, sizeof(u32))); 628 DIV_ROUND_UP(ncp, sizeof(u32)));
628 } 629 }
629 stm32_hash_set_nblw(hdev, DIV_ROUND_UP(ncp, sizeof(u32))); 630 stm32_hash_set_nblw(hdev, ncp);
630 reg = stm32_hash_read(hdev, HASH_STR); 631 reg = stm32_hash_read(hdev, HASH_STR);
631 reg |= HASH_STR_DCAL; 632 reg |= HASH_STR_DCAL;
632 stm32_hash_write(hdev, HASH_STR, reg); 633 stm32_hash_write(hdev, HASH_STR, reg);
@@ -743,13 +744,15 @@ static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
743 struct ahash_request *req = hdev->req; 744 struct ahash_request *req = hdev->req;
744 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); 745 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
745 int err; 746 int err;
747 int buflen = rctx->bufcnt;
748
749 rctx->bufcnt = 0;
746 750
747 if (!(rctx->flags & HASH_FLAGS_CPU)) 751 if (!(rctx->flags & HASH_FLAGS_CPU))
748 err = stm32_hash_dma_send(hdev); 752 err = stm32_hash_dma_send(hdev);
749 else 753 else
750 err = stm32_hash_xmit_cpu(hdev, rctx->buffer, rctx->bufcnt, 1); 754 err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);
751 755
752 rctx->bufcnt = 0;
753 756
754 return err; 757 return err;
755} 758}
@@ -828,15 +831,19 @@ static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
828 return 0; 831 return 0;
829} 832}
830 833
834static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
835static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);
836
831static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev, 837static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
832 struct ahash_request *req) 838 struct ahash_request *req)
833{ 839{
834 return crypto_transfer_hash_request_to_engine(hdev->engine, req); 840 return crypto_transfer_hash_request_to_engine(hdev->engine, req);
835} 841}
836 842
837static int stm32_hash_prepare_req(struct crypto_engine *engine, 843static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
838 struct ahash_request *req)
839{ 844{
845 struct ahash_request *req = container_of(areq, struct ahash_request,
846 base);
840 struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); 847 struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
841 struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); 848 struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
842 struct stm32_hash_request_ctx *rctx; 849 struct stm32_hash_request_ctx *rctx;
@@ -854,9 +861,10 @@ static int stm32_hash_prepare_req(struct crypto_engine *engine,
854 return stm32_hash_hw_init(hdev, rctx); 861 return stm32_hash_hw_init(hdev, rctx);
855} 862}
856 863
857static int stm32_hash_one_request(struct crypto_engine *engine, 864static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
858 struct ahash_request *req)
859{ 865{
866 struct ahash_request *req = container_of(areq, struct ahash_request,
867 base);
860 struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); 868 struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
861 struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); 869 struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
862 struct stm32_hash_request_ctx *rctx; 870 struct stm32_hash_request_ctx *rctx;
@@ -1033,6 +1041,9 @@ static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
1033 if (algs_hmac_name) 1041 if (algs_hmac_name)
1034 ctx->flags |= HASH_FLAGS_HMAC; 1042 ctx->flags |= HASH_FLAGS_HMAC;
1035 1043
1044 ctx->enginectx.op.do_one_request = stm32_hash_one_request;
1045 ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
1046 ctx->enginectx.op.unprepare_request = NULL;
1036 return 0; 1047 return 0;
1037} 1048}
1038 1049
@@ -1096,6 +1107,8 @@ static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
1096 reg &= ~HASH_SR_OUTPUT_READY; 1107 reg &= ~HASH_SR_OUTPUT_READY;
1097 stm32_hash_write(hdev, HASH_SR, reg); 1108 stm32_hash_write(hdev, HASH_SR, reg);
1098 hdev->flags |= HASH_FLAGS_OUTPUT_READY; 1109 hdev->flags |= HASH_FLAGS_OUTPUT_READY;
 1110 /* Disable IT */
1111 stm32_hash_write(hdev, HASH_IMR, 0);
1099 return IRQ_WAKE_THREAD; 1112 return IRQ_WAKE_THREAD;
1100 } 1113 }
1101 1114
@@ -1404,18 +1417,19 @@ MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
1404static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev, 1417static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
1405 struct device *dev) 1418 struct device *dev)
1406{ 1419{
1407 int err;
1408
1409 hdev->pdata = of_device_get_match_data(dev); 1420 hdev->pdata = of_device_get_match_data(dev);
1410 if (!hdev->pdata) { 1421 if (!hdev->pdata) {
1411 dev_err(dev, "no compatible OF match\n"); 1422 dev_err(dev, "no compatible OF match\n");
1412 return -EINVAL; 1423 return -EINVAL;
1413 } 1424 }
1414 1425
1415 err = of_property_read_u32(dev->of_node, "dma-maxburst", 1426 if (of_property_read_u32(dev->of_node, "dma-maxburst",
1416 &hdev->dma_maxburst); 1427 &hdev->dma_maxburst)) {
1428 dev_info(dev, "dma-maxburst not specified, using 0\n");
1429 hdev->dma_maxburst = 0;
1430 }
1417 1431
1418 return err; 1432 return 0;
1419} 1433}
1420 1434
1421static int stm32_hash_probe(struct platform_device *pdev) 1435static int stm32_hash_probe(struct platform_device *pdev)
@@ -1493,9 +1507,6 @@ static int stm32_hash_probe(struct platform_device *pdev)
1493 goto err_engine; 1507 goto err_engine;
1494 } 1508 }
1495 1509
1496 hdev->engine->prepare_hash_request = stm32_hash_prepare_req;
1497 hdev->engine->hash_one_request = stm32_hash_one_request;
1498
1499 ret = crypto_engine_start(hdev->engine); 1510 ret = crypto_engine_start(hdev->engine);
1500 if (ret) 1511 if (ret)
1501 goto err_engine_start; 1512 goto err_engine_start;
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index 1547cbe13dc2..a81d89b3b7d8 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -451,6 +451,7 @@ static struct platform_driver sun4i_ss_driver = {
451 451
452module_platform_driver(sun4i_ss_driver); 452module_platform_driver(sun4i_ss_driver);
453 453
454MODULE_ALIAS("platform:sun4i-ss");
454MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator"); 455MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
455MODULE_LICENSE("GPL"); 456MODULE_LICENSE("GPL");
456MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>"); 457MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>");
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 6882fa2f8bad..7cebf0a6ffbc 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -104,16 +104,34 @@ static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
104/* 104/*
105 * map virtual single (contiguous) pointer to h/w descriptor pointer 105 * map virtual single (contiguous) pointer to h/w descriptor pointer
106 */ 106 */
107static void __map_single_talitos_ptr(struct device *dev,
108 struct talitos_ptr *ptr,
109 unsigned int len, void *data,
110 enum dma_data_direction dir,
111 unsigned long attrs)
112{
113 dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
114 struct talitos_private *priv = dev_get_drvdata(dev);
115 bool is_sec1 = has_ftr_sec1(priv);
116
117 to_talitos_ptr(ptr, dma_addr, len, is_sec1);
118}
119
107static void map_single_talitos_ptr(struct device *dev, 120static void map_single_talitos_ptr(struct device *dev,
108 struct talitos_ptr *ptr, 121 struct talitos_ptr *ptr,
109 unsigned int len, void *data, 122 unsigned int len, void *data,
110 enum dma_data_direction dir) 123 enum dma_data_direction dir)
111{ 124{
112 dma_addr_t dma_addr = dma_map_single(dev, data, len, dir); 125 __map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
113 struct talitos_private *priv = dev_get_drvdata(dev); 126}
114 bool is_sec1 = has_ftr_sec1(priv);
115 127
116 to_talitos_ptr(ptr, dma_addr, len, is_sec1); 128static void map_single_talitos_ptr_nosync(struct device *dev,
129 struct talitos_ptr *ptr,
130 unsigned int len, void *data,
131 enum dma_data_direction dir)
132{
133 __map_single_talitos_ptr(dev, ptr, len, data, dir,
134 DMA_ATTR_SKIP_CPU_SYNC);
117} 135}
118 136
119/* 137/*
@@ -832,8 +850,6 @@ struct talitos_ctx {
832 unsigned int keylen; 850 unsigned int keylen;
833 unsigned int enckeylen; 851 unsigned int enckeylen;
834 unsigned int authkeylen; 852 unsigned int authkeylen;
835 dma_addr_t dma_buf;
836 dma_addr_t dma_hw_context;
837}; 853};
838 854
839#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE 855#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
@@ -888,10 +904,12 @@ static int aead_setkey(struct crypto_aead *authenc,
888 ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen, 904 ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
889 DMA_TO_DEVICE); 905 DMA_TO_DEVICE);
890 906
907 memzero_explicit(&keys, sizeof(keys));
891 return 0; 908 return 0;
892 909
893badkey: 910badkey:
894 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); 911 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
912 memzero_explicit(&keys, sizeof(keys));
895 return -EINVAL; 913 return -EINVAL;
896} 914}
897 915
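The added memzero_explicit() calls wipe the on-stack authenc key material (a struct crypto_authenc_keys local in this function) on both the success and error paths; unlike plain memset(), the compiler is not allowed to elide it even though the variable is never read again. A minimal sketch of the idiom:

static void example_wipe_keys(struct crypto_authenc_keys *keys)
{
	/* memzero_explicit() cannot be optimised away like memset() can */
	memzero_explicit(keys, sizeof(*keys));
}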
@@ -1130,10 +1148,10 @@ next:
1130 return count; 1148 return count;
1131} 1149}
1132 1150
1133static int talitos_sg_map(struct device *dev, struct scatterlist *src, 1151static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1134 unsigned int len, struct talitos_edesc *edesc, 1152 unsigned int len, struct talitos_edesc *edesc,
1135 struct talitos_ptr *ptr, 1153 struct talitos_ptr *ptr, int sg_count,
1136 int sg_count, unsigned int offset, int tbl_off) 1154 unsigned int offset, int tbl_off, int elen)
1137{ 1155{
1138 struct talitos_private *priv = dev_get_drvdata(dev); 1156 struct talitos_private *priv = dev_get_drvdata(dev);
1139 bool is_sec1 = has_ftr_sec1(priv); 1157 bool is_sec1 = has_ftr_sec1(priv);
@@ -1142,6 +1160,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1142 to_talitos_ptr(ptr, 0, 0, is_sec1); 1160 to_talitos_ptr(ptr, 0, 0, is_sec1);
1143 return 1; 1161 return 1;
1144 } 1162 }
1163 to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1145 if (sg_count == 1) { 1164 if (sg_count == 1) {
1146 to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1); 1165 to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
1147 return sg_count; 1166 return sg_count;
@@ -1150,7 +1169,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1150 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1); 1169 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1151 return sg_count; 1170 return sg_count;
1152 } 1171 }
1153 sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, 1172 sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
1154 &edesc->link_tbl[tbl_off]); 1173 &edesc->link_tbl[tbl_off]);
1155 if (sg_count == 1) { 1174 if (sg_count == 1) {
1156 /* Only one segment now, so no link tbl needed*/ 1175 /* Only one segment now, so no link tbl needed*/
@@ -1164,6 +1183,15 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1164 return sg_count; 1183 return sg_count;
1165} 1184}
1166 1185
1186static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1187 unsigned int len, struct talitos_edesc *edesc,
1188 struct talitos_ptr *ptr, int sg_count,
1189 unsigned int offset, int tbl_off)
1190{
1191 return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1192 tbl_off, 0);
1193}
1194
1167/* 1195/*
1168 * fill in and submit ipsec_esp descriptor 1196 * fill in and submit ipsec_esp descriptor
1169 */ 1197 */
@@ -1181,7 +1209,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1181 unsigned int ivsize = crypto_aead_ivsize(aead); 1209 unsigned int ivsize = crypto_aead_ivsize(aead);
1182 int tbl_off = 0; 1210 int tbl_off = 0;
1183 int sg_count, ret; 1211 int sg_count, ret;
1184 int sg_link_tbl_len; 1212 int elen = 0;
1185 bool sync_needed = false; 1213 bool sync_needed = false;
1186 struct talitos_private *priv = dev_get_drvdata(dev); 1214 struct talitos_private *priv = dev_get_drvdata(dev);
1187 bool is_sec1 = has_ftr_sec1(priv); 1215 bool is_sec1 = has_ftr_sec1(priv);
@@ -1223,17 +1251,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1223 * extent is bytes of HMAC postpended to ciphertext, 1251 * extent is bytes of HMAC postpended to ciphertext,
1224 * typically 12 for ipsec 1252 * typically 12 for ipsec
1225 */ 1253 */
1226 sg_link_tbl_len = cryptlen; 1254 if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
1227 1255 elen = authsize;
1228 if (is_ipsec_esp) {
1229 to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1);
1230 1256
1231 if (desc->hdr & DESC_HDR_MODE1_MDEU_CICV) 1257 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1232 sg_link_tbl_len += authsize; 1258 sg_count, areq->assoclen, tbl_off, elen);
1233 }
1234
1235 ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
1236 &desc->ptr[4], sg_count, areq->assoclen, tbl_off);
1237 1259
1238 if (ret > 1) { 1260 if (ret > 1) {
1239 tbl_off += ret; 1261 tbl_off += ret;
@@ -1404,7 +1426,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1404 1426
1405 edesc = kmalloc(alloc_len, GFP_DMA | flags); 1427 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1406 if (!edesc) { 1428 if (!edesc) {
1407 dev_err(dev, "could not allocate edescriptor\n");
1408 err = ERR_PTR(-ENOMEM); 1429 err = ERR_PTR(-ENOMEM);
1409 goto error_sg; 1430 goto error_sg;
1410 } 1431 }
@@ -1690,9 +1711,30 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
1690 struct ahash_request *areq) 1711 struct ahash_request *areq)
1691{ 1712{
1692 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1713 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1714 struct talitos_private *priv = dev_get_drvdata(dev);
1715 bool is_sec1 = has_ftr_sec1(priv);
1716 struct talitos_desc *desc = &edesc->desc;
1717 struct talitos_desc *desc2 = desc + 1;
1718
1719 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1720 if (desc->next_desc &&
1721 desc->ptr[5].ptr != desc2->ptr[5].ptr)
1722 unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1693 1723
1694 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0); 1724 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1695 1725
1726 /* When using hashctx-in, must unmap it. */
1727 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1728 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1729 DMA_TO_DEVICE);
1730 else if (desc->next_desc)
1731 unmap_single_talitos_ptr(dev, &desc2->ptr[1],
1732 DMA_TO_DEVICE);
1733
1734 if (is_sec1 && req_ctx->nbuf)
1735 unmap_single_talitos_ptr(dev, &desc->ptr[3],
1736 DMA_TO_DEVICE);
1737
1696 if (edesc->dma_len) 1738 if (edesc->dma_len)
1697 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, 1739 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1698 DMA_BIDIRECTIONAL); 1740 DMA_BIDIRECTIONAL);
@@ -1766,8 +1808,10 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1766 1808
1767 /* hash context in */ 1809 /* hash context in */
1768 if (!req_ctx->first || req_ctx->swinit) { 1810 if (!req_ctx->first || req_ctx->swinit) {
1769 to_talitos_ptr(&desc->ptr[1], ctx->dma_hw_context, 1811 map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1770 req_ctx->hw_context_size, is_sec1); 1812 req_ctx->hw_context_size,
1813 req_ctx->hw_context,
1814 DMA_TO_DEVICE);
1771 req_ctx->swinit = 0; 1815 req_ctx->swinit = 0;
1772 } 1816 }
1773 /* Indicate next op is not the first. */ 1817 /* Indicate next op is not the first. */
@@ -1793,10 +1837,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1793 * data in 1837 * data in
1794 */ 1838 */
1795 if (is_sec1 && req_ctx->nbuf) { 1839 if (is_sec1 && req_ctx->nbuf) {
1796 dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx * 1840 map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1797 HASH_MAX_BLOCK_SIZE; 1841 req_ctx->buf[req_ctx->buf_idx],
1798 1842 DMA_TO_DEVICE);
1799 to_talitos_ptr(&desc->ptr[3], dma_buf, req_ctx->nbuf, is_sec1);
1800 } else { 1843 } else {
1801 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, 1844 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1802 &desc->ptr[3], sg_count, offset, 0); 1845 &desc->ptr[3], sg_count, offset, 0);
@@ -1812,8 +1855,10 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1812 crypto_ahash_digestsize(tfm), 1855 crypto_ahash_digestsize(tfm),
1813 areq->result, DMA_FROM_DEVICE); 1856 areq->result, DMA_FROM_DEVICE);
1814 else 1857 else
1815 to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context, 1858 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1816 req_ctx->hw_context_size, is_sec1); 1859 req_ctx->hw_context_size,
1860 req_ctx->hw_context,
1861 DMA_FROM_DEVICE);
1817 1862
1818 /* last DWORD empty */ 1863 /* last DWORD empty */
1819 1864
@@ -1832,9 +1877,14 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1832 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT; 1877 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1833 desc->hdr &= ~DESC_HDR_DONE_NOTIFY; 1878 desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1834 1879
1835 to_talitos_ptr(&desc2->ptr[1], ctx->dma_hw_context, 1880 if (desc->ptr[1].ptr)
1836 req_ctx->hw_context_size, is_sec1); 1881 copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1837 1882 is_sec1);
1883 else
1884 map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1885 req_ctx->hw_context_size,
1886 req_ctx->hw_context,
1887 DMA_TO_DEVICE);
1838 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1); 1888 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1839 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, 1889 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1840 &desc2->ptr[3], sg_count, offset, 0); 1890 &desc2->ptr[3], sg_count, offset, 0);
@@ -1842,8 +1892,10 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1842 sync_needed = true; 1892 sync_needed = true;
1843 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1); 1893 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1844 if (req_ctx->last) 1894 if (req_ctx->last)
1845 to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context, 1895 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1846 req_ctx->hw_context_size, is_sec1); 1896 req_ctx->hw_context_size,
1897 req_ctx->hw_context,
1898 DMA_FROM_DEVICE);
1847 1899
1848 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE, 1900 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1849 DMA_BIDIRECTIONAL); 1901 DMA_BIDIRECTIONAL);
@@ -1885,8 +1937,7 @@ static int ahash_init(struct ahash_request *areq)
1885 struct device *dev = ctx->dev; 1937 struct device *dev = ctx->dev;
1886 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1938 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1887 unsigned int size; 1939 unsigned int size;
1888 struct talitos_private *priv = dev_get_drvdata(dev); 1940 dma_addr_t dma;
1889 bool is_sec1 = has_ftr_sec1(priv);
1890 1941
1891 /* Initialize the context */ 1942 /* Initialize the context */
1892 req_ctx->buf_idx = 0; 1943 req_ctx->buf_idx = 0;
@@ -1898,18 +1949,10 @@ static int ahash_init(struct ahash_request *areq)
1898 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512; 1949 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1899 req_ctx->hw_context_size = size; 1950 req_ctx->hw_context_size = size;
1900 1951
1901 if (ctx->dma_hw_context) 1952 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1902 dma_unmap_single(dev, ctx->dma_hw_context, size, 1953 DMA_TO_DEVICE);
1903 DMA_BIDIRECTIONAL); 1954 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1904 ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size, 1955
1905 DMA_BIDIRECTIONAL);
1906 if (ctx->dma_buf)
1907 dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf),
1908 DMA_TO_DEVICE);
1909 if (is_sec1)
1910 ctx->dma_buf = dma_map_single(dev, req_ctx->buf,
1911 sizeof(req_ctx->buf),
1912 DMA_TO_DEVICE);
1913 return 0; 1956 return 0;
1914} 1957}
1915 1958
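The map/unmap pair added here (and mirrored in ahash_export()/ahash_import() below) is a cache-maintenance idiom: mapping the freshly initialised hw_context and immediately unmapping it performs the CPU sync implied by the direction, without keeping a DMA mapping alive. A sketch of the idiom in isolation (helper name hypothetical):

static void example_sync_for_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (!dma_mapping_error(dev, dma))
		dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
}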
@@ -1920,12 +1963,6 @@ static int ahash_init(struct ahash_request *areq)
1920static int ahash_init_sha224_swinit(struct ahash_request *areq) 1963static int ahash_init_sha224_swinit(struct ahash_request *areq)
1921{ 1964{
1922 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1965 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1923 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1924 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1925 struct device *dev = ctx->dev;
1926
1927 ahash_init(areq);
1928 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1929 1966
1930 req_ctx->hw_context[0] = SHA224_H0; 1967 req_ctx->hw_context[0] = SHA224_H0;
1931 req_ctx->hw_context[1] = SHA224_H1; 1968 req_ctx->hw_context[1] = SHA224_H1;
@@ -1940,8 +1977,8 @@ static int ahash_init_sha224_swinit(struct ahash_request *areq)
1940 req_ctx->hw_context[8] = 0; 1977 req_ctx->hw_context[8] = 0;
1941 req_ctx->hw_context[9] = 0; 1978 req_ctx->hw_context[9] = 0;
1942 1979
1943 dma_sync_single_for_device(dev, ctx->dma_hw_context, 1980 ahash_init(areq);
1944 req_ctx->hw_context_size, DMA_TO_DEVICE); 1981 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1945 1982
1946 return 0; 1983 return 0;
1947} 1984}
@@ -2046,13 +2083,6 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
2046 /* request SEC to INIT hash. */ 2083 /* request SEC to INIT hash. */
2047 if (req_ctx->first && !req_ctx->swinit) 2084 if (req_ctx->first && !req_ctx->swinit)
2048 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT; 2085 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2049 if (is_sec1) {
2050 dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx *
2051 HASH_MAX_BLOCK_SIZE;
2052
2053 dma_sync_single_for_device(dev, dma_buf,
2054 req_ctx->nbuf, DMA_TO_DEVICE);
2055 }
2056 2086
2057 /* When the tfm context has a keylen, it's an HMAC. 2087 /* When the tfm context has a keylen, it's an HMAC.
2058 * A first or last (ie. not middle) descriptor must request HMAC. 2088 * A first or last (ie. not middle) descriptor must request HMAC.
@@ -2106,12 +2136,15 @@ static int ahash_export(struct ahash_request *areq, void *out)
2106{ 2136{
2107 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 2137 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2108 struct talitos_export_state *export = out; 2138 struct talitos_export_state *export = out;
2109 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); 2139 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2110 struct talitos_ctx *ctx = crypto_ahash_ctx(ahash); 2140 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2111 struct device *dev = ctx->dev; 2141 struct device *dev = ctx->dev;
2142 dma_addr_t dma;
2143
2144 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2145 DMA_FROM_DEVICE);
2146 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2112 2147
2113 dma_sync_single_for_cpu(dev, ctx->dma_hw_context,
2114 req_ctx->hw_context_size, DMA_FROM_DEVICE);
2115 memcpy(export->hw_context, req_ctx->hw_context, 2148 memcpy(export->hw_context, req_ctx->hw_context,
2116 req_ctx->hw_context_size); 2149 req_ctx->hw_context_size);
2117 memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf); 2150 memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
@@ -2128,39 +2161,29 @@ static int ahash_import(struct ahash_request *areq, const void *in)
2128{ 2161{
2129 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 2162 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2130 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); 2163 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2131 const struct talitos_export_state *export = in;
2132 unsigned int size;
2133 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); 2164 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2134 struct device *dev = ctx->dev; 2165 struct device *dev = ctx->dev;
2135 struct talitos_private *priv = dev_get_drvdata(dev); 2166 const struct talitos_export_state *export = in;
2136 bool is_sec1 = has_ftr_sec1(priv); 2167 unsigned int size;
2168 dma_addr_t dma;
2137 2169
2138 memset(req_ctx, 0, sizeof(*req_ctx)); 2170 memset(req_ctx, 0, sizeof(*req_ctx));
2139 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE) 2171 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2140 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 2172 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2141 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512; 2173 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2142 req_ctx->hw_context_size = size; 2174 req_ctx->hw_context_size = size;
2143 if (ctx->dma_hw_context)
2144 dma_unmap_single(dev, ctx->dma_hw_context, size,
2145 DMA_BIDIRECTIONAL);
2146
2147 memcpy(req_ctx->hw_context, export->hw_context, size); 2175 memcpy(req_ctx->hw_context, export->hw_context, size);
2148 ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size,
2149 DMA_BIDIRECTIONAL);
2150 if (ctx->dma_buf)
2151 dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf),
2152 DMA_TO_DEVICE);
2153 memcpy(req_ctx->buf[0], export->buf, export->nbuf); 2176 memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2154 if (is_sec1)
2155 ctx->dma_buf = dma_map_single(dev, req_ctx->buf,
2156 sizeof(req_ctx->buf),
2157 DMA_TO_DEVICE);
2158 req_ctx->swinit = export->swinit; 2177 req_ctx->swinit = export->swinit;
2159 req_ctx->first = export->first; 2178 req_ctx->first = export->first;
2160 req_ctx->last = export->last; 2179 req_ctx->last = export->last;
2161 req_ctx->to_hash_later = export->to_hash_later; 2180 req_ctx->to_hash_later = export->to_hash_later;
2162 req_ctx->nbuf = export->nbuf; 2181 req_ctx->nbuf = export->nbuf;
2163 2182
2183 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2184 DMA_TO_DEVICE);
2185 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2186
2164 return 0; 2187 return 0;
2165} 2188}
2166 2189
@@ -3064,27 +3087,6 @@ static void talitos_cra_exit(struct crypto_tfm *tfm)
3064 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE); 3087 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3065} 3088}
3066 3089
3067static void talitos_cra_exit_ahash(struct crypto_tfm *tfm)
3068{
3069 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3070 struct device *dev = ctx->dev;
3071 unsigned int size;
3072
3073 talitos_cra_exit(tfm);
3074
3075 size = (crypto_ahash_digestsize(__crypto_ahash_cast(tfm)) <=
3076 SHA256_DIGEST_SIZE)
3077 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
3078 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
3079
3080 if (ctx->dma_hw_context)
3081 dma_unmap_single(dev, ctx->dma_hw_context, size,
3082 DMA_BIDIRECTIONAL);
3083 if (ctx->dma_buf)
3084 dma_unmap_single(dev, ctx->dma_buf, HASH_MAX_BLOCK_SIZE * 2,
3085 DMA_TO_DEVICE);
3086}
3087
3088/* 3090/*
3089 * given the alg's descriptor header template, determine whether descriptor 3091 * given the alg's descriptor header template, determine whether descriptor
3090 * type and primary/secondary execution units required match the hw 3092 * type and primary/secondary execution units required match the hw
@@ -3183,7 +3185,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3183 case CRYPTO_ALG_TYPE_AHASH: 3185 case CRYPTO_ALG_TYPE_AHASH:
3184 alg = &t_alg->algt.alg.hash.halg.base; 3186 alg = &t_alg->algt.alg.hash.halg.base;
3185 alg->cra_init = talitos_cra_init_ahash; 3187 alg->cra_init = talitos_cra_init_ahash;
3186 alg->cra_exit = talitos_cra_exit_ahash; 3188 alg->cra_exit = talitos_cra_exit;
3187 alg->cra_type = &crypto_ahash_type; 3189 alg->cra_type = &crypto_ahash_type;
3188 t_alg->algt.alg.hash.init = ahash_init; 3190 t_alg->algt.alg.hash.init = ahash_init;
3189 t_alg->algt.alg.hash.update = ahash_update; 3191 t_alg->algt.alg.hash.update = ahash_update;
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 765f53e548ab..cb31b59c9d53 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1404,9 +1404,8 @@ static void cryp_algs_unregister_all(void)
1404static int ux500_cryp_probe(struct platform_device *pdev) 1404static int ux500_cryp_probe(struct platform_device *pdev)
1405{ 1405{
1406 int ret; 1406 int ret;
1407 int cryp_error = 0; 1407 struct resource *res;
1408 struct resource *res = NULL; 1408 struct resource *res_irq;
1409 struct resource *res_irq = NULL;
1410 struct cryp_device_data *device_data; 1409 struct cryp_device_data *device_data;
1411 struct cryp_protection_config prot = { 1410 struct cryp_protection_config prot = {
1412 .privilege_access = CRYP_STATE_ENABLE 1411 .privilege_access = CRYP_STATE_ENABLE
@@ -1416,7 +1415,6 @@ static int ux500_cryp_probe(struct platform_device *pdev)
1416 dev_dbg(dev, "[%s]", __func__); 1415 dev_dbg(dev, "[%s]", __func__);
1417 device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC); 1416 device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
1418 if (!device_data) { 1417 if (!device_data) {
1419 dev_err(dev, "[%s]: kzalloc() failed!", __func__);
1420 ret = -ENOMEM; 1418 ret = -ENOMEM;
1421 goto out; 1419 goto out;
1422 } 1420 }
@@ -1479,15 +1477,13 @@ static int ux500_cryp_probe(struct platform_device *pdev)
1479 goto out_clk_unprepare; 1477 goto out_clk_unprepare;
1480 } 1478 }
1481 1479
1482 cryp_error = cryp_check(device_data); 1480 if (cryp_check(device_data)) {
1483 if (cryp_error != 0) { 1481 dev_err(dev, "[%s]: cryp_check() failed!", __func__);
1484 dev_err(dev, "[%s]: cryp_init() failed!", __func__);
1485 ret = -EINVAL; 1482 ret = -EINVAL;
1486 goto out_power; 1483 goto out_power;
1487 } 1484 }
1488 1485
1489 cryp_error = cryp_configure_protection(device_data, &prot); 1486 if (cryp_configure_protection(device_data, &prot)) {
1490 if (cryp_error != 0) {
1491 dev_err(dev, "[%s]: cryp_configure_protection() failed!", 1487 dev_err(dev, "[%s]: cryp_configure_protection() failed!",
1492 __func__); 1488 __func__);
1493 ret = -EINVAL; 1489 ret = -EINVAL;
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 9acccad26928..2d0a677bcc76 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1403,6 +1403,16 @@ out:
1403 return ret1 ? ret1 : ret2; 1403 return ret1 ? ret1 : ret2;
1404} 1404}
1405 1405
1406static int ahash_noimport(struct ahash_request *req, const void *in)
1407{
1408 return -ENOSYS;
1409}
1410
1411static int ahash_noexport(struct ahash_request *req, void *out)
1412{
1413 return -ENOSYS;
1414}
1415
1406static int hmac_sha1_init(struct ahash_request *req) 1416static int hmac_sha1_init(struct ahash_request *req)
1407{ 1417{
1408 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1418 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -1507,6 +1517,8 @@ static struct hash_algo_template hash_algs[] = {
1507 .update = ahash_update, 1517 .update = ahash_update,
1508 .final = ahash_final, 1518 .final = ahash_final,
1509 .digest = ahash_sha1_digest, 1519 .digest = ahash_sha1_digest,
1520 .export = ahash_noexport,
1521 .import = ahash_noimport,
1510 .halg.digestsize = SHA1_DIGEST_SIZE, 1522 .halg.digestsize = SHA1_DIGEST_SIZE,
1511 .halg.statesize = sizeof(struct hash_ctx), 1523 .halg.statesize = sizeof(struct hash_ctx),
1512 .halg.base = { 1524 .halg.base = {
@@ -1529,6 +1541,8 @@ static struct hash_algo_template hash_algs[] = {
1529 .update = ahash_update, 1541 .update = ahash_update,
1530 .final = ahash_final, 1542 .final = ahash_final,
1531 .digest = ahash_sha256_digest, 1543 .digest = ahash_sha256_digest,
1544 .export = ahash_noexport,
1545 .import = ahash_noimport,
1532 .halg.digestsize = SHA256_DIGEST_SIZE, 1546 .halg.digestsize = SHA256_DIGEST_SIZE,
1533 .halg.statesize = sizeof(struct hash_ctx), 1547 .halg.statesize = sizeof(struct hash_ctx),
1534 .halg.base = { 1548 .halg.base = {
@@ -1553,6 +1567,8 @@ static struct hash_algo_template hash_algs[] = {
1553 .final = ahash_final, 1567 .final = ahash_final,
1554 .digest = hmac_sha1_digest, 1568 .digest = hmac_sha1_digest,
1555 .setkey = hmac_sha1_setkey, 1569 .setkey = hmac_sha1_setkey,
1570 .export = ahash_noexport,
1571 .import = ahash_noimport,
1556 .halg.digestsize = SHA1_DIGEST_SIZE, 1572 .halg.digestsize = SHA1_DIGEST_SIZE,
1557 .halg.statesize = sizeof(struct hash_ctx), 1573 .halg.statesize = sizeof(struct hash_ctx),
1558 .halg.base = { 1574 .halg.base = {
@@ -1577,6 +1593,8 @@ static struct hash_algo_template hash_algs[] = {
1577 .final = ahash_final, 1593 .final = ahash_final,
1578 .digest = hmac_sha256_digest, 1594 .digest = hmac_sha256_digest,
1579 .setkey = hmac_sha256_setkey, 1595 .setkey = hmac_sha256_setkey,
1596 .export = ahash_noexport,
1597 .import = ahash_noimport,
1580 .halg.digestsize = SHA256_DIGEST_SIZE, 1598 .halg.digestsize = SHA256_DIGEST_SIZE,
1581 .halg.statesize = sizeof(struct hash_ctx), 1599 .halg.statesize = sizeof(struct hash_ctx),
1582 .halg.base = { 1600 .halg.base = {
diff --git a/drivers/crypto/virtio/Kconfig b/drivers/crypto/virtio/Kconfig
index 5db07495ddc5..a4324b1383a4 100644
--- a/drivers/crypto/virtio/Kconfig
+++ b/drivers/crypto/virtio/Kconfig
@@ -2,7 +2,6 @@ config CRYPTO_DEV_VIRTIO
2 tristate "VirtIO crypto driver" 2 tristate "VirtIO crypto driver"
3 depends on VIRTIO 3 depends on VIRTIO
4 select CRYPTO_AEAD 4 select CRYPTO_AEAD
5 select CRYPTO_AUTHENC
6 select CRYPTO_BLKCIPHER 5 select CRYPTO_BLKCIPHER
7 select CRYPTO_ENGINE 6 select CRYPTO_ENGINE
8 default m 7 default m
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index abe8c15450df..ba190cfa7aa1 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -29,6 +29,7 @@
29 29
30 30
31struct virtio_crypto_ablkcipher_ctx { 31struct virtio_crypto_ablkcipher_ctx {
32 struct crypto_engine_ctx enginectx;
32 struct virtio_crypto *vcrypto; 33 struct virtio_crypto *vcrypto;
33 struct crypto_tfm *tfm; 34 struct crypto_tfm *tfm;
34 35
@@ -491,7 +492,7 @@ static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
491 vc_sym_req->ablkcipher_req = req; 492 vc_sym_req->ablkcipher_req = req;
492 vc_sym_req->encrypt = true; 493 vc_sym_req->encrypt = true;
493 494
494 return crypto_transfer_cipher_request_to_engine(data_vq->engine, req); 495 return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
495} 496}
496 497
497static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req) 498static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
@@ -511,7 +512,7 @@ static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
511 vc_sym_req->ablkcipher_req = req; 512 vc_sym_req->ablkcipher_req = req;
512 vc_sym_req->encrypt = false; 513 vc_sym_req->encrypt = false;
513 514
514 return crypto_transfer_cipher_request_to_engine(data_vq->engine, req); 515 return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
515} 516}
516 517
517static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm) 518static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
@@ -521,6 +522,9 @@ static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
521 tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request); 522 tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
522 ctx->tfm = tfm; 523 ctx->tfm = tfm;
523 524
525 ctx->enginectx.op.do_one_request = virtio_crypto_ablkcipher_crypt_req;
526 ctx->enginectx.op.prepare_request = NULL;
527 ctx->enginectx.op.unprepare_request = NULL;
524 return 0; 528 return 0;
525} 529}
526 530
@@ -538,9 +542,9 @@ static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
538} 542}
539 543
540int virtio_crypto_ablkcipher_crypt_req( 544int virtio_crypto_ablkcipher_crypt_req(
541 struct crypto_engine *engine, 545 struct crypto_engine *engine, void *vreq)
542 struct ablkcipher_request *req)
543{ 546{
547 struct ablkcipher_request *req = container_of(vreq, struct ablkcipher_request, base);
544 struct virtio_crypto_sym_request *vc_sym_req = 548 struct virtio_crypto_sym_request *vc_sym_req =
545 ablkcipher_request_ctx(req); 549 ablkcipher_request_ctx(req);
546 struct virtio_crypto_request *vc_req = &vc_sym_req->base; 550 struct virtio_crypto_request *vc_req = &vc_sym_req->base;
@@ -561,8 +565,8 @@ static void virtio_crypto_ablkcipher_finalize_req(
561 struct ablkcipher_request *req, 565 struct ablkcipher_request *req,
562 int err) 566 int err)
563{ 567{
564 crypto_finalize_cipher_request(vc_sym_req->base.dataq->engine, 568 crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
565 req, err); 569 req, err);
566 kzfree(vc_sym_req->iv); 570 kzfree(vc_sym_req->iv);
567 virtcrypto_clear_request(&vc_sym_req->base); 571 virtcrypto_clear_request(&vc_sym_req->base);
568} 572}
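
For reference, a condensed sketch of the per-transform hookup this file is converted to: the engine now finds its callbacks through a struct crypto_engine_ctx placed at the start of the tfm context, and do_one_request() recovers the typed request from the opaque pointer, just as the hunk above does with container_of(). Names prefixed my_ are illustrative, not virtio-crypto code.

#include <linux/crypto.h>
#include <crypto/engine.h>

struct my_ctx {
        struct crypto_engine_ctx enginectx;     /* the engine reads the ops by casting the tfm ctx, so keep this first */
        /* driver-specific state follows */
};

static int my_one_request(struct crypto_engine *engine, void *areq)
{
        struct ablkcipher_request *req =
                container_of(areq, struct ablkcipher_request, base);

        /* program the hardware with req; completion is reported later
         * via crypto_finalize_ablkcipher_request() */
        return 0;
}

static int my_init(struct crypto_tfm *tfm)
{
        struct my_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->enginectx.op.do_one_request = my_one_request;
        ctx->enginectx.op.prepare_request = NULL;
        ctx->enginectx.op.unprepare_request = NULL;
        return 0;
}
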
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index e976539a05d9..66501a5a2b7b 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -24,7 +24,6 @@
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25#include <crypto/aead.h> 25#include <crypto/aead.h>
26#include <crypto/aes.h> 26#include <crypto/aes.h>
27#include <crypto/authenc.h>
28#include <crypto/engine.h> 27#include <crypto/engine.h>
29 28
30 29
@@ -107,8 +106,7 @@ struct virtio_crypto *virtcrypto_get_dev_node(int node);
107int virtcrypto_dev_start(struct virtio_crypto *vcrypto); 106int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
108void virtcrypto_dev_stop(struct virtio_crypto *vcrypto); 107void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
109int virtio_crypto_ablkcipher_crypt_req( 108int virtio_crypto_ablkcipher_crypt_req(
110 struct crypto_engine *engine, 109 struct crypto_engine *engine, void *vreq);
111 struct ablkcipher_request *req);
112 110
113void 111void
114virtcrypto_clear_request(struct virtio_crypto_request *vc_req); 112virtcrypto_clear_request(struct virtio_crypto_request *vc_req);
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index ff1410a32c2b..83326986c113 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -111,9 +111,6 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
111 ret = -ENOMEM; 111 ret = -ENOMEM;
112 goto err_engine; 112 goto err_engine;
113 } 113 }
114
115 vi->data_vq[i].engine->cipher_one_request =
116 virtio_crypto_ablkcipher_crypt_req;
117 } 114 }
118 115
119 kfree(names); 116 kfree(names);
diff --git a/drivers/staging/ccree/Kconfig b/drivers/staging/ccree/Kconfig
index c94dfe8adb63..168191fa0357 100644
--- a/drivers/staging/ccree/Kconfig
+++ b/drivers/staging/ccree/Kconfig
@@ -1,8 +1,8 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2 2
3config CRYPTO_DEV_CCREE 3config CRYPTO_DEV_CCREE_OLD
4 tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators" 4 tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators"
5 depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA 5 depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA && BROKEN
6 default n 6 default n
7 select CRYPTO_HASH 7 select CRYPTO_HASH
8 select CRYPTO_BLKCIPHER 8 select CRYPTO_BLKCIPHER
diff --git a/drivers/staging/ccree/Makefile b/drivers/staging/ccree/Makefile
index bdc27970f95f..553db5c45354 100644
--- a/drivers/staging/ccree/Makefile
+++ b/drivers/staging/ccree/Makefile
@@ -1,6 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2 2
3obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o 3obj-$(CONFIG_CRYPTO_DEV_CCREE_OLD) := ccree.o
4ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o 4ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
5ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o 5ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o
6ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o 6ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
diff --git a/include/crypto/ablk_helper.h b/include/crypto/ablk_helper.h
deleted file mode 100644
index 4e655c2a4e15..000000000000
--- a/include/crypto/ablk_helper.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Shared async block cipher helpers
4 */
5
6#ifndef _CRYPTO_ABLK_HELPER_H
7#define _CRYPTO_ABLK_HELPER_H
8
9#include <linux/crypto.h>
10#include <linux/kernel.h>
11#include <crypto/cryptd.h>
12
13struct async_helper_ctx {
14 struct cryptd_ablkcipher *cryptd_tfm;
15};
16
17extern int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
18 unsigned int key_len);
19
20extern int __ablk_encrypt(struct ablkcipher_request *req);
21
22extern int ablk_encrypt(struct ablkcipher_request *req);
23
24extern int ablk_decrypt(struct ablkcipher_request *req);
25
26extern void ablk_exit(struct crypto_tfm *tfm);
27
28extern int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name);
29
30extern int ablk_init(struct crypto_tfm *tfm);
31
32#endif /* _CRYPTO_ABLK_HELPER_H */
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index e3cebf640c00..1aba888241dd 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -30,7 +30,6 @@ struct crypto_type {
30 int (*init_tfm)(struct crypto_tfm *tfm); 30 int (*init_tfm)(struct crypto_tfm *tfm);
31 void (*show)(struct seq_file *m, struct crypto_alg *alg); 31 void (*show)(struct seq_file *m, struct crypto_alg *alg);
32 int (*report)(struct sk_buff *skb, struct crypto_alg *alg); 32 int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
33 struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
34 void (*free)(struct crypto_instance *inst); 33 void (*free)(struct crypto_instance *inst);
35 34
36 unsigned int type; 35 unsigned int type;
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index dd04c1699b51..1cbec29af3d6 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -17,7 +17,10 @@
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/kthread.h> 18#include <linux/kthread.h>
19#include <crypto/algapi.h> 19#include <crypto/algapi.h>
20#include <crypto/aead.h>
21#include <crypto/akcipher.h>
20#include <crypto/hash.h> 22#include <crypto/hash.h>
23#include <crypto/skcipher.h>
21 24
22#define ENGINE_NAME_LEN 30 25#define ENGINE_NAME_LEN 30
23/* 26/*
@@ -37,12 +40,6 @@
37 * @unprepare_crypt_hardware: there are currently no more requests on the 40 * @unprepare_crypt_hardware: there are currently no more requests on the
38 * queue so the subsystem notifies the driver that it may relax the 41 * queue so the subsystem notifies the driver that it may relax the
39 * hardware by issuing this call 42 * hardware by issuing this call
40 * @prepare_cipher_request: do some prepare if need before handle the current request
41 * @unprepare_cipher_request: undo any work done by prepare_cipher_request()
42 * @cipher_one_request: do encryption for current request
43 * @prepare_hash_request: do some prepare if need before handle the current request
44 * @unprepare_hash_request: undo any work done by prepare_hash_request()
45 * @hash_one_request: do hash for current request
46 * @kworker: kthread worker struct for request pump 43 * @kworker: kthread worker struct for request pump
47 * @pump_requests: work struct for scheduling work to the request pump 44 * @pump_requests: work struct for scheduling work to the request pump
48 * @priv_data: the engine private data 45 * @priv_data: the engine private data
@@ -65,19 +62,6 @@ struct crypto_engine {
65 int (*prepare_crypt_hardware)(struct crypto_engine *engine); 62 int (*prepare_crypt_hardware)(struct crypto_engine *engine);
66 int (*unprepare_crypt_hardware)(struct crypto_engine *engine); 63 int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
67 64
68 int (*prepare_cipher_request)(struct crypto_engine *engine,
69 struct ablkcipher_request *req);
70 int (*unprepare_cipher_request)(struct crypto_engine *engine,
71 struct ablkcipher_request *req);
72 int (*prepare_hash_request)(struct crypto_engine *engine,
73 struct ahash_request *req);
74 int (*unprepare_hash_request)(struct crypto_engine *engine,
75 struct ahash_request *req);
76 int (*cipher_one_request)(struct crypto_engine *engine,
77 struct ablkcipher_request *req);
78 int (*hash_one_request)(struct crypto_engine *engine,
79 struct ahash_request *req);
80
81 struct kthread_worker *kworker; 65 struct kthread_worker *kworker;
82 struct kthread_work pump_requests; 66 struct kthread_work pump_requests;
83 67
@@ -85,19 +69,45 @@ struct crypto_engine {
85 struct crypto_async_request *cur_req; 69 struct crypto_async_request *cur_req;
86}; 70};
87 71
88int crypto_transfer_cipher_request(struct crypto_engine *engine, 72/*
89 struct ablkcipher_request *req, 73 * struct crypto_engine_op - crypto hardware engine operations
90 bool need_pump); 74 * @prepare_request: do some preparation, if needed, before handling the current request
91int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine, 75 * @unprepare_request: undo any work done by prepare_request()
92 struct ablkcipher_request *req); 76 * @do_one_request: do encryption for current request
93int crypto_transfer_hash_request(struct crypto_engine *engine, 77 */
94 struct ahash_request *req, bool need_pump); 78struct crypto_engine_op {
79 int (*prepare_request)(struct crypto_engine *engine,
80 void *areq);
81 int (*unprepare_request)(struct crypto_engine *engine,
82 void *areq);
83 int (*do_one_request)(struct crypto_engine *engine,
84 void *areq);
85};
86
87struct crypto_engine_ctx {
88 struct crypto_engine_op op;
89};
90
91int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
92 struct ablkcipher_request *req);
93int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
94 struct aead_request *req);
95int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
96 struct akcipher_request *req);
95int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine, 97int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
96 struct ahash_request *req); 98 struct ahash_request *req);
97void crypto_finalize_cipher_request(struct crypto_engine *engine, 99int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
98 struct ablkcipher_request *req, int err); 100 struct skcipher_request *req);
101void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
102 struct ablkcipher_request *req, int err);
103void crypto_finalize_aead_request(struct crypto_engine *engine,
104 struct aead_request *req, int err);
105void crypto_finalize_akcipher_request(struct crypto_engine *engine,
106 struct akcipher_request *req, int err);
99void crypto_finalize_hash_request(struct crypto_engine *engine, 107void crypto_finalize_hash_request(struct crypto_engine *engine,
100 struct ahash_request *req, int err); 108 struct ahash_request *req, int err);
109void crypto_finalize_skcipher_request(struct crypto_engine *engine,
110 struct skcipher_request *req, int err);
101int crypto_engine_start(struct crypto_engine *engine); 111int crypto_engine_start(struct crypto_engine *engine);
102int crypto_engine_stop(struct crypto_engine *engine); 112int crypto_engine_stop(struct crypto_engine *engine);
103struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt); 113struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
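
A hedged sketch of the request flow as seen from a driver already on the new type-specific helpers: queue with crypto_transfer_*_request_to_engine(), then complete with the matching crypto_finalize_*_request() once the hardware is done. The single-device layout (my_dev, my_done_irq) is an assumption made purely for illustration.

#include <linux/interrupt.h>
#include <crypto/engine.h>
#include <crypto/skcipher.h>

struct my_dev {
        struct crypto_engine *engine;
        struct skcipher_request *cur_req;       /* request currently on the hardware */
};

static struct my_dev *my_dev;   /* illustrative: a single-instance driver */

static int my_encrypt(struct skcipher_request *req)
{
        /* hand the request to the engine; it will invoke our do_one_request() */
        return crypto_transfer_skcipher_request_to_engine(my_dev->engine, req);
}

static irqreturn_t my_done_irq(int irq, void *data)
{
        /* hardware finished: let the engine complete the request and pump the next one */
        crypto_finalize_skcipher_request(my_dev->engine, my_dev->cur_req, 0);
        return IRQ_HANDLED;
}
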
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 2d1849dffb80..76e432cab75d 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -74,7 +74,8 @@ struct ahash_request {
74 * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the 74 * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the
75 * state of the HASH transformation at the beginning. This shall fill in 75 * state of the HASH transformation at the beginning. This shall fill in
76 * the internal structures used during the entire duration of the whole 76 * the internal structures used during the entire duration of the whole
 77 * transformation. No data processing happens at this point. 77 * transformation. No data processing happens at this point. The driver
 78 * implementation must not use req->result.
78 * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This 79 * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This
79 * function actually pushes blocks of data from upper layers into the 80 * function actually pushes blocks of data from upper layers into the
80 * driver, which then passes those to the hardware as seen fit. This 81 * driver, which then passes those to the hardware as seen fit. This
@@ -83,7 +84,8 @@ struct ahash_request {
83 * transformation. This function shall not modify the transformation 84 * transformation. This function shall not modify the transformation
84 * context, as this function may be called in parallel with the same 85 * context, as this function may be called in parallel with the same
85 * transformation object. Data processing can happen synchronously 86 * transformation object. Data processing can happen synchronously
86 * [SHASH] or asynchronously [AHASH] at this point. 87 * [SHASH] or asynchronously [AHASH] at this point. Driver must not use
88 * req->result.
87 * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the 89 * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the
88 * transformation and retrieves the resulting hash from the driver and 90 * transformation and retrieves the resulting hash from the driver and
89 * pushes it back to upper layers. No data processing happens at this 91 * pushes it back to upper layers. No data processing happens at this
@@ -120,11 +122,12 @@ struct ahash_request {
120 * you want to save partial result of the transformation after 122 * you want to save partial result of the transformation after
121 * processing certain amount of data and reload this partial result 123 * processing certain amount of data and reload this partial result
122 * multiple times later on for multiple re-use. No data processing 124 * multiple times later on for multiple re-use. No data processing
123 * happens at this point. 125 * happens at this point. Driver must not use req->result.
124 * @import: Import partial state of the transformation. This function loads the 126 * @import: Import partial state of the transformation. This function loads the
125 * entire state of the ongoing transformation from a provided block of 127 * entire state of the ongoing transformation from a provided block of
126 * data so the transformation can continue from this point onward. No 128 * data so the transformation can continue from this point onward. No
127 * data processing happens at this point. 129 * data processing happens at this point. Driver must not use
130 * req->result.
128 * @halg: see struct hash_alg_common 131 * @halg: see struct hash_alg_common
129 */ 132 */
130struct ahash_alg { 133struct ahash_alg {
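
A caller-side sketch of the contract spelled out above: partial state is serialized into a crypto_ahash_statesize()-sized buffer, and neither export nor import touches req->result (only final/finup/digest write it). my_save_and_resume is an illustrative helper, not kernel API.

#include <linux/errno.h>
#include <linux/slab.h>
#include <crypto/hash.h>

static int my_save_and_resume(struct crypto_ahash *tfm, struct ahash_request *req)
{
        void *state;
        int err;

        state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
        if (!state)
                return -ENOMEM;

        err = crypto_ahash_export(req, state);          /* snapshot the partial state */
        if (!err)
                err = crypto_ahash_import(req, state);  /* ...and resume from it */

        kzfree(state);  /* the state may contain key-derived material */
        return err;
}
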
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 27040a46d50a..a0b0ad9d585e 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -126,11 +126,6 @@ int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
126int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc); 126int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
127int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc); 127int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc);
128 128
129int ahash_mcryptd_update(struct ahash_request *desc);
130int ahash_mcryptd_final(struct ahash_request *desc);
131int ahash_mcryptd_finup(struct ahash_request *desc);
132int ahash_mcryptd_digest(struct ahash_request *desc);
133
134int crypto_init_shash_ops_async(struct crypto_tfm *tfm); 129int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
135 130
136static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm) 131static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
index 32ceb6929885..f18344518e32 100644
--- a/include/crypto/internal/simd.h
+++ b/include/crypto/internal/simd.h
@@ -7,6 +7,7 @@
7#define _CRYPTO_INTERNAL_SIMD_H 7#define _CRYPTO_INTERNAL_SIMD_H
8 8
9struct simd_skcipher_alg; 9struct simd_skcipher_alg;
10struct skcipher_alg;
10 11
11struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, 12struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
12 const char *drvname, 13 const char *drvname,
@@ -15,4 +16,10 @@ struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
15 const char *basename); 16 const char *basename);
16void simd_skcipher_free(struct simd_skcipher_alg *alg); 17void simd_skcipher_free(struct simd_skcipher_alg *alg);
17 18
19int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
20 struct simd_skcipher_alg **simd_algs);
21
22void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
23 struct simd_skcipher_alg **simd_algs);
24
18#endif /* _CRYPTO_INTERNAL_SIMD_H */ 25#endif /* _CRYPTO_INTERNAL_SIMD_H */
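
A hedged sketch of how an arch glue module might use the new batch helpers; the my_algs array and its size are placeholders rather than code from any particular driver.

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>

static struct skcipher_alg my_algs[2];                  /* populated elsewhere */
static struct simd_skcipher_alg *my_simd_algs[2];       /* one simd wrapper per inner alg */

static int __init my_mod_init(void)
{
        /* registers the inner algorithms and their cryptd-backed simd wrappers in one call */
        return simd_register_skciphers_compat(my_algs, ARRAY_SIZE(my_algs),
                                              my_simd_algs);
}

static void __exit my_mod_exit(void)
{
        simd_unregister_skciphers(my_algs, ARRAY_SIZE(my_algs), my_simd_algs);
}

module_init(my_mod_init);
module_exit(my_mod_exit);
MODULE_LICENSE("GPL");
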
diff --git a/include/crypto/lrw.h b/include/crypto/lrw.h
deleted file mode 100644
index a9d44c06d081..000000000000
--- a/include/crypto/lrw.h
+++ /dev/null
@@ -1,44 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _CRYPTO_LRW_H
3#define _CRYPTO_LRW_H
4
5#include <crypto/b128ops.h>
6
7struct scatterlist;
8struct gf128mul_64k;
9struct blkcipher_desc;
10
11#define LRW_BLOCK_SIZE 16
12
13struct lrw_table_ctx {
14 /* optimizes multiplying a random (non incrementing, as at the
15 * start of a new sector) value with key2, we could also have
16 * used 4k optimization tables or no optimization at all. In the
17 * latter case we would have to store key2 here */
18 struct gf128mul_64k *table;
19 /* stores:
20 * key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
21 * key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
22 * key2*{ 0,0,...1,1,1,1,1 }, etc
23 * needed for optimized multiplication of incrementing values
24 * with key2 */
25 be128 mulinc[128];
26};
27
28int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak);
29void lrw_free_table(struct lrw_table_ctx *ctx);
30
31struct lrw_crypt_req {
32 be128 *tbuf;
33 unsigned int tbuflen;
34
35 struct lrw_table_ctx *table_ctx;
36 void *crypt_ctx;
37 void (*crypt_fn)(void *ctx, u8 *blks, unsigned int nbytes);
38};
39
40int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
41 struct scatterlist *src, unsigned int nbytes,
42 struct lrw_crypt_req *req);
43
44#endif /* _CRYPTO_LRW_H */
diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h
new file mode 100644
index 000000000000..b64e64d20b28
--- /dev/null
+++ b/include/crypto/sm4.h
@@ -0,0 +1,28 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3/*
4 * Common values for the SM4 algorithm
5 * Copyright (C) 2018 ARM Limited or its affiliates.
6 */
7
8#ifndef _CRYPTO_SM4_H
9#define _CRYPTO_SM4_H
10
11#include <linux/types.h>
12#include <linux/crypto.h>
13
14#define SM4_KEY_SIZE 16
15#define SM4_BLOCK_SIZE 16
16#define SM4_RKEY_WORDS 32
17
18struct crypto_sm4_ctx {
19 u32 rkey_enc[SM4_RKEY_WORDS];
20 u32 rkey_dec[SM4_RKEY_WORDS];
21};
22
23int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
24 unsigned int key_len);
25int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key,
26 unsigned int key_len);
27
28#endif
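
A minimal sketch of a cipher setkey built on the shared helper declared above; my_setkey is an illustrative name, and the key-length check is assumed to live in crypto_sm4_expand_key() itself.

#include <linux/crypto.h>
#include <crypto/sm4.h>

static int my_setkey(struct crypto_tfm *tfm, const u8 *key,
                     unsigned int keylen)
{
        struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);

        /* fills rkey_enc[] and rkey_dec[]; expected to reject keys that are not SM4_KEY_SIZE bytes */
        return crypto_sm4_expand_key(ctx, key, keylen);
}
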
diff --git a/include/crypto/speck.h b/include/crypto/speck.h
new file mode 100644
index 000000000000..73cfc952d405
--- /dev/null
+++ b/include/crypto/speck.h
@@ -0,0 +1,62 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Common values for the Speck algorithm
4 */
5
6#ifndef _CRYPTO_SPECK_H
7#define _CRYPTO_SPECK_H
8
9#include <linux/types.h>
10
11/* Speck128 */
12
13#define SPECK128_BLOCK_SIZE 16
14
15#define SPECK128_128_KEY_SIZE 16
16#define SPECK128_128_NROUNDS 32
17
18#define SPECK128_192_KEY_SIZE 24
19#define SPECK128_192_NROUNDS 33
20
21#define SPECK128_256_KEY_SIZE 32
22#define SPECK128_256_NROUNDS 34
23
24struct speck128_tfm_ctx {
25 u64 round_keys[SPECK128_256_NROUNDS];
26 int nrounds;
27};
28
29void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
30 u8 *out, const u8 *in);
31
32void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
33 u8 *out, const u8 *in);
34
35int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
36 unsigned int keysize);
37
38/* Speck64 */
39
40#define SPECK64_BLOCK_SIZE 8
41
42#define SPECK64_96_KEY_SIZE 12
43#define SPECK64_96_NROUNDS 26
44
45#define SPECK64_128_KEY_SIZE 16
46#define SPECK64_128_NROUNDS 27
47
48struct speck64_tfm_ctx {
49 u32 round_keys[SPECK64_128_NROUNDS];
50 int nrounds;
51};
52
53void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
54 u8 *out, const u8 *in);
55
56void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
57 u8 *out, const u8 *in);
58
59int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
60 unsigned int keysize);
61
62#endif /* _CRYPTO_SPECK_H */
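
A hedged single-block sketch of the generic Speck128 helpers above, intended only to show the call shapes; my_speck128_one_block is illustrative.

#include <linux/string.h>
#include <crypto/speck.h>

static int my_speck128_one_block(const u8 *key, unsigned int keylen,
                                 u8 out[SPECK128_BLOCK_SIZE],
                                 const u8 in[SPECK128_BLOCK_SIZE])
{
        struct speck128_tfm_ctx ctx;
        int err;

        err = crypto_speck128_setkey(&ctx, key, keylen);        /* 16-, 24- or 32-byte keys */
        if (err)
                return err;

        crypto_speck128_encrypt(&ctx, out, in);
        memzero_explicit(&ctx, sizeof(ctx));    /* scrub the expanded round keys */
        return 0;
}
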
diff --git a/include/crypto/xts.h b/include/crypto/xts.h
index 322aab6e78a7..34d94c95445a 100644
--- a/include/crypto/xts.h
+++ b/include/crypto/xts.h
@@ -6,27 +6,10 @@
6#include <crypto/internal/skcipher.h> 6#include <crypto/internal/skcipher.h>
7#include <linux/fips.h> 7#include <linux/fips.h>
8 8
9struct scatterlist;
10struct blkcipher_desc;
11
12#define XTS_BLOCK_SIZE 16 9#define XTS_BLOCK_SIZE 16
13 10
14struct xts_crypt_req {
15 le128 *tbuf;
16 unsigned int tbuflen;
17
18 void *tweak_ctx;
19 void (*tweak_fn)(void *ctx, u8* dst, const u8* src);
20 void *crypt_ctx;
21 void (*crypt_fn)(void *ctx, u8 *blks, unsigned int nbytes);
22};
23
24#define XTS_TWEAK_CAST(x) ((void (*)(void *, u8*, const u8*))(x)) 11#define XTS_TWEAK_CAST(x) ((void (*)(void *, u8*, const u8*))(x))
25 12
26int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
27 struct scatterlist *src, unsigned int nbytes,
28 struct xts_crypt_req *req);
29
30static inline int xts_check_key(struct crypto_tfm *tfm, 13static inline int xts_check_key(struct crypto_tfm *tfm,
31 const u8 *key, unsigned int keylen) 14 const u8 *key, unsigned int keylen)
32{ 15{
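
xts_check_key() is what survives in this header; a minimal sketch of calling it from a skcipher setkey before splitting the key into its data and tweak halves (my_xts_setkey is illustrative):

#include <crypto/xts.h>
#include <crypto/skcipher.h>

static int my_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
                         unsigned int keylen)
{
        int err;

        /* enforces an even key length and, in FIPS mode, distinct halves */
        err = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
        if (err)
                return err;

        /* first keylen/2 bytes key the data cipher, the rest key the tweak */
        return 0;
}
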
diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
index 451aaa0786ae..4b13e0a3e15b 100644
--- a/include/linux/byteorder/generic.h
+++ b/include/linux/byteorder/generic.h
@@ -156,6 +156,23 @@ static inline void le64_add_cpu(__le64 *var, u64 val)
156 *var = cpu_to_le64(le64_to_cpu(*var) + val); 156 *var = cpu_to_le64(le64_to_cpu(*var) + val);
157} 157}
158 158
159/* XXX: this stuff can be optimized */
160static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
161{
162 while (words--) {
163 __le32_to_cpus(buf);
164 buf++;
165 }
166}
167
168static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
169{
170 while (words--) {
171 __cpu_to_le32s(buf);
172 buf++;
173 }
174}
175
159static inline void be16_add_cpu(__be16 *var, u16 val) 176static inline void be16_add_cpu(__be16 *var, u16 val)
160{ 177{
161 *var = cpu_to_be16(be16_to_cpu(*var) + val); 178 *var = cpu_to_be16(be16_to_cpu(*var) + val);
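
A short sketch of the consolidated array helpers in use, e.g. when serializing a hash state into little-endian form; the eight-word buffer and my_export_le are illustrative.

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

static void my_export_le(void *out, u32 state[8])
{
        cpu_to_le32_array(state, 8);    /* in place: host order -> little endian */
        memcpy(out, state, 8 * sizeof(u32));
        le32_to_cpu_array(state, 8);    /* restore host order for continued use */
}
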
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 7e6e84cf6383..6eb06101089f 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -435,6 +435,14 @@ struct compress_alg {
435 * @cra_exit: Deinitialize the cryptographic transformation object. This is a 435 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
436 * counterpart to @cra_init, used to remove various changes set in 436 * counterpart to @cra_init, used to remove various changes set in
437 * @cra_init. 437 * @cra_init.
438 * @cra_u.ablkcipher: Union member which contains an asynchronous block cipher
439 * definition. See @struct @ablkcipher_alg.
440 * @cra_u.blkcipher: Union member which contains a synchronous block cipher
 441 * definition. See @struct @blkcipher_alg.
442 * @cra_u.cipher: Union member which contains a single-block symmetric cipher
443 * definition. See @struct @cipher_alg.
444 * @cra_u.compress: Union member which contains a (de)compression algorithm.
445 * See @struct @compress_alg.
438 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE 446 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
439 * @cra_list: internally used 447 * @cra_list: internally used
440 * @cra_users: internally used 448 * @cra_users: internally used