author     Linus Torvalds <torvalds@linux-foundation.org>   2019-07-08 23:57:08 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-07-08 23:57:08 -0400
commit     4d2fa8b44b891f0da5ceda3e5a1402ccf0ab6f26
tree       cbb763ec5e74cfbaac6ce53df277883cb78a8a1a
parent     8b68150883ca466a23e90902dd4113b22e692f04
parent     f3880a23564e3172437285ebcb5b8a124539fdae
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"Here is the crypto update for 5.3:
API:
- Test shash interface directly in testmgr
- cra_driver_name is now mandatory
Algorithms:
- Replace arc4 crypto_cipher with library helper
- Implement 5 way interleave for ECB, CBC and CTR on arm64
- Add xxhash
- Add continuous self-test on noise source to drbg
- Update jitter RNG
Drivers:
- Add support for SHA204A random number generator
- Add support for 7211 in iproc-rng200
- Fix fuzz test failures in inside-secure
- Fix fuzz test failures in talitos
- Fix fuzz test failures in qat"
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (143 commits)
crypto: stm32/hash - remove interruptible condition for dma
crypto: stm32/hash - Fix hmac issue more than 256 bytes
crypto: stm32/crc32 - rename driver file
crypto: amcc - remove memset after dma_alloc_coherent
crypto: ccp - Switch to SPDX license identifiers
crypto: ccp - Validate the error value used to index error messages
crypto: doc - Fix formatting of new crypto engine content
crypto: doc - Add parameter documentation
crypto: arm64/aes-ce - implement 5 way interleave for ECB, CBC and CTR
crypto: arm64/aes-ce - add 5 way interleave routines
crypto: talitos - drop icv_ool
crypto: talitos - fix hash on SEC1.
crypto: talitos - move struct talitos_edesc into talitos.h
lib/scatterlist: Fix mapping iterator when sg->offset is greater than PAGE_SIZE
crypto/NX: Set receive window credits to max number of CRBs in RxFIFO
crypto: asymmetric_keys - select CRYPTO_HASH where needed
crypto: serpent - mark __serpent_setkey_sbox noinline
crypto: testmgr - dynamically allocate crypto_shash
crypto: testmgr - dynamically allocate testvec_config
crypto: talitos - eliminate unneeded 'done' functions at build time
...
168 files changed, 4527 insertions(+), 3798 deletions(-)
diff --git a/Documentation/crypto/api-samples.rst b/Documentation/crypto/api-samples.rst
index f14afaaf2f32..e923f17bc2bd 100644
--- a/Documentation/crypto/api-samples.rst
+++ b/Documentation/crypto/api-samples.rst
@@ -4,111 +4,89 @@ Code Examples | |||
4 | Code Example For Symmetric Key Cipher Operation | 4 | Code Example For Symmetric Key Cipher Operation |
5 | ----------------------------------------------- | 5 | ----------------------------------------------- |
6 | 6 | ||
7 | :: | 7 | This code encrypts some data with AES-256-XTS. For sake of example, |
8 | 8 | all inputs are random bytes, the encryption is done in-place, and it's | |
9 | 9 | assumed the code is running in a context where it can sleep. | |
10 | /* tie all data structures together */ | ||
11 | struct skcipher_def { | ||
12 | struct scatterlist sg; | ||
13 | struct crypto_skcipher *tfm; | ||
14 | struct skcipher_request *req; | ||
15 | struct crypto_wait wait; | ||
16 | }; | ||
17 | |||
18 | /* Perform cipher operation */ | ||
19 | static unsigned int test_skcipher_encdec(struct skcipher_def *sk, | ||
20 | int enc) | ||
21 | { | ||
22 | int rc; | ||
23 | |||
24 | if (enc) | ||
25 | rc = crypto_wait_req(crypto_skcipher_encrypt(sk->req), &sk->wait); | ||
26 | else | ||
27 | rc = crypto_wait_req(crypto_skcipher_decrypt(sk->req), &sk->wait); | ||
28 | |||
29 | if (rc) | ||
30 | pr_info("skcipher encrypt returned with result %d\n", rc); | ||
31 | 10 | ||
32 | return rc; | 11 | :: |
33 | } | ||
34 | 12 | ||
35 | /* Initialize and trigger cipher operation */ | ||
36 | static int test_skcipher(void) | 13 | static int test_skcipher(void) |
37 | { | 14 | { |
38 | struct skcipher_def sk; | 15 | struct crypto_skcipher *tfm = NULL; |
39 | struct crypto_skcipher *skcipher = NULL; | 16 | struct skcipher_request *req = NULL; |
40 | struct skcipher_request *req = NULL; | 17 | u8 *data = NULL; |
41 | char *scratchpad = NULL; | 18 | const size_t datasize = 512; /* data size in bytes */ |
42 | char *ivdata = NULL; | 19 | struct scatterlist sg; |
43 | unsigned char key[32]; | 20 | DECLARE_CRYPTO_WAIT(wait); |
44 | int ret = -EFAULT; | 21 | u8 iv[16]; /* AES-256-XTS takes a 16-byte IV */ |
45 | 22 | u8 key[64]; /* AES-256-XTS takes a 64-byte key */ | |
46 | skcipher = crypto_alloc_skcipher("cbc-aes-aesni", 0, 0); | 23 | int err; |
47 | if (IS_ERR(skcipher)) { | 24 | |
48 | pr_info("could not allocate skcipher handle\n"); | 25 | /* |
49 | return PTR_ERR(skcipher); | 26 | * Allocate a tfm (a transformation object) and set the key. |
50 | } | 27 | * |
51 | 28 | * In real-world use, a tfm and key are typically used for many | |
52 | req = skcipher_request_alloc(skcipher, GFP_KERNEL); | 29 | * encryption/decryption operations. But in this example, we'll just do a |
53 | if (!req) { | 30 | * single encryption operation with it (which is not very efficient). |
54 | pr_info("could not allocate skcipher request\n"); | 31 | */ |
55 | ret = -ENOMEM; | 32 | |
56 | goto out; | 33 | tfm = crypto_alloc_skcipher("xts(aes)", 0, 0); |
57 | } | 34 | if (IS_ERR(tfm)) { |
58 | 35 | pr_err("Error allocating xts(aes) handle: %ld\n", PTR_ERR(tfm)); | |
59 | skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 36 | return PTR_ERR(tfm); |
60 | crypto_req_done, | 37 | } |
61 | &sk.wait); | 38 | |
62 | 39 | get_random_bytes(key, sizeof(key)); | |
63 | /* AES 256 with random key */ | 40 | err = crypto_skcipher_setkey(tfm, key, sizeof(key)); |
64 | get_random_bytes(&key, 32); | 41 | if (err) { |
65 | if (crypto_skcipher_setkey(skcipher, key, 32)) { | 42 | pr_err("Error setting key: %d\n", err); |
66 | pr_info("key could not be set\n"); | 43 | goto out; |
67 | ret = -EAGAIN; | 44 | } |
68 | goto out; | 45 | |
69 | } | 46 | /* Allocate a request object */ |
70 | 47 | req = skcipher_request_alloc(tfm, GFP_KERNEL); | |
71 | /* IV will be random */ | 48 | if (!req) { |
72 | ivdata = kmalloc(16, GFP_KERNEL); | 49 | err = -ENOMEM; |
73 | if (!ivdata) { | 50 | goto out; |
74 | pr_info("could not allocate ivdata\n"); | 51 | } |
75 | goto out; | 52 | |
76 | } | 53 | /* Prepare the input data */ |
77 | get_random_bytes(ivdata, 16); | 54 | data = kmalloc(datasize, GFP_KERNEL); |
78 | 55 | if (!data) { | |
79 | /* Input data will be random */ | 56 | err = -ENOMEM; |
80 | scratchpad = kmalloc(16, GFP_KERNEL); | 57 | goto out; |
81 | if (!scratchpad) { | 58 | } |
82 | pr_info("could not allocate scratchpad\n"); | 59 | get_random_bytes(data, datasize); |
83 | goto out; | 60 | |
84 | } | 61 | /* Initialize the IV */ |
85 | get_random_bytes(scratchpad, 16); | 62 | get_random_bytes(iv, sizeof(iv)); |
86 | 63 | ||
87 | sk.tfm = skcipher; | 64 | /* |
88 | sk.req = req; | 65 | * Encrypt the data in-place. |
89 | 66 | * | |
90 | /* We encrypt one block */ | 67 | * For simplicity, in this example we wait for the request to complete |
91 | sg_init_one(&sk.sg, scratchpad, 16); | 68 | * before proceeding, even if the underlying implementation is asynchronous. |
92 | skcipher_request_set_crypt(req, &sk.sg, &sk.sg, 16, ivdata); | 69 | * |
93 | crypto_init_wait(&sk.wait); | 70 | * To decrypt instead of encrypt, just change crypto_skcipher_encrypt() to |
94 | 71 | * crypto_skcipher_decrypt(). | |
95 | /* encrypt data */ | 72 | */ |
96 | ret = test_skcipher_encdec(&sk, 1); | 73 | sg_init_one(&sg, data, datasize); |
97 | if (ret) | 74 | skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | |
98 | goto out; | 75 | CRYPTO_TFM_REQ_MAY_SLEEP, |
99 | 76 | crypto_req_done, &wait); | |
100 | pr_info("Encryption triggered successfully\n"); | 77 | skcipher_request_set_crypt(req, &sg, &sg, datasize, iv); |
101 | 78 | err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); | |
79 | if (err) { | ||
80 | pr_err("Error encrypting data: %d\n", err); | ||
81 | goto out; | ||
82 | } | ||
83 | |||
84 | pr_debug("Encryption was successful\n"); | ||
102 | out: | 85 | out: |
103 | if (skcipher) | 86 | crypto_free_skcipher(tfm); |
104 | crypto_free_skcipher(skcipher); | ||
105 | if (req) | ||
106 | skcipher_request_free(req); | 87 | skcipher_request_free(req); |
107 | if (ivdata) | 88 | kfree(data); |
108 | kfree(ivdata); | 89 | return err; |
109 | if (scratchpad) | ||
110 | kfree(scratchpad); | ||
111 | return ret; | ||
112 | } | 90 | } |
113 | 91 | ||
114 | 92 | ||
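The rewritten example returns a plain errno, so it drops straight into a throwaway test module. A minimal sketch, assuming test_skcipher() from the new text above is pasted alongside it (the module boilerplate below is not part of the patch):

    /* Hypothetical wrapper module around the test_skcipher() example. */
    #include <linux/module.h>

    static int __init skcipher_example_init(void)
    {
            /* A nonzero return here fails insmod, which makes the
             * example double as a quick smoke test. */
            return test_skcipher();
    }

    static void __exit skcipher_example_exit(void)
    {
    }

    module_init(skcipher_example_init);
    module_exit(skcipher_example_exit);
    MODULE_LICENSE("GPL");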
diff --git a/Documentation/crypto/api-skcipher.rst b/Documentation/crypto/api-skcipher.rst
index 4eec4a93f7e3..20ba08dddf2e 100644
--- a/Documentation/crypto/api-skcipher.rst
+++ b/Documentation/crypto/api-skcipher.rst
@@ -5,7 +5,7 @@ Block Cipher Algorithm Definitions | |||
5 | :doc: Block Cipher Algorithm Definitions | 5 | :doc: Block Cipher Algorithm Definitions |
6 | 6 | ||
7 | .. kernel-doc:: include/linux/crypto.h | 7 | .. kernel-doc:: include/linux/crypto.h |
8 | :functions: crypto_alg ablkcipher_alg blkcipher_alg cipher_alg | 8 | :functions: crypto_alg ablkcipher_alg blkcipher_alg cipher_alg compress_alg |
9 | 9 | ||
10 | Symmetric Key Cipher API | 10 | Symmetric Key Cipher API |
11 | ------------------------ | 11 | ------------------------ |
diff --git a/Documentation/crypto/architecture.rst b/Documentation/crypto/architecture.rst
index ee8ff0762d7f..3eae1ae7f798 100644
--- a/Documentation/crypto/architecture.rst
+++ b/Documentation/crypto/architecture.rst
@@ -208,9 +208,7 @@ the aforementioned cipher types: | |||
208 | - CRYPTO_ALG_TYPE_KPP Key-agreement Protocol Primitive (KPP) such as | 208 | - CRYPTO_ALG_TYPE_KPP Key-agreement Protocol Primitive (KPP) such as |
209 | an ECDH or DH implementation | 209 | an ECDH or DH implementation |
210 | 210 | ||
211 | - CRYPTO_ALG_TYPE_DIGEST Raw message digest | 211 | - CRYPTO_ALG_TYPE_HASH Raw message digest |
212 | |||
213 | - CRYPTO_ALG_TYPE_HASH Alias for CRYPTO_ALG_TYPE_DIGEST | ||
214 | 212 | ||
215 | - CRYPTO_ALG_TYPE_SHASH Synchronous multi-block hash | 213 | - CRYPTO_ALG_TYPE_SHASH Synchronous multi-block hash |
216 | 214 | ||
diff --git a/Documentation/crypto/crypto_engine.rst b/Documentation/crypto/crypto_engine.rst
index 1d56221dfe35..236c674d6897 100644
--- a/Documentation/crypto/crypto_engine.rst
+++ b/Documentation/crypto/crypto_engine.rst
@@ -1,50 +1,85 @@ | |||
1 | ============= | 1 | .. SPDX-License-Identifier: GPL-2.0 |
2 | CRYPTO ENGINE | 2 | Crypto Engine |
3 | ============= | 3 | ============= |
4 | 4 | ||
5 | Overview | 5 | Overview |
6 | -------- | 6 | -------- |
7 | The crypto engine API (CE), is a crypto queue manager. | 7 | The crypto engine (CE) API is a crypto queue manager. |
8 | 8 | ||
9 | Requirement | 9 | Requirement |
10 | ----------- | 10 | ----------- |
11 | You have to put at start of your tfm_ctx the struct crypto_engine_ctx:: | 11 | You must put, at the start of your transform context your_tfm_ctx, the structure |
12 | crypto_engine: | ||
13 | |||
14 | :: | ||
12 | 15 | ||
13 | struct your_tfm_ctx { | 16 | struct your_tfm_ctx { |
14 | struct crypto_engine_ctx enginectx; | 17 | struct crypto_engine engine; |
15 | ... | 18 | ... |
16 | }; | 19 | }; |
17 | 20 | ||
18 | Why: Since CE manage only crypto_async_request, it cannot know the underlying | 21 | The crypto engine only manages asynchronous requests in the form of |
19 | request_type and so have access only on the TFM. | 22 | crypto_async_request. It cannot know the underlying request type and thus only |
20 | So using container_of for accessing __ctx is impossible. | 23 | has access to the transform structure. It is not possible to access the context |
21 | Furthermore, the crypto engine cannot know the "struct your_tfm_ctx", | 24 | using container_of. In addition, the engine knows nothing about your |
22 | so it must assume that crypto_engine_ctx is at start of it. | 25 | structure "``struct your_tfm_ctx``". The engine assumes (requires) the placement |
26 | of the known member ``struct crypto_engine`` at the beginning. | ||
23 | 27 | ||
24 | Order of operations | 28 | Order of operations |
25 | ------------------- | 29 | ------------------- |
26 | You have to obtain a struct crypto_engine via crypto_engine_alloc_init(). | 30 | You are required to obtain a struct crypto_engine via ``crypto_engine_alloc_init()``. |
27 | And start it via crypto_engine_start(). | 31 | Start it via ``crypto_engine_start()``. When finished with your work, shut down the |
28 | 32 | engine using ``crypto_engine_stop()`` and destroy the engine with | |
29 | Before transferring any request, you have to fill the enginectx. | 33 | ``crypto_engine_exit()``. |
30 | - prepare_request: (taking a function pointer) If you need to do some processing before doing the request | 34 | |
31 | - unprepare_request: (taking a function pointer) Undoing what's done in prepare_request | 35 | Before transferring any request, you have to fill the context enginectx by |
32 | - do_one_request: (taking a function pointer) Do encryption for current request | 36 | providing functions for the following: |
33 | 37 | ||
34 | Note: that those three functions get the crypto_async_request associated with the received request. | 38 | * ``prepare_crypt_hardware``: Called once before any prepare functions are |
35 | So your need to get the original request via container_of(areq, struct yourrequesttype_request, base); | 39 | called. |
36 | 40 | ||
37 | When your driver receive a crypto_request, you have to transfer it to | 41 | * ``unprepare_crypt_hardware``: Called once after all unprepare functions have |
38 | the cryptoengine via one of: | 42 | been called. |
39 | - crypto_transfer_ablkcipher_request_to_engine() | 43 | |
40 | - crypto_transfer_aead_request_to_engine() | 44 | * ``prepare_cipher_request``/``prepare_hash_request``: Called before each |
41 | - crypto_transfer_akcipher_request_to_engine() | 45 | corresponding request is performed. If some processing or other preparatory |
42 | - crypto_transfer_hash_request_to_engine() | 46 | work is required, do it here. |
43 | - crypto_transfer_skcipher_request_to_engine() | 47 | |
44 | 48 | * ``unprepare_cipher_request``/``unprepare_hash_request``: Called after each | |
45 | At the end of the request process, a call to one of the following function is needed: | 49 | request is handled. Clean up / undo what was done in the prepare function. |
46 | - crypto_finalize_ablkcipher_request | 50 | |
47 | - crypto_finalize_aead_request | 51 | * ``cipher_one_request``/``hash_one_request``: Handle the current request by |
48 | - crypto_finalize_akcipher_request | 52 | performing the operation. |
49 | - crypto_finalize_hash_request | 53 | |
50 | - crypto_finalize_skcipher_request | 54 | Note that these functions access the crypto_async_request structure |
55 | associated with the received request. You are able to retrieve the original | ||
56 | request by using: | ||
57 | |||
58 | :: | ||
59 | |||
60 | container_of(areq, struct yourrequesttype_request, base); | ||
61 | |||
62 | When your driver receives a crypto_request, you must transfer it to | ||
63 | the crypto engine via one of: | ||
64 | |||
65 | * crypto_transfer_ablkcipher_request_to_engine() | ||
66 | |||
67 | * crypto_transfer_aead_request_to_engine() | ||
68 | |||
69 | * crypto_transfer_akcipher_request_to_engine() | ||
70 | |||
71 | * crypto_transfer_hash_request_to_engine() | ||
72 | |||
73 | * crypto_transfer_skcipher_request_to_engine() | ||
74 | |||
75 | At the end of the request process, a call to one of the following functions is needed: | ||
76 | |||
77 | * crypto_finalize_ablkcipher_request() | ||
78 | |||
79 | * crypto_finalize_aead_request() | ||
80 | |||
81 | * crypto_finalize_akcipher_request() | ||
82 | |||
83 | * crypto_finalize_hash_request() | ||
84 | |||
85 | * crypto_finalize_skcipher_request() | ||
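Put together, a driver-side skcipher built on the engine looks roughly like the sketch below. This is a hedged outline, not code from this merge: mydev_run_cipher() and my_engine are hypothetical, and the callback field names follow include/crypto/engine.h as of this release (crypto_engine_ctx / do_one_request), which differ from the older names the reworded text above still uses.

    #include <crypto/engine.h>
    #include <crypto/internal/skcipher.h>

    static struct crypto_engine *my_engine;  /* from crypto_engine_alloc_init() */

    int mydev_run_cipher(struct skcipher_request *req);  /* hypothetical HW op */

    struct my_tfm_ctx {
            struct crypto_engine_ctx enginectx;  /* must be the first member */
            /* device-specific state (keys, descriptors, ...) */
    };

    static int my_do_one_request(struct crypto_engine *engine, void *areq)
    {
            /* The engine hands back a crypto_async_request; recover the
             * real request type as the documentation describes. */
            struct skcipher_request *req =
                    container_of(areq, struct skcipher_request, base);
            int err;

            err = mydev_run_cipher(req);
            crypto_finalize_skcipher_request(engine, req, err);
            return 0;
    }

    static int my_init_tfm(struct crypto_skcipher *tfm)
    {
            struct my_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

            ctx->enginectx.op.do_one_request = my_do_one_request;
            return 0;
    }

    static int my_encrypt(struct skcipher_request *req)
    {
            /* ->encrypt() just queues the request; the engine thread
             * invokes my_do_one_request() later. */
            return crypto_transfer_skcipher_request_to_engine(my_engine, req);
    }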
diff --git a/Documentation/devicetree/bindings/crypto/atmel-crypto.txt b/Documentation/devicetree/bindings/crypto/atmel-crypto.txt
index 6b458bb2440d..f2aab3dc2b52 100644
--- a/Documentation/devicetree/bindings/crypto/atmel-crypto.txt
+++ b/Documentation/devicetree/bindings/crypto/atmel-crypto.txt
@@ -66,16 +66,3 @@ sha@f8034000 { | |||
66 | dmas = <&dma1 2 17>; | 66 | dmas = <&dma1 2 17>; |
67 | dma-names = "tx"; | 67 | dma-names = "tx"; |
68 | }; | 68 | }; |
69 | |||
70 | * Elliptic Curve Cryptography (I2C) | ||
71 | |||
72 | Required properties: | ||
73 | - compatible : must be "atmel,atecc508a". | ||
74 | - reg: I2C bus address of the device. | ||
75 | - clock-frequency: must be present in the i2c controller node. | ||
76 | |||
77 | Example: | ||
78 | atecc508a@c0 { | ||
79 | compatible = "atmel,atecc508a"; | ||
80 | reg = <0xC0>; | ||
81 | }; | ||
diff --git a/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt b/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt
index 0014da9145af..c223e54452da 100644
--- a/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt
+++ b/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt
@@ -2,6 +2,7 @@ HWRNG support for the iproc-rng200 driver | |||
2 | 2 | ||
3 | Required properties: | 3 | Required properties: |
4 | - compatible : Must be one of: | 4 | - compatible : Must be one of: |
5 | "brcm,bcm7211-rng200" | ||
5 | "brcm,bcm7278-rng200" | 6 | "brcm,bcm7278-rng200" |
6 | "brcm,iproc-rng200" | 7 | "brcm,iproc-rng200" |
7 | - reg : base address and size of control register block | 8 | - reg : base address and size of control register block |
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index 747fd3f689dc..2e742d399e87 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -52,6 +52,10 @@ properties: | |||
52 | - at,24c08 | 52 | - at,24c08 |
53 | # i2c trusted platform module (TPM) | 53 | # i2c trusted platform module (TPM) |
54 | - atmel,at97sc3204t | 54 | - atmel,at97sc3204t |
55 | # i2c h/w symmetric crypto module | ||
56 | - atmel,atsha204a | ||
57 | # i2c h/w elliptic curve crypto module | ||
58 | - atmel,atecc508a | ||
55 | # CM32181: Ambient Light Sensor | 59 | # CM32181: Ambient Light Sensor |
56 | - capella,cm32181 | 60 | - capella,cm32181 |
57 | # CM3232: Ambient Light Sensor | 61 | # CM3232: Ambient Light Sensor |
diff --git a/MAINTAINERS b/MAINTAINERS
index a2b97d38a704..4a9e8e5b2432 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4257,6 +4257,7 @@ F: crypto/ | |||
4257 | F: drivers/crypto/ | 4257 | F: drivers/crypto/ |
4258 | F: include/crypto/ | 4258 | F: include/crypto/ |
4259 | F: include/linux/crypto* | 4259 | F: include/linux/crypto* |
4260 | F: lib/crypto/ | ||
4260 | 4261 | ||
4261 | CRYPTOGRAPHIC RANDOM NUMBER GENERATOR | 4262 | CRYPTOGRAPHIC RANDOM NUMBER GENERATOR |
4262 | M: Neil Horman <nhorman@tuxdriver.com> | 4263 | M: Neil Horman <nhorman@tuxdriver.com> |
diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi
index d6b711011cba..e20483714be5 100644
--- a/arch/arm/boot/dts/imx7ulp.dtsi
+++ b/arch/arm/boot/dts/imx7ulp.dtsi
@@ -100,6 +100,29 @@ | |||
100 | reg = <0x40000000 0x800000>; | 100 | reg = <0x40000000 0x800000>; |
101 | ranges; | 101 | ranges; |
102 | 102 | ||
103 | crypto: crypto@40240000 { | ||
104 | compatible = "fsl,sec-v4.0"; | ||
105 | #address-cells = <1>; | ||
106 | #size-cells = <1>; | ||
107 | reg = <0x40240000 0x10000>; | ||
108 | ranges = <0 0x40240000 0x10000>; | ||
109 | clocks = <&pcc2 IMX7ULP_CLK_CAAM>, | ||
110 | <&scg1 IMX7ULP_CLK_NIC1_BUS_DIV>; | ||
111 | clock-names = "aclk", "ipg"; | ||
112 | |||
113 | sec_jr0: jr0@1000 { | ||
114 | compatible = "fsl,sec-v4.0-job-ring"; | ||
115 | reg = <0x1000 0x1000>; | ||
116 | interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>; | ||
117 | }; | ||
118 | |||
119 | sec_jr1: jr1@2000 { | ||
120 | compatible = "fsl,sec-v4.0-job-ring"; | ||
121 | reg = <0x2000 0x1000>; | ||
122 | interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>; | ||
123 | }; | ||
124 | }; | ||
125 | |||
103 | lpuart4: serial@402d0000 { | 126 | lpuart4: serial@402d0000 { |
104 | compatible = "fsl,imx7ulp-lpuart"; | 127 | compatible = "fsl,imx7ulp-lpuart"; |
105 | reg = <0x402d0000 0x1000>; | 128 | reg = <0x402d0000 0x1000>; |
diff --git a/arch/arm/crypto/chacha-neon-glue.c b/arch/arm/crypto/chacha-neon-glue.c
index 48a89537b828..a8e9b534c8da 100644
--- a/arch/arm/crypto/chacha-neon-glue.c
+++ b/arch/arm/crypto/chacha-neon-glue.c
@@ -63,7 +63,7 @@ static void chacha_doneon(u32 *state, u8 *dst, const u8 *src, | |||
63 | } | 63 | } |
64 | 64 | ||
65 | static int chacha_neon_stream_xor(struct skcipher_request *req, | 65 | static int chacha_neon_stream_xor(struct skcipher_request *req, |
66 | struct chacha_ctx *ctx, u8 *iv) | 66 | const struct chacha_ctx *ctx, const u8 *iv) |
67 | { | 67 | { |
68 | struct skcipher_walk walk; | 68 | struct skcipher_walk walk; |
69 | u32 state[16]; | 69 | u32 state[16]; |
diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c
index 232eeab1ec37..8775aa42bbbe 100644
--- a/arch/arm/crypto/sha512-glue.c
+++ b/arch/arm/crypto/sha512-glue.c
@@ -34,7 +34,7 @@ int sha512_arm_update(struct shash_desc *desc, const u8 *data, | |||
34 | (sha512_block_fn *)sha512_block_data_order); | 34 | (sha512_block_fn *)sha512_block_data_order); |
35 | } | 35 | } |
36 | 36 | ||
37 | int sha512_arm_final(struct shash_desc *desc, u8 *out) | 37 | static int sha512_arm_final(struct shash_desc *desc, u8 *out) |
38 | { | 38 | { |
39 | sha512_base_do_finalize(desc, | 39 | sha512_base_do_finalize(desc, |
40 | (sha512_block_fn *)sha512_block_data_order); | 40 | (sha512_block_fn *)sha512_block_data_order); |
diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S
index 3ebfaec97e27..00bd2885feaa 100644
--- a/arch/arm64/crypto/aes-ce.S
+++ b/arch/arm64/crypto/aes-ce.S
@@ -15,6 +15,8 @@ | |||
15 | .arch armv8-a+crypto | 15 | .arch armv8-a+crypto |
16 | 16 | ||
17 | xtsmask .req v16 | 17 | xtsmask .req v16 |
18 | cbciv .req v16 | ||
19 | vctr .req v16 | ||
18 | 20 | ||
19 | .macro xts_reload_mask, tmp | 21 | .macro xts_reload_mask, tmp |
20 | .endm | 22 | .endm |
@@ -49,7 +51,7 @@ | |||
49 | load_round_keys \rounds, \temp | 51 | load_round_keys \rounds, \temp |
50 | .endm | 52 | .endm |
51 | 53 | ||
52 | .macro do_enc_Nx, de, mc, k, i0, i1, i2, i3 | 54 | .macro do_enc_Nx, de, mc, k, i0, i1, i2, i3, i4 |
53 | aes\de \i0\().16b, \k\().16b | 55 | aes\de \i0\().16b, \k\().16b |
54 | aes\mc \i0\().16b, \i0\().16b | 56 | aes\mc \i0\().16b, \i0\().16b |
55 | .ifnb \i1 | 57 | .ifnb \i1 |
@@ -60,27 +62,34 @@ | |||
60 | aes\mc \i2\().16b, \i2\().16b | 62 | aes\mc \i2\().16b, \i2\().16b |
61 | aes\de \i3\().16b, \k\().16b | 63 | aes\de \i3\().16b, \k\().16b |
62 | aes\mc \i3\().16b, \i3\().16b | 64 | aes\mc \i3\().16b, \i3\().16b |
65 | .ifnb \i4 | ||
66 | aes\de \i4\().16b, \k\().16b | ||
67 | aes\mc \i4\().16b, \i4\().16b | ||
68 | .endif | ||
63 | .endif | 69 | .endif |
64 | .endif | 70 | .endif |
65 | .endm | 71 | .endm |
66 | 72 | ||
67 | /* up to 4 interleaved encryption rounds with the same round key */ | 73 | /* up to 5 interleaved encryption rounds with the same round key */ |
68 | .macro round_Nx, enc, k, i0, i1, i2, i3 | 74 | .macro round_Nx, enc, k, i0, i1, i2, i3, i4 |
69 | .ifc \enc, e | 75 | .ifc \enc, e |
70 | do_enc_Nx e, mc, \k, \i0, \i1, \i2, \i3 | 76 | do_enc_Nx e, mc, \k, \i0, \i1, \i2, \i3, \i4 |
71 | .else | 77 | .else |
72 | do_enc_Nx d, imc, \k, \i0, \i1, \i2, \i3 | 78 | do_enc_Nx d, imc, \k, \i0, \i1, \i2, \i3, \i4 |
73 | .endif | 79 | .endif |
74 | .endm | 80 | .endm |
75 | 81 | ||
76 | /* up to 4 interleaved final rounds */ | 82 | /* up to 5 interleaved final rounds */ |
77 | .macro fin_round_Nx, de, k, k2, i0, i1, i2, i3 | 83 | .macro fin_round_Nx, de, k, k2, i0, i1, i2, i3, i4 |
78 | aes\de \i0\().16b, \k\().16b | 84 | aes\de \i0\().16b, \k\().16b |
79 | .ifnb \i1 | 85 | .ifnb \i1 |
80 | aes\de \i1\().16b, \k\().16b | 86 | aes\de \i1\().16b, \k\().16b |
81 | .ifnb \i3 | 87 | .ifnb \i3 |
82 | aes\de \i2\().16b, \k\().16b | 88 | aes\de \i2\().16b, \k\().16b |
83 | aes\de \i3\().16b, \k\().16b | 89 | aes\de \i3\().16b, \k\().16b |
90 | .ifnb \i4 | ||
91 | aes\de \i4\().16b, \k\().16b | ||
92 | .endif | ||
84 | .endif | 93 | .endif |
85 | .endif | 94 | .endif |
86 | eor \i0\().16b, \i0\().16b, \k2\().16b | 95 | eor \i0\().16b, \i0\().16b, \k2\().16b |
@@ -89,47 +98,52 @@ | |||
89 | .ifnb \i3 | 98 | .ifnb \i3 |
90 | eor \i2\().16b, \i2\().16b, \k2\().16b | 99 | eor \i2\().16b, \i2\().16b, \k2\().16b |
91 | eor \i3\().16b, \i3\().16b, \k2\().16b | 100 | eor \i3\().16b, \i3\().16b, \k2\().16b |
101 | .ifnb \i4 | ||
102 | eor \i4\().16b, \i4\().16b, \k2\().16b | ||
103 | .endif | ||
92 | .endif | 104 | .endif |
93 | .endif | 105 | .endif |
94 | .endm | 106 | .endm |
95 | 107 | ||
96 | /* up to 4 interleaved blocks */ | 108 | /* up to 5 interleaved blocks */ |
97 | .macro do_block_Nx, enc, rounds, i0, i1, i2, i3 | 109 | .macro do_block_Nx, enc, rounds, i0, i1, i2, i3, i4 |
98 | cmp \rounds, #12 | 110 | cmp \rounds, #12 |
99 | blo 2222f /* 128 bits */ | 111 | blo 2222f /* 128 bits */ |
100 | beq 1111f /* 192 bits */ | 112 | beq 1111f /* 192 bits */ |
101 | round_Nx \enc, v17, \i0, \i1, \i2, \i3 | 113 | round_Nx \enc, v17, \i0, \i1, \i2, \i3, \i4 |
102 | round_Nx \enc, v18, \i0, \i1, \i2, \i3 | 114 | round_Nx \enc, v18, \i0, \i1, \i2, \i3, \i4 |
103 | 1111: round_Nx \enc, v19, \i0, \i1, \i2, \i3 | 115 | 1111: round_Nx \enc, v19, \i0, \i1, \i2, \i3, \i4 |
104 | round_Nx \enc, v20, \i0, \i1, \i2, \i3 | 116 | round_Nx \enc, v20, \i0, \i1, \i2, \i3, \i4 |
105 | 2222: .irp key, v21, v22, v23, v24, v25, v26, v27, v28, v29 | 117 | 2222: .irp key, v21, v22, v23, v24, v25, v26, v27, v28, v29 |
106 | round_Nx \enc, \key, \i0, \i1, \i2, \i3 | 118 | round_Nx \enc, \key, \i0, \i1, \i2, \i3, \i4 |
107 | .endr | 119 | .endr |
108 | fin_round_Nx \enc, v30, v31, \i0, \i1, \i2, \i3 | 120 | fin_round_Nx \enc, v30, v31, \i0, \i1, \i2, \i3, \i4 |
109 | .endm | 121 | .endm |
110 | 122 | ||
111 | .macro encrypt_block, in, rounds, t0, t1, t2 | 123 | .macro encrypt_block, in, rounds, t0, t1, t2 |
112 | do_block_Nx e, \rounds, \in | 124 | do_block_Nx e, \rounds, \in |
113 | .endm | 125 | .endm |
114 | 126 | ||
115 | .macro encrypt_block2x, i0, i1, rounds, t0, t1, t2 | ||
116 | do_block_Nx e, \rounds, \i0, \i1 | ||
117 | .endm | ||
118 | |||
119 | .macro encrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2 | 127 | .macro encrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2 |
120 | do_block_Nx e, \rounds, \i0, \i1, \i2, \i3 | 128 | do_block_Nx e, \rounds, \i0, \i1, \i2, \i3 |
121 | .endm | 129 | .endm |
122 | 130 | ||
123 | .macro decrypt_block, in, rounds, t0, t1, t2 | 131 | .macro encrypt_block5x, i0, i1, i2, i3, i4, rounds, t0, t1, t2 |
124 | do_block_Nx d, \rounds, \in | 132 | do_block_Nx e, \rounds, \i0, \i1, \i2, \i3, \i4 |
125 | .endm | 133 | .endm |
126 | 134 | ||
127 | .macro decrypt_block2x, i0, i1, rounds, t0, t1, t2 | 135 | .macro decrypt_block, in, rounds, t0, t1, t2 |
128 | do_block_Nx d, \rounds, \i0, \i1 | 136 | do_block_Nx d, \rounds, \in |
129 | .endm | 137 | .endm |
130 | 138 | ||
131 | .macro decrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2 | 139 | .macro decrypt_block4x, i0, i1, i2, i3, rounds, t0, t1, t2 |
132 | do_block_Nx d, \rounds, \i0, \i1, \i2, \i3 | 140 | do_block_Nx d, \rounds, \i0, \i1, \i2, \i3 |
133 | .endm | 141 | .endm |
134 | 142 | ||
143 | .macro decrypt_block5x, i0, i1, i2, i3, i4, rounds, t0, t1, t2 | ||
144 | do_block_Nx d, \rounds, \i0, \i1, \i2, \i3, \i4 | ||
145 | .endm | ||
146 | |||
147 | #define MAX_STRIDE 5 | ||
148 | |||
135 | #include "aes-modes.S" | 149 | #include "aes-modes.S" |
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index 2883def14be5..324039b72094 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -10,6 +10,18 @@ | |||
10 | .text | 10 | .text |
11 | .align 4 | 11 | .align 4 |
12 | 12 | ||
13 | #ifndef MAX_STRIDE | ||
14 | #define MAX_STRIDE 4 | ||
15 | #endif | ||
16 | |||
17 | #if MAX_STRIDE == 4 | ||
18 | #define ST4(x...) x | ||
19 | #define ST5(x...) | ||
20 | #else | ||
21 | #define ST4(x...) | ||
22 | #define ST5(x...) x | ||
23 | #endif | ||
24 | |||
13 | aes_encrypt_block4x: | 25 | aes_encrypt_block4x: |
14 | encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7 | 26 | encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7 |
15 | ret | 27 | ret |
@@ -20,6 +32,18 @@ aes_decrypt_block4x: | |||
20 | ret | 32 | ret |
21 | ENDPROC(aes_decrypt_block4x) | 33 | ENDPROC(aes_decrypt_block4x) |
22 | 34 | ||
35 | #if MAX_STRIDE == 5 | ||
36 | aes_encrypt_block5x: | ||
37 | encrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7 | ||
38 | ret | ||
39 | ENDPROC(aes_encrypt_block5x) | ||
40 | |||
41 | aes_decrypt_block5x: | ||
42 | decrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7 | ||
43 | ret | ||
44 | ENDPROC(aes_decrypt_block5x) | ||
45 | #endif | ||
46 | |||
23 | /* | 47 | /* |
24 | * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, | 48 | * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, |
25 | * int blocks) | 49 | * int blocks) |
@@ -34,14 +58,17 @@ AES_ENTRY(aes_ecb_encrypt) | |||
34 | enc_prepare w3, x2, x5 | 58 | enc_prepare w3, x2, x5 |
35 | 59 | ||
36 | .LecbencloopNx: | 60 | .LecbencloopNx: |
37 | subs w4, w4, #4 | 61 | subs w4, w4, #MAX_STRIDE |
38 | bmi .Lecbenc1x | 62 | bmi .Lecbenc1x |
39 | ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */ | 63 | ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */ |
40 | bl aes_encrypt_block4x | 64 | ST4( bl aes_encrypt_block4x ) |
65 | ST5( ld1 {v4.16b}, [x1], #16 ) | ||
66 | ST5( bl aes_encrypt_block5x ) | ||
41 | st1 {v0.16b-v3.16b}, [x0], #64 | 67 | st1 {v0.16b-v3.16b}, [x0], #64 |
68 | ST5( st1 {v4.16b}, [x0], #16 ) | ||
42 | b .LecbencloopNx | 69 | b .LecbencloopNx |
43 | .Lecbenc1x: | 70 | .Lecbenc1x: |
44 | adds w4, w4, #4 | 71 | adds w4, w4, #MAX_STRIDE |
45 | beq .Lecbencout | 72 | beq .Lecbencout |
46 | .Lecbencloop: | 73 | .Lecbencloop: |
47 | ld1 {v0.16b}, [x1], #16 /* get next pt block */ | 74 | ld1 {v0.16b}, [x1], #16 /* get next pt block */ |
@@ -62,14 +89,17 @@ AES_ENTRY(aes_ecb_decrypt) | |||
62 | dec_prepare w3, x2, x5 | 89 | dec_prepare w3, x2, x5 |
63 | 90 | ||
64 | .LecbdecloopNx: | 91 | .LecbdecloopNx: |
65 | subs w4, w4, #4 | 92 | subs w4, w4, #MAX_STRIDE |
66 | bmi .Lecbdec1x | 93 | bmi .Lecbdec1x |
67 | ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ | 94 | ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ |
68 | bl aes_decrypt_block4x | 95 | ST4( bl aes_decrypt_block4x ) |
96 | ST5( ld1 {v4.16b}, [x1], #16 ) | ||
97 | ST5( bl aes_decrypt_block5x ) | ||
69 | st1 {v0.16b-v3.16b}, [x0], #64 | 98 | st1 {v0.16b-v3.16b}, [x0], #64 |
99 | ST5( st1 {v4.16b}, [x0], #16 ) | ||
70 | b .LecbdecloopNx | 100 | b .LecbdecloopNx |
71 | .Lecbdec1x: | 101 | .Lecbdec1x: |
72 | adds w4, w4, #4 | 102 | adds w4, w4, #MAX_STRIDE |
73 | beq .Lecbdecout | 103 | beq .Lecbdecout |
74 | .Lecbdecloop: | 104 | .Lecbdecloop: |
75 | ld1 {v0.16b}, [x1], #16 /* get next ct block */ | 105 | ld1 {v0.16b}, [x1], #16 /* get next ct block */ |
@@ -129,39 +159,56 @@ AES_ENTRY(aes_cbc_decrypt) | |||
129 | stp x29, x30, [sp, #-16]! | 159 | stp x29, x30, [sp, #-16]! |
130 | mov x29, sp | 160 | mov x29, sp |
131 | 161 | ||
132 | ld1 {v7.16b}, [x5] /* get iv */ | 162 | ld1 {cbciv.16b}, [x5] /* get iv */ |
133 | dec_prepare w3, x2, x6 | 163 | dec_prepare w3, x2, x6 |
134 | 164 | ||
135 | .LcbcdecloopNx: | 165 | .LcbcdecloopNx: |
136 | subs w4, w4, #4 | 166 | subs w4, w4, #MAX_STRIDE |
137 | bmi .Lcbcdec1x | 167 | bmi .Lcbcdec1x |
138 | ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ | 168 | ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */ |
169 | #if MAX_STRIDE == 5 | ||
170 | ld1 {v4.16b}, [x1], #16 /* get 1 ct block */ | ||
171 | mov v5.16b, v0.16b | ||
172 | mov v6.16b, v1.16b | ||
173 | mov v7.16b, v2.16b | ||
174 | bl aes_decrypt_block5x | ||
175 | sub x1, x1, #32 | ||
176 | eor v0.16b, v0.16b, cbciv.16b | ||
177 | eor v1.16b, v1.16b, v5.16b | ||
178 | ld1 {v5.16b}, [x1], #16 /* reload 1 ct block */ | ||
179 | ld1 {cbciv.16b}, [x1], #16 /* reload 1 ct block */ | ||
180 | eor v2.16b, v2.16b, v6.16b | ||
181 | eor v3.16b, v3.16b, v7.16b | ||
182 | eor v4.16b, v4.16b, v5.16b | ||
183 | #else | ||
139 | mov v4.16b, v0.16b | 184 | mov v4.16b, v0.16b |
140 | mov v5.16b, v1.16b | 185 | mov v5.16b, v1.16b |
141 | mov v6.16b, v2.16b | 186 | mov v6.16b, v2.16b |
142 | bl aes_decrypt_block4x | 187 | bl aes_decrypt_block4x |
143 | sub x1, x1, #16 | 188 | sub x1, x1, #16 |
144 | eor v0.16b, v0.16b, v7.16b | 189 | eor v0.16b, v0.16b, cbciv.16b |
145 | eor v1.16b, v1.16b, v4.16b | 190 | eor v1.16b, v1.16b, v4.16b |
146 | ld1 {v7.16b}, [x1], #16 /* reload 1 ct block */ | 191 | ld1 {cbciv.16b}, [x1], #16 /* reload 1 ct block */ |
147 | eor v2.16b, v2.16b, v5.16b | 192 | eor v2.16b, v2.16b, v5.16b |
148 | eor v3.16b, v3.16b, v6.16b | 193 | eor v3.16b, v3.16b, v6.16b |
194 | #endif | ||
149 | st1 {v0.16b-v3.16b}, [x0], #64 | 195 | st1 {v0.16b-v3.16b}, [x0], #64 |
196 | ST5( st1 {v4.16b}, [x0], #16 ) | ||
150 | b .LcbcdecloopNx | 197 | b .LcbcdecloopNx |
151 | .Lcbcdec1x: | 198 | .Lcbcdec1x: |
152 | adds w4, w4, #4 | 199 | adds w4, w4, #MAX_STRIDE |
153 | beq .Lcbcdecout | 200 | beq .Lcbcdecout |
154 | .Lcbcdecloop: | 201 | .Lcbcdecloop: |
155 | ld1 {v1.16b}, [x1], #16 /* get next ct block */ | 202 | ld1 {v1.16b}, [x1], #16 /* get next ct block */ |
156 | mov v0.16b, v1.16b /* ...and copy to v0 */ | 203 | mov v0.16b, v1.16b /* ...and copy to v0 */ |
157 | decrypt_block v0, w3, x2, x6, w7 | 204 | decrypt_block v0, w3, x2, x6, w7 |
158 | eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */ | 205 | eor v0.16b, v0.16b, cbciv.16b /* xor with iv => pt */ |
159 | mov v7.16b, v1.16b /* ct is next iv */ | 206 | mov cbciv.16b, v1.16b /* ct is next iv */ |
160 | st1 {v0.16b}, [x0], #16 | 207 | st1 {v0.16b}, [x0], #16 |
161 | subs w4, w4, #1 | 208 | subs w4, w4, #1 |
162 | bne .Lcbcdecloop | 209 | bne .Lcbcdecloop |
163 | .Lcbcdecout: | 210 | .Lcbcdecout: |
164 | st1 {v7.16b}, [x5] /* return iv */ | 211 | st1 {cbciv.16b}, [x5] /* return iv */ |
165 | ldp x29, x30, [sp], #16 | 212 | ldp x29, x30, [sp], #16 |
166 | ret | 213 | ret |
167 | AES_ENDPROC(aes_cbc_decrypt) | 214 | AES_ENDPROC(aes_cbc_decrypt) |
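The block-shuffling above (saving ciphertext into spare registers, then reloading blocks after the call) exists because CBC decryption, unlike CBC encryption, has no serial dependency between block cipher invocations: every plaintext is D(ct[i]) ^ ct[i-1], and all the D() calls are independent. A plain-C model of that property, with aes_decrypt_one() standing in for the accelerated single-block operation (illustrative only, not kernel code):

    #include <stddef.h>
    #include <string.h>

    #define BLK 16

    void aes_decrypt_one(const void *key, unsigned char *out,
                         const unsigned char *in);  /* stand-in block op */

    void cbc_decrypt(const void *key, unsigned char *buf, size_t nblocks,
                     unsigned char iv[BLK])
    {
            unsigned char prev[BLK], saved[BLK];
            size_t i, j;

            memcpy(prev, iv, BLK);
            for (i = 0; i < nblocks; i++) {
                    unsigned char *c = buf + i * BLK;

                    memcpy(saved, c, BLK);       /* this ct is the next chain value */
                    aes_decrypt_one(key, c, c);  /* independent per block: the NEON
                                                  * code runs MAX_STRIDE of these
                                                  * in flight */
                    for (j = 0; j < BLK; j++)
                            c[j] ^= prev[j];     /* chain with previous ct */
                    memcpy(prev, saved, BLK);
            }
            memcpy(iv, prev, BLK);               /* return next IV, like the asm */
    }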
@@ -255,51 +302,60 @@ AES_ENTRY(aes_ctr_encrypt) | |||
255 | mov x29, sp | 302 | mov x29, sp |
256 | 303 | ||
257 | enc_prepare w3, x2, x6 | 304 | enc_prepare w3, x2, x6 |
258 | ld1 {v4.16b}, [x5] | 305 | ld1 {vctr.16b}, [x5] |
259 | 306 | ||
260 | umov x6, v4.d[1] /* keep swabbed ctr in reg */ | 307 | umov x6, vctr.d[1] /* keep swabbed ctr in reg */ |
261 | rev x6, x6 | 308 | rev x6, x6 |
262 | cmn w6, w4 /* 32 bit overflow? */ | 309 | cmn w6, w4 /* 32 bit overflow? */ |
263 | bcs .Lctrloop | 310 | bcs .Lctrloop |
264 | .LctrloopNx: | 311 | .LctrloopNx: |
265 | subs w4, w4, #4 | 312 | subs w4, w4, #MAX_STRIDE |
266 | bmi .Lctr1x | 313 | bmi .Lctr1x |
267 | add w7, w6, #1 | 314 | add w7, w6, #1 |
268 | mov v0.16b, v4.16b | 315 | mov v0.16b, vctr.16b |
269 | add w8, w6, #2 | 316 | add w8, w6, #2 |
270 | mov v1.16b, v4.16b | 317 | mov v1.16b, vctr.16b |
318 | add w9, w6, #3 | ||
319 | mov v2.16b, vctr.16b | ||
271 | add w9, w6, #3 | 320 | add w9, w6, #3 |
272 | mov v2.16b, v4.16b | ||
273 | rev w7, w7 | 321 | rev w7, w7 |
274 | mov v3.16b, v4.16b | 322 | mov v3.16b, vctr.16b |
275 | rev w8, w8 | 323 | rev w8, w8 |
324 | ST5( mov v4.16b, vctr.16b ) | ||
276 | mov v1.s[3], w7 | 325 | mov v1.s[3], w7 |
277 | rev w9, w9 | 326 | rev w9, w9 |
327 | ST5( add w10, w6, #4 ) | ||
278 | mov v2.s[3], w8 | 328 | mov v2.s[3], w8 |
329 | ST5( rev w10, w10 ) | ||
279 | mov v3.s[3], w9 | 330 | mov v3.s[3], w9 |
331 | ST5( mov v4.s[3], w10 ) | ||
280 | ld1 {v5.16b-v7.16b}, [x1], #48 /* get 3 input blocks */ | 332 | ld1 {v5.16b-v7.16b}, [x1], #48 /* get 3 input blocks */ |
281 | bl aes_encrypt_block4x | 333 | ST4( bl aes_encrypt_block4x ) |
334 | ST5( bl aes_encrypt_block5x ) | ||
282 | eor v0.16b, v5.16b, v0.16b | 335 | eor v0.16b, v5.16b, v0.16b |
283 | ld1 {v5.16b}, [x1], #16 /* get 1 input block */ | 336 | ST4( ld1 {v5.16b}, [x1], #16 ) |
284 | eor v1.16b, v6.16b, v1.16b | 337 | eor v1.16b, v6.16b, v1.16b |
338 | ST5( ld1 {v5.16b-v6.16b}, [x1], #32 ) | ||
285 | eor v2.16b, v7.16b, v2.16b | 339 | eor v2.16b, v7.16b, v2.16b |
286 | eor v3.16b, v5.16b, v3.16b | 340 | eor v3.16b, v5.16b, v3.16b |
341 | ST5( eor v4.16b, v6.16b, v4.16b ) | ||
287 | st1 {v0.16b-v3.16b}, [x0], #64 | 342 | st1 {v0.16b-v3.16b}, [x0], #64 |
288 | add x6, x6, #4 | 343 | ST5( st1 {v4.16b}, [x0], #16 ) |
344 | add x6, x6, #MAX_STRIDE | ||
289 | rev x7, x6 | 345 | rev x7, x6 |
290 | ins v4.d[1], x7 | 346 | ins vctr.d[1], x7 |
291 | cbz w4, .Lctrout | 347 | cbz w4, .Lctrout |
292 | b .LctrloopNx | 348 | b .LctrloopNx |
293 | .Lctr1x: | 349 | .Lctr1x: |
294 | adds w4, w4, #4 | 350 | adds w4, w4, #MAX_STRIDE |
295 | beq .Lctrout | 351 | beq .Lctrout |
296 | .Lctrloop: | 352 | .Lctrloop: |
297 | mov v0.16b, v4.16b | 353 | mov v0.16b, vctr.16b |
298 | encrypt_block v0, w3, x2, x8, w7 | 354 | encrypt_block v0, w3, x2, x8, w7 |
299 | 355 | ||
300 | adds x6, x6, #1 /* increment BE ctr */ | 356 | adds x6, x6, #1 /* increment BE ctr */ |
301 | rev x7, x6 | 357 | rev x7, x6 |
302 | ins v4.d[1], x7 | 358 | ins vctr.d[1], x7 |
303 | bcs .Lctrcarry /* overflow? */ | 359 | bcs .Lctrcarry /* overflow? */ |
304 | 360 | ||
305 | .Lctrcarrydone: | 361 | .Lctrcarrydone: |
@@ -311,7 +367,7 @@ AES_ENTRY(aes_ctr_encrypt) | |||
311 | bne .Lctrloop | 367 | bne .Lctrloop |
312 | 368 | ||
313 | .Lctrout: | 369 | .Lctrout: |
314 | st1 {v4.16b}, [x5] /* return next CTR value */ | 370 | st1 {vctr.16b}, [x5] /* return next CTR value */ |
315 | ldp x29, x30, [sp], #16 | 371 | ldp x29, x30, [sp], #16 |
316 | ret | 372 | ret |
317 | 373 | ||
@@ -320,11 +376,11 @@ AES_ENTRY(aes_ctr_encrypt) | |||
320 | b .Lctrout | 376 | b .Lctrout |
321 | 377 | ||
322 | .Lctrcarry: | 378 | .Lctrcarry: |
323 | umov x7, v4.d[0] /* load upper word of ctr */ | 379 | umov x7, vctr.d[0] /* load upper word of ctr */ |
324 | rev x7, x7 /* ... to handle the carry */ | 380 | rev x7, x7 /* ... to handle the carry */ |
325 | add x7, x7, #1 | 381 | add x7, x7, #1 |
326 | rev x7, x7 | 382 | rev x7, x7 |
327 | ins v4.d[0], x7 | 383 | ins vctr.d[0], x7 |
328 | b .Lctrcarrydone | 384 | b .Lctrcarrydone |
329 | AES_ENDPROC(aes_ctr_encrypt) | 385 | AES_ENDPROC(aes_ctr_encrypt) |
330 | 386 | ||
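The vctr/x6 bookkeeping above keeps the byte-swapped low 64 bits of the big-endian counter in a general-purpose register, so the common case is a single native increment; only a low-word wraparound has to touch the high word (.Lctrcarry). A standalone C model of that increment (illustrative only; it assumes a little-endian host, as arm64 Linux is, and the kernel proper would use cpu_to_be64()/be64_to_cpu()):

    #include <stdint.h>
    #include <string.h>

    /* Increment a 128-bit big-endian CTR block. */
    static void ctr_inc_be128(uint8_t ctr[16])
    {
            uint64_t lo, hi;

            memcpy(&lo, ctr + 8, 8);
            lo = __builtin_bswap64(lo);     /* "keep swabbed ctr in reg" */
            if (++lo == 0) {                /* 64-bit wrap: propagate carry */
                    memcpy(&hi, ctr, 8);
                    hi = __builtin_bswap64(__builtin_bswap64(hi) + 1);
                    memcpy(ctr, &hi, 8);
            }
            lo = __builtin_bswap64(lo);
            memcpy(ctr + 8, &lo, 8);
    }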
diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S
index d261331747f2..2bebccc73869 100644
--- a/arch/arm64/crypto/aes-neon.S
+++ b/arch/arm64/crypto/aes-neon.S
@@ -12,6 +12,8 @@ | |||
12 | #define AES_ENDPROC(func) ENDPROC(neon_ ## func) | 12 | #define AES_ENDPROC(func) ENDPROC(neon_ ## func) |
13 | 13 | ||
14 | xtsmask .req v7 | 14 | xtsmask .req v7 |
15 | cbciv .req v7 | ||
16 | vctr .req v4 | ||
15 | 17 | ||
16 | .macro xts_reload_mask, tmp | 18 | .macro xts_reload_mask, tmp |
17 | xts_load_mask \tmp | 19 | xts_load_mask \tmp |
@@ -114,26 +116,9 @@ | |||
114 | 116 | ||
115 | /* | 117 | /* |
116 | * Interleaved versions: functionally equivalent to the | 118 | * Interleaved versions: functionally equivalent to the |
117 | * ones above, but applied to 2 or 4 AES states in parallel. | 119 | * ones above, but applied to AES states in parallel. |
118 | */ | 120 | */ |
119 | 121 | ||
120 | .macro sub_bytes_2x, in0, in1 | ||
121 | sub v8.16b, \in0\().16b, v15.16b | ||
122 | tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b | ||
123 | sub v9.16b, \in1\().16b, v15.16b | ||
124 | tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b | ||
125 | sub v10.16b, v8.16b, v15.16b | ||
126 | tbx \in0\().16b, {v20.16b-v23.16b}, v8.16b | ||
127 | sub v11.16b, v9.16b, v15.16b | ||
128 | tbx \in1\().16b, {v20.16b-v23.16b}, v9.16b | ||
129 | sub v8.16b, v10.16b, v15.16b | ||
130 | tbx \in0\().16b, {v24.16b-v27.16b}, v10.16b | ||
131 | sub v9.16b, v11.16b, v15.16b | ||
132 | tbx \in1\().16b, {v24.16b-v27.16b}, v11.16b | ||
133 | tbx \in0\().16b, {v28.16b-v31.16b}, v8.16b | ||
134 | tbx \in1\().16b, {v28.16b-v31.16b}, v9.16b | ||
135 | .endm | ||
136 | |||
137 | .macro sub_bytes_4x, in0, in1, in2, in3 | 122 | .macro sub_bytes_4x, in0, in1, in2, in3 |
138 | sub v8.16b, \in0\().16b, v15.16b | 123 | sub v8.16b, \in0\().16b, v15.16b |
139 | tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b | 124 | tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b |
@@ -212,25 +197,6 @@ | |||
212 | eor \in1\().16b, \in1\().16b, v11.16b | 197 | eor \in1\().16b, \in1\().16b, v11.16b |
213 | .endm | 198 | .endm |
214 | 199 | ||
215 | .macro do_block_2x, enc, in0, in1, rounds, rk, rkp, i | ||
216 | ld1 {v15.4s}, [\rk] | ||
217 | add \rkp, \rk, #16 | ||
218 | mov \i, \rounds | ||
219 | 1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */ | ||
220 | eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */ | ||
221 | movi v15.16b, #0x40 | ||
222 | tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */ | ||
223 | tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */ | ||
224 | sub_bytes_2x \in0, \in1 | ||
225 | subs \i, \i, #1 | ||
226 | ld1 {v15.4s}, [\rkp], #16 | ||
227 | beq 2222f | ||
228 | mix_columns_2x \in0, \in1, \enc | ||
229 | b 1111b | ||
230 | 2222: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */ | ||
231 | eor \in1\().16b, \in1\().16b, v15.16b /* ^round key */ | ||
232 | .endm | ||
233 | |||
234 | .macro do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i | 200 | .macro do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i |
235 | ld1 {v15.4s}, [\rk] | 201 | ld1 {v15.4s}, [\rk] |
236 | add \rkp, \rk, #16 | 202 | add \rkp, \rk, #16 |
@@ -257,14 +223,6 @@ | |||
257 | eor \in3\().16b, \in3\().16b, v15.16b /* ^round key */ | 223 | eor \in3\().16b, \in3\().16b, v15.16b /* ^round key */ |
258 | .endm | 224 | .endm |
259 | 225 | ||
260 | .macro encrypt_block2x, in0, in1, rounds, rk, rkp, i | ||
261 | do_block_2x 1, \in0, \in1, \rounds, \rk, \rkp, \i | ||
262 | .endm | ||
263 | |||
264 | .macro decrypt_block2x, in0, in1, rounds, rk, rkp, i | ||
265 | do_block_2x 0, \in0, \in1, \rounds, \rk, \rkp, \i | ||
266 | .endm | ||
267 | |||
268 | .macro encrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i | 226 | .macro encrypt_block4x, in0, in1, in2, in3, rounds, rk, rkp, i |
269 | do_block_4x 1, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i | 227 | do_block_4x 1, \in0, \in1, \in2, \in3, \rounds, \rk, \rkp, \i |
270 | .endm | 228 | .endm |
diff --git a/arch/arm64/crypto/chacha-neon-glue.c b/arch/arm64/crypto/chacha-neon-glue.c
index 82029cda2e77..1495d2b18518 100644
--- a/arch/arm64/crypto/chacha-neon-glue.c
+++ b/arch/arm64/crypto/chacha-neon-glue.c
@@ -60,7 +60,7 @@ static void chacha_doneon(u32 *state, u8 *dst, const u8 *src, | |||
60 | } | 60 | } |
61 | 61 | ||
62 | static int chacha_neon_stream_xor(struct skcipher_request *req, | 62 | static int chacha_neon_stream_xor(struct skcipher_request *req, |
63 | struct chacha_ctx *ctx, u8 *iv) | 63 | const struct chacha_ctx *ctx, const u8 *iv) |
64 | { | 64 | { |
65 | struct skcipher_walk walk; | 65 | struct skcipher_walk walk; |
66 | u32 state[16]; | 66 | u32 state[16]; |
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index ecb0f67e5998..bdc1b6d7aff7 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -52,7 +52,7 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data, | |||
52 | unsigned int len, u8 *out) | 52 | unsigned int len, u8 *out) |
53 | { | 53 | { |
54 | struct sha1_ce_state *sctx = shash_desc_ctx(desc); | 54 | struct sha1_ce_state *sctx = shash_desc_ctx(desc); |
55 | bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE); | 55 | bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len; |
56 | 56 | ||
57 | if (!crypto_simd_usable()) | 57 | if (!crypto_simd_usable()) |
58 | return crypto_sha1_finup(desc, data, len, out); | 58 | return crypto_sha1_finup(desc, data, len, out); |
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 955c3c2d3f5a..604a01a4ede6 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -57,7 +57,7 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data, | |||
57 | unsigned int len, u8 *out) | 57 | unsigned int len, u8 *out) |
58 | { | 58 | { |
59 | struct sha256_ce_state *sctx = shash_desc_ctx(desc); | 59 | struct sha256_ce_state *sctx = shash_desc_ctx(desc); |
60 | bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE); | 60 | bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len; |
61 | 61 | ||
62 | if (!crypto_simd_usable()) { | 62 | if (!crypto_simd_usable()) { |
63 | if (len) | 63 | if (len) |
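The sha1-ce and sha2-ce one-liners above add "&& len" because the CE assembly folds the padding into the final block it processes; a zero-length finup gives it no block at all, so finalization must go through the generic fallback or no padding block is ever emitted. Restated as a sketch (bytes_so_far corresponds to sctx->sst.count):

    /* Illustrative restatement of the fixed fast-path predicate. */
    static bool can_finalize_inline(u64 bytes_so_far, unsigned int len,
                                    unsigned int block_size)
    {
            return bytes_so_far == 0 && len != 0 && (len % block_size) == 0;
    }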
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index e9b866e87d48..73c0ccb009a0 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -371,20 +371,6 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | |||
371 | } | 371 | } |
372 | } | 372 | } |
373 | 373 | ||
374 | static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | ||
375 | { | ||
376 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); | ||
377 | |||
378 | aesni_enc(ctx, dst, src); | ||
379 | } | ||
380 | |||
381 | static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | ||
382 | { | ||
383 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); | ||
384 | |||
385 | aesni_dec(ctx, dst, src); | ||
386 | } | ||
387 | |||
388 | static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, | 374 | static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, |
389 | unsigned int len) | 375 | unsigned int len) |
390 | { | 376 | { |
@@ -920,7 +906,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req) | |||
920 | } | 906 | } |
921 | #endif | 907 | #endif |
922 | 908 | ||
923 | static struct crypto_alg aesni_algs[] = { { | 909 | static struct crypto_alg aesni_cipher_alg = { |
924 | .cra_name = "aes", | 910 | .cra_name = "aes", |
925 | .cra_driver_name = "aes-aesni", | 911 | .cra_driver_name = "aes-aesni", |
926 | .cra_priority = 300, | 912 | .cra_priority = 300, |
@@ -937,24 +923,7 @@ static struct crypto_alg aesni_algs[] = { { | |||
937 | .cia_decrypt = aes_decrypt | 923 | .cia_decrypt = aes_decrypt |
938 | } | 924 | } |
939 | } | 925 | } |
940 | }, { | 926 | }; |
941 | .cra_name = "__aes", | ||
942 | .cra_driver_name = "__aes-aesni", | ||
943 | .cra_priority = 300, | ||
944 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL, | ||
945 | .cra_blocksize = AES_BLOCK_SIZE, | ||
946 | .cra_ctxsize = CRYPTO_AES_CTX_SIZE, | ||
947 | .cra_module = THIS_MODULE, | ||
948 | .cra_u = { | ||
949 | .cipher = { | ||
950 | .cia_min_keysize = AES_MIN_KEY_SIZE, | ||
951 | .cia_max_keysize = AES_MAX_KEY_SIZE, | ||
952 | .cia_setkey = aes_set_key, | ||
953 | .cia_encrypt = __aes_encrypt, | ||
954 | .cia_decrypt = __aes_decrypt | ||
955 | } | ||
956 | } | ||
957 | } }; | ||
958 | 927 | ||
959 | static struct skcipher_alg aesni_skciphers[] = { | 928 | static struct skcipher_alg aesni_skciphers[] = { |
960 | { | 929 | { |
@@ -1150,7 +1119,7 @@ static int __init aesni_init(void) | |||
1150 | #endif | 1119 | #endif |
1151 | #endif | 1120 | #endif |
1152 | 1121 | ||
1153 | err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); | 1122 | err = crypto_register_alg(&aesni_cipher_alg); |
1154 | if (err) | 1123 | if (err) |
1155 | return err; | 1124 | return err; |
1156 | 1125 | ||
@@ -1158,7 +1127,7 @@ static int __init aesni_init(void) | |||
1158 | ARRAY_SIZE(aesni_skciphers), | 1127 | ARRAY_SIZE(aesni_skciphers), |
1159 | aesni_simd_skciphers); | 1128 | aesni_simd_skciphers); |
1160 | if (err) | 1129 | if (err) |
1161 | goto unregister_algs; | 1130 | goto unregister_cipher; |
1162 | 1131 | ||
1163 | err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads), | 1132 | err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads), |
1164 | aesni_simd_aeads); | 1133 | aesni_simd_aeads); |
@@ -1170,8 +1139,8 @@ static int __init aesni_init(void) | |||
1170 | unregister_skciphers: | 1139 | unregister_skciphers: |
1171 | simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers), | 1140 | simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers), |
1172 | aesni_simd_skciphers); | 1141 | aesni_simd_skciphers); |
1173 | unregister_algs: | 1142 | unregister_cipher: |
1174 | crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); | 1143 | crypto_unregister_alg(&aesni_cipher_alg); |
1175 | return err; | 1144 | return err; |
1176 | } | 1145 | } |
1177 | 1146 | ||
@@ -1181,7 +1150,7 @@ static void __exit aesni_exit(void) | |||
1181 | aesni_simd_aeads); | 1150 | aesni_simd_aeads); |
1182 | simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers), | 1151 | simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers), |
1183 | aesni_simd_skciphers); | 1152 | aesni_simd_skciphers); |
1184 | crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs)); | 1153 | crypto_unregister_alg(&aesni_cipher_alg); |
1185 | } | 1154 | } |
1186 | 1155 | ||
1187 | late_initcall(aesni_init); | 1156 | late_initcall(aesni_init); |
diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
index 1ce0019c059c..388f95a4ec24 100644
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -124,7 +124,7 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src, | |||
124 | } | 124 | } |
125 | 125 | ||
126 | static int chacha_simd_stream_xor(struct skcipher_walk *walk, | 126 | static int chacha_simd_stream_xor(struct skcipher_walk *walk, |
127 | struct chacha_ctx *ctx, u8 *iv) | 127 | const struct chacha_ctx *ctx, const u8 *iv) |
128 | { | 128 | { |
129 | u32 *state, state_buf[16 + 2] __aligned(8); | 129 | u32 *state, state_buf[16 + 2] __aligned(8); |
130 | int next_yield = 4096; /* bytes until next FPU yield */ | 130 | int next_yield = 4096; /* bytes until next FPU yield */ |
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 3d056e7da65f..e801450bcb1c 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -61,7 +61,6 @@ config CRYPTO_BLKCIPHER2 | |||
61 | tristate | 61 | tristate |
62 | select CRYPTO_ALGAPI2 | 62 | select CRYPTO_ALGAPI2 |
63 | select CRYPTO_RNG2 | 63 | select CRYPTO_RNG2 |
64 | select CRYPTO_WORKQUEUE | ||
65 | 64 | ||
66 | config CRYPTO_HASH | 65 | config CRYPTO_HASH |
67 | tristate | 66 | tristate |
@@ -137,10 +136,11 @@ config CRYPTO_USER | |||
137 | Userspace configuration for cryptographic instantiations such as | 136 | Userspace configuration for cryptographic instantiations such as |
138 | cbc(aes). | 137 | cbc(aes). |
139 | 138 | ||
139 | if CRYPTO_MANAGER2 | ||
140 | |||
140 | config CRYPTO_MANAGER_DISABLE_TESTS | 141 | config CRYPTO_MANAGER_DISABLE_TESTS |
141 | bool "Disable run-time self tests" | 142 | bool "Disable run-time self tests" |
142 | default y | 143 | default y |
143 | depends on CRYPTO_MANAGER2 | ||
144 | help | 144 | help |
145 | Disable run-time self tests that normally take place at | 145 | Disable run-time self tests that normally take place at |
146 | algorithm registration. | 146 | algorithm registration. |
@@ -155,14 +155,10 @@ config CRYPTO_MANAGER_EXTRA_TESTS | |||
155 | This is intended for developer use only, as these tests take much | 155 | This is intended for developer use only, as these tests take much |
156 | longer to run than the normal self tests. | 156 | longer to run than the normal self tests. |
157 | 157 | ||
158 | endif # if CRYPTO_MANAGER2 | ||
159 | |||
158 | config CRYPTO_GF128MUL | 160 | config CRYPTO_GF128MUL |
159 | tristate "GF(2^128) multiplication functions" | 161 | tristate |
160 | help | ||
161 | Efficient table driven implementation of multiplications in the | ||
162 | field GF(2^128). This is needed by some cypher modes. This | ||
163 | option will be selected automatically if you select such a | ||
164 | cipher mode. Only select this option by hand if you expect to load | ||
165 | an external module that requires these functions. | ||
166 | 162 | ||
167 | config CRYPTO_NULL | 163 | config CRYPTO_NULL |
168 | tristate "Null algorithms" | 164 | tristate "Null algorithms" |
@@ -186,15 +182,11 @@ config CRYPTO_PCRYPT | |||
186 | This converts an arbitrary crypto algorithm into a parallel | 182 | This converts an arbitrary crypto algorithm into a parallel |
187 | algorithm that executes in kernel threads. | 183 | algorithm that executes in kernel threads. |
188 | 184 | ||
189 | config CRYPTO_WORKQUEUE | ||
190 | tristate | ||
191 | |||
192 | config CRYPTO_CRYPTD | 185 | config CRYPTO_CRYPTD |
193 | tristate "Software async crypto daemon" | 186 | tristate "Software async crypto daemon" |
194 | select CRYPTO_BLKCIPHER | 187 | select CRYPTO_BLKCIPHER |
195 | select CRYPTO_HASH | 188 | select CRYPTO_HASH |
196 | select CRYPTO_MANAGER | 189 | select CRYPTO_MANAGER |
197 | select CRYPTO_WORKQUEUE | ||
198 | help | 190 | help |
199 | This is a generic software asynchronous crypto daemon that | 191 | This is a generic software asynchronous crypto daemon that |
200 | converts an arbitrary synchronous software crypto algorithm | 192 | converts an arbitrary synchronous software crypto algorithm |
@@ -279,6 +271,7 @@ config CRYPTO_CCM | |||
279 | select CRYPTO_CTR | 271 | select CRYPTO_CTR |
280 | select CRYPTO_HASH | 272 | select CRYPTO_HASH |
281 | select CRYPTO_AEAD | 273 | select CRYPTO_AEAD |
274 | select CRYPTO_MANAGER | ||
282 | help | 275 | help |
283 | Support for Counter with CBC MAC. Required for IPsec. | 276 | Support for Counter with CBC MAC. Required for IPsec. |
284 | 277 | ||
@@ -288,6 +281,7 @@ config CRYPTO_GCM | |||
288 | select CRYPTO_AEAD | 281 | select CRYPTO_AEAD |
289 | select CRYPTO_GHASH | 282 | select CRYPTO_GHASH |
290 | select CRYPTO_NULL | 283 | select CRYPTO_NULL |
284 | select CRYPTO_MANAGER | ||
291 | help | 285 | help |
292 | Support for Galois/Counter Mode (GCM) and Galois Message | 286 | Support for Galois/Counter Mode (GCM) and Galois Message |
293 | Authentication Code (GMAC). Required for IPSec. | 287 | Authentication Code (GMAC). Required for IPSec. |
@@ -297,6 +291,7 @@ config CRYPTO_CHACHA20POLY1305 | |||
297 | select CRYPTO_CHACHA20 | 291 | select CRYPTO_CHACHA20 |
298 | select CRYPTO_POLY1305 | 292 | select CRYPTO_POLY1305 |
299 | select CRYPTO_AEAD | 293 | select CRYPTO_AEAD |
294 | select CRYPTO_MANAGER | ||
300 | help | 295 | help |
301 | ChaCha20-Poly1305 AEAD support, RFC7539. | 296 | ChaCha20-Poly1305 AEAD support, RFC7539. |
302 | 297 | ||
@@ -411,6 +406,7 @@ config CRYPTO_SEQIV | |||
411 | select CRYPTO_BLKCIPHER | 406 | select CRYPTO_BLKCIPHER |
412 | select CRYPTO_NULL | 407 | select CRYPTO_NULL |
413 | select CRYPTO_RNG_DEFAULT | 408 | select CRYPTO_RNG_DEFAULT |
409 | select CRYPTO_MANAGER | ||
414 | help | 410 | help |
415 | This IV generator generates an IV based on a sequence number by | 411 | This IV generator generates an IV based on a sequence number by |
416 | xoring it with a salt. This algorithm is mainly useful for CTR | 412 | xoring it with a salt. This algorithm is mainly useful for CTR |
@@ -420,7 +416,7 @@ config CRYPTO_ECHAINIV | |||
420 | select CRYPTO_AEAD | 416 | select CRYPTO_AEAD |
421 | select CRYPTO_NULL | 417 | select CRYPTO_NULL |
422 | select CRYPTO_RNG_DEFAULT | 418 | select CRYPTO_RNG_DEFAULT |
423 | default m | 419 | select CRYPTO_MANAGER |
424 | help | 420 | help |
425 | This IV generator generates an IV based on the encryption of | 421 | This IV generator generates an IV based on the encryption of |
426 | a sequence number xored with a salt. This is the default | 422 | a sequence number xored with a salt. This is the default |
@@ -456,6 +452,7 @@ config CRYPTO_CTR | |||
456 | config CRYPTO_CTS | 452 | config CRYPTO_CTS |
457 | tristate "CTS support" | 453 | tristate "CTS support" |
458 | select CRYPTO_BLKCIPHER | 454 | select CRYPTO_BLKCIPHER |
455 | select CRYPTO_MANAGER | ||
459 | help | 456 | help |
460 | CTS: Cipher Text Stealing | 457 | CTS: Cipher Text Stealing |
461 | This is the Cipher Text Stealing mode as described by | 458 | This is the Cipher Text Stealing mode as described by |
@@ -521,6 +518,7 @@ config CRYPTO_XTS | |||
521 | config CRYPTO_KEYWRAP | 518 | config CRYPTO_KEYWRAP |
522 | tristate "Key wrapping support" | 519 | tristate "Key wrapping support" |
523 | select CRYPTO_BLKCIPHER | 520 | select CRYPTO_BLKCIPHER |
521 | select CRYPTO_MANAGER | ||
524 | help | 522 | help |
525 | Support for key wrapping (NIST SP800-38F / RFC3394) without | 523 | Support for key wrapping (NIST SP800-38F / RFC3394) without |
526 | padding. | 524 | padding. |
@@ -551,6 +549,7 @@ config CRYPTO_ADIANTUM | |||
551 | select CRYPTO_CHACHA20 | 549 | select CRYPTO_CHACHA20 |
552 | select CRYPTO_POLY1305 | 550 | select CRYPTO_POLY1305 |
553 | select CRYPTO_NHPOLY1305 | 551 | select CRYPTO_NHPOLY1305 |
552 | select CRYPTO_MANAGER | ||
554 | help | 553 | help |
555 | Adiantum is a tweakable, length-preserving encryption mode | 554 | Adiantum is a tweakable, length-preserving encryption mode |
556 | designed for fast and secure disk encryption, especially on | 555 | designed for fast and secure disk encryption, especially on |
@@ -684,6 +683,14 @@ config CRYPTO_CRC32_MIPS | |||
684 | instructions, when available. | 683 | instructions, when available. |
685 | 684 | ||
686 | 685 | ||
686 | config CRYPTO_XXHASH | ||
687 | tristate "xxHash hash algorithm" | ||
688 | select CRYPTO_HASH | ||
689 | select XXHASH | ||
690 | help | ||
691 | xxHash non-cryptographic hash algorithm. Extremely fast, working at | ||
692 | speeds close to RAM limits. | ||
693 | |||
687 | config CRYPTO_CRCT10DIF | 694 | config CRYPTO_CRCT10DIF |
688 | tristate "CRCT10DIF algorithm" | 695 | tristate "CRCT10DIF algorithm" |
689 | select CRYPTO_HASH | 696 | select CRYPTO_HASH |
@@ -1230,9 +1237,13 @@ config CRYPTO_ANUBIS | |||
1230 | <https://www.cosic.esat.kuleuven.be/nessie/reports/> | 1237 | <https://www.cosic.esat.kuleuven.be/nessie/reports/> |
1231 | <http://www.larc.usp.br/~pbarreto/AnubisPage.html> | 1238 | <http://www.larc.usp.br/~pbarreto/AnubisPage.html> |
1232 | 1239 | ||
1240 | config CRYPTO_LIB_ARC4 | ||
1241 | tristate | ||
1242 | |||
1233 | config CRYPTO_ARC4 | 1243 | config CRYPTO_ARC4 |
1234 | tristate "ARC4 cipher algorithm" | 1244 | tristate "ARC4 cipher algorithm" |
1235 | select CRYPTO_BLKCIPHER | 1245 | select CRYPTO_BLKCIPHER |
1246 | select CRYPTO_LIB_ARC4 | ||
1236 | help | 1247 | help |
1237 | ARC4 cipher algorithm. | 1248 | ARC4 cipher algorithm. |
1238 | 1249 | ||
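
The new CRYPTO_XXHASH entry above wires xxHash into the kernel's hashing API. As a minimal sketch of what that enables, the snippet below computes a one-shot digest through the shash interface; the algorithm name "xxhash64" and the 8-byte seed fed in via setkey are inferred from the generic driver added by this series, not guaranteed by the Kconfig text itself.

/*
 * Minimal sketch: one-shot xxHash digest via the shash API.
 * Assumes the generic driver registers "xxhash64" and takes an
 * 8-byte seed through setkey; the digest written to out is 8 bytes.
 */
#include <crypto/hash.h>
#include <linux/err.h>

static int xxhash_example(const void *buf, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	u8 seed[8] = { 0 };	/* illustrative all-zero seed */
	int err;

	tfm = crypto_alloc_shash("xxhash64", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, seed, sizeof(seed));
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, buf, len, out);
	}

	crypto_free_shash(tfm);
	return err;
}
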
diff --git a/crypto/Makefile b/crypto/Makefile index 266a4cdbb9e2..9479e1a45d8c 100644 --- a/crypto/Makefile +++ b/crypto/Makefile | |||
@@ -6,8 +6,6 @@ | |||
6 | obj-$(CONFIG_CRYPTO) += crypto.o | 6 | obj-$(CONFIG_CRYPTO) += crypto.o |
7 | crypto-y := api.o cipher.o compress.o memneq.o | 7 | crypto-y := api.o cipher.o compress.o memneq.o |
8 | 8 | ||
9 | obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o | ||
10 | |||
11 | obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o | 9 | obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o |
12 | obj-$(CONFIG_CRYPTO_FIPS) += fips.o | 10 | obj-$(CONFIG_CRYPTO_FIPS) += fips.o |
13 | 11 | ||
@@ -131,6 +129,7 @@ obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o | |||
131 | obj-$(CONFIG_CRYPTO_LZO) += lzo.o lzo-rle.o | 129 | obj-$(CONFIG_CRYPTO_LZO) += lzo.o lzo-rle.o |
132 | obj-$(CONFIG_CRYPTO_LZ4) += lz4.o | 130 | obj-$(CONFIG_CRYPTO_LZ4) += lz4.o |
133 | obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o | 131 | obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o |
132 | obj-$(CONFIG_CRYPTO_XXHASH) += xxhash_generic.o | ||
134 | obj-$(CONFIG_CRYPTO_842) += 842.o | 133 | obj-$(CONFIG_CRYPTO_842) += 842.o |
135 | obj-$(CONFIG_CRYPTO_RNG2) += rng.o | 134 | obj-$(CONFIG_CRYPTO_RNG2) += rng.o |
136 | obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o | 135 | obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o |
diff --git a/crypto/aead.c b/crypto/aead.c index c3c158ba9883..fbf0ec93bc8e 100644 --- a/crypto/aead.c +++ b/crypto/aead.c | |||
@@ -84,6 +84,42 @@ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) | |||
84 | } | 84 | } |
85 | EXPORT_SYMBOL_GPL(crypto_aead_setauthsize); | 85 | EXPORT_SYMBOL_GPL(crypto_aead_setauthsize); |
86 | 86 | ||
87 | int crypto_aead_encrypt(struct aead_request *req) | ||
88 | { | ||
89 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
90 | struct crypto_alg *alg = aead->base.__crt_alg; | ||
91 | unsigned int cryptlen = req->cryptlen; | ||
92 | int ret; | ||
93 | |||
94 | crypto_stats_get(alg); | ||
95 | if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) | ||
96 | ret = -ENOKEY; | ||
97 | else | ||
98 | ret = crypto_aead_alg(aead)->encrypt(req); | ||
99 | crypto_stats_aead_encrypt(cryptlen, alg, ret); | ||
100 | return ret; | ||
101 | } | ||
102 | EXPORT_SYMBOL_GPL(crypto_aead_encrypt); | ||
103 | |||
104 | int crypto_aead_decrypt(struct aead_request *req) | ||
105 | { | ||
106 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
107 | struct crypto_alg *alg = aead->base.__crt_alg; | ||
108 | unsigned int cryptlen = req->cryptlen; | ||
109 | int ret; | ||
110 | |||
111 | crypto_stats_get(alg); | ||
112 | if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) | ||
113 | ret = -ENOKEY; | ||
114 | else if (req->cryptlen < crypto_aead_authsize(aead)) | ||
115 | ret = -EINVAL; | ||
116 | else | ||
117 | ret = crypto_aead_alg(aead)->decrypt(req); | ||
118 | crypto_stats_aead_decrypt(cryptlen, alg, ret); | ||
119 | return ret; | ||
120 | } | ||
121 | EXPORT_SYMBOL_GPL(crypto_aead_decrypt); | ||
122 | |||
87 | static void crypto_aead_exit_tfm(struct crypto_tfm *tfm) | 123 | static void crypto_aead_exit_tfm(struct crypto_tfm *tfm) |
88 | { | 124 | { |
89 | struct crypto_aead *aead = __crypto_aead_cast(tfm); | 125 | struct crypto_aead *aead = __crypto_aead_cast(tfm); |
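
crypto_aead_encrypt() and crypto_aead_decrypt() are now out of line so they can do statistics accounting and reject unkeyed tfms with -ENOKEY before dispatching. For orientation, a hedged sketch of the caller side, run synchronously via crypto_wait_req(); "gcm(aes)" and the 16-byte tag are illustrative choices, and error handling is abbreviated.

/*
 * Sketch of a synchronous caller of crypto_aead_encrypt().
 * "gcm(aes)" and the tag size are examples, not something this
 * patch dictates.
 */
#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int aead_encrypt_example(const u8 *key, unsigned int keylen, u8 *iv,
				struct scatterlist *sg, unsigned int assoclen,
				unsigned int ptlen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen) ?:
	      crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	/* sg holds assoc data then plaintext, with room for the tag */
	aead_request_set_crypt(req, sg, sg, ptlen, iv);

	/* without a key, this now fails fast with -ENOKEY */
	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}
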
diff --git a/crypto/algapi.c b/crypto/algapi.c index 313a7682cef1..de30ddc952d8 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c | |||
@@ -21,23 +21,6 @@ | |||
21 | 21 | ||
22 | static LIST_HEAD(crypto_template_list); | 22 | static LIST_HEAD(crypto_template_list); |
23 | 23 | ||
24 | static inline int crypto_set_driver_name(struct crypto_alg *alg) | ||
25 | { | ||
26 | static const char suffix[] = "-generic"; | ||
27 | char *driver_name = alg->cra_driver_name; | ||
28 | int len; | ||
29 | |||
30 | if (*driver_name) | ||
31 | return 0; | ||
32 | |||
33 | len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); | ||
34 | if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME) | ||
35 | return -ENAMETOOLONG; | ||
36 | |||
37 | memcpy(driver_name + len, suffix, sizeof(suffix)); | ||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | static inline void crypto_check_module_sig(struct module *mod) | 24 | static inline void crypto_check_module_sig(struct module *mod) |
42 | { | 25 | { |
43 | if (fips_enabled && mod && !module_sig_ok(mod)) | 26 | if (fips_enabled && mod && !module_sig_ok(mod)) |
@@ -49,6 +32,9 @@ static int crypto_check_alg(struct crypto_alg *alg) | |||
49 | { | 32 | { |
50 | crypto_check_module_sig(alg->cra_module); | 33 | crypto_check_module_sig(alg->cra_module); |
51 | 34 | ||
35 | if (!alg->cra_name[0] || !alg->cra_driver_name[0]) | ||
36 | return -EINVAL; | ||
37 | |||
52 | if (alg->cra_alignmask & (alg->cra_alignmask + 1)) | 38 | if (alg->cra_alignmask & (alg->cra_alignmask + 1)) |
53 | return -EINVAL; | 39 | return -EINVAL; |
54 | 40 | ||
@@ -74,7 +60,7 @@ static int crypto_check_alg(struct crypto_alg *alg) | |||
74 | 60 | ||
75 | refcount_set(&alg->cra_refcnt, 1); | 61 | refcount_set(&alg->cra_refcnt, 1); |
76 | 62 | ||
77 | return crypto_set_driver_name(alg); | 63 | return 0; |
78 | } | 64 | } |
79 | 65 | ||
80 | static void crypto_free_instance(struct crypto_instance *inst) | 66 | static void crypto_free_instance(struct crypto_instance *inst) |
@@ -947,19 +933,6 @@ struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue) | |||
947 | } | 933 | } |
948 | EXPORT_SYMBOL_GPL(crypto_dequeue_request); | 934 | EXPORT_SYMBOL_GPL(crypto_dequeue_request); |
949 | 935 | ||
950 | int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm) | ||
951 | { | ||
952 | struct crypto_async_request *req; | ||
953 | |||
954 | list_for_each_entry(req, &queue->list, list) { | ||
955 | if (req->tfm == tfm) | ||
956 | return 1; | ||
957 | } | ||
958 | |||
959 | return 0; | ||
960 | } | ||
961 | EXPORT_SYMBOL_GPL(crypto_tfm_in_queue); | ||
962 | |||
963 | static inline void crypto_inc_byte(u8 *a, unsigned int size) | 936 | static inline void crypto_inc_byte(u8 *a, unsigned int size) |
964 | { | 937 | { |
965 | u8 *b = (a + size); | 938 | u8 *b = (a + size); |
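
With crypto_set_driver_name() removed, crypto_check_alg() now fails registration with -EINVAL unless both names are filled in, which is what the follow-on hunks adding cra_driver_name to the generic implementations are about. Illustratively, a registration must now carry both fields (the values below are placeholders; the "-generic" suffix remains a naming convention for C implementations, it is just no longer appended automatically):

static struct crypto_alg example_alg = {
	.cra_name		= "example",          /* name users request */
	.cra_driver_name	= "example-generic",  /* now mandatory */
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= 16,
	.cra_ctxsize		= 0,
	.cra_module		= THIS_MODULE,
	/* cipher callbacks elided */
};
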
diff --git a/crypto/anubis.c b/crypto/anubis.c index 673927de0eb9..f9ce78fde6ee 100644 --- a/crypto/anubis.c +++ b/crypto/anubis.c | |||
@@ -673,6 +673,7 @@ static void anubis_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | |||
673 | 673 | ||
674 | static struct crypto_alg anubis_alg = { | 674 | static struct crypto_alg anubis_alg = { |
675 | .cra_name = "anubis", | 675 | .cra_name = "anubis", |
676 | .cra_driver_name = "anubis-generic", | ||
676 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 677 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, |
677 | .cra_blocksize = ANUBIS_BLOCK_SIZE, | 678 | .cra_blocksize = ANUBIS_BLOCK_SIZE, |
678 | .cra_ctxsize = sizeof (struct anubis_ctx), | 679 | .cra_ctxsize = sizeof (struct anubis_ctx), |
diff --git a/crypto/arc4.c b/crypto/arc4.c index a2120e06bf84..aa79571dbd49 100644 --- a/crypto/arc4.c +++ b/crypto/arc4.c | |||
@@ -13,84 +13,15 @@ | |||
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | 15 | ||
16 | struct arc4_ctx { | 16 | static int crypto_arc4_setkey(struct crypto_skcipher *tfm, const u8 *in_key, |
17 | u32 S[256]; | 17 | unsigned int key_len) |
18 | u32 x, y; | ||
19 | }; | ||
20 | |||
21 | static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key, | ||
22 | unsigned int key_len) | ||
23 | { | ||
24 | struct arc4_ctx *ctx = crypto_tfm_ctx(tfm); | ||
25 | int i, j = 0, k = 0; | ||
26 | |||
27 | ctx->x = 1; | ||
28 | ctx->y = 0; | ||
29 | |||
30 | for (i = 0; i < 256; i++) | ||
31 | ctx->S[i] = i; | ||
32 | |||
33 | for (i = 0; i < 256; i++) { | ||
34 | u32 a = ctx->S[i]; | ||
35 | j = (j + in_key[k] + a) & 0xff; | ||
36 | ctx->S[i] = ctx->S[j]; | ||
37 | ctx->S[j] = a; | ||
38 | if (++k >= key_len) | ||
39 | k = 0; | ||
40 | } | ||
41 | |||
42 | return 0; | ||
43 | } | ||
44 | |||
45 | static int arc4_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key, | ||
46 | unsigned int key_len) | ||
47 | { | 18 | { |
48 | return arc4_set_key(&tfm->base, in_key, key_len); | 19 | struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm); |
49 | } | ||
50 | |||
51 | static void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, | ||
52 | unsigned int len) | ||
53 | { | ||
54 | u32 *const S = ctx->S; | ||
55 | u32 x, y, a, b; | ||
56 | u32 ty, ta, tb; | ||
57 | |||
58 | if (len == 0) | ||
59 | return; | ||
60 | |||
61 | x = ctx->x; | ||
62 | y = ctx->y; | ||
63 | |||
64 | a = S[x]; | ||
65 | y = (y + a) & 0xff; | ||
66 | b = S[y]; | ||
67 | |||
68 | do { | ||
69 | S[y] = a; | ||
70 | a = (a + b) & 0xff; | ||
71 | S[x] = b; | ||
72 | x = (x + 1) & 0xff; | ||
73 | ta = S[x]; | ||
74 | ty = (y + ta) & 0xff; | ||
75 | tb = S[ty]; | ||
76 | *out++ = *in++ ^ S[a]; | ||
77 | if (--len == 0) | ||
78 | break; | ||
79 | y = ty; | ||
80 | a = ta; | ||
81 | b = tb; | ||
82 | } while (true); | ||
83 | |||
84 | ctx->x = x; | ||
85 | ctx->y = y; | ||
86 | } | ||
87 | 20 | ||
88 | static void arc4_crypt_one(struct crypto_tfm *tfm, u8 *out, const u8 *in) | 21 | return arc4_setkey(ctx, in_key, key_len); |
89 | { | ||
90 | arc4_crypt(crypto_tfm_ctx(tfm), out, in, 1); | ||
91 | } | 22 | } |
92 | 23 | ||
93 | static int ecb_arc4_crypt(struct skcipher_request *req) | 24 | static int crypto_arc4_crypt(struct skcipher_request *req) |
94 | { | 25 | { |
95 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | 26 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
96 | struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm); | 27 | struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm); |
@@ -108,54 +39,32 @@ static int ecb_arc4_crypt(struct skcipher_request *req) | |||
108 | return err; | 39 | return err; |
109 | } | 40 | } |
110 | 41 | ||
111 | static struct crypto_alg arc4_cipher = { | 42 | static struct skcipher_alg arc4_alg = { |
112 | .cra_name = "arc4", | 43 | /* |
113 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 44 | * For legacy reasons, this is named "ecb(arc4)", not "arc4". |
114 | .cra_blocksize = ARC4_BLOCK_SIZE, | 45 | * Nevertheless it's actually a stream cipher, not a block cipher. |
115 | .cra_ctxsize = sizeof(struct arc4_ctx), | 46 | */ |
116 | .cra_module = THIS_MODULE, | ||
117 | .cra_u = { | ||
118 | .cipher = { | ||
119 | .cia_min_keysize = ARC4_MIN_KEY_SIZE, | ||
120 | .cia_max_keysize = ARC4_MAX_KEY_SIZE, | ||
121 | .cia_setkey = arc4_set_key, | ||
122 | .cia_encrypt = arc4_crypt_one, | ||
123 | .cia_decrypt = arc4_crypt_one, | ||
124 | }, | ||
125 | }, | ||
126 | }; | ||
127 | |||
128 | static struct skcipher_alg arc4_skcipher = { | ||
129 | .base.cra_name = "ecb(arc4)", | 47 | .base.cra_name = "ecb(arc4)", |
48 | .base.cra_driver_name = "ecb(arc4)-generic", | ||
130 | .base.cra_priority = 100, | 49 | .base.cra_priority = 100, |
131 | .base.cra_blocksize = ARC4_BLOCK_SIZE, | 50 | .base.cra_blocksize = ARC4_BLOCK_SIZE, |
132 | .base.cra_ctxsize = sizeof(struct arc4_ctx), | 51 | .base.cra_ctxsize = sizeof(struct arc4_ctx), |
133 | .base.cra_module = THIS_MODULE, | 52 | .base.cra_module = THIS_MODULE, |
134 | .min_keysize = ARC4_MIN_KEY_SIZE, | 53 | .min_keysize = ARC4_MIN_KEY_SIZE, |
135 | .max_keysize = ARC4_MAX_KEY_SIZE, | 54 | .max_keysize = ARC4_MAX_KEY_SIZE, |
136 | .setkey = arc4_set_key_skcipher, | 55 | .setkey = crypto_arc4_setkey, |
137 | .encrypt = ecb_arc4_crypt, | 56 | .encrypt = crypto_arc4_crypt, |
138 | .decrypt = ecb_arc4_crypt, | 57 | .decrypt = crypto_arc4_crypt, |
139 | }; | 58 | }; |
140 | 59 | ||
141 | static int __init arc4_init(void) | 60 | static int __init arc4_init(void) |
142 | { | 61 | { |
143 | int err; | 62 | return crypto_register_skcipher(&arc4_alg); |
144 | |||
145 | err = crypto_register_alg(&arc4_cipher); | ||
146 | if (err) | ||
147 | return err; | ||
148 | |||
149 | err = crypto_register_skcipher(&arc4_skcipher); | ||
150 | if (err) | ||
151 | crypto_unregister_alg(&arc4_cipher); | ||
152 | return err; | ||
153 | } | 63 | } |
154 | 64 | ||
155 | static void __exit arc4_exit(void) | 65 | static void __exit arc4_exit(void) |
156 | { | 66 | { |
157 | crypto_unregister_alg(&arc4_cipher); | 67 | crypto_unregister_skcipher(&arc4_alg); |
158 | crypto_unregister_skcipher(&arc4_skcipher); | ||
159 | } | 68 | } |
160 | 69 | ||
161 | subsys_initcall(arc4_init); | 70 | subsys_initcall(arc4_init); |
@@ -164,4 +73,4 @@ module_exit(arc4_exit); | |||
164 | MODULE_LICENSE("GPL"); | 73 | MODULE_LICENSE("GPL"); |
165 | MODULE_DESCRIPTION("ARC4 Cipher Algorithm"); | 74 | MODULE_DESCRIPTION("ARC4 Cipher Algorithm"); |
166 | MODULE_AUTHOR("Jon Oberheide <jon@oberheide.org>"); | 75 | MODULE_AUTHOR("Jon Oberheide <jon@oberheide.org>"); |
167 | MODULE_ALIAS_CRYPTO("arc4"); | 76 | MODULE_ALIAS_CRYPTO("ecb(arc4)"); |
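
The module now delegates to the new ARC4 library (CRYPTO_LIB_ARC4), which callers such as wireless drivers can also use directly, with no crypto API tfm at all. A sketch, assuming <crypto/arc4.h> declares the arc4_setkey()/arc4_crypt() pair that the patch calls:

/*
 * Direct use of the library helper selected above; no crypto API
 * involvement. Signatures follow the calls visible in this patch.
 */
#include <crypto/arc4.h>
#include <linux/string.h>

static int arc4_lib_example(const u8 *key, unsigned int keylen,
			    u8 *buf, unsigned int len)
{
	struct arc4_ctx ctx;
	int err;

	err = arc4_setkey(&ctx, key, keylen);
	if (err)
		return err;

	arc4_crypt(&ctx, buf, buf, len);	/* stream cipher: in place is fine */
	memzero_explicit(&ctx, sizeof(ctx));	/* wipe the key schedule */
	return 0;
}
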
diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig index be70ca6c85d3..1f1f004dc757 100644 --- a/crypto/asymmetric_keys/Kconfig +++ b/crypto/asymmetric_keys/Kconfig | |||
@@ -15,6 +15,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE | |||
15 | select MPILIB | 15 | select MPILIB |
16 | select CRYPTO_HASH_INFO | 16 | select CRYPTO_HASH_INFO |
17 | select CRYPTO_AKCIPHER | 17 | select CRYPTO_AKCIPHER |
18 | select CRYPTO_HASH | ||
18 | help | 19 | help |
19 | This option provides support for asymmetric public key type handling. | 20 | This option provides support for asymmetric public key type handling. |
20 | If signature generation and/or verification are to be used, | 21 | If signature generation and/or verification are to be used, |
@@ -65,6 +66,7 @@ config TPM_KEY_PARSER | |||
65 | config PKCS7_MESSAGE_PARSER | 66 | config PKCS7_MESSAGE_PARSER |
66 | tristate "PKCS#7 message parser" | 67 | tristate "PKCS#7 message parser" |
67 | depends on X509_CERTIFICATE_PARSER | 68 | depends on X509_CERTIFICATE_PARSER |
69 | select CRYPTO_HASH | ||
68 | select ASN1 | 70 | select ASN1 |
69 | select OID_REGISTRY | 71 | select OID_REGISTRY |
70 | help | 72 | help |
@@ -87,6 +89,7 @@ config SIGNED_PE_FILE_VERIFICATION | |||
87 | bool "Support for PE file signature verification" | 89 | bool "Support for PE file signature verification" |
88 | depends on PKCS7_MESSAGE_PARSER=y | 90 | depends on PKCS7_MESSAGE_PARSER=y |
89 | depends on SYSTEM_DATA_VERIFICATION | 91 | depends on SYSTEM_DATA_VERIFICATION |
92 | select CRYPTO_HASH | ||
90 | select ASN1 | 93 | select ASN1 |
91 | select OID_REGISTRY | 94 | select OID_REGISTRY |
92 | help | 95 | help |
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index 2db7eac4bf3b..74e824e537e6 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c | |||
@@ -61,6 +61,8 @@ struct chachapoly_req_ctx { | |||
61 | unsigned int cryptlen; | 61 | unsigned int cryptlen; |
62 | /* Actual AD, excluding IV */ | 62 | /* Actual AD, excluding IV */ |
63 | unsigned int assoclen; | 63 | unsigned int assoclen; |
64 | /* request flags, with MAY_SLEEP cleared if needed */ | ||
65 | u32 flags; | ||
64 | union { | 66 | union { |
65 | struct poly_req poly; | 67 | struct poly_req poly; |
66 | struct chacha_req chacha; | 68 | struct chacha_req chacha; |
@@ -70,8 +72,12 @@ struct chachapoly_req_ctx { | |||
70 | static inline void async_done_continue(struct aead_request *req, int err, | 72 | static inline void async_done_continue(struct aead_request *req, int err, |
71 | int (*cont)(struct aead_request *)) | 73 | int (*cont)(struct aead_request *)) |
72 | { | 74 | { |
73 | if (!err) | 75 | if (!err) { |
76 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | ||
77 | |||
78 | rctx->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; | ||
74 | err = cont(req); | 79 | err = cont(req); |
80 | } | ||
75 | 81 | ||
76 | if (err != -EINPROGRESS && err != -EBUSY) | 82 | if (err != -EINPROGRESS && err != -EBUSY) |
77 | aead_request_complete(req, err); | 83 | aead_request_complete(req, err); |
@@ -129,16 +135,12 @@ static int chacha_decrypt(struct aead_request *req) | |||
129 | 135 | ||
130 | chacha_iv(creq->iv, req, 1); | 136 | chacha_iv(creq->iv, req, 1); |
131 | 137 | ||
132 | sg_init_table(rctx->src, 2); | ||
133 | src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen); | 138 | src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen); |
134 | dst = src; | 139 | dst = src; |
135 | 140 | if (req->src != req->dst) | |
136 | if (req->src != req->dst) { | ||
137 | sg_init_table(rctx->dst, 2); | ||
138 | dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen); | 141 | dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen); |
139 | } | ||
140 | 142 | ||
141 | skcipher_request_set_callback(&creq->req, aead_request_flags(req), | 143 | skcipher_request_set_callback(&creq->req, rctx->flags, |
142 | chacha_decrypt_done, req); | 144 | chacha_decrypt_done, req); |
143 | skcipher_request_set_tfm(&creq->req, ctx->chacha); | 145 | skcipher_request_set_tfm(&creq->req, ctx->chacha); |
144 | skcipher_request_set_crypt(&creq->req, src, dst, | 146 | skcipher_request_set_crypt(&creq->req, src, dst, |
@@ -172,17 +174,13 @@ static int poly_tail(struct aead_request *req) | |||
172 | struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); | 174 | struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); |
173 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | 175 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); |
174 | struct poly_req *preq = &rctx->u.poly; | 176 | struct poly_req *preq = &rctx->u.poly; |
175 | __le64 len; | ||
176 | int err; | 177 | int err; |
177 | 178 | ||
178 | sg_init_table(preq->src, 1); | 179 | preq->tail.assoclen = cpu_to_le64(rctx->assoclen); |
179 | len = cpu_to_le64(rctx->assoclen); | 180 | preq->tail.cryptlen = cpu_to_le64(rctx->cryptlen); |
180 | memcpy(&preq->tail.assoclen, &len, sizeof(len)); | 181 | sg_init_one(preq->src, &preq->tail, sizeof(preq->tail)); |
181 | len = cpu_to_le64(rctx->cryptlen); | ||
182 | memcpy(&preq->tail.cryptlen, &len, sizeof(len)); | ||
183 | sg_set_buf(preq->src, &preq->tail, sizeof(preq->tail)); | ||
184 | 182 | ||
185 | ahash_request_set_callback(&preq->req, aead_request_flags(req), | 183 | ahash_request_set_callback(&preq->req, rctx->flags, |
186 | poly_tail_done, req); | 184 | poly_tail_done, req); |
187 | ahash_request_set_tfm(&preq->req, ctx->poly); | 185 | ahash_request_set_tfm(&preq->req, ctx->poly); |
188 | ahash_request_set_crypt(&preq->req, preq->src, | 186 | ahash_request_set_crypt(&preq->req, preq->src, |
@@ -205,15 +203,14 @@ static int poly_cipherpad(struct aead_request *req) | |||
205 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | 203 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); |
206 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | 204 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); |
207 | struct poly_req *preq = &rctx->u.poly; | 205 | struct poly_req *preq = &rctx->u.poly; |
208 | unsigned int padlen, bs = POLY1305_BLOCK_SIZE; | 206 | unsigned int padlen; |
209 | int err; | 207 | int err; |
210 | 208 | ||
211 | padlen = (bs - (rctx->cryptlen % bs)) % bs; | 209 | padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE; |
212 | memset(preq->pad, 0, sizeof(preq->pad)); | 210 | memset(preq->pad, 0, sizeof(preq->pad)); |
213 | sg_init_table(preq->src, 1); | 211 | sg_init_one(preq->src, preq->pad, padlen); |
214 | sg_set_buf(preq->src, &preq->pad, padlen); | ||
215 | 212 | ||
216 | ahash_request_set_callback(&preq->req, aead_request_flags(req), | 213 | ahash_request_set_callback(&preq->req, rctx->flags, |
217 | poly_cipherpad_done, req); | 214 | poly_cipherpad_done, req); |
218 | ahash_request_set_tfm(&preq->req, ctx->poly); | 215 | ahash_request_set_tfm(&preq->req, ctx->poly); |
219 | ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen); | 216 | ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen); |
@@ -241,10 +238,9 @@ static int poly_cipher(struct aead_request *req) | |||
241 | if (rctx->cryptlen == req->cryptlen) /* encrypting */ | 238 | if (rctx->cryptlen == req->cryptlen) /* encrypting */ |
242 | crypt = req->dst; | 239 | crypt = req->dst; |
243 | 240 | ||
244 | sg_init_table(rctx->src, 2); | ||
245 | crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen); | 241 | crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen); |
246 | 242 | ||
247 | ahash_request_set_callback(&preq->req, aead_request_flags(req), | 243 | ahash_request_set_callback(&preq->req, rctx->flags, |
248 | poly_cipher_done, req); | 244 | poly_cipher_done, req); |
249 | ahash_request_set_tfm(&preq->req, ctx->poly); | 245 | ahash_request_set_tfm(&preq->req, ctx->poly); |
250 | ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen); | 246 | ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen); |
@@ -266,15 +262,14 @@ static int poly_adpad(struct aead_request *req) | |||
266 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | 262 | struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); |
267 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | 263 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); |
268 | struct poly_req *preq = &rctx->u.poly; | 264 | struct poly_req *preq = &rctx->u.poly; |
269 | unsigned int padlen, bs = POLY1305_BLOCK_SIZE; | 265 | unsigned int padlen; |
270 | int err; | 266 | int err; |
271 | 267 | ||
272 | padlen = (bs - (rctx->assoclen % bs)) % bs; | 268 | padlen = -rctx->assoclen % POLY1305_BLOCK_SIZE; |
273 | memset(preq->pad, 0, sizeof(preq->pad)); | 269 | memset(preq->pad, 0, sizeof(preq->pad)); |
274 | sg_init_table(preq->src, 1); | 270 | sg_init_one(preq->src, preq->pad, padlen); |
275 | sg_set_buf(preq->src, preq->pad, padlen); | ||
276 | 271 | ||
277 | ahash_request_set_callback(&preq->req, aead_request_flags(req), | 272 | ahash_request_set_callback(&preq->req, rctx->flags, |
278 | poly_adpad_done, req); | 273 | poly_adpad_done, req); |
279 | ahash_request_set_tfm(&preq->req, ctx->poly); | 274 | ahash_request_set_tfm(&preq->req, ctx->poly); |
280 | ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen); | 275 | ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen); |
@@ -298,7 +293,7 @@ static int poly_ad(struct aead_request *req) | |||
298 | struct poly_req *preq = &rctx->u.poly; | 293 | struct poly_req *preq = &rctx->u.poly; |
299 | int err; | 294 | int err; |
300 | 295 | ||
301 | ahash_request_set_callback(&preq->req, aead_request_flags(req), | 296 | ahash_request_set_callback(&preq->req, rctx->flags, |
302 | poly_ad_done, req); | 297 | poly_ad_done, req); |
303 | ahash_request_set_tfm(&preq->req, ctx->poly); | 298 | ahash_request_set_tfm(&preq->req, ctx->poly); |
304 | ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen); | 299 | ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen); |
@@ -322,10 +317,9 @@ static int poly_setkey(struct aead_request *req) | |||
322 | struct poly_req *preq = &rctx->u.poly; | 317 | struct poly_req *preq = &rctx->u.poly; |
323 | int err; | 318 | int err; |
324 | 319 | ||
325 | sg_init_table(preq->src, 1); | 320 | sg_init_one(preq->src, rctx->key, sizeof(rctx->key)); |
326 | sg_set_buf(preq->src, rctx->key, sizeof(rctx->key)); | ||
327 | 321 | ||
328 | ahash_request_set_callback(&preq->req, aead_request_flags(req), | 322 | ahash_request_set_callback(&preq->req, rctx->flags, |
329 | poly_setkey_done, req); | 323 | poly_setkey_done, req); |
330 | ahash_request_set_tfm(&preq->req, ctx->poly); | 324 | ahash_request_set_tfm(&preq->req, ctx->poly); |
331 | ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key)); | 325 | ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key)); |
@@ -349,7 +343,7 @@ static int poly_init(struct aead_request *req) | |||
349 | struct poly_req *preq = &rctx->u.poly; | 343 | struct poly_req *preq = &rctx->u.poly; |
350 | int err; | 344 | int err; |
351 | 345 | ||
352 | ahash_request_set_callback(&preq->req, aead_request_flags(req), | 346 | ahash_request_set_callback(&preq->req, rctx->flags, |
353 | poly_init_done, req); | 347 | poly_init_done, req); |
354 | ahash_request_set_tfm(&preq->req, ctx->poly); | 348 | ahash_request_set_tfm(&preq->req, ctx->poly); |
355 | 349 | ||
@@ -381,13 +375,12 @@ static int poly_genkey(struct aead_request *req) | |||
381 | rctx->assoclen -= 8; | 375 | rctx->assoclen -= 8; |
382 | } | 376 | } |
383 | 377 | ||
384 | sg_init_table(creq->src, 1); | ||
385 | memset(rctx->key, 0, sizeof(rctx->key)); | 378 | memset(rctx->key, 0, sizeof(rctx->key)); |
386 | sg_set_buf(creq->src, rctx->key, sizeof(rctx->key)); | 379 | sg_init_one(creq->src, rctx->key, sizeof(rctx->key)); |
387 | 380 | ||
388 | chacha_iv(creq->iv, req, 0); | 381 | chacha_iv(creq->iv, req, 0); |
389 | 382 | ||
390 | skcipher_request_set_callback(&creq->req, aead_request_flags(req), | 383 | skcipher_request_set_callback(&creq->req, rctx->flags, |
391 | poly_genkey_done, req); | 384 | poly_genkey_done, req); |
392 | skcipher_request_set_tfm(&creq->req, ctx->chacha); | 385 | skcipher_request_set_tfm(&creq->req, ctx->chacha); |
393 | skcipher_request_set_crypt(&creq->req, creq->src, creq->src, | 386 | skcipher_request_set_crypt(&creq->req, creq->src, creq->src, |
@@ -418,16 +411,12 @@ static int chacha_encrypt(struct aead_request *req) | |||
418 | 411 | ||
419 | chacha_iv(creq->iv, req, 1); | 412 | chacha_iv(creq->iv, req, 1); |
420 | 413 | ||
421 | sg_init_table(rctx->src, 2); | ||
422 | src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen); | 414 | src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen); |
423 | dst = src; | 415 | dst = src; |
424 | 416 | if (req->src != req->dst) | |
425 | if (req->src != req->dst) { | ||
426 | sg_init_table(rctx->dst, 2); | ||
427 | dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen); | 417 | dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen); |
428 | } | ||
429 | 418 | ||
430 | skcipher_request_set_callback(&creq->req, aead_request_flags(req), | 419 | skcipher_request_set_callback(&creq->req, rctx->flags, |
431 | chacha_encrypt_done, req); | 420 | chacha_encrypt_done, req); |
432 | skcipher_request_set_tfm(&creq->req, ctx->chacha); | 421 | skcipher_request_set_tfm(&creq->req, ctx->chacha); |
433 | skcipher_request_set_crypt(&creq->req, src, dst, | 422 | skcipher_request_set_crypt(&creq->req, src, dst, |
@@ -445,6 +434,7 @@ static int chachapoly_encrypt(struct aead_request *req) | |||
445 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | 434 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); |
446 | 435 | ||
447 | rctx->cryptlen = req->cryptlen; | 436 | rctx->cryptlen = req->cryptlen; |
437 | rctx->flags = aead_request_flags(req); | ||
448 | 438 | ||
449 | /* encrypt call chain: | 439 | /* encrypt call chain: |
450 | * - chacha_encrypt/done() | 440 | * - chacha_encrypt/done() |
@@ -466,6 +456,7 @@ static int chachapoly_decrypt(struct aead_request *req) | |||
466 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); | 456 | struct chachapoly_req_ctx *rctx = aead_request_ctx(req); |
467 | 457 | ||
468 | rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE; | 458 | rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE; |
459 | rctx->flags = aead_request_flags(req); | ||
469 | 460 | ||
470 | /* decrypt call chain: | 461 | /* decrypt call chain: |
471 | * - poly_genkey/done() | 462 | * - poly_genkey/done() |
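
The padding rewrite in poly_cipherpad()/poly_adpad() leans on unsigned wrap-around: for an unsigned len, -len % bs equals (bs - len % bs) % bs whenever bs divides 2^32, which holds for the 16-byte Poly1305 block. For example, len = 35 gives -35 % 16 = 13, the same as 16 - (35 % 16). A quick self-check:

/*
 * Verify both padding formulas agree for unsigned lengths and a
 * block size that divides 2^32 (16 does).
 */
#include <linux/bug.h>

static void padlen_check(void)
{
	unsigned int len, bs = 16;	/* POLY1305_BLOCK_SIZE */

	for (len = 0; len < 64; len++)
		BUG_ON(-len % bs != (bs - len % bs) % bs);
}
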
diff --git a/crypto/chacha_generic.c b/crypto/chacha_generic.c index 04404c479e68..085d8d219987 100644 --- a/crypto/chacha_generic.c +++ b/crypto/chacha_generic.c | |||
@@ -32,7 +32,7 @@ static void chacha_docrypt(u32 *state, u8 *dst, const u8 *src, | |||
32 | } | 32 | } |
33 | 33 | ||
34 | static int chacha_stream_xor(struct skcipher_request *req, | 34 | static int chacha_stream_xor(struct skcipher_request *req, |
35 | struct chacha_ctx *ctx, u8 *iv) | 35 | const struct chacha_ctx *ctx, const u8 *iv) |
36 | { | 36 | { |
37 | struct skcipher_walk walk; | 37 | struct skcipher_walk walk; |
38 | u32 state[16]; | 38 | u32 state[16]; |
@@ -56,7 +56,7 @@ static int chacha_stream_xor(struct skcipher_request *req, | |||
56 | return err; | 56 | return err; |
57 | } | 57 | } |
58 | 58 | ||
59 | void crypto_chacha_init(u32 *state, struct chacha_ctx *ctx, u8 *iv) | 59 | void crypto_chacha_init(u32 *state, const struct chacha_ctx *ctx, const u8 *iv) |
60 | { | 60 | { |
61 | state[0] = 0x61707865; /* "expa" */ | 61 | state[0] = 0x61707865; /* "expa" */ |
62 | state[1] = 0x3320646e; /* "nd 3" */ | 62 | state[1] = 0x3320646e; /* "nd 3" */ |
diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 5f76c6e222c6..3748f9b4516d 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <crypto/internal/aead.h> | 16 | #include <crypto/internal/aead.h> |
17 | #include <crypto/internal/skcipher.h> | 17 | #include <crypto/internal/skcipher.h> |
18 | #include <crypto/cryptd.h> | 18 | #include <crypto/cryptd.h> |
19 | #include <crypto/crypto_wq.h> | ||
20 | #include <linux/atomic.h> | 19 | #include <linux/atomic.h> |
21 | #include <linux/err.h> | 20 | #include <linux/err.h> |
22 | #include <linux/init.h> | 21 | #include <linux/init.h> |
@@ -26,11 +25,14 @@ | |||
26 | #include <linux/scatterlist.h> | 25 | #include <linux/scatterlist.h> |
27 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
28 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/workqueue.h> | ||
29 | 29 | ||
30 | static unsigned int cryptd_max_cpu_qlen = 1000; | 30 | static unsigned int cryptd_max_cpu_qlen = 1000; |
31 | module_param(cryptd_max_cpu_qlen, uint, 0); | 31 | module_param(cryptd_max_cpu_qlen, uint, 0); |
32 | MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth"); | 32 | MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth"); |
33 | 33 | ||
34 | static struct workqueue_struct *cryptd_wq; | ||
35 | |||
34 | struct cryptd_cpu_queue { | 36 | struct cryptd_cpu_queue { |
35 | struct crypto_queue queue; | 37 | struct crypto_queue queue; |
36 | struct work_struct work; | 38 | struct work_struct work; |
@@ -136,7 +138,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue, | |||
136 | if (err == -ENOSPC) | 138 | if (err == -ENOSPC) |
137 | goto out_put_cpu; | 139 | goto out_put_cpu; |
138 | 140 | ||
139 | queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); | 141 | queue_work_on(cpu, cryptd_wq, &cpu_queue->work); |
140 | 142 | ||
141 | if (!atomic_read(refcnt)) | 143 | if (!atomic_read(refcnt)) |
142 | goto out_put_cpu; | 144 | goto out_put_cpu; |
@@ -179,7 +181,7 @@ static void cryptd_queue_worker(struct work_struct *work) | |||
179 | req->complete(req, 0); | 181 | req->complete(req, 0); |
180 | 182 | ||
181 | if (cpu_queue->queue.qlen) | 183 | if (cpu_queue->queue.qlen) |
182 | queue_work(kcrypto_wq, &cpu_queue->work); | 184 | queue_work(cryptd_wq, &cpu_queue->work); |
183 | } | 185 | } |
184 | 186 | ||
185 | static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm) | 187 | static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm) |
@@ -919,7 +921,7 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
919 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { | 921 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { |
920 | case CRYPTO_ALG_TYPE_BLKCIPHER: | 922 | case CRYPTO_ALG_TYPE_BLKCIPHER: |
921 | return cryptd_create_skcipher(tmpl, tb, &queue); | 923 | return cryptd_create_skcipher(tmpl, tb, &queue); |
922 | case CRYPTO_ALG_TYPE_DIGEST: | 924 | case CRYPTO_ALG_TYPE_HASH: |
923 | return cryptd_create_hash(tmpl, tb, &queue); | 925 | return cryptd_create_hash(tmpl, tb, &queue); |
924 | case CRYPTO_ALG_TYPE_AEAD: | 926 | case CRYPTO_ALG_TYPE_AEAD: |
925 | return cryptd_create_aead(tmpl, tb, &queue); | 927 | return cryptd_create_aead(tmpl, tb, &queue); |
@@ -1119,19 +1121,31 @@ static int __init cryptd_init(void) | |||
1119 | { | 1121 | { |
1120 | int err; | 1122 | int err; |
1121 | 1123 | ||
1124 | cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, | ||
1125 | 1); | ||
1126 | if (!cryptd_wq) | ||
1127 | return -ENOMEM; | ||
1128 | |||
1122 | err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen); | 1129 | err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen); |
1123 | if (err) | 1130 | if (err) |
1124 | return err; | 1131 | goto err_destroy_wq; |
1125 | 1132 | ||
1126 | err = crypto_register_template(&cryptd_tmpl); | 1133 | err = crypto_register_template(&cryptd_tmpl); |
1127 | if (err) | 1134 | if (err) |
1128 | cryptd_fini_queue(&queue); | 1135 | goto err_fini_queue; |
1129 | 1136 | ||
1137 | return 0; | ||
1138 | |||
1139 | err_fini_queue: | ||
1140 | cryptd_fini_queue(&queue); | ||
1141 | err_destroy_wq: | ||
1142 | destroy_workqueue(cryptd_wq); | ||
1130 | return err; | 1143 | return err; |
1131 | } | 1144 | } |
1132 | 1145 | ||
1133 | static void __exit cryptd_exit(void) | 1146 | static void __exit cryptd_exit(void) |
1134 | { | 1147 | { |
1148 | destroy_workqueue(cryptd_wq); | ||
1135 | cryptd_fini_queue(&queue); | 1149 | cryptd_fini_queue(&queue); |
1136 | crypto_unregister_template(&cryptd_tmpl); | 1150 | crypto_unregister_template(&cryptd_tmpl); |
1137 | } | 1151 | } |
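
cryptd now owns a private workqueue instead of the shared kcrypto_wq (deleted further down). The flags carry over from the old queue: WQ_MEM_RECLAIM keeps the queue making progress under memory pressure, WQ_CPU_INTENSIVE exempts long-running requests from the per-CPU concurrency accounting, and a max_active of 1 serializes items on each CPU. A minimal sketch of the same lifecycle, with illustrative names:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example",
				     WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
	return example_wq ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);	/* drains pending work first */
}

module_init(example_init);
module_exit(example_exit);
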
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index 0d341ddecd54..5b84b0f7cc17 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c | |||
@@ -100,6 +100,7 @@ static struct shash_alg digest_null = { | |||
100 | .final = null_final, | 100 | .final = null_final, |
101 | .base = { | 101 | .base = { |
102 | .cra_name = "digest_null", | 102 | .cra_name = "digest_null", |
103 | .cra_driver_name = "digest_null-generic", | ||
103 | .cra_blocksize = NULL_BLOCK_SIZE, | 104 | .cra_blocksize = NULL_BLOCK_SIZE, |
104 | .cra_module = THIS_MODULE, | 105 | .cra_module = THIS_MODULE, |
105 | } | 106 | } |
@@ -122,6 +123,7 @@ static struct skcipher_alg skcipher_null = { | |||
122 | 123 | ||
123 | static struct crypto_alg null_algs[] = { { | 124 | static struct crypto_alg null_algs[] = { { |
124 | .cra_name = "cipher_null", | 125 | .cra_name = "cipher_null", |
126 | .cra_driver_name = "cipher_null-generic", | ||
125 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 127 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, |
126 | .cra_blocksize = NULL_BLOCK_SIZE, | 128 | .cra_blocksize = NULL_BLOCK_SIZE, |
127 | .cra_ctxsize = 0, | 129 | .cra_ctxsize = 0, |
@@ -134,6 +136,7 @@ static struct crypto_alg null_algs[] = { { | |||
134 | .cia_decrypt = null_crypt } } | 136 | .cia_decrypt = null_crypt } } |
135 | }, { | 137 | }, { |
136 | .cra_name = "compress_null", | 138 | .cra_name = "compress_null", |
139 | .cra_driver_name = "compress_null-generic", | ||
137 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | 140 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, |
138 | .cra_blocksize = NULL_BLOCK_SIZE, | 141 | .cra_blocksize = NULL_BLOCK_SIZE, |
139 | .cra_ctxsize = 0, | 142 | .cra_ctxsize = 0, |
diff --git a/crypto/crypto_wq.c b/crypto/crypto_wq.c deleted file mode 100644 index 80501928e0bb..000000000000 --- a/crypto/crypto_wq.c +++ /dev/null | |||
@@ -1,35 +0,0 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Workqueue for crypto subsystem | ||
4 | * | ||
5 | * Copyright (c) 2009 Intel Corp. | ||
6 | * Author: Huang Ying <ying.huang@intel.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/workqueue.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <crypto/algapi.h> | ||
12 | #include <crypto/crypto_wq.h> | ||
13 | |||
14 | struct workqueue_struct *kcrypto_wq; | ||
15 | EXPORT_SYMBOL_GPL(kcrypto_wq); | ||
16 | |||
17 | static int __init crypto_wq_init(void) | ||
18 | { | ||
19 | kcrypto_wq = alloc_workqueue("crypto", | ||
20 | WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1); | ||
21 | if (unlikely(!kcrypto_wq)) | ||
22 | return -ENOMEM; | ||
23 | return 0; | ||
24 | } | ||
25 | |||
26 | static void __exit crypto_wq_exit(void) | ||
27 | { | ||
28 | destroy_workqueue(kcrypto_wq); | ||
29 | } | ||
30 | |||
31 | subsys_initcall(crypto_wq_init); | ||
32 | module_exit(crypto_wq_exit); | ||
33 | |||
34 | MODULE_LICENSE("GPL"); | ||
35 | MODULE_DESCRIPTION("Workqueue for crypto subsystem"); | ||
diff --git a/crypto/deflate.c b/crypto/deflate.c index 65be51456398..4c0e6c9d942a 100644 --- a/crypto/deflate.c +++ b/crypto/deflate.c | |||
@@ -275,6 +275,7 @@ static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src, | |||
275 | 275 | ||
276 | static struct crypto_alg alg = { | 276 | static struct crypto_alg alg = { |
277 | .cra_name = "deflate", | 277 | .cra_name = "deflate", |
278 | .cra_driver_name = "deflate-generic", | ||
278 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | 279 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, |
279 | .cra_ctxsize = sizeof(struct deflate_ctx), | 280 | .cra_ctxsize = sizeof(struct deflate_ctx), |
280 | .cra_module = THIS_MODULE, | 281 | .cra_module = THIS_MODULE, |
diff --git a/crypto/drbg.c b/crypto/drbg.c index 2a5b16bb000c..b6929eb5f565 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c | |||
@@ -220,6 +220,57 @@ static inline unsigned short drbg_sec_strength(drbg_flag_t flags) | |||
220 | } | 220 | } |
221 | 221 | ||
222 | /* | 222 | /* |
223 | * FIPS 140-2 continuous self test for the noise source | ||
224 | * The test is performed on the noise source input data. Thus, the function | ||
225 | * implicitly knows the size of the buffer to be equal to the security | ||
226 | * strength. | ||
227 | * | ||
228 | * Note, this function disregards the nonce trailing the entropy data during | ||
229 | * initial seeding. | ||
230 | * | ||
231 | * drbg->drbg_mutex must have been taken. | ||
232 | * | ||
233 | * @drbg DRBG handle | ||
234 | * @entropy buffer of seed data to be checked | ||
235 | * | ||
236 | * return: | ||
237 | * 0 on success | ||
238 | * -EAGAIN when the CTRNG is not yet primed | ||
239 | * < 0 on error | ||
240 | */ | ||
241 | static int drbg_fips_continuous_test(struct drbg_state *drbg, | ||
242 | const unsigned char *entropy) | ||
243 | { | ||
244 | unsigned short entropylen = drbg_sec_strength(drbg->core->flags); | ||
245 | int ret = 0; | ||
246 | |||
247 | if (!IS_ENABLED(CONFIG_CRYPTO_FIPS)) | ||
248 | return 0; | ||
249 | |||
250 | /* skip test if we test the overall system */ | ||
251 | if (list_empty(&drbg->test_data.list)) | ||
252 | return 0; | ||
253 | /* only perform test in FIPS mode */ | ||
254 | if (!fips_enabled) | ||
255 | return 0; | ||
256 | |||
257 | if (!drbg->fips_primed) { | ||
258 | /* Priming of FIPS test */ | ||
259 | memcpy(drbg->prev, entropy, entropylen); | ||
260 | drbg->fips_primed = true; | ||
261 | /* priming: another round is needed */ | ||
262 | return -EAGAIN; | ||
263 | } | ||
264 | ret = memcmp(drbg->prev, entropy, entropylen); | ||
265 | if (!ret) | ||
266 | panic("DRBG continuous self test failed\n"); | ||
267 | memcpy(drbg->prev, entropy, entropylen); | ||
268 | |||
269 | /* the test shall pass when the two values are not equal */ | ||
270 | return 0; | ||
271 | } | ||
272 | |||
273 | /* | ||
223 | * Convert an integer into a byte representation of this integer. | 274 | * Convert an integer into a byte representation of this integer. |
224 | * The byte representation is big-endian | 275 | * The byte representation is big-endian |
225 | * | 276 | * |
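
A toy model of the comparator contract above, presented outside the kernel; the struct and names are illustrative only. The first draw merely primes prev and reports -EAGAIN, so callers loop for fresh bytes; each later draw must differ from its predecessor.

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

struct toy_ctrng {
	bool primed;
	unsigned char prev[32];
};

static int toy_continuous_test(struct toy_ctrng *s,
			       const unsigned char *buf, size_t len)
{
	if (!s->primed) {
		memcpy(s->prev, buf, len);
		s->primed = true;
		return -EAGAIN;		/* caller must draw fresh bytes */
	}
	if (!memcmp(s->prev, buf, len))
		return -EPERM;		/* stuck source; the kernel panics */
	memcpy(s->prev, buf, len);	/* slide the comparison window */
	return 0;
}
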
@@ -998,6 +1049,22 @@ static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed, | |||
998 | return ret; | 1049 | return ret; |
999 | } | 1050 | } |
1000 | 1051 | ||
1052 | static inline int drbg_get_random_bytes(struct drbg_state *drbg, | ||
1053 | unsigned char *entropy, | ||
1054 | unsigned int entropylen) | ||
1055 | { | ||
1056 | int ret; | ||
1057 | |||
1058 | do { | ||
1059 | get_random_bytes(entropy, entropylen); | ||
1060 | ret = drbg_fips_continuous_test(drbg, entropy); | ||
1061 | if (ret && ret != -EAGAIN) | ||
1062 | return ret; | ||
1063 | } while (ret); | ||
1064 | |||
1065 | return 0; | ||
1066 | } | ||
1067 | |||
1001 | static void drbg_async_seed(struct work_struct *work) | 1068 | static void drbg_async_seed(struct work_struct *work) |
1002 | { | 1069 | { |
1003 | struct drbg_string data; | 1070 | struct drbg_string data; |
@@ -1006,16 +1073,20 @@ static void drbg_async_seed(struct work_struct *work) | |||
1006 | seed_work); | 1073 | seed_work); |
1007 | unsigned int entropylen = drbg_sec_strength(drbg->core->flags); | 1074 | unsigned int entropylen = drbg_sec_strength(drbg->core->flags); |
1008 | unsigned char entropy[32]; | 1075 | unsigned char entropy[32]; |
1076 | int ret; | ||
1009 | 1077 | ||
1010 | BUG_ON(!entropylen); | 1078 | BUG_ON(!entropylen); |
1011 | BUG_ON(entropylen > sizeof(entropy)); | 1079 | BUG_ON(entropylen > sizeof(entropy)); |
1012 | get_random_bytes(entropy, entropylen); | ||
1013 | 1080 | ||
1014 | drbg_string_fill(&data, entropy, entropylen); | 1081 | drbg_string_fill(&data, entropy, entropylen); |
1015 | list_add_tail(&data.list, &seedlist); | 1082 | list_add_tail(&data.list, &seedlist); |
1016 | 1083 | ||
1017 | mutex_lock(&drbg->drbg_mutex); | 1084 | mutex_lock(&drbg->drbg_mutex); |
1018 | 1085 | ||
1086 | ret = drbg_get_random_bytes(drbg, entropy, entropylen); | ||
1087 | if (ret) | ||
1088 | goto unlock; | ||
1089 | |||
1019 | /* If nonblocking pool is initialized, deactivate Jitter RNG */ | 1090 | /* If nonblocking pool is initialized, deactivate Jitter RNG */ |
1020 | crypto_free_rng(drbg->jent); | 1091 | crypto_free_rng(drbg->jent); |
1021 | drbg->jent = NULL; | 1092 | drbg->jent = NULL; |
@@ -1030,6 +1101,7 @@ static void drbg_async_seed(struct work_struct *work) | |||
1030 | if (drbg->seeded) | 1101 | if (drbg->seeded) |
1031 | drbg->reseed_threshold = drbg_max_requests(drbg); | 1102 | drbg->reseed_threshold = drbg_max_requests(drbg); |
1032 | 1103 | ||
1104 | unlock: | ||
1033 | mutex_unlock(&drbg->drbg_mutex); | 1105 | mutex_unlock(&drbg->drbg_mutex); |
1034 | 1106 | ||
1035 | memzero_explicit(entropy, entropylen); | 1107 | memzero_explicit(entropy, entropylen); |
@@ -1081,7 +1153,9 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, | |||
1081 | BUG_ON((entropylen * 2) > sizeof(entropy)); | 1153 | BUG_ON((entropylen * 2) > sizeof(entropy)); |
1082 | 1154 | ||
1083 | /* Get seed from in-kernel /dev/urandom */ | 1155 | /* Get seed from in-kernel /dev/urandom */ |
1084 | get_random_bytes(entropy, entropylen); | 1156 | ret = drbg_get_random_bytes(drbg, entropy, entropylen); |
1157 | if (ret) | ||
1158 | goto out; | ||
1085 | 1159 | ||
1086 | if (!drbg->jent) { | 1160 | if (!drbg->jent) { |
1087 | drbg_string_fill(&data1, entropy, entropylen); | 1161 | drbg_string_fill(&data1, entropy, entropylen); |
@@ -1094,7 +1168,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, | |||
1094 | entropylen); | 1168 | entropylen); |
1095 | if (ret) { | 1169 | if (ret) { |
1096 | pr_devel("DRBG: jent failed with %d\n", ret); | 1170 | pr_devel("DRBG: jent failed with %d\n", ret); |
1097 | return ret; | 1171 | goto out; |
1098 | } | 1172 | } |
1099 | 1173 | ||
1100 | drbg_string_fill(&data1, entropy, entropylen * 2); | 1174 | drbg_string_fill(&data1, entropy, entropylen * 2); |
@@ -1121,6 +1195,7 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, | |||
1121 | 1195 | ||
1122 | ret = __drbg_seed(drbg, &seedlist, reseed); | 1196 | ret = __drbg_seed(drbg, &seedlist, reseed); |
1123 | 1197 | ||
1198 | out: | ||
1124 | memzero_explicit(entropy, entropylen * 2); | 1199 | memzero_explicit(entropy, entropylen * 2); |
1125 | 1200 | ||
1126 | return ret; | 1201 | return ret; |
@@ -1142,6 +1217,11 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg) | |||
1142 | drbg->reseed_ctr = 0; | 1217 | drbg->reseed_ctr = 0; |
1143 | drbg->d_ops = NULL; | 1218 | drbg->d_ops = NULL; |
1144 | drbg->core = NULL; | 1219 | drbg->core = NULL; |
1220 | if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) { | ||
1221 | kzfree(drbg->prev); | ||
1222 | drbg->prev = NULL; | ||
1223 | drbg->fips_primed = false; | ||
1224 | } | ||
1145 | } | 1225 | } |
1146 | 1226 | ||
1147 | /* | 1227 | /* |
@@ -1211,6 +1291,14 @@ static inline int drbg_alloc_state(struct drbg_state *drbg) | |||
1211 | drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1); | 1291 | drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1); |
1212 | } | 1292 | } |
1213 | 1293 | ||
1294 | if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) { | ||
1295 | drbg->prev = kzalloc(drbg_sec_strength(drbg->core->flags), | ||
1296 | GFP_KERNEL); | ||
1297 | if (!drbg->prev) | ||
1298 | goto fini; | ||
1299 | drbg->fips_primed = false; | ||
1300 | } | ||
1301 | |||
1214 | return 0; | 1302 | return 0; |
1215 | 1303 | ||
1216 | fini: | 1304 | fini: |
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c index 4e8704405a3b..58f935315cf8 100644 --- a/crypto/fcrypt.c +++ b/crypto/fcrypt.c | |||
@@ -391,6 +391,7 @@ static int fcrypt_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key | |||
391 | 391 | ||
392 | static struct crypto_alg fcrypt_alg = { | 392 | static struct crypto_alg fcrypt_alg = { |
393 | .cra_name = "fcrypt", | 393 | .cra_name = "fcrypt", |
394 | .cra_driver_name = "fcrypt-generic", | ||
394 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 395 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, |
395 | .cra_blocksize = 8, | 396 | .cra_blocksize = 8, |
396 | .cra_ctxsize = sizeof(struct fcrypt_ctx), | 397 | .cra_ctxsize = sizeof(struct fcrypt_ctx), |
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c index 6425b9cd718e..dad9e1f91a78 100644 --- a/crypto/ghash-generic.c +++ b/crypto/ghash-generic.c | |||
@@ -31,6 +31,7 @@ static int ghash_setkey(struct crypto_shash *tfm, | |||
31 | const u8 *key, unsigned int keylen) | 31 | const u8 *key, unsigned int keylen) |
32 | { | 32 | { |
33 | struct ghash_ctx *ctx = crypto_shash_ctx(tfm); | 33 | struct ghash_ctx *ctx = crypto_shash_ctx(tfm); |
34 | be128 k; | ||
34 | 35 | ||
35 | if (keylen != GHASH_BLOCK_SIZE) { | 36 | if (keylen != GHASH_BLOCK_SIZE) { |
36 | crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | 37 | crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
@@ -39,7 +40,12 @@ static int ghash_setkey(struct crypto_shash *tfm, | |||
39 | 40 | ||
40 | if (ctx->gf128) | 41 | if (ctx->gf128) |
41 | gf128mul_free_4k(ctx->gf128); | 42 | gf128mul_free_4k(ctx->gf128); |
42 | ctx->gf128 = gf128mul_init_4k_lle((be128 *)key); | 43 | |
44 | BUILD_BUG_ON(sizeof(k) != GHASH_BLOCK_SIZE); | ||
45 | memcpy(&k, key, GHASH_BLOCK_SIZE); /* avoid violating alignment rules */ | ||
46 | ctx->gf128 = gf128mul_init_4k_lle(&k); | ||
47 | memzero_explicit(&k, GHASH_BLOCK_SIZE); | ||
48 | |||
43 | if (!ctx->gf128) | 49 | if (!ctx->gf128) |
44 | return -ENOMEM; | 50 | return -ENOMEM; |
45 | 51 | ||
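
The setkey change exists because the caller's key buffer carries no alignment guarantee, while gf128mul_init_4k_lle() dereferences a be128, which wants natural alignment on some architectures. The general shape of the fix, as an illustrative helper rather than a kernel API:

/*
 * Bounce caller-supplied bytes through an aligned local before an
 * API that dereferences a wider type, and wipe the copy when it
 * holds key material.
 */
#include <crypto/gf128mul.h>
#include <linux/string.h>

static struct gf128mul_4k *import_ghash_key(const u8 *key)
{
	struct gf128mul_4k *tbl;
	be128 k;

	memcpy(&k, key, sizeof(k));	/* key may be unaligned; copy is safe */
	tbl = gf128mul_init_4k_lle(&k);
	memzero_explicit(&k, sizeof(k));	/* no key bytes left on the stack */
	return tbl;			/* NULL on allocation failure */
}
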
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index 787dccca3715..701b8d86ab49 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c | |||
@@ -56,11 +56,6 @@ void jent_entropy_collector_free(struct rand_data *entropy_collector); | |||
56 | * Helper function | 56 | * Helper function |
57 | ***************************************************************************/ | 57 | ***************************************************************************/ |
58 | 58 | ||
59 | __u64 jent_rol64(__u64 word, unsigned int shift) | ||
60 | { | ||
61 | return rol64(word, shift); | ||
62 | } | ||
63 | |||
64 | void *jent_zalloc(unsigned int len) | 59 | void *jent_zalloc(unsigned int len) |
65 | { | 60 | { |
66 | return kzalloc(len, GFP_KERNEL); | 61 | return kzalloc(len, GFP_KERNEL); |
diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index acf44b2d2d1d..77fa2120fe0c 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Non-physical true random number generator based on timing jitter -- | 2 | * Non-physical true random number generator based on timing jitter -- |
3 | * Jitter RNG standalone code. | 3 | * Jitter RNG standalone code. |
4 | * | 4 | * |
5 | * Copyright Stephan Mueller <smueller@chronox.de>, 2015 | 5 | * Copyright Stephan Mueller <smueller@chronox.de>, 2015 - 2019 |
6 | * | 6 | * |
7 | * Design | 7 | * Design |
8 | * ====== | 8 | * ====== |
@@ -47,7 +47,7 @@ | |||
47 | 47 | ||
48 | /* | 48 | /* |
49 | * This Jitterentropy RNG is based on the jitterentropy library | 49 | * This Jitterentropy RNG is based on the jitterentropy library |
50 | * version 1.1.0 provided at http://www.chronox.de/jent.html | 50 | * version 2.1.2 provided at http://www.chronox.de/jent.html |
51 | */ | 51 | */ |
52 | 52 | ||
53 | #ifdef __OPTIMIZE__ | 53 | #ifdef __OPTIMIZE__ |
@@ -71,10 +71,7 @@ struct rand_data { | |||
71 | #define DATA_SIZE_BITS ((sizeof(__u64)) * 8) | 71 | #define DATA_SIZE_BITS ((sizeof(__u64)) * 8) |
72 | __u64 last_delta; /* SENSITIVE stuck test */ | 72 | __u64 last_delta; /* SENSITIVE stuck test */ |
73 | __s64 last_delta2; /* SENSITIVE stuck test */ | 73 | __s64 last_delta2; /* SENSITIVE stuck test */ |
74 | unsigned int stuck:1; /* Time measurement stuck */ | ||
75 | unsigned int osr; /* Oversample rate */ | 74 | unsigned int osr; /* Oversample rate */ |
76 | unsigned int stir:1; /* Post-processing stirring */ | ||
77 | unsigned int disable_unbias:1; /* Deactivate Von Neumann unbias */ | ||
78 | #define JENT_MEMORY_BLOCKS 64 | 75 | #define JENT_MEMORY_BLOCKS 64 |
79 | #define JENT_MEMORY_BLOCKSIZE 32 | 76 | #define JENT_MEMORY_BLOCKSIZE 32 |
80 | #define JENT_MEMORY_ACCESSLOOPS 128 | 77 | #define JENT_MEMORY_ACCESSLOOPS 128 |
@@ -89,8 +86,6 @@ struct rand_data { | |||
89 | }; | 86 | }; |
90 | 87 | ||
91 | /* Flags that can be used to initialize the RNG */ | 88 | /* Flags that can be used to initialize the RNG */ |
92 | #define JENT_DISABLE_STIR (1<<0) /* Disable stirring the entropy pool */ | ||
93 | #define JENT_DISABLE_UNBIAS (1<<1) /* Disable the Von Neumann Unbiaser */ | ||
94 | #define JENT_DISABLE_MEMORY_ACCESS (1<<2) /* Disable memory access for more | 89 | #define JENT_DISABLE_MEMORY_ACCESS (1<<2) /* Disable memory access for more |
95 | * entropy, saves MEMORY_SIZE RAM for | 90 | * entropy, saves MEMORY_SIZE RAM for |
96 | * entropy collector */ | 91 | * entropy collector */ |
@@ -99,19 +94,16 @@ struct rand_data { | |||
99 | #define JENT_ENOTIME 1 /* Timer service not available */ | 94 | #define JENT_ENOTIME 1 /* Timer service not available */ |
100 | #define JENT_ECOARSETIME 2 /* Timer too coarse for RNG */ | 95 | #define JENT_ECOARSETIME 2 /* Timer too coarse for RNG */ |
101 | #define JENT_ENOMONOTONIC 3 /* Timer is not monotonic increasing */ | 96 | #define JENT_ENOMONOTONIC 3 /* Timer is not monotonic increasing */ |
102 | #define JENT_EMINVARIATION 4 /* Timer variations too small for RNG */ | ||
103 | #define JENT_EVARVAR 5 /* Timer does not produce variations of | 97 | #define JENT_EVARVAR 5 /* Timer does not produce variations of |
104 | * variations (2nd derivation of time is | 98 | * variations (2nd derivation of time is |
105 | * zero). */ | 99 | * zero). */ |
106 | #define JENT_EMINVARVAR 6 /* Timer variations of variations is too | 100 | #define JENT_ESTUCK 8 /* Too many stuck results during init. */ |
107 | * small. */ | ||
108 | 101 | ||
109 | /*************************************************************************** | 102 | /*************************************************************************** |
110 | * Helper functions | 103 | * Helper functions |
111 | ***************************************************************************/ | 104 | ***************************************************************************/ |
112 | 105 | ||
113 | void jent_get_nstime(__u64 *out); | 106 | void jent_get_nstime(__u64 *out); |
114 | __u64 jent_rol64(__u64 word, unsigned int shift); | ||
115 | void *jent_zalloc(unsigned int len); | 107 | void *jent_zalloc(unsigned int len); |
116 | void jent_zfree(void *ptr); | 108 | void jent_zfree(void *ptr); |
117 | int jent_fips_enabled(void); | 109 | int jent_fips_enabled(void); |
@@ -140,16 +132,16 @@ static __u64 jent_loop_shuffle(struct rand_data *ec, | |||
140 | 132 | ||
141 | jent_get_nstime(&time); | 133 | jent_get_nstime(&time); |
142 | /* | 134 | /* |
143 | * mix the current state of the random number into the shuffle | 135 | * Mix the current state of the random number into the shuffle |
144 | * calculation to balance that shuffle a bit more | 136 | * calculation to balance that shuffle a bit more. |
145 | */ | 137 | */ |
146 | if (ec) | 138 | if (ec) |
147 | time ^= ec->data; | 139 | time ^= ec->data; |
148 | /* | 140 | /* |
149 | * we fold the time value as much as possible to ensure that as many | 141 | * We fold the time value as much as possible to ensure that as many |
150 | * bits of the time stamp are included as possible | 142 | * bits of the time stamp are included as possible. |
151 | */ | 143 | */ |
152 | for (i = 0; (DATA_SIZE_BITS / bits) > i; i++) { | 144 | for (i = 0; ((DATA_SIZE_BITS + bits - 1) / bits) > i; i++) { |
153 | shuffle ^= time & mask; | 145 | shuffle ^= time & mask; |
154 | time = time >> bits; | 146 | time = time >> bits; |
155 | } | 147 | } |
@@ -169,38 +161,28 @@ static __u64 jent_loop_shuffle(struct rand_data *ec, | |||
169 | * CPU Jitter noise source -- this is the noise source based on the CPU | 161 | * CPU Jitter noise source -- this is the noise source based on the CPU |
170 | * execution time jitter | 162 | * execution time jitter |
171 | * | 163 | * |
172 | * This function folds the time into one bit units by iterating | 164 | * This function injects the individual bits of the time value into the |
173 | * through the DATA_SIZE_BITS bit time value as follows: assume our time value | 165 | * entropy pool using an LFSR. |
174 | * is 0xabcd | ||
175 | * 1st loop, 1st shift generates 0xd000 | ||
176 | * 1st loop, 2nd shift generates 0x000d | ||
177 | * 2nd loop, 1st shift generates 0xcd00 | ||
178 | * 2nd loop, 2nd shift generates 0x000c | ||
179 | * 3rd loop, 1st shift generates 0xbcd0 | ||
180 | * 3rd loop, 2nd shift generates 0x000b | ||
181 | * 4th loop, 1st shift generates 0xabcd | ||
182 | * 4th loop, 2nd shift generates 0x000a | ||
183 | * Now, the values at the end of the 2nd shifts are XORed together. | ||
184 | * | 166 | * |
185 | * The code is deliberately inefficient and shall stay that way. This function | 167 | * The code is deliberately inefficient with respect to the bit shifting |
186 | * is the root cause why the code shall be compiled without optimization. This | 168 | * and shall stay that way. This function is the root cause why the code |
187 | * function not only acts as folding operation, but this function's execution | 169 | * shall be compiled without optimization. This function not only acts as |
188 | * is used to measure the CPU execution time jitter. Any change to the loop in | 170 | * folding operation, but this function's execution is used to measure |
189 | * this function implies that careful retesting must be done. | 171 | * the CPU execution time jitter. Any change to the loop in this function |
172 | * implies that careful retesting must be done. | ||
190 | * | 173 | * |
191 | * Input: | 174 | * Input: |
192 | * @ec entropy collector struct -- may be NULL | 175 | * @ec entropy collector struct -- may be NULL |
193 | * @time time stamp to be folded | 176 | * @time time stamp to be injected |
194 | * @loop_cnt if a value not equal to 0 is set, use the given value as number of | 177 | * @loop_cnt if a value not equal to 0 is set, use the given value as number of |
195 | * loops to perform the folding | 178 | * loops to perform the folding |
196 | * | 179 | * |
197 | * Output: | 180 | * Output: |
198 | * @folded result of folding operation | 181 | * updated ec->data |
199 | * | 182 | * |
200 | * @return Number of loops the folding operation is performed | 183 | * @return Number of loops the folding operation is performed |
201 | */ | 184 | */ |
202 | static __u64 jent_fold_time(struct rand_data *ec, __u64 time, | 185 | static __u64 jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt) |
203 | __u64 *folded, __u64 loop_cnt) | ||
204 | { | 186 | { |
205 | unsigned int i; | 187 | unsigned int i; |
206 | __u64 j = 0; | 188 | __u64 j = 0; |
@@ -217,15 +199,34 @@ static __u64 jent_fold_time(struct rand_data *ec, __u64 time, | |||
217 | if (loop_cnt) | 199 | if (loop_cnt) |
218 | fold_loop_cnt = loop_cnt; | 200 | fold_loop_cnt = loop_cnt; |
219 | for (j = 0; j < fold_loop_cnt; j++) { | 201 | for (j = 0; j < fold_loop_cnt; j++) { |
220 | new = 0; | 202 | new = ec->data; |
221 | for (i = 1; (DATA_SIZE_BITS) >= i; i++) { | 203 | for (i = 1; (DATA_SIZE_BITS) >= i; i++) { |
222 | __u64 tmp = time << (DATA_SIZE_BITS - i); | 204 | __u64 tmp = time << (DATA_SIZE_BITS - i); |
223 | 205 | ||
224 | tmp = tmp >> (DATA_SIZE_BITS - 1); | 206 | tmp = tmp >> (DATA_SIZE_BITS - 1); |
207 | |||
208 | /* | ||
209 | * Fibonacci LFSR with polynomial of | ||
210 | * x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is | ||
211 | * primitive according to | ||
212 | * http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf | ||
213 | * (the shift values are the polynomial values minus one | ||
214 | * due to counting bits from 0 to 63). As the current | ||
215 | * position is always the LSB, the polynomial only needs | ||
216 | * to shift data in from the left without wrap. | ||
217 | */ | ||
218 | tmp ^= ((new >> 63) & 1); | ||
219 | tmp ^= ((new >> 60) & 1); | ||
220 | tmp ^= ((new >> 55) & 1); | ||
221 | tmp ^= ((new >> 30) & 1); | ||
222 | tmp ^= ((new >> 27) & 1); | ||
223 | tmp ^= ((new >> 22) & 1); | ||
224 | new <<= 1; | ||
225 | new ^= tmp; | 225 | new ^= tmp; |
226 | } | 226 | } |
227 | } | 227 | } |
228 | *folded = new; | 228 | ec->data = new; |
229 | |||
229 | return fold_loop_cnt; | 230 | return fold_loop_cnt; |
230 | } | 231 | } |
231 | 232 | ||
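Taken in isolation, the per-bit LFSR step added above can be modeled in plain user-space C. The tap positions and polynomial are the ones quoted in the new comment; the timing machinery and the entropy-collector state are stripped away, so this only illustrates the mixing, not the noise source:

#include <stdint.h>
#include <stdio.h>

#define DATA_SIZE_BITS 64

/* One call of the Fibonacci LFSR step: inject the bits of 'time'
 * (LSB first) into 'pool', tapping pool bits 63, 60, 55, 30, 27 and 22
 * (polynomial x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1). */
static uint64_t lfsr_time(uint64_t pool, uint64_t time)
{
        unsigned int i;

        for (i = 1; i <= DATA_SIZE_BITS; i++) {
                uint64_t tmp = time << (DATA_SIZE_BITS - i);

                tmp >>= DATA_SIZE_BITS - 1;     /* isolate bit i-1 of time */
                tmp ^= (pool >> 63) & 1;
                tmp ^= (pool >> 60) & 1;
                tmp ^= (pool >> 55) & 1;
                tmp ^= (pool >> 30) & 1;
                tmp ^= (pool >> 27) & 1;
                tmp ^= (pool >> 22) & 1;
                pool = (pool << 1) ^ tmp;
        }
        return pool;
}

int main(void)
{
        printf("%llx\n", (unsigned long long)lfsr_time(0, 0xabcd));
        return 0;
}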
@@ -258,7 +259,6 @@ static __u64 jent_fold_time(struct rand_data *ec, __u64 time, | |||
258 | */ | 259 | */ |
259 | static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt) | 260 | static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt) |
260 | { | 261 | { |
261 | unsigned char *tmpval = NULL; | ||
262 | unsigned int wrap = 0; | 262 | unsigned int wrap = 0; |
263 | __u64 i = 0; | 263 | __u64 i = 0; |
264 | #define MAX_ACC_LOOP_BIT 7 | 264 | #define MAX_ACC_LOOP_BIT 7 |
@@ -278,7 +278,7 @@ static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt) | |||
278 | acc_loop_cnt = loop_cnt; | 278 | acc_loop_cnt = loop_cnt; |
279 | 279 | ||
280 | for (i = 0; i < (ec->memaccessloops + acc_loop_cnt); i++) { | 280 | for (i = 0; i < (ec->memaccessloops + acc_loop_cnt); i++) { |
281 | tmpval = ec->mem + ec->memlocation; | 281 | unsigned char *tmpval = ec->mem + ec->memlocation; |
282 | /* | 282 | /* |
283 | * memory access: just add 1 to one byte, | 283 | * memory access: just add 1 to one byte, |
284 | * wrap at 255 -- memory access implies read | 284 | * wrap at 255 -- memory access implies read |
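The comment spells out the intended access pattern; a rough user-space model of that pattern follows. The buffer, its size, and the index handling are simplified stand-ins -- the real code walks the buffer in block-sized strides -- but the read-add-write with wrap-around is the operation described above:

#include <stddef.h>
#include <stdint.h>

/* Illustration only: one read-modify-write per iteration forces real
 * memory traffic; the byte wraps at 255, the index wraps at memsize. */
static void mem_access(unsigned char *mem, size_t memsize, uint64_t loops)
{
        static size_t location;
        uint64_t i;

        for (i = 0; i < loops; i++) {
                unsigned char *tmpval = mem + location;

                *tmpval = (unsigned char)((*tmpval + 1) & 0xff);
                location = (location + 1) % memsize;
        }
}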
@@ -316,7 +316,7 @@ static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt) | |||
316 | * 0 jitter measurement not stuck (good bit) | 316 | * 0 jitter measurement not stuck (good bit) |
317 | * 1 jitter measurement stuck (reject bit) | 317 | * 1 jitter measurement stuck (reject bit) |
318 | */ | 318 | */ |
319 | static void jent_stuck(struct rand_data *ec, __u64 current_delta) | 319 | static int jent_stuck(struct rand_data *ec, __u64 current_delta) |
320 | { | 320 | { |
321 | __s64 delta2 = ec->last_delta - current_delta; | 321 | __s64 delta2 = ec->last_delta - current_delta; |
322 | __s64 delta3 = delta2 - ec->last_delta2; | 322 | __s64 delta3 = delta2 - ec->last_delta2; |
@@ -325,14 +325,15 @@ static void jent_stuck(struct rand_data *ec, __u64 current_delta) | |||
325 | ec->last_delta2 = delta2; | 325 | ec->last_delta2 = delta2; |
326 | 326 | ||
327 | if (!current_delta || !delta2 || !delta3) | 327 | if (!current_delta || !delta2 || !delta3) |
328 | ec->stuck = 1; | 328 | return 1; |
329 | |||
330 | return 0; | ||
329 | } | 331 | } |
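With the return type changed, the stuck test hands its verdict straight back to the caller. The check itself is just the first, second and third discrete derivatives of the time stamp sequence; a self-contained sketch with the collector state reduced to the two fields the test needs:

#include <stdint.h>

struct stuck_state {
        uint64_t last_delta;
        int64_t  last_delta2;
};

/* Return 1 ("stuck") if the first, second or third derivative of the
 * time deltas is zero, 0 otherwise. */
static int stuck(struct stuck_state *s, uint64_t current_delta)
{
        int64_t delta2 = (int64_t)(s->last_delta - current_delta);
        int64_t delta3 = delta2 - s->last_delta2;

        s->last_delta = current_delta;
        s->last_delta2 = delta2;

        if (!current_delta || !delta2 || !delta3)
                return 1;

        return 0;
}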
330 | 332 | ||
331 | /** | 333 | /** |
332 | * This is the heart of the entropy generation: calculate time deltas and | 334 | * This is the heart of the entropy generation: calculate time deltas and |
333 | * use the CPU jitter in the time deltas. The jitter is folded into one | 335 | * use the CPU jitter in the time deltas. The jitter is injected into the |
334 | * bit. You can call this function the "random bit generator" as it | 336 | * entropy pool. |
335 | * produces one random bit per invocation. | ||
336 | * | 337 | * |
337 | * WARNING: ensure that ->prev_time is primed before using the output | 338 | * WARNING: ensure that ->prev_time is primed before using the output |
338 | * of this function! This can be done by calling this function | 339 | * of this function! This can be done by calling this function |
@@ -341,12 +342,11 @@ static void jent_stuck(struct rand_data *ec, __u64 current_delta) | |||
341 | * Input: | 342 | * Input: |
342 | * @entropy_collector Reference to entropy collector | 343 | * @entropy_collector Reference to entropy collector |
343 | * | 344 | * |
344 | * @return One random bit | 345 | * @return result of stuck test |
345 | */ | 346 | */ |
346 | static __u64 jent_measure_jitter(struct rand_data *ec) | 347 | static int jent_measure_jitter(struct rand_data *ec) |
347 | { | 348 | { |
348 | __u64 time = 0; | 349 | __u64 time = 0; |
349 | __u64 data = 0; | ||
350 | __u64 current_delta = 0; | 350 | __u64 current_delta = 0; |
351 | 351 | ||
352 | /* Invoke one noise source before time measurement to add variations */ | 352 | /* Invoke one noise source before time measurement to add variations */ |
@@ -360,109 +360,11 @@ static __u64 jent_measure_jitter(struct rand_data *ec) | |||
360 | current_delta = time - ec->prev_time; | 360 | current_delta = time - ec->prev_time; |
361 | ec->prev_time = time; | 361 | ec->prev_time = time; |
362 | 362 | ||
363 | /* Now call the next noise source which also folds the data */ | 363 | /* Now call the next noise source which also injects the data */ |
364 | jent_fold_time(ec, current_delta, &data, 0); | 364 | jent_lfsr_time(ec, current_delta, 0); |
365 | |||
366 | /* | ||
367 | * Check whether we have a stuck measurement. The enforcement | ||
368 | * is performed after the stuck value has been mixed into the | ||
369 | * entropy pool. | ||
370 | */ | ||
371 | jent_stuck(ec, current_delta); | ||
372 | |||
373 | return data; | ||
374 | } | ||
375 | |||
376 | /** | ||
377 | * Von Neumann unbias as explained in RFC 4086 section 4.2. As shown in the | ||
378 | * documentation of that RNG, the bits from jent_measure_jitter are considered | ||
379 | * independent, which implies that the Von Neumann unbias operation is applicable. | ||
380 | * A proof of the Von Neumann unbias operation to remove skews is given in the | ||
381 | * document "A proposal for: Functionality classes for random number | ||
382 | * generators", version 2.0 by Werner Schindler, section 5.4.1. | ||
383 | * | ||
384 | * Input: | ||
385 | * @entropy_collector Reference to entropy collector | ||
386 | * | ||
387 | * @return One random bit | ||
388 | */ | ||
389 | static __u64 jent_unbiased_bit(struct rand_data *entropy_collector) | ||
390 | { | ||
391 | do { | ||
392 | __u64 a = jent_measure_jitter(entropy_collector); | ||
393 | __u64 b = jent_measure_jitter(entropy_collector); | ||
394 | |||
395 | if (a == b) | ||
396 | continue; | ||
397 | if (1 == a) | ||
398 | return 1; | ||
399 | else | ||
400 | return 0; | ||
401 | } while (1); | ||
402 | } | ||
403 | |||
404 | /** | ||
405 | * Shuffle the pool a bit by mixing some value with a bijective function (XOR) | ||
406 | * into the pool. | ||
407 | * | ||
408 | * The function generates a mixer value that depends on the bits set and the | ||
409 | * location of the set bits in the random number generated by the entropy | ||
410 | * source. Therefore, based on the generated random number, this mixer value | ||
411 | * can have 2**64 different values. That mixer value is initialized with the | ||
412 | * first two SHA-1 constants. After obtaining the mixer value, it is XORed into | ||
413 | * the random number. | ||
414 | * | ||
415 | * The mixer value is not assumed to contain any entropy. But due to the XOR | ||
416 | * operation, it can also not destroy any entropy present in the entropy pool. | ||
417 | * | ||
418 | * Input: | ||
419 | * @entropy_collector Reference to entropy collector | ||
420 | */ | ||
421 | static void jent_stir_pool(struct rand_data *entropy_collector) | ||
422 | { | ||
423 | /* | ||
424 | * to shut up GCC on 32 bit, we have to initialize the 64 bit variable | ||
425 | * with two 32 bit variables | ||
426 | */ | ||
427 | union c { | ||
428 | __u64 u64; | ||
429 | __u32 u32[2]; | ||
430 | }; | ||
431 | /* | ||
432 | * This constant is derived from the first two 32 bit initialization | ||
433 | * vectors of SHA-1 as defined in FIPS 180-4 section 5.3.1 | ||
434 | */ | ||
435 | union c constant; | ||
436 | /* | ||
437 | * The start value of the mixer variable is derived from the third | ||
438 | * and fourth 32 bit initialization vector of SHA-1 as defined in | ||
439 | * FIPS 180-4 section 5.3.1 | ||
440 | */ | ||
441 | union c mixer; | ||
442 | unsigned int i = 0; | ||
443 | |||
444 | /* | ||
445 | * Store the SHA-1 constants in reverse order to make up the 64 bit | ||
446 | * value -- this applies to a little endian system; on a big endian | ||
447 | * system, it reverses as expected. But this really does not matter | ||
448 | * as we do not rely on the specific numbers. We just pick the SHA-1 | ||
449 | * constants as they have a good mix of set and unset bits. | ||
450 | */ | ||
451 | constant.u32[1] = 0x67452301; | ||
452 | constant.u32[0] = 0xefcdab89; | ||
453 | mixer.u32[1] = 0x98badcfe; | ||
454 | mixer.u32[0] = 0x10325476; | ||
455 | 365 | ||
456 | for (i = 0; i < DATA_SIZE_BITS; i++) { | 366 | /* Check whether we have a stuck measurement. */ |
457 | /* | 367 | return jent_stuck(ec, current_delta); |
458 | * get the i-th bit of the input random number and only XOR | ||
459 | * the constant into the mixer value when that bit is set | ||
460 | */ | ||
461 | if ((entropy_collector->data >> i) & 1) | ||
462 | mixer.u64 ^= constant.u64; | ||
463 | mixer.u64 = jent_rol64(mixer.u64, 1); | ||
464 | } | ||
465 | entropy_collector->data ^= mixer.u64; | ||
466 | } | 368 | } |
467 | 369 | ||
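For reference, the jent_unbiased_bit() removed above implemented the textbook von Neumann extractor: sample bits in pairs, drop equal pairs, and emit the first bit of an unequal pair. A minimal sketch of that extractor, with get_bit() standing in for the old one-bit output of jent_measure_jitter():

/* Classic von Neumann extractor over a biased one-bit source; 01 and
 * 10 pairs are equally likely, so returning 'a' removes the bias. */
static unsigned int unbiased_bit(unsigned int (*get_bit)(void))
{
        for (;;) {
                unsigned int a = get_bit();
                unsigned int b = get_bit();

                if (a != b)
                        return a;
        }
}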
468 | /** | 370 | /** |
@@ -480,48 +382,9 @@ static void jent_gen_entropy(struct rand_data *ec) | |||
480 | jent_measure_jitter(ec); | 382 | jent_measure_jitter(ec); |
481 | 383 | ||
482 | while (1) { | 384 | while (1) { |
483 | __u64 data = 0; | 385 | /* If a stuck measurement is received, repeat measurement */ |
484 | 386 | if (jent_measure_jitter(ec)) | |
485 | if (ec->disable_unbias == 1) | ||
486 | data = jent_measure_jitter(ec); | ||
487 | else | ||
488 | data = jent_unbiased_bit(ec); | ||
489 | |||
490 | /* enforcement of the jent_stuck test */ | ||
491 | if (ec->stuck) { | ||
492 | /* | ||
493 | * We only mix in the bit considered not appropriate | ||
494 | * without the LFSR. The reason is that if we apply | ||
495 | * the LFSR and we do not rotate, the 2nd bit with LFSR | ||
496 | * will cancel out the first LFSR application on the | ||
497 | * bad bit. | ||
498 | * | ||
499 | * And we do not rotate as we apply the next bit to the | ||
500 | * current bit location again. | ||
501 | */ | ||
502 | ec->data ^= data; | ||
503 | ec->stuck = 0; | ||
504 | continue; | 387 | continue; |
505 | } | ||
506 | |||
507 | /* | ||
508 | * Fibonacci LFSR with polynomial of | ||
509 | * x^64 + x^61 + x^56 + x^31 + x^28 + x^23 + 1 which is | ||
510 | * primitive according to | ||
511 | * http://poincare.matf.bg.ac.rs/~ezivkovm/publications/primpol1.pdf | ||
512 | * (the shift values are the polynomial values minus one | ||
513 | * due to counting bits from 0 to 63). As the current | ||
514 | * position is always the LSB, the polynomial only needs | ||
515 | * to shift data in from the left without wrap. | ||
516 | */ | ||
517 | ec->data ^= data; | ||
518 | ec->data ^= ((ec->data >> 63) & 1); | ||
519 | ec->data ^= ((ec->data >> 60) & 1); | ||
520 | ec->data ^= ((ec->data >> 55) & 1); | ||
521 | ec->data ^= ((ec->data >> 30) & 1); | ||
522 | ec->data ^= ((ec->data >> 27) & 1); | ||
523 | ec->data ^= ((ec->data >> 22) & 1); | ||
524 | ec->data = jent_rol64(ec->data, 1); | ||
525 | 388 | ||
526 | /* | 389 | /* |
527 | * We multiply the loop value with ->osr to obtain the | 390 | * We multiply the loop value with ->osr to obtain the |
@@ -530,8 +393,6 @@ static void jent_gen_entropy(struct rand_data *ec) | |||
530 | if (++k >= (DATA_SIZE_BITS * ec->osr)) | 393 | if (++k >= (DATA_SIZE_BITS * ec->osr)) |
531 | break; | 394 | break; |
532 | } | 395 | } |
533 | if (ec->stir) | ||
534 | jent_stir_pool(ec); | ||
535 | } | 396 | } |
536 | 397 | ||
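Since jent_measure_jitter() now returns the stuck verdict itself, the collection loop reduces to: measure, retry on stuck, stop after DATA_SIZE_BITS * osr good rounds. A schematic user-space rendering of that control flow, with measure() as a placeholder for the real measurement:

#define DATA_SIZE_BITS 64

/* Skeleton of the collection loop: stuck measurements are repeated,
 * only good ones count toward the oversampled total. */
static void gen_entropy(unsigned int osr, int (*measure)(void))
{
        unsigned int k = 0;

        while (1) {
                /* If a stuck measurement is received, repeat it */
                if (measure())
                        continue;

                if (++k >= (DATA_SIZE_BITS * osr))
                        break;
        }
}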
537 | /** | 398 | /** |
@@ -639,12 +500,6 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr, | |||
639 | osr = 1; /* minimum sampling rate is 1 */ | 500 | osr = 1; /* minimum sampling rate is 1 */ |
640 | entropy_collector->osr = osr; | 501 | entropy_collector->osr = osr; |
641 | 502 | ||
642 | entropy_collector->stir = 1; | ||
643 | if (flags & JENT_DISABLE_STIR) | ||
644 | entropy_collector->stir = 0; | ||
645 | if (flags & JENT_DISABLE_UNBIAS) | ||
646 | entropy_collector->disable_unbias = 1; | ||
647 | |||
648 | /* fill the data pad with non-zero values */ | 503 | /* fill the data pad with non-zero values */ |
649 | jent_gen_entropy(entropy_collector); | 504 | jent_gen_entropy(entropy_collector); |
650 | 505 | ||
@@ -656,7 +511,6 @@ void jent_entropy_collector_free(struct rand_data *entropy_collector) | |||
656 | jent_zfree(entropy_collector->mem); | 511 | jent_zfree(entropy_collector->mem); |
657 | entropy_collector->mem = NULL; | 512 | entropy_collector->mem = NULL; |
658 | jent_zfree(entropy_collector); | 513 | jent_zfree(entropy_collector); |
659 | entropy_collector = NULL; | ||
660 | } | 514 | } |
661 | 515 | ||
662 | int jent_entropy_init(void) | 516 | int jent_entropy_init(void) |
@@ -665,8 +519,9 @@ int jent_entropy_init(void) | |||
665 | __u64 delta_sum = 0; | 519 | __u64 delta_sum = 0; |
666 | __u64 old_delta = 0; | 520 | __u64 old_delta = 0; |
667 | int time_backwards = 0; | 521 | int time_backwards = 0; |
668 | int count_var = 0; | ||
669 | int count_mod = 0; | 522 | int count_mod = 0; |
523 | int count_stuck = 0; | ||
524 | struct rand_data ec = { 0 }; | ||
670 | 525 | ||
671 | /* We could perform statistical tests here, but the problem is | 526 | /* We could perform statistical tests here, but the problem is |
672 | * that we only have a few loop counts to do testing. These | 527 | * that we only have a few loop counts to do testing. These |
@@ -695,12 +550,14 @@ int jent_entropy_init(void) | |||
695 | for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) { | 550 | for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) { |
696 | __u64 time = 0; | 551 | __u64 time = 0; |
697 | __u64 time2 = 0; | 552 | __u64 time2 = 0; |
698 | __u64 folded = 0; | ||
699 | __u64 delta = 0; | 553 | __u64 delta = 0; |
700 | unsigned int lowdelta = 0; | 554 | unsigned int lowdelta = 0; |
555 | int stuck; | ||
701 | 556 | ||
557 | /* Invoke core entropy collection logic */ | ||
702 | jent_get_nstime(&time); | 558 | jent_get_nstime(&time); |
703 | jent_fold_time(NULL, time, &folded, 1<<MIN_FOLD_LOOP_BIT); | 559 | ec.prev_time = time; |
560 | jent_lfsr_time(&ec, time, 0); | ||
704 | jent_get_nstime(&time2); | 561 | jent_get_nstime(&time2); |
705 | 562 | ||
706 | /* test whether timer works */ | 563 | /* test whether timer works */ |
@@ -715,6 +572,8 @@ int jent_entropy_init(void) | |||
715 | if (!delta) | 572 | if (!delta) |
716 | return JENT_ECOARSETIME; | 573 | return JENT_ECOARSETIME; |
717 | 574 | ||
575 | stuck = jent_stuck(&ec, delta); | ||
576 | |||
718 | /* | 577 | /* |
719 | * up to here we did not modify any variable that will be | 578 | * up to here we did not modify any variable that will be |
720 | * evaluated later, but we already performed some work. Thus we | 579 | * evaluated later, but we already performed some work. Thus we |
@@ -725,14 +584,14 @@ int jent_entropy_init(void) | |||
725 | if (CLEARCACHE > i) | 584 | if (CLEARCACHE > i) |
726 | continue; | 585 | continue; |
727 | 586 | ||
587 | if (stuck) | ||
588 | count_stuck++; | ||
589 | |||
728 | /* test whether we have an increasing timer */ | 590 | /* test whether we have an increasing timer */ |
729 | if (!(time2 > time)) | 591 | if (!(time2 > time)) |
730 | time_backwards++; | 592 | time_backwards++; |
731 | 593 | ||
732 | /* | 594 | /* use 32 bit value to ensure compilation on 32 bit arches */ |
733 | * Avoid modulo of 64 bit integer to allow code to compile | ||
734 | * on 32 bit architectures. | ||
735 | */ | ||
736 | lowdelta = time2 - time; | 595 | lowdelta = time2 - time; |
737 | if (!(lowdelta % 100)) | 596 | if (!(lowdelta % 100)) |
738 | count_mod++; | 597 | count_mod++; |
@@ -743,14 +602,10 @@ int jent_entropy_init(void) | |||
743 | * only after the first loop is executed as we need to prime | 602 | * only after the first loop is executed as we need to prime |
744 | * the old_data value | 603 | * the old_data value |
745 | */ | 604 | */ |
746 | if (i) { | 605 | if (delta > old_delta) |
747 | if (delta != old_delta) | 606 | delta_sum += (delta - old_delta); |
748 | count_var++; | 607 | else |
749 | if (delta > old_delta) | 608 | delta_sum += (old_delta - delta); |
750 | delta_sum += (delta - old_delta); | ||
751 | else | ||
752 | delta_sum += (old_delta - delta); | ||
753 | } | ||
754 | old_delta = delta; | 609 | old_delta = delta; |
755 | } | 610 | } |
756 | 611 | ||
@@ -763,25 +618,29 @@ int jent_entropy_init(void) | |||
763 | */ | 618 | */ |
764 | if (3 < time_backwards) | 619 | if (3 < time_backwards) |
765 | return JENT_ENOMONOTONIC; | 620 | return JENT_ENOMONOTONIC; |
766 | /* Error if the time variances are always identical */ | ||
767 | if (!delta_sum) | ||
768 | return JENT_EVARVAR; | ||
769 | 621 | ||
770 | /* | 622 | /* |
771 | * Variations of deltas of time must on average be larger | 623 | * Variations of deltas of time must on average be larger |
772 | * than 1 to ensure the entropy estimation | 624 | * than 1 to ensure the entropy estimation |
773 | * implied with 1 is preserved | 625 | * implied with 1 is preserved |
774 | */ | 626 | */ |
775 | if (delta_sum <= 1) | 627 | if (delta_sum <= 1) |
776 | return JENT_EMINVARVAR; | 628 | return JENT_EVARVAR; |
777 | 629 | ||
778 | /* | 630 | /* |
779 | * Ensure that we have variations in the time stamp below 10 for at | 631 | * Ensure that we have variations in the time stamp below 10 for at |
780 | * least 10% of all checks -- on some platforms, the counter | 632 | * least 10% of all checks -- on some platforms, the counter increments |
781 | * increments in multiples of 100, but not always | 633 | * in multiples of 100, but not always |
782 | */ | 634 | */ |
783 | if ((TESTLOOPCOUNT/10 * 9) < count_mod) | 635 | if ((TESTLOOPCOUNT/10 * 9) < count_mod) |
784 | return JENT_ECOARSETIME; | 636 | return JENT_ECOARSETIME; |
785 | 637 | ||
638 | /* | ||
639 | * If we have more than 90% stuck results, then this Jitter RNG is | ||
640 | * likely to not work well. | ||
641 | */ | ||
642 | if ((TESTLOOPCOUNT/10 * 9) < count_stuck) | ||
643 | return JENT_ESTUCK; | ||
644 | |||
786 | return 0; | 645 | return 0; |
787 | } | 646 | } |
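The power-up test now tallies stuck results next to the existing timer checks, and every threshold is a fraction of TESTLOOPCOUNT. A compact sketch of just the final decision logic; the TESTLOOPCOUNT value and the error-code values are placeholders, since both are defined elsewhere in the file:

#include <stdint.h>

#define TESTLOOPCOUNT 300       /* placeholder; defined elsewhere */

enum {                          /* placeholder values */
        JENT_OK = 0,
        JENT_ENOMONOTONIC,
        JENT_EVARVAR,
        JENT_ECOARSETIME,
        JENT_ESTUCK,
};

static int evaluate(int time_backwards, uint64_t delta_sum,
                    int count_mod, int count_stuck)
{
        if (3 < time_backwards)         /* timer went backwards too often */
                return JENT_ENOMONOTONIC;
        if (delta_sum <= 1)             /* deltas barely vary */
                return JENT_EVARVAR;
        if ((TESTLOOPCOUNT / 10 * 9) < count_mod)    /* >90% coarse ticks */
                return JENT_ECOARSETIME;
        if ((TESTLOOPCOUNT / 10 * 9) < count_stuck)  /* >90% stuck */
                return JENT_ESTUCK;
        return JENT_OK;
}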
diff --git a/crypto/khazad.c b/crypto/khazad.c index b50aa8a3ab4c..14ca7f1631c7 100644 --- a/crypto/khazad.c +++ b/crypto/khazad.c | |||
@@ -848,6 +848,7 @@ static void khazad_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | |||
848 | 848 | ||
849 | static struct crypto_alg khazad_alg = { | 849 | static struct crypto_alg khazad_alg = { |
850 | .cra_name = "khazad", | 850 | .cra_name = "khazad", |
851 | .cra_driver_name = "khazad-generic", | ||
851 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 852 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, |
852 | .cra_blocksize = KHAZAD_BLOCK_SIZE, | 853 | .cra_blocksize = KHAZAD_BLOCK_SIZE, |
853 | .cra_ctxsize = sizeof (struct khazad_ctx), | 854 | .cra_ctxsize = sizeof (struct khazad_ctx), |
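This hunk and the similar ones below all make the same one-line change: each generic implementation gains an explicit "<algorithm>-generic" driver name. The shape of the change, shown with a hypothetical "example" algorithm and only the fields relevant here:

static struct crypto_alg example_alg = {
        .cra_name        = "example",           /* algorithm name */
        .cra_driver_name = "example-generic",   /* explicit driver name */
        .cra_flags       = CRYPTO_ALG_TYPE_CIPHER,
        /* ... remaining fields as in the hunks above and below ... */
        .cra_module      = THIS_MODULE,
};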
diff --git a/crypto/lrw.c b/crypto/lrw.c index 58009cf63a6e..be829f6afc8e 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c | |||
@@ -384,7 +384,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) | |||
384 | inst->alg.base.cra_priority = alg->base.cra_priority; | 384 | inst->alg.base.cra_priority = alg->base.cra_priority; |
385 | inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE; | 385 | inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE; |
386 | inst->alg.base.cra_alignmask = alg->base.cra_alignmask | | 386 | inst->alg.base.cra_alignmask = alg->base.cra_alignmask | |
387 | (__alignof__(__be32) - 1); | 387 | (__alignof__(be128) - 1); |
388 | 388 | ||
389 | inst->alg.ivsize = LRW_BLOCK_SIZE; | 389 | inst->alg.ivsize = LRW_BLOCK_SIZE; |
390 | inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + | 390 | inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + |
diff --git a/crypto/lz4.c b/crypto/lz4.c index 54673cf88385..0606f8862e78 100644 --- a/crypto/lz4.c +++ b/crypto/lz4.c | |||
@@ -106,6 +106,7 @@ static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, | |||
106 | 106 | ||
107 | static struct crypto_alg alg_lz4 = { | 107 | static struct crypto_alg alg_lz4 = { |
108 | .cra_name = "lz4", | 108 | .cra_name = "lz4", |
109 | .cra_driver_name = "lz4-generic", | ||
109 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | 110 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, |
110 | .cra_ctxsize = sizeof(struct lz4_ctx), | 111 | .cra_ctxsize = sizeof(struct lz4_ctx), |
111 | .cra_module = THIS_MODULE, | 112 | .cra_module = THIS_MODULE, |
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c index daae7bfb385d..d7cc94aa2fcf 100644 --- a/crypto/lz4hc.c +++ b/crypto/lz4hc.c | |||
@@ -107,6 +107,7 @@ static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, | |||
107 | 107 | ||
108 | static struct crypto_alg alg_lz4hc = { | 108 | static struct crypto_alg alg_lz4hc = { |
109 | .cra_name = "lz4hc", | 109 | .cra_name = "lz4hc", |
110 | .cra_driver_name = "lz4hc-generic", | ||
110 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | 111 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, |
111 | .cra_ctxsize = sizeof(struct lz4hc_ctx), | 112 | .cra_ctxsize = sizeof(struct lz4hc_ctx), |
112 | .cra_module = THIS_MODULE, | 113 | .cra_module = THIS_MODULE, |
diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c index c4303e96f2b1..0631d975bfac 100644 --- a/crypto/lzo-rle.c +++ b/crypto/lzo-rle.c | |||
@@ -109,6 +109,7 @@ static int lzorle_sdecompress(struct crypto_scomp *tfm, const u8 *src, | |||
109 | 109 | ||
110 | static struct crypto_alg alg = { | 110 | static struct crypto_alg alg = { |
111 | .cra_name = "lzo-rle", | 111 | .cra_name = "lzo-rle", |
112 | .cra_driver_name = "lzo-rle-generic", | ||
112 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | 113 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, |
113 | .cra_ctxsize = sizeof(struct lzorle_ctx), | 114 | .cra_ctxsize = sizeof(struct lzorle_ctx), |
114 | .cra_module = THIS_MODULE, | 115 | .cra_module = THIS_MODULE, |
diff --git a/crypto/lzo.c b/crypto/lzo.c index 97051a2ca08e..ebda132dd22b 100644 --- a/crypto/lzo.c +++ b/crypto/lzo.c | |||
@@ -109,6 +109,7 @@ static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src, | |||
109 | 109 | ||
110 | static struct crypto_alg alg = { | 110 | static struct crypto_alg alg = { |
111 | .cra_name = "lzo", | 111 | .cra_name = "lzo", |
112 | .cra_driver_name = "lzo-generic", | ||
112 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | 113 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, |
113 | .cra_ctxsize = sizeof(struct lzo_ctx), | 114 | .cra_ctxsize = sizeof(struct lzo_ctx), |
114 | .cra_module = THIS_MODULE, | 115 | .cra_module = THIS_MODULE, |
diff --git a/crypto/md4.c b/crypto/md4.c index 9a1a228a0c69..2e7f2f319f95 100644 --- a/crypto/md4.c +++ b/crypto/md4.c | |||
@@ -216,9 +216,10 @@ static struct shash_alg alg = { | |||
216 | .final = md4_final, | 216 | .final = md4_final, |
217 | .descsize = sizeof(struct md4_ctx), | 217 | .descsize = sizeof(struct md4_ctx), |
218 | .base = { | 218 | .base = { |
219 | .cra_name = "md4", | 219 | .cra_name = "md4", |
220 | .cra_blocksize = MD4_HMAC_BLOCK_SIZE, | 220 | .cra_driver_name = "md4-generic", |
221 | .cra_module = THIS_MODULE, | 221 | .cra_blocksize = MD4_HMAC_BLOCK_SIZE, |
222 | .cra_module = THIS_MODULE, | ||
222 | } | 223 | } |
223 | }; | 224 | }; |
224 | 225 | ||
diff --git a/crypto/md5.c b/crypto/md5.c index 221c2c0932f8..22dc60bc0437 100644 --- a/crypto/md5.c +++ b/crypto/md5.c | |||
@@ -228,9 +228,10 @@ static struct shash_alg alg = { | |||
228 | .descsize = sizeof(struct md5_state), | 228 | .descsize = sizeof(struct md5_state), |
229 | .statesize = sizeof(struct md5_state), | 229 | .statesize = sizeof(struct md5_state), |
230 | .base = { | 230 | .base = { |
231 | .cra_name = "md5", | 231 | .cra_name = "md5", |
232 | .cra_blocksize = MD5_HMAC_BLOCK_SIZE, | 232 | .cra_driver_name = "md5-generic", |
233 | .cra_module = THIS_MODULE, | 233 | .cra_blocksize = MD5_HMAC_BLOCK_SIZE, |
234 | .cra_module = THIS_MODULE, | ||
234 | } | 235 | } |
235 | }; | 236 | }; |
236 | 237 | ||
diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c index b3d83ff709d3..20e6220f46f6 100644 --- a/crypto/michael_mic.c +++ b/crypto/michael_mic.c | |||
@@ -156,6 +156,7 @@ static struct shash_alg alg = { | |||
156 | .descsize = sizeof(struct michael_mic_desc_ctx), | 156 | .descsize = sizeof(struct michael_mic_desc_ctx), |
157 | .base = { | 157 | .base = { |
158 | .cra_name = "michael_mic", | 158 | .cra_name = "michael_mic", |
159 | .cra_driver_name = "michael_mic-generic", | ||
159 | .cra_blocksize = 8, | 160 | .cra_blocksize = 8, |
160 | .cra_alignmask = 3, | 161 | .cra_alignmask = 3, |
161 | .cra_ctxsize = sizeof(struct michael_mic_ctx), | 162 | .cra_ctxsize = sizeof(struct michael_mic_ctx), |
diff --git a/crypto/rmd128.c b/crypto/rmd128.c index d6c031a9fd14..29308fb97e7e 100644 --- a/crypto/rmd128.c +++ b/crypto/rmd128.c | |||
@@ -298,6 +298,7 @@ static struct shash_alg alg = { | |||
298 | .descsize = sizeof(struct rmd128_ctx), | 298 | .descsize = sizeof(struct rmd128_ctx), |
299 | .base = { | 299 | .base = { |
300 | .cra_name = "rmd128", | 300 | .cra_name = "rmd128", |
301 | .cra_driver_name = "rmd128-generic", | ||
301 | .cra_blocksize = RMD128_BLOCK_SIZE, | 302 | .cra_blocksize = RMD128_BLOCK_SIZE, |
302 | .cra_module = THIS_MODULE, | 303 | .cra_module = THIS_MODULE, |
303 | } | 304 | } |
diff --git a/crypto/rmd160.c b/crypto/rmd160.c index f3add4d54a22..c5fe4034b153 100644 --- a/crypto/rmd160.c +++ b/crypto/rmd160.c | |||
@@ -342,6 +342,7 @@ static struct shash_alg alg = { | |||
342 | .descsize = sizeof(struct rmd160_ctx), | 342 | .descsize = sizeof(struct rmd160_ctx), |
343 | .base = { | 343 | .base = { |
344 | .cra_name = "rmd160", | 344 | .cra_name = "rmd160", |
345 | .cra_driver_name = "rmd160-generic", | ||
345 | .cra_blocksize = RMD160_BLOCK_SIZE, | 346 | .cra_blocksize = RMD160_BLOCK_SIZE, |
346 | .cra_module = THIS_MODULE, | 347 | .cra_module = THIS_MODULE, |
347 | } | 348 | } |
diff --git a/crypto/rmd256.c b/crypto/rmd256.c index 79ca3029848f..3c730e9de5fd 100644 --- a/crypto/rmd256.c +++ b/crypto/rmd256.c | |||
@@ -317,6 +317,7 @@ static struct shash_alg alg = { | |||
317 | .descsize = sizeof(struct rmd256_ctx), | 317 | .descsize = sizeof(struct rmd256_ctx), |
318 | .base = { | 318 | .base = { |
319 | .cra_name = "rmd256", | 319 | .cra_name = "rmd256", |
320 | .cra_driver_name = "rmd256-generic", | ||
320 | .cra_blocksize = RMD256_BLOCK_SIZE, | 321 | .cra_blocksize = RMD256_BLOCK_SIZE, |
321 | .cra_module = THIS_MODULE, | 322 | .cra_module = THIS_MODULE, |
322 | } | 323 | } |
diff --git a/crypto/rmd320.c b/crypto/rmd320.c index b2392ef7467b..c919ad6c4705 100644 --- a/crypto/rmd320.c +++ b/crypto/rmd320.c | |||
@@ -366,6 +366,7 @@ static struct shash_alg alg = { | |||
366 | .descsize = sizeof(struct rmd320_ctx), | 366 | .descsize = sizeof(struct rmd320_ctx), |
367 | .base = { | 367 | .base = { |
368 | .cra_name = "rmd320", | 368 | .cra_name = "rmd320", |
369 | .cra_driver_name = "rmd320-generic", | ||
369 | .cra_blocksize = RMD320_BLOCK_SIZE, | 370 | .cra_blocksize = RMD320_BLOCK_SIZE, |
370 | .cra_module = THIS_MODULE, | 371 | .cra_module = THIS_MODULE, |
371 | } | 372 | } |
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c index 16f612b6dbca..56fa665a4f01 100644 --- a/crypto/serpent_generic.c +++ b/crypto/serpent_generic.c | |||
@@ -225,7 +225,13 @@ | |||
225 | x4 ^= x2; \ | 225 | x4 ^= x2; \ |
226 | }) | 226 | }) |
227 | 227 | ||
228 | static void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2, u32 r3, u32 r4, u32 *k) | 228 | /* |
229 | * both gcc and clang have misoptimized this function in the past, | ||
230 | * producing horrible object code from spilling temporary variables | ||
231 | * on the stack. Forcing this part out of line avoids that. | ||
232 | */ | ||
233 | static noinline void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2, | ||
234 | u32 r3, u32 r4, u32 *k) | ||
229 | { | 235 | { |
230 | k += 100; | 236 | k += 100; |
231 | S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24); | 237 | S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24); |
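The keyword used above is the kernel's noinline annotation. The underlying technique -- forcing a register-pressure-heavy helper out of line so the optimizer cannot spill its temporaries into the caller -- can be written with the plain compiler attribute; the helper body here is a stand-in:

/* Keep this helper out of line; inlining it has produced bad spill
 * code in the past (see the comment in the hunk above). */
__attribute__((noinline))
static void heavy_helper(unsigned int r0, unsigned int r1, unsigned int r2,
                         unsigned int r3, unsigned int r4, unsigned int *k)
{
        k[0] = r0 ^ r1 ^ r2 ^ r3 ^ r4;  /* placeholder work */
}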
@@ -637,6 +643,7 @@ static struct crypto_alg srp_algs[2] = { { | |||
637 | .cia_decrypt = serpent_decrypt } } | 643 | .cia_decrypt = serpent_decrypt } } |
638 | }, { | 644 | }, { |
639 | .cra_name = "tnepres", | 645 | .cra_name = "tnepres", |
646 | .cra_driver_name = "tnepres-generic", | ||
640 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 647 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, |
641 | .cra_blocksize = SERPENT_BLOCK_SIZE, | 648 | .cra_blocksize = SERPENT_BLOCK_SIZE, |
642 | .cra_ctxsize = sizeof(struct serpent_ctx), | 649 | .cra_ctxsize = sizeof(struct serpent_ctx), |
diff --git a/crypto/skcipher.c b/crypto/skcipher.c index df735148000f..5d836fc3df3e 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c | |||
@@ -837,6 +837,40 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, | |||
837 | return 0; | 837 | return 0; |
838 | } | 838 | } |
839 | 839 | ||
840 | int crypto_skcipher_encrypt(struct skcipher_request *req) | ||
841 | { | ||
842 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
843 | struct crypto_alg *alg = tfm->base.__crt_alg; | ||
844 | unsigned int cryptlen = req->cryptlen; | ||
845 | int ret; | ||
846 | |||
847 | crypto_stats_get(alg); | ||
848 | if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) | ||
849 | ret = -ENOKEY; | ||
850 | else | ||
851 | ret = tfm->encrypt(req); | ||
852 | crypto_stats_skcipher_encrypt(cryptlen, ret, alg); | ||
853 | return ret; | ||
854 | } | ||
855 | EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt); | ||
856 | |||
857 | int crypto_skcipher_decrypt(struct skcipher_request *req) | ||
858 | { | ||
859 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
860 | struct crypto_alg *alg = tfm->base.__crt_alg; | ||
861 | unsigned int cryptlen = req->cryptlen; | ||
862 | int ret; | ||
863 | |||
864 | crypto_stats_get(alg); | ||
865 | if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) | ||
866 | ret = -ENOKEY; | ||
867 | else | ||
868 | ret = tfm->decrypt(req); | ||
869 | crypto_stats_skcipher_decrypt(cryptlen, ret, alg); | ||
870 | return ret; | ||
871 | } | ||
872 | EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt); | ||
873 | |||
840 | static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm) | 874 | static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm) |
841 | { | 875 | { |
842 | struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); | 876 | struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); |
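The two new out-of-line wrappers record statistics and refuse to run a keyed cipher before a key is set. A hedged sketch of the visible effect from a caller's side, assuming a sleepable kernel context and that a "cbc(aes)" implementation is available; demo_need_key() is an illustrative helper, not part of this patch:

#include <crypto/skcipher.h>
#include <linux/err.h>

/* Sketch: without a prior setkey(), CRYPTO_TFM_NEED_KEY is still set
 * and the encrypt wrapper returns -ENOKEY without dispatching. */
static int demo_need_key(void)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        int err;

        tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                crypto_free_skcipher(tfm);
                return -ENOMEM;
        }

        err = crypto_skcipher_encrypt(req);     /* expected: -ENOKEY */

        skcipher_request_free(req);
        crypto_free_skcipher(tfm);
        return err;
}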
diff --git a/crypto/tea.c b/crypto/tea.c index 37a18a9be2f4..02efc5d81690 100644 --- a/crypto/tea.c +++ b/crypto/tea.c | |||
@@ -216,6 +216,7 @@ static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | |||
216 | 216 | ||
217 | static struct crypto_alg tea_algs[3] = { { | 217 | static struct crypto_alg tea_algs[3] = { { |
218 | .cra_name = "tea", | 218 | .cra_name = "tea", |
219 | .cra_driver_name = "tea-generic", | ||
219 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 220 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, |
220 | .cra_blocksize = TEA_BLOCK_SIZE, | 221 | .cra_blocksize = TEA_BLOCK_SIZE, |
221 | .cra_ctxsize = sizeof (struct tea_ctx), | 222 | .cra_ctxsize = sizeof (struct tea_ctx), |
@@ -229,6 +230,7 @@ static struct crypto_alg tea_algs[3] = { { | |||
229 | .cia_decrypt = tea_decrypt } } | 230 | .cia_decrypt = tea_decrypt } } |
230 | }, { | 231 | }, { |
231 | .cra_name = "xtea", | 232 | .cra_name = "xtea", |
233 | .cra_driver_name = "xtea-generic", | ||
232 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 234 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, |
233 | .cra_blocksize = XTEA_BLOCK_SIZE, | 235 | .cra_blocksize = XTEA_BLOCK_SIZE, |
234 | .cra_ctxsize = sizeof (struct xtea_ctx), | 236 | .cra_ctxsize = sizeof (struct xtea_ctx), |
@@ -242,6 +244,7 @@ static struct crypto_alg tea_algs[3] = { { | |||
242 | .cia_decrypt = xtea_decrypt } } | 244 | .cia_decrypt = xtea_decrypt } } |
243 | }, { | 245 | }, { |
244 | .cra_name = "xeta", | 246 | .cra_name = "xeta", |
247 | .cra_driver_name = "xeta-generic", | ||
245 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | 248 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, |
246 | .cra_blocksize = XTEA_BLOCK_SIZE, | 249 | .cra_blocksize = XTEA_BLOCK_SIZE, |
247 | .cra_ctxsize = sizeof (struct xtea_ctx), | 250 | .cra_ctxsize = sizeof (struct xtea_ctx), |
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 658a7eeebab2..d0b5b33806a6 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
@@ -1032,6 +1032,205 @@ static void crypto_reenable_simd_for_test(void) | |||
1032 | } | 1032 | } |
1033 | #endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ | 1033 | #endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ |
1034 | 1034 | ||
1035 | static int build_hash_sglist(struct test_sglist *tsgl, | ||
1036 | const struct hash_testvec *vec, | ||
1037 | const struct testvec_config *cfg, | ||
1038 | unsigned int alignmask, | ||
1039 | const struct test_sg_division *divs[XBUFSIZE]) | ||
1040 | { | ||
1041 | struct kvec kv; | ||
1042 | struct iov_iter input; | ||
1043 | |||
1044 | kv.iov_base = (void *)vec->plaintext; | ||
1045 | kv.iov_len = vec->psize; | ||
1046 | iov_iter_kvec(&input, WRITE, &kv, 1, vec->psize); | ||
1047 | return build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize, | ||
1048 | &input, divs); | ||
1049 | } | ||
1050 | |||
1051 | static int check_hash_result(const char *type, | ||
1052 | const u8 *result, unsigned int digestsize, | ||
1053 | const struct hash_testvec *vec, | ||
1054 | const char *vec_name, | ||
1055 | const char *driver, | ||
1056 | const struct testvec_config *cfg) | ||
1057 | { | ||
1058 | if (memcmp(result, vec->digest, digestsize) != 0) { | ||
1059 | pr_err("alg: %s: %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n", | ||
1060 | type, driver, vec_name, cfg->name); | ||
1061 | return -EINVAL; | ||
1062 | } | ||
1063 | if (!testmgr_is_poison(&result[digestsize], TESTMGR_POISON_LEN)) { | ||
1064 | pr_err("alg: %s: %s overran result buffer on test vector %s, cfg=\"%s\"\n", | ||
1065 | type, driver, vec_name, cfg->name); | ||
1066 | return -EOVERFLOW; | ||
1067 | } | ||
1068 | return 0; | ||
1069 | } | ||
1070 | |||
1071 | static inline int check_shash_op(const char *op, int err, | ||
1072 | const char *driver, const char *vec_name, | ||
1073 | const struct testvec_config *cfg) | ||
1074 | { | ||
1075 | if (err) | ||
1076 | pr_err("alg: shash: %s %s() failed with err %d on test vector %s, cfg=\"%s\"\n", | ||
1077 | driver, op, err, vec_name, cfg->name); | ||
1078 | return err; | ||
1079 | } | ||
1080 | |||
1081 | static inline const void *sg_data(struct scatterlist *sg) | ||
1082 | { | ||
1083 | return page_address(sg_page(sg)) + sg->offset; | ||
1084 | } | ||
1085 | |||
1086 | /* Test one hash test vector in one configuration, using the shash API */ | ||
1087 | static int test_shash_vec_cfg(const char *driver, | ||
1088 | const struct hash_testvec *vec, | ||
1089 | const char *vec_name, | ||
1090 | const struct testvec_config *cfg, | ||
1091 | struct shash_desc *desc, | ||
1092 | struct test_sglist *tsgl, | ||
1093 | u8 *hashstate) | ||
1094 | { | ||
1095 | struct crypto_shash *tfm = desc->tfm; | ||
1096 | const unsigned int alignmask = crypto_shash_alignmask(tfm); | ||
1097 | const unsigned int digestsize = crypto_shash_digestsize(tfm); | ||
1098 | const unsigned int statesize = crypto_shash_statesize(tfm); | ||
1099 | const struct test_sg_division *divs[XBUFSIZE]; | ||
1100 | unsigned int i; | ||
1101 | u8 result[HASH_MAX_DIGESTSIZE + TESTMGR_POISON_LEN]; | ||
1102 | int err; | ||
1103 | |||
1104 | /* Set the key, if specified */ | ||
1105 | if (vec->ksize) { | ||
1106 | err = crypto_shash_setkey(tfm, vec->key, vec->ksize); | ||
1107 | if (err) { | ||
1108 | if (err == vec->setkey_error) | ||
1109 | return 0; | ||
1110 | pr_err("alg: shash: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n", | ||
1111 | driver, vec_name, vec->setkey_error, err, | ||
1112 | crypto_shash_get_flags(tfm)); | ||
1113 | return err; | ||
1114 | } | ||
1115 | if (vec->setkey_error) { | ||
1116 | pr_err("alg: shash: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n", | ||
1117 | driver, vec_name, vec->setkey_error); | ||
1118 | return -EINVAL; | ||
1119 | } | ||
1120 | } | ||
1121 | |||
1122 | /* Build the scatterlist for the source data */ | ||
1123 | err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs); | ||
1124 | if (err) { | ||
1125 | pr_err("alg: shash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n", | ||
1126 | driver, vec_name, cfg->name); | ||
1127 | return err; | ||
1128 | } | ||
1129 | |||
1130 | /* Do the actual hashing */ | ||
1131 | |||
1132 | testmgr_poison(desc->__ctx, crypto_shash_descsize(tfm)); | ||
1133 | testmgr_poison(result, digestsize + TESTMGR_POISON_LEN); | ||
1134 | |||
1135 | if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST || | ||
1136 | vec->digest_error) { | ||
1137 | /* Just using digest() */ | ||
1138 | if (tsgl->nents != 1) | ||
1139 | return 0; | ||
1140 | if (cfg->nosimd) | ||
1141 | crypto_disable_simd_for_test(); | ||
1142 | err = crypto_shash_digest(desc, sg_data(&tsgl->sgl[0]), | ||
1143 | tsgl->sgl[0].length, result); | ||
1144 | if (cfg->nosimd) | ||
1145 | crypto_reenable_simd_for_test(); | ||
1146 | if (err) { | ||
1147 | if (err == vec->digest_error) | ||
1148 | return 0; | ||
1149 | pr_err("alg: shash: %s digest() failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n", | ||
1150 | driver, vec_name, vec->digest_error, err, | ||
1151 | cfg->name); | ||
1152 | return err; | ||
1153 | } | ||
1154 | if (vec->digest_error) { | ||
1155 | pr_err("alg: shash: %s digest() unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n", | ||
1156 | driver, vec_name, vec->digest_error, cfg->name); | ||
1157 | return -EINVAL; | ||
1158 | } | ||
1159 | goto result_ready; | ||
1160 | } | ||
1161 | |||
1162 | /* Using init(), zero or more update(), then final() or finup() */ | ||
1163 | |||
1164 | if (cfg->nosimd) | ||
1165 | crypto_disable_simd_for_test(); | ||
1166 | err = crypto_shash_init(desc); | ||
1167 | if (cfg->nosimd) | ||
1168 | crypto_reenable_simd_for_test(); | ||
1169 | err = check_shash_op("init", err, driver, vec_name, cfg); | ||
1170 | if (err) | ||
1171 | return err; | ||
1172 | |||
1173 | for (i = 0; i < tsgl->nents; i++) { | ||
1174 | if (i + 1 == tsgl->nents && | ||
1175 | cfg->finalization_type == FINALIZATION_TYPE_FINUP) { | ||
1176 | if (divs[i]->nosimd) | ||
1177 | crypto_disable_simd_for_test(); | ||
1178 | err = crypto_shash_finup(desc, sg_data(&tsgl->sgl[i]), | ||
1179 | tsgl->sgl[i].length, result); | ||
1180 | if (divs[i]->nosimd) | ||
1181 | crypto_reenable_simd_for_test(); | ||
1182 | err = check_shash_op("finup", err, driver, vec_name, | ||
1183 | cfg); | ||
1184 | if (err) | ||
1185 | return err; | ||
1186 | goto result_ready; | ||
1187 | } | ||
1188 | if (divs[i]->nosimd) | ||
1189 | crypto_disable_simd_for_test(); | ||
1190 | err = crypto_shash_update(desc, sg_data(&tsgl->sgl[i]), | ||
1191 | tsgl->sgl[i].length); | ||
1192 | if (divs[i]->nosimd) | ||
1193 | crypto_reenable_simd_for_test(); | ||
1194 | err = check_shash_op("update", err, driver, vec_name, cfg); | ||
1195 | if (err) | ||
1196 | return err; | ||
1197 | if (divs[i]->flush_type == FLUSH_TYPE_REIMPORT) { | ||
1198 | /* Test ->export() and ->import() */ | ||
1199 | testmgr_poison(hashstate + statesize, | ||
1200 | TESTMGR_POISON_LEN); | ||
1201 | err = crypto_shash_export(desc, hashstate); | ||
1202 | err = check_shash_op("export", err, driver, vec_name, | ||
1203 | cfg); | ||
1204 | if (err) | ||
1205 | return err; | ||
1206 | if (!testmgr_is_poison(hashstate + statesize, | ||
1207 | TESTMGR_POISON_LEN)) { | ||
1208 | pr_err("alg: shash: %s export() overran state buffer on test vector %s, cfg=\"%s\"\n", | ||
1209 | driver, vec_name, cfg->name); | ||
1210 | return -EOVERFLOW; | ||
1211 | } | ||
1212 | testmgr_poison(desc->__ctx, crypto_shash_descsize(tfm)); | ||
1213 | err = crypto_shash_import(desc, hashstate); | ||
1214 | err = check_shash_op("import", err, driver, vec_name, | ||
1215 | cfg); | ||
1216 | if (err) | ||
1217 | return err; | ||
1218 | } | ||
1219 | } | ||
1220 | |||
1221 | if (cfg->nosimd) | ||
1222 | crypto_disable_simd_for_test(); | ||
1223 | err = crypto_shash_final(desc, result); | ||
1224 | if (cfg->nosimd) | ||
1225 | crypto_reenable_simd_for_test(); | ||
1226 | err = check_shash_op("final", err, driver, vec_name, cfg); | ||
1227 | if (err) | ||
1228 | return err; | ||
1229 | result_ready: | ||
1230 | return check_hash_result("shash", result, digestsize, vec, vec_name, | ||
1231 | driver, cfg); | ||
1232 | } | ||
1233 | |||
1035 | static int do_ahash_op(int (*op)(struct ahash_request *req), | 1234 | static int do_ahash_op(int (*op)(struct ahash_request *req), |
1036 | struct ahash_request *req, | 1235 | struct ahash_request *req, |
1037 | struct crypto_wait *wait, bool nosimd) | 1236 | struct crypto_wait *wait, bool nosimd) |
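The shash test path above receives a caller-provided struct shash_desc instead of building one on the stack. Allocating that descriptor dynamically follows from the sizes the API exposes; a short kernel-context sketch (alloc_shash_desc() is an illustrative helper, not a function from this patch):

#include <crypto/hash.h>
#include <linux/slab.h>

/* Heap-allocate a shash_desc for 'tfm'; the per-algorithm descsize
 * follows the fixed header, as with SHASH_DESC_ON_STACK. */
static struct shash_desc *alloc_shash_desc(struct crypto_shash *tfm)
{
        struct shash_desc *desc;

        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
                       GFP_KERNEL);
        if (desc)
                desc->tfm = tfm;
        return desc;
}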
@@ -1049,31 +1248,32 @@ static int do_ahash_op(int (*op)(struct ahash_request *req), | |||
1049 | return crypto_wait_req(err, wait); | 1248 | return crypto_wait_req(err, wait); |
1050 | } | 1249 | } |
1051 | 1250 | ||
1052 | static int check_nonfinal_hash_op(const char *op, int err, | 1251 | static int check_nonfinal_ahash_op(const char *op, int err, |
1053 | u8 *result, unsigned int digestsize, | 1252 | u8 *result, unsigned int digestsize, |
1054 | const char *driver, const char *vec_name, | 1253 | const char *driver, const char *vec_name, |
1055 | const struct testvec_config *cfg) | 1254 | const struct testvec_config *cfg) |
1056 | { | 1255 | { |
1057 | if (err) { | 1256 | if (err) { |
1058 | pr_err("alg: hash: %s %s() failed with err %d on test vector %s, cfg=\"%s\"\n", | 1257 | pr_err("alg: ahash: %s %s() failed with err %d on test vector %s, cfg=\"%s\"\n", |
1059 | driver, op, err, vec_name, cfg->name); | 1258 | driver, op, err, vec_name, cfg->name); |
1060 | return err; | 1259 | return err; |
1061 | } | 1260 | } |
1062 | if (!testmgr_is_poison(result, digestsize)) { | 1261 | if (!testmgr_is_poison(result, digestsize)) { |
1063 | pr_err("alg: hash: %s %s() used result buffer on test vector %s, cfg=\"%s\"\n", | 1262 | pr_err("alg: ahash: %s %s() used result buffer on test vector %s, cfg=\"%s\"\n", |
1064 | driver, op, vec_name, cfg->name); | 1263 | driver, op, vec_name, cfg->name); |
1065 | return -EINVAL; | 1264 | return -EINVAL; |
1066 | } | 1265 | } |
1067 | return 0; | 1266 | return 0; |
1068 | } | 1267 | } |
1069 | 1268 | ||
1070 | static int test_hash_vec_cfg(const char *driver, | 1269 | /* Test one hash test vector in one configuration, using the ahash API */ |
1071 | const struct hash_testvec *vec, | 1270 | static int test_ahash_vec_cfg(const char *driver, |
1072 | const char *vec_name, | 1271 | const struct hash_testvec *vec, |
1073 | const struct testvec_config *cfg, | 1272 | const char *vec_name, |
1074 | struct ahash_request *req, | 1273 | const struct testvec_config *cfg, |
1075 | struct test_sglist *tsgl, | 1274 | struct ahash_request *req, |
1076 | u8 *hashstate) | 1275 | struct test_sglist *tsgl, |
1276 | u8 *hashstate) | ||
1077 | { | 1277 | { |
1078 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | 1278 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
1079 | const unsigned int alignmask = crypto_ahash_alignmask(tfm); | 1279 | const unsigned int alignmask = crypto_ahash_alignmask(tfm); |
@@ -1082,8 +1282,6 @@ static int test_hash_vec_cfg(const char *driver, | |||
1082 | const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags; | 1282 | const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags; |
1083 | const struct test_sg_division *divs[XBUFSIZE]; | 1283 | const struct test_sg_division *divs[XBUFSIZE]; |
1084 | DECLARE_CRYPTO_WAIT(wait); | 1284 | DECLARE_CRYPTO_WAIT(wait); |
1085 | struct kvec _input; | ||
1086 | struct iov_iter input; | ||
1087 | unsigned int i; | 1285 | unsigned int i; |
1088 | struct scatterlist *pending_sgl; | 1286 | struct scatterlist *pending_sgl; |
1089 | unsigned int pending_len; | 1287 | unsigned int pending_len; |
@@ -1096,26 +1294,22 @@ static int test_hash_vec_cfg(const char *driver, | |||
1096 | if (err) { | 1294 | if (err) { |
1097 | if (err == vec->setkey_error) | 1295 | if (err == vec->setkey_error) |
1098 | return 0; | 1296 | return 0; |
1099 | pr_err("alg: hash: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n", | 1297 | pr_err("alg: ahash: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n", |
1100 | driver, vec_name, vec->setkey_error, err, | 1298 | driver, vec_name, vec->setkey_error, err, |
1101 | crypto_ahash_get_flags(tfm)); | 1299 | crypto_ahash_get_flags(tfm)); |
1102 | return err; | 1300 | return err; |
1103 | } | 1301 | } |
1104 | if (vec->setkey_error) { | 1302 | if (vec->setkey_error) { |
1105 | pr_err("alg: hash: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n", | 1303 | pr_err("alg: ahash: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n", |
1106 | driver, vec_name, vec->setkey_error); | 1304 | driver, vec_name, vec->setkey_error); |
1107 | return -EINVAL; | 1305 | return -EINVAL; |
1108 | } | 1306 | } |
1109 | } | 1307 | } |
1110 | 1308 | ||
1111 | /* Build the scatterlist for the source data */ | 1309 | /* Build the scatterlist for the source data */ |
1112 | _input.iov_base = (void *)vec->plaintext; | 1310 | err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs); |
1113 | _input.iov_len = vec->psize; | ||
1114 | iov_iter_kvec(&input, WRITE, &_input, 1, vec->psize); | ||
1115 | err = build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize, | ||
1116 | &input, divs); | ||
1117 | if (err) { | 1311 | if (err) { |
1118 | pr_err("alg: hash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n", | 1312 | pr_err("alg: ahash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n", |
1119 | driver, vec_name, cfg->name); | 1313 | driver, vec_name, cfg->name); |
1120 | return err; | 1314 | return err; |
1121 | } | 1315 | } |
@@ -1135,13 +1329,13 @@ static int test_hash_vec_cfg(const char *driver, | |||
1135 | if (err) { | 1329 | if (err) { |
1136 | if (err == vec->digest_error) | 1330 | if (err == vec->digest_error) |
1137 | return 0; | 1331 | return 0; |
1138 | pr_err("alg: hash: %s digest() failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n", | 1332 | pr_err("alg: ahash: %s digest() failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n", |
1139 | driver, vec_name, vec->digest_error, err, | 1333 | driver, vec_name, vec->digest_error, err, |
1140 | cfg->name); | 1334 | cfg->name); |
1141 | return err; | 1335 | return err; |
1142 | } | 1336 | } |
1143 | if (vec->digest_error) { | 1337 | if (vec->digest_error) { |
1144 | pr_err("alg: hash: %s digest() unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n", | 1338 | pr_err("alg: ahash: %s digest() unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n", |
1145 | driver, vec_name, vec->digest_error, cfg->name); | 1339 | driver, vec_name, vec->digest_error, cfg->name); |
1146 | return -EINVAL; | 1340 | return -EINVAL; |
1147 | } | 1341 | } |
@@ -1153,8 +1347,8 @@ static int test_hash_vec_cfg(const char *driver, | |||
1153 | ahash_request_set_callback(req, req_flags, crypto_req_done, &wait); | 1347 | ahash_request_set_callback(req, req_flags, crypto_req_done, &wait); |
1154 | ahash_request_set_crypt(req, NULL, result, 0); | 1348 | ahash_request_set_crypt(req, NULL, result, 0); |
1155 | err = do_ahash_op(crypto_ahash_init, req, &wait, cfg->nosimd); | 1349 | err = do_ahash_op(crypto_ahash_init, req, &wait, cfg->nosimd); |
1156 | err = check_nonfinal_hash_op("init", err, result, digestsize, | 1350 | err = check_nonfinal_ahash_op("init", err, result, digestsize, |
1157 | driver, vec_name, cfg); | 1351 | driver, vec_name, cfg); |
1158 | if (err) | 1352 | if (err) |
1159 | return err; | 1353 | return err; |
1160 | 1354 | ||
@@ -1170,9 +1364,9 @@ static int test_hash_vec_cfg(const char *driver, | |||
1170 | pending_len); | 1364 | pending_len); |
1171 | err = do_ahash_op(crypto_ahash_update, req, &wait, | 1365 | err = do_ahash_op(crypto_ahash_update, req, &wait, |
1172 | divs[i]->nosimd); | 1366 | divs[i]->nosimd); |
1173 | err = check_nonfinal_hash_op("update", err, | 1367 | err = check_nonfinal_ahash_op("update", err, |
1174 | result, digestsize, | 1368 | result, digestsize, |
1175 | driver, vec_name, cfg); | 1369 | driver, vec_name, cfg); |
1176 | if (err) | 1370 | if (err) |
1177 | return err; | 1371 | return err; |
1178 | pending_sgl = NULL; | 1372 | pending_sgl = NULL; |
@@ -1183,23 +1377,23 @@ static int test_hash_vec_cfg(const char *driver, | |||
1183 | testmgr_poison(hashstate + statesize, | 1377 | testmgr_poison(hashstate + statesize, |
1184 | TESTMGR_POISON_LEN); | 1378 | TESTMGR_POISON_LEN); |
1185 | err = crypto_ahash_export(req, hashstate); | 1379 | err = crypto_ahash_export(req, hashstate); |
1186 | err = check_nonfinal_hash_op("export", err, | 1380 | err = check_nonfinal_ahash_op("export", err, |
1187 | result, digestsize, | 1381 | result, digestsize, |
1188 | driver, vec_name, cfg); | 1382 | driver, vec_name, cfg); |
1189 | if (err) | 1383 | if (err) |
1190 | return err; | 1384 | return err; |
1191 | if (!testmgr_is_poison(hashstate + statesize, | 1385 | if (!testmgr_is_poison(hashstate + statesize, |
1192 | TESTMGR_POISON_LEN)) { | 1386 | TESTMGR_POISON_LEN)) { |
1193 | pr_err("alg: hash: %s export() overran state buffer on test vector %s, cfg=\"%s\"\n", | 1387 | pr_err("alg: ahash: %s export() overran state buffer on test vector %s, cfg=\"%s\"\n", |
1194 | driver, vec_name, cfg->name); | 1388 | driver, vec_name, cfg->name); |
1195 | return -EOVERFLOW; | 1389 | return -EOVERFLOW; |
1196 | } | 1390 | } |
1197 | 1391 | ||
1198 | testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm)); | 1392 | testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm)); |
1199 | err = crypto_ahash_import(req, hashstate); | 1393 | err = crypto_ahash_import(req, hashstate); |
1200 | err = check_nonfinal_hash_op("import", err, | 1394 | err = check_nonfinal_ahash_op("import", err, |
1201 | result, digestsize, | 1395 | result, digestsize, |
1202 | driver, vec_name, cfg); | 1396 | driver, vec_name, cfg); |
1203 | if (err) | 1397 | if (err) |
1204 | return err; | 1398 | return err; |
1205 | } | 1399 | } |
@@ -1213,13 +1407,13 @@ static int test_hash_vec_cfg(const char *driver, | |||
1213 | if (cfg->finalization_type == FINALIZATION_TYPE_FINAL) { | 1407 | if (cfg->finalization_type == FINALIZATION_TYPE_FINAL) { |
1214 | /* finish with update() and final() */ | 1408 | /* finish with update() and final() */ |
1215 | err = do_ahash_op(crypto_ahash_update, req, &wait, cfg->nosimd); | 1409 | err = do_ahash_op(crypto_ahash_update, req, &wait, cfg->nosimd); |
1216 | err = check_nonfinal_hash_op("update", err, result, digestsize, | 1410 | err = check_nonfinal_ahash_op("update", err, result, digestsize, |
1217 | driver, vec_name, cfg); | 1411 | driver, vec_name, cfg); |
1218 | if (err) | 1412 | if (err) |
1219 | return err; | 1413 | return err; |
1220 | err = do_ahash_op(crypto_ahash_final, req, &wait, cfg->nosimd); | 1414 | err = do_ahash_op(crypto_ahash_final, req, &wait, cfg->nosimd); |
1221 | if (err) { | 1415 | if (err) { |
1222 | pr_err("alg: hash: %s final() failed with err %d on test vector %s, cfg=\"%s\"\n", | 1416 | pr_err("alg: ahash: %s final() failed with err %d on test vector %s, cfg=\"%s\"\n", |
1223 | driver, err, vec_name, cfg->name); | 1417 | driver, err, vec_name, cfg->name); |
1224 | return err; | 1418 | return err; |
1225 | } | 1419 | } |
@@ -1227,31 +1421,49 @@ static int test_hash_vec_cfg(const char *driver, | |||
1227 | /* finish with finup() */ | 1421 | /* finish with finup() */ |
1228 | err = do_ahash_op(crypto_ahash_finup, req, &wait, cfg->nosimd); | 1422 | err = do_ahash_op(crypto_ahash_finup, req, &wait, cfg->nosimd); |
1229 | if (err) { | 1423 | if (err) { |
1230 | pr_err("alg: hash: %s finup() failed with err %d on test vector %s, cfg=\"%s\"\n", | 1424 | pr_err("alg: ahash: %s finup() failed with err %d on test vector %s, cfg=\"%s\"\n", |
1231 | driver, err, vec_name, cfg->name); | 1425 | driver, err, vec_name, cfg->name); |
1232 | return err; | 1426 | return err; |
1233 | } | 1427 | } |
1234 | } | 1428 | } |
1235 | 1429 | ||
1236 | result_ready: | 1430 | result_ready: |
1237 | /* Check that the algorithm produced the correct digest */ | 1431 | return check_hash_result("ahash", result, digestsize, vec, vec_name, |
1238 | if (memcmp(result, vec->digest, digestsize) != 0) { | 1432 | driver, cfg); |
1239 | pr_err("alg: hash: %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n", | 1433 | } |
1240 | driver, vec_name, cfg->name); | 1434 | |
1241 | return -EINVAL; | 1435 | static int test_hash_vec_cfg(const char *driver, |
1242 | } | 1436 | const struct hash_testvec *vec, |
1243 | if (!testmgr_is_poison(&result[digestsize], TESTMGR_POISON_LEN)) { | 1437 | const char *vec_name, |
1244 | pr_err("alg: hash: %s overran result buffer on test vector %s, cfg=\"%s\"\n", | 1438 | const struct testvec_config *cfg, |
1245 | driver, vec_name, cfg->name); | 1439 | struct ahash_request *req, |
1246 | return -EOVERFLOW; | 1440 | struct shash_desc *desc, |
1441 | struct test_sglist *tsgl, | ||
1442 | u8 *hashstate) | ||
1443 | { | ||
1444 | int err; | ||
1445 | |||
1446 | /* | ||
1447 | * For algorithms implemented as "shash", most bugs will be detected by | ||
1448 | * both the shash and ahash tests. Test the shash API first so that the | ||
1449 | * failures involve less indirection, so are easier to debug. | ||
1450 | */ | ||
1451 | |||
1452 | if (desc) { | ||
1453 | err = test_shash_vec_cfg(driver, vec, vec_name, cfg, desc, tsgl, | ||
1454 | hashstate); | ||
1455 | if (err) | ||
1456 | return err; | ||
1247 | } | 1457 | } |
1248 | 1458 | ||
1249 | return 0; | 1459 | return test_ahash_vec_cfg(driver, vec, vec_name, cfg, req, tsgl, |
1460 | hashstate); | ||
1250 | } | 1461 | } |
1251 | 1462 | ||
1252 | static int test_hash_vec(const char *driver, const struct hash_testvec *vec, | 1463 | static int test_hash_vec(const char *driver, const struct hash_testvec *vec, |
1253 | unsigned int vec_num, struct ahash_request *req, | 1464 | unsigned int vec_num, struct ahash_request *req, |
1254 | struct test_sglist *tsgl, u8 *hashstate) | 1465 | struct shash_desc *desc, struct test_sglist *tsgl, |
1466 | u8 *hashstate) | ||
1255 | { | 1467 | { |
1256 | char vec_name[16]; | 1468 | char vec_name[16]; |
1257 | unsigned int i; | 1469 | unsigned int i; |
@@ -1262,7 +1474,7 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec, | |||
1262 | for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) { | 1474 | for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) { |
1263 | err = test_hash_vec_cfg(driver, vec, vec_name, | 1475 | err = test_hash_vec_cfg(driver, vec, vec_name, |
1264 | &default_hash_testvec_configs[i], | 1476 | &default_hash_testvec_configs[i], |
1265 | req, tsgl, hashstate); | 1477 | req, desc, tsgl, hashstate); |
1266 | if (err) | 1478 | if (err) |
1267 | return err; | 1479 | return err; |
1268 | } | 1480 | } |
@@ -1276,9 +1488,10 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec, | |||
1276 | generate_random_testvec_config(&cfg, cfgname, | 1488 | generate_random_testvec_config(&cfg, cfgname, |
1277 | sizeof(cfgname)); | 1489 | sizeof(cfgname)); |
1278 | err = test_hash_vec_cfg(driver, vec, vec_name, &cfg, | 1490 | err = test_hash_vec_cfg(driver, vec, vec_name, &cfg, |
1279 | req, tsgl, hashstate); | 1491 | req, desc, tsgl, hashstate); |
1280 | if (err) | 1492 | if (err) |
1281 | return err; | 1493 | return err; |
1494 | cond_resched(); | ||
1282 | } | 1495 | } |
1283 | } | 1496 | } |
1284 | #endif | 1497 | #endif |
@@ -1290,14 +1503,12 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec, | |||
1290 | * Generate a hash test vector from the given implementation. | 1503 | * Generate a hash test vector from the given implementation. |
1291 | * Assumes the buffers in 'vec' were already allocated. | 1504 | * Assumes the buffers in 'vec' were already allocated. |
1292 | */ | 1505 | */ |
1293 | static void generate_random_hash_testvec(struct crypto_shash *tfm, | 1506 | static void generate_random_hash_testvec(struct shash_desc *desc, |
1294 | struct hash_testvec *vec, | 1507 | struct hash_testvec *vec, |
1295 | unsigned int maxkeysize, | 1508 | unsigned int maxkeysize, |
1296 | unsigned int maxdatasize, | 1509 | unsigned int maxdatasize, |
1297 | char *name, size_t max_namelen) | 1510 | char *name, size_t max_namelen) |
1298 | { | 1511 | { |
1299 | SHASH_DESC_ON_STACK(desc, tfm); | ||
1300 | |||
1301 | /* Data */ | 1512 | /* Data */ |
1302 | vec->psize = generate_random_length(maxdatasize); | 1513 | vec->psize = generate_random_length(maxdatasize); |
1303 | generate_random_bytes((u8 *)vec->plaintext, vec->psize); | 1514 | generate_random_bytes((u8 *)vec->plaintext, vec->psize); |
@@ -1314,7 +1525,7 @@ static void generate_random_hash_testvec(struct crypto_shash *tfm, | |||
1314 | vec->ksize = 1 + (prandom_u32() % maxkeysize); | 1525 | vec->ksize = 1 + (prandom_u32() % maxkeysize); |
1315 | generate_random_bytes((u8 *)vec->key, vec->ksize); | 1526 | generate_random_bytes((u8 *)vec->key, vec->ksize); |
1316 | 1527 | ||
1317 | vec->setkey_error = crypto_shash_setkey(tfm, vec->key, | 1528 | vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key, |
1318 | vec->ksize); | 1529 | vec->ksize); |
1319 | /* If the key couldn't be set, no need to continue to digest. */ | 1530 | /* If the key couldn't be set, no need to continue to digest. */ |
1320 | if (vec->setkey_error) | 1531 | if (vec->setkey_error) |
@@ -1322,7 +1533,6 @@ static void generate_random_hash_testvec(struct crypto_shash *tfm, | |||
1322 | } | 1533 | } |
1323 | 1534 | ||
1324 | /* Digest */ | 1535 | /* Digest */ |
1325 | desc->tfm = tfm; | ||
1326 | vec->digest_error = crypto_shash_digest(desc, vec->plaintext, | 1536 | vec->digest_error = crypto_shash_digest(desc, vec->plaintext, |
1327 | vec->psize, (u8 *)vec->digest); | 1537 | vec->psize, (u8 *)vec->digest); |
1328 | done: | 1538 | done: |
@@ -1338,6 +1548,7 @@ static int test_hash_vs_generic_impl(const char *driver, | |||
1338 | const char *generic_driver, | 1548 | const char *generic_driver, |
1339 | unsigned int maxkeysize, | 1549 | unsigned int maxkeysize, |
1340 | struct ahash_request *req, | 1550 | struct ahash_request *req, |
1551 | struct shash_desc *desc, | ||
1341 | struct test_sglist *tsgl, | 1552 | struct test_sglist *tsgl, |
1342 | u8 *hashstate) | 1553 | u8 *hashstate) |
1343 | { | 1554 | { |
@@ -1348,10 +1559,11 @@ static int test_hash_vs_generic_impl(const char *driver, | |||
1348 | const char *algname = crypto_hash_alg_common(tfm)->base.cra_name; | 1559 | const char *algname = crypto_hash_alg_common(tfm)->base.cra_name; |
1349 | char _generic_driver[CRYPTO_MAX_ALG_NAME]; | 1560 | char _generic_driver[CRYPTO_MAX_ALG_NAME]; |
1350 | struct crypto_shash *generic_tfm = NULL; | 1561 | struct crypto_shash *generic_tfm = NULL; |
1562 | struct shash_desc *generic_desc = NULL; | ||
1351 | unsigned int i; | 1563 | unsigned int i; |
1352 | struct hash_testvec vec = { 0 }; | 1564 | struct hash_testvec vec = { 0 }; |
1353 | char vec_name[64]; | 1565 | char vec_name[64]; |
1354 | struct testvec_config cfg; | 1566 | struct testvec_config *cfg; |
1355 | char cfgname[TESTVEC_CONFIG_NAMELEN]; | 1567 | char cfgname[TESTVEC_CONFIG_NAMELEN]; |
1356 | int err; | 1568 | int err; |
1357 | 1569 | ||
@@ -1381,6 +1593,20 @@ static int test_hash_vs_generic_impl(const char *driver, | |||
1381 | return err; | 1593 | return err; |
1382 | } | 1594 | } |
1383 | 1595 | ||
1596 | cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); | ||
1597 | if (!cfg) { | ||
1598 | err = -ENOMEM; | ||
1599 | goto out; | ||
1600 | } | ||
1601 | |||
1602 | generic_desc = kzalloc(sizeof(*desc) + | ||
1603 | crypto_shash_descsize(generic_tfm), GFP_KERNEL); | ||
1604 | if (!generic_desc) { | ||
1605 | err = -ENOMEM; | ||
1606 | goto out; | ||
1607 | } | ||
1608 | generic_desc->tfm = generic_tfm; | ||
1609 | |||
1384 | /* Check the algorithm properties for consistency. */ | 1610 | /* Check the algorithm properties for consistency. */ |
1385 | 1611 | ||
1386 | if (digestsize != crypto_shash_digestsize(generic_tfm)) { | 1612 | if (digestsize != crypto_shash_digestsize(generic_tfm)) { |
@@ -1412,23 +1638,25 @@ static int test_hash_vs_generic_impl(const char *driver, | |||
1412 | } | 1638 | } |
1413 | 1639 | ||
1414 | for (i = 0; i < fuzz_iterations * 8; i++) { | 1640 | for (i = 0; i < fuzz_iterations * 8; i++) { |
1415 | generate_random_hash_testvec(generic_tfm, &vec, | 1641 | generate_random_hash_testvec(generic_desc, &vec, |
1416 | maxkeysize, maxdatasize, | 1642 | maxkeysize, maxdatasize, |
1417 | vec_name, sizeof(vec_name)); | 1643 | vec_name, sizeof(vec_name)); |
1418 | generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname)); | 1644 | generate_random_testvec_config(cfg, cfgname, sizeof(cfgname)); |
1419 | 1645 | ||
1420 | err = test_hash_vec_cfg(driver, &vec, vec_name, &cfg, | 1646 | err = test_hash_vec_cfg(driver, &vec, vec_name, cfg, |
1421 | req, tsgl, hashstate); | 1647 | req, desc, tsgl, hashstate); |
1422 | if (err) | 1648 | if (err) |
1423 | goto out; | 1649 | goto out; |
1424 | cond_resched(); | 1650 | cond_resched(); |
1425 | } | 1651 | } |
1426 | err = 0; | 1652 | err = 0; |
1427 | out: | 1653 | out: |
1654 | kfree(cfg); | ||
1428 | kfree(vec.key); | 1655 | kfree(vec.key); |
1429 | kfree(vec.plaintext); | 1656 | kfree(vec.plaintext); |
1430 | kfree(vec.digest); | 1657 | kfree(vec.digest); |
1431 | crypto_free_shash(generic_tfm); | 1658 | crypto_free_shash(generic_tfm); |
1659 | kzfree(generic_desc); | ||
1432 | return err; | 1660 | return err; |
1433 | } | 1661 | } |
1434 | #else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ | 1662 | #else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ |
@@ -1436,6 +1664,7 @@ static int test_hash_vs_generic_impl(const char *driver, | |||
1436 | const char *generic_driver, | 1664 | const char *generic_driver, |
1437 | unsigned int maxkeysize, | 1665 | unsigned int maxkeysize, |
1438 | struct ahash_request *req, | 1666 | struct ahash_request *req, |
1667 | struct shash_desc *desc, | ||
1439 | struct test_sglist *tsgl, | 1668 | struct test_sglist *tsgl, |
1440 | u8 *hashstate) | 1669 | u8 *hashstate) |
1441 | { | 1670 | { |
@@ -1443,26 +1672,67 @@ static int test_hash_vs_generic_impl(const char *driver, | |||
1443 | } | 1672 | } |
1444 | #endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ | 1673 | #endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ |
1445 | 1674 | ||
1675 | static int alloc_shash(const char *driver, u32 type, u32 mask, | ||
1676 | struct crypto_shash **tfm_ret, | ||
1677 | struct shash_desc **desc_ret) | ||
1678 | { | ||
1679 | struct crypto_shash *tfm; | ||
1680 | struct shash_desc *desc; | ||
1681 | |||
1682 | tfm = crypto_alloc_shash(driver, type, mask); | ||
1683 | if (IS_ERR(tfm)) { | ||
1684 | if (PTR_ERR(tfm) == -ENOENT) { | ||
1685 | /* | ||
1686 | * This algorithm is only available through the ahash | ||
1687 | * API, not the shash API, so skip the shash tests. | ||
1688 | */ | ||
1689 | return 0; | ||
1690 | } | ||
1691 | pr_err("alg: hash: failed to allocate shash transform for %s: %ld\n", | ||
1692 | driver, PTR_ERR(tfm)); | ||
1693 | return PTR_ERR(tfm); | ||
1694 | } | ||
1695 | |||
1696 | desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL); | ||
1697 | if (!desc) { | ||
1698 | crypto_free_shash(tfm); | ||
1699 | return -ENOMEM; | ||
1700 | } | ||
1701 | desc->tfm = tfm; | ||
1702 | |||
1703 | *tfm_ret = tfm; | ||
1704 | *desc_ret = desc; | ||
1705 | return 0; | ||
1706 | } | ||
1707 | |||
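
alloc_shash() above captures the pattern this series switches to: a shash_desc sized at runtime from crypto_shash_descsize() and allocated on the heap, replacing the SHASH_DESC_ON_STACK() usage removed from generate_random_hash_testvec(). A hedged standalone sketch of the same pattern (the helper name is illustrative)::

    static int demo_shash_digest(const char *alg, const u8 *data,
                                 unsigned int len, u8 *out)
    {
            struct crypto_shash *tfm;
            struct shash_desc *desc;
            int err;

            tfm = crypto_alloc_shash(alg, 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            /* descsize is only known once the tfm exists */
            desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
                           GFP_KERNEL);
            if (!desc) {
                    err = -ENOMEM;
                    goto out_free_tfm;
            }
            desc->tfm = tfm;

            err = crypto_shash_digest(desc, data, len, out);

            kfree(desc);
    out_free_tfm:
            crypto_free_shash(tfm);
            return err;
    }
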
1446 | static int __alg_test_hash(const struct hash_testvec *vecs, | 1708 | static int __alg_test_hash(const struct hash_testvec *vecs, |
1447 | unsigned int num_vecs, const char *driver, | 1709 | unsigned int num_vecs, const char *driver, |
1448 | u32 type, u32 mask, | 1710 | u32 type, u32 mask, |
1449 | const char *generic_driver, unsigned int maxkeysize) | 1711 | const char *generic_driver, unsigned int maxkeysize) |
1450 | { | 1712 | { |
1451 | struct crypto_ahash *tfm; | 1713 | struct crypto_ahash *atfm = NULL; |
1452 | struct ahash_request *req = NULL; | 1714 | struct ahash_request *req = NULL; |
1715 | struct crypto_shash *stfm = NULL; | ||
1716 | struct shash_desc *desc = NULL; | ||
1453 | struct test_sglist *tsgl = NULL; | 1717 | struct test_sglist *tsgl = NULL; |
1454 | u8 *hashstate = NULL; | 1718 | u8 *hashstate = NULL; |
1719 | unsigned int statesize; | ||
1455 | unsigned int i; | 1720 | unsigned int i; |
1456 | int err; | 1721 | int err; |
1457 | 1722 | ||
1458 | tfm = crypto_alloc_ahash(driver, type, mask); | 1723 | /* |
1459 | if (IS_ERR(tfm)) { | 1724 | * Always test the ahash API. This works regardless of whether the |
1725 | * algorithm is implemented as ahash or shash. | ||
1726 | */ | ||
1727 | |||
1728 | atfm = crypto_alloc_ahash(driver, type, mask); | ||
1729 | if (IS_ERR(atfm)) { | ||
1460 | pr_err("alg: hash: failed to allocate transform for %s: %ld\n", | 1730 | pr_err("alg: hash: failed to allocate transform for %s: %ld\n", |
1461 | driver, PTR_ERR(tfm)); | 1731 | driver, PTR_ERR(atfm)); |
1462 | return PTR_ERR(tfm); | 1732 | return PTR_ERR(atfm); |
1463 | } | 1733 | } |
1464 | 1734 | ||
1465 | req = ahash_request_alloc(tfm, GFP_KERNEL); | 1735 | req = ahash_request_alloc(atfm, GFP_KERNEL); |
1466 | if (!req) { | 1736 | if (!req) { |
1467 | pr_err("alg: hash: failed to allocate request for %s\n", | 1737 | pr_err("alg: hash: failed to allocate request for %s\n", |
1468 | driver); | 1738 | driver); |
@@ -1470,6 +1740,14 @@ static int __alg_test_hash(const struct hash_testvec *vecs, | |||
1470 | goto out; | 1740 | goto out; |
1471 | } | 1741 | } |
1472 | 1742 | ||
1743 | /* | ||
1744 | * If available, also test the shash API to cover corner cases that may | ||
1745 | * be missed by testing the ahash API only. | ||
1746 | */ | ||
1747 | err = alloc_shash(driver, type, mask, &stfm, &desc); | ||
1748 | if (err) | ||
1749 | goto out; | ||
1750 | |||
1473 | tsgl = kmalloc(sizeof(*tsgl), GFP_KERNEL); | 1751 | tsgl = kmalloc(sizeof(*tsgl), GFP_KERNEL); |
1474 | if (!tsgl || init_test_sglist(tsgl) != 0) { | 1752 | if (!tsgl || init_test_sglist(tsgl) != 0) { |
1475 | pr_err("alg: hash: failed to allocate test buffers for %s\n", | 1753 | pr_err("alg: hash: failed to allocate test buffers for %s\n", |
@@ -1480,8 +1758,10 @@ static int __alg_test_hash(const struct hash_testvec *vecs, | |||
1480 | goto out; | 1758 | goto out; |
1481 | } | 1759 | } |
1482 | 1760 | ||
1483 | hashstate = kmalloc(crypto_ahash_statesize(tfm) + TESTMGR_POISON_LEN, | 1761 | statesize = crypto_ahash_statesize(atfm); |
1484 | GFP_KERNEL); | 1762 | if (stfm) |
1763 | statesize = max(statesize, crypto_shash_statesize(stfm)); | ||
1764 | hashstate = kmalloc(statesize + TESTMGR_POISON_LEN, GFP_KERNEL); | ||
1485 | if (!hashstate) { | 1765 | if (!hashstate) { |
1486 | pr_err("alg: hash: failed to allocate hash state buffer for %s\n", | 1766 | pr_err("alg: hash: failed to allocate hash state buffer for %s\n", |
1487 | driver); | 1767 | driver); |
@@ -1490,20 +1770,24 @@ static int __alg_test_hash(const struct hash_testvec *vecs, | |||
1490 | } | 1770 | } |
1491 | 1771 | ||
1492 | for (i = 0; i < num_vecs; i++) { | 1772 | for (i = 0; i < num_vecs; i++) { |
1493 | err = test_hash_vec(driver, &vecs[i], i, req, tsgl, hashstate); | 1773 | err = test_hash_vec(driver, &vecs[i], i, req, desc, tsgl, |
1774 | hashstate); | ||
1494 | if (err) | 1775 | if (err) |
1495 | goto out; | 1776 | goto out; |
1777 | cond_resched(); | ||
1496 | } | 1778 | } |
1497 | err = test_hash_vs_generic_impl(driver, generic_driver, maxkeysize, req, | 1779 | err = test_hash_vs_generic_impl(driver, generic_driver, maxkeysize, req, |
1498 | tsgl, hashstate); | 1780 | desc, tsgl, hashstate); |
1499 | out: | 1781 | out: |
1500 | kfree(hashstate); | 1782 | kfree(hashstate); |
1501 | if (tsgl) { | 1783 | if (tsgl) { |
1502 | destroy_test_sglist(tsgl); | 1784 | destroy_test_sglist(tsgl); |
1503 | kfree(tsgl); | 1785 | kfree(tsgl); |
1504 | } | 1786 | } |
1787 | kfree(desc); | ||
1788 | crypto_free_shash(stfm); | ||
1505 | ahash_request_free(req); | 1789 | ahash_request_free(req); |
1506 | crypto_free_ahash(tfm); | 1790 | crypto_free_ahash(atfm); |
1507 | return err; | 1791 | return err; |
1508 | } | 1792 | } |
1509 | 1793 | ||
@@ -1755,6 +2039,7 @@ static int test_aead_vec(const char *driver, int enc, | |||
1755 | &cfg, req, tsgls); | 2039 | &cfg, req, tsgls); |
1756 | if (err) | 2040 | if (err) |
1757 | return err; | 2041 | return err; |
2042 | cond_resched(); | ||
1758 | } | 2043 | } |
1759 | } | 2044 | } |
1760 | #endif | 2045 | #endif |
@@ -1864,7 +2149,7 @@ static int test_aead_vs_generic_impl(const char *driver, | |||
1864 | unsigned int i; | 2149 | unsigned int i; |
1865 | struct aead_testvec vec = { 0 }; | 2150 | struct aead_testvec vec = { 0 }; |
1866 | char vec_name[64]; | 2151 | char vec_name[64]; |
1867 | struct testvec_config cfg; | 2152 | struct testvec_config *cfg; |
1868 | char cfgname[TESTVEC_CONFIG_NAMELEN]; | 2153 | char cfgname[TESTVEC_CONFIG_NAMELEN]; |
1869 | int err; | 2154 | int err; |
1870 | 2155 | ||
@@ -1894,6 +2179,12 @@ static int test_aead_vs_generic_impl(const char *driver, | |||
1894 | return err; | 2179 | return err; |
1895 | } | 2180 | } |
1896 | 2181 | ||
2182 | cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); | ||
2183 | if (!cfg) { | ||
2184 | err = -ENOMEM; | ||
2185 | goto out; | ||
2186 | } | ||
2187 | |||
1897 | generic_req = aead_request_alloc(generic_tfm, GFP_KERNEL); | 2188 | generic_req = aead_request_alloc(generic_tfm, GFP_KERNEL); |
1898 | if (!generic_req) { | 2189 | if (!generic_req) { |
1899 | err = -ENOMEM; | 2190 | err = -ENOMEM; |
@@ -1948,13 +2239,13 @@ static int test_aead_vs_generic_impl(const char *driver, | |||
1948 | generate_random_aead_testvec(generic_req, &vec, | 2239 | generate_random_aead_testvec(generic_req, &vec, |
1949 | maxkeysize, maxdatasize, | 2240 | maxkeysize, maxdatasize, |
1950 | vec_name, sizeof(vec_name)); | 2241 | vec_name, sizeof(vec_name)); |
1951 | generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname)); | 2242 | generate_random_testvec_config(cfg, cfgname, sizeof(cfgname)); |
1952 | 2243 | ||
1953 | err = test_aead_vec_cfg(driver, ENCRYPT, &vec, vec_name, &cfg, | 2244 | err = test_aead_vec_cfg(driver, ENCRYPT, &vec, vec_name, cfg, |
1954 | req, tsgls); | 2245 | req, tsgls); |
1955 | if (err) | 2246 | if (err) |
1956 | goto out; | 2247 | goto out; |
1957 | err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, &cfg, | 2248 | err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, cfg, |
1958 | req, tsgls); | 2249 | req, tsgls); |
1959 | if (err) | 2250 | if (err) |
1960 | goto out; | 2251 | goto out; |
@@ -1962,6 +2253,7 @@ static int test_aead_vs_generic_impl(const char *driver, | |||
1962 | } | 2253 | } |
1963 | err = 0; | 2254 | err = 0; |
1964 | out: | 2255 | out: |
2256 | kfree(cfg); | ||
1965 | kfree(vec.key); | 2257 | kfree(vec.key); |
1966 | kfree(vec.iv); | 2258 | kfree(vec.iv); |
1967 | kfree(vec.assoc); | 2259 | kfree(vec.assoc); |
@@ -1994,6 +2286,7 @@ static int test_aead(const char *driver, int enc, | |||
1994 | tsgls); | 2286 | tsgls); |
1995 | if (err) | 2287 | if (err) |
1996 | return err; | 2288 | return err; |
2289 | cond_resched(); | ||
1997 | } | 2290 | } |
1998 | return 0; | 2291 | return 0; |
1999 | } | 2292 | } |
@@ -2336,6 +2629,7 @@ static int test_skcipher_vec(const char *driver, int enc, | |||
2336 | &cfg, req, tsgls); | 2629 | &cfg, req, tsgls); |
2337 | if (err) | 2630 | if (err) |
2338 | return err; | 2631 | return err; |
2632 | cond_resched(); | ||
2339 | } | 2633 | } |
2340 | } | 2634 | } |
2341 | #endif | 2635 | #endif |
@@ -2409,7 +2703,7 @@ static int test_skcipher_vs_generic_impl(const char *driver, | |||
2409 | unsigned int i; | 2703 | unsigned int i; |
2410 | struct cipher_testvec vec = { 0 }; | 2704 | struct cipher_testvec vec = { 0 }; |
2411 | char vec_name[64]; | 2705 | char vec_name[64]; |
2412 | struct testvec_config cfg; | 2706 | struct testvec_config *cfg; |
2413 | char cfgname[TESTVEC_CONFIG_NAMELEN]; | 2707 | char cfgname[TESTVEC_CONFIG_NAMELEN]; |
2414 | int err; | 2708 | int err; |
2415 | 2709 | ||
@@ -2443,6 +2737,12 @@ static int test_skcipher_vs_generic_impl(const char *driver, | |||
2443 | return err; | 2737 | return err; |
2444 | } | 2738 | } |
2445 | 2739 | ||
2740 | cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); | ||
2741 | if (!cfg) { | ||
2742 | err = -ENOMEM; | ||
2743 | goto out; | ||
2744 | } | ||
2745 | |||
2446 | generic_req = skcipher_request_alloc(generic_tfm, GFP_KERNEL); | 2746 | generic_req = skcipher_request_alloc(generic_tfm, GFP_KERNEL); |
2447 | if (!generic_req) { | 2747 | if (!generic_req) { |
2448 | err = -ENOMEM; | 2748 | err = -ENOMEM; |
@@ -2490,20 +2790,21 @@ static int test_skcipher_vs_generic_impl(const char *driver, | |||
2490 | for (i = 0; i < fuzz_iterations * 8; i++) { | 2790 | for (i = 0; i < fuzz_iterations * 8; i++) { |
2491 | generate_random_cipher_testvec(generic_req, &vec, maxdatasize, | 2791 | generate_random_cipher_testvec(generic_req, &vec, maxdatasize, |
2492 | vec_name, sizeof(vec_name)); | 2792 | vec_name, sizeof(vec_name)); |
2493 | generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname)); | 2793 | generate_random_testvec_config(cfg, cfgname, sizeof(cfgname)); |
2494 | 2794 | ||
2495 | err = test_skcipher_vec_cfg(driver, ENCRYPT, &vec, vec_name, | 2795 | err = test_skcipher_vec_cfg(driver, ENCRYPT, &vec, vec_name, |
2496 | &cfg, req, tsgls); | 2796 | cfg, req, tsgls); |
2497 | if (err) | 2797 | if (err) |
2498 | goto out; | 2798 | goto out; |
2499 | err = test_skcipher_vec_cfg(driver, DECRYPT, &vec, vec_name, | 2799 | err = test_skcipher_vec_cfg(driver, DECRYPT, &vec, vec_name, |
2500 | &cfg, req, tsgls); | 2800 | cfg, req, tsgls); |
2501 | if (err) | 2801 | if (err) |
2502 | goto out; | 2802 | goto out; |
2503 | cond_resched(); | 2803 | cond_resched(); |
2504 | } | 2804 | } |
2505 | err = 0; | 2805 | err = 0; |
2506 | out: | 2806 | out: |
2807 | kfree(cfg); | ||
2507 | kfree(vec.key); | 2808 | kfree(vec.key); |
2508 | kfree(vec.iv); | 2809 | kfree(vec.iv); |
2509 | kfree(vec.ptext); | 2810 | kfree(vec.ptext); |
@@ -2535,6 +2836,7 @@ static int test_skcipher(const char *driver, int enc, | |||
2535 | tsgls); | 2836 | tsgls); |
2536 | if (err) | 2837 | if (err) |
2537 | return err; | 2838 | return err; |
2839 | cond_resched(); | ||
2538 | } | 2840 | } |
2539 | return 0; | 2841 | return 0; |
2540 | } | 2842 | } |
@@ -4125,6 +4427,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
4125 | } | 4427 | } |
4126 | }, { | 4428 | }, { |
4127 | .alg = "ecb(arc4)", | 4429 | .alg = "ecb(arc4)", |
4430 | .generic_driver = "ecb(arc4)-generic", | ||
4128 | .test = alg_test_skcipher, | 4431 | .test = alg_test_skcipher, |
4129 | .suite = { | 4432 | .suite = { |
4130 | .cipher = __VECS(arc4_tv_template) | 4433 | .cipher = __VECS(arc4_tv_template) |
@@ -4790,6 +5093,13 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
4790 | .test = alg_test_null, | 5093 | .test = alg_test_null, |
4791 | .fips_allowed = 1, | 5094 | .fips_allowed = 1, |
4792 | }, { | 5095 | }, { |
5096 | .alg = "xxhash64", | ||
5097 | .test = alg_test_hash, | ||
5098 | .fips_allowed = 1, | ||
5099 | .suite = { | ||
5100 | .hash = __VECS(xxhash64_tv_template) | ||
5101 | } | ||
5102 | }, { | ||
4793 | .alg = "zlib-deflate", | 5103 | .alg = "zlib-deflate", |
4794 | .test = alg_test_comp, | 5104 | .test = alg_test_comp, |
4795 | .fips_allowed = 1, | 5105 | .fips_allowed = 1, |
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 1fdae5993bc3..073bd2efafca 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
@@ -38,7 +38,7 @@ struct hash_testvec { | |||
38 | const char *key; | 38 | const char *key; |
39 | const char *plaintext; | 39 | const char *plaintext; |
40 | const char *digest; | 40 | const char *digest; |
41 | unsigned short psize; | 41 | unsigned int psize; |
42 | unsigned short ksize; | 42 | unsigned short ksize; |
43 | int setkey_error; | 43 | int setkey_error; |
44 | int digest_error; | 44 | int digest_error; |
@@ -69,7 +69,7 @@ struct cipher_testvec { | |||
69 | const char *ctext; | 69 | const char *ctext; |
70 | unsigned char wk; /* weak key flag */ | 70 | unsigned char wk; /* weak key flag */ |
71 | unsigned short klen; | 71 | unsigned short klen; |
72 | unsigned short len; | 72 | unsigned int len; |
73 | bool fips_skip; | 73 | bool fips_skip; |
74 | bool generates_iv; | 74 | bool generates_iv; |
75 | int setkey_error; | 75 | int setkey_error; |
@@ -105,9 +105,9 @@ struct aead_testvec { | |||
105 | unsigned char novrfy; | 105 | unsigned char novrfy; |
106 | unsigned char wk; | 106 | unsigned char wk; |
107 | unsigned char klen; | 107 | unsigned char klen; |
108 | unsigned short plen; | 108 | unsigned int plen; |
109 | unsigned short clen; | 109 | unsigned int clen; |
110 | unsigned short alen; | 110 | unsigned int alen; |
111 | int setkey_error; | 111 | int setkey_error; |
112 | int setauthsize_error; | 112 | int setauthsize_error; |
113 | int crypt_error; | 113 | int crypt_error; |
@@ -33382,6 +33382,112 @@ static const struct hash_testvec crc32c_tv_template[] = { | |||
33382 | } | 33382 | } |
33383 | }; | 33383 | }; |
33384 | 33384 | ||
33385 | static const struct hash_testvec xxhash64_tv_template[] = { | ||
33386 | { | ||
33387 | .psize = 0, | ||
33388 | .digest = "\x99\xe9\xd8\x51\x37\xdb\x46\xef", | ||
33389 | }, | ||
33390 | { | ||
33391 | .plaintext = "\x40", | ||
33392 | .psize = 1, | ||
33393 | .digest = "\x20\x5c\x91\xaa\x88\xeb\x59\xd0", | ||
33394 | }, | ||
33395 | { | ||
33396 | .plaintext = "\x40\x8b\xb8\x41\xe4\x42\x15\x2d" | ||
33397 | "\x88\xc7\x9a\x09\x1a\x9b", | ||
33398 | .psize = 14, | ||
33399 | .digest = "\xa8\xe8\x2b\xa9\x92\xa1\x37\x4a", | ||
33400 | }, | ||
33401 | { | ||
33402 | .plaintext = "\x40\x8b\xb8\x41\xe4\x42\x15\x2d" | ||
33403 | "\x88\xc7\x9a\x09\x1a\x9b\x42\xe0" | ||
33404 | "\xd4\x38\xa5\x2a\x26\xa5\x19\x4b" | ||
33405 | "\x57\x65\x7f\xad\xc3\x7d\xca\x40" | ||
33406 | "\x31\x65\x05\xbb\x31\xae\x51\x11" | ||
33407 | "\xa8\xc0\xb3\x28\x42\xeb\x3c\x46" | ||
33408 | "\xc8\xed\xed\x0f\x8d\x0b\xfa\x6e" | ||
33409 | "\xbc\xe3\x88\x53\xca\x8f\xc8\xd9" | ||
33410 | "\x41\x26\x7a\x3d\x21\xdb\x1a\x3c" | ||
33411 | "\x01\x1d\xc9\xe9\xb7\x3a\x78\x67" | ||
33412 | "\x57\x20\x94\xf1\x1e\xfd\xce\x39" | ||
33413 | "\x99\x57\x69\x39\xa5\xd0\x8d\xd9" | ||
33414 | "\x43\xfe\x1d\x66\x04\x3c\x27\x6a" | ||
33415 | "\xe1\x0d\xe7\xc9\xfa\xc9\x07\x56" | ||
33416 | "\xa5\xb3\xec\xd9\x1f\x42\x65\x66" | ||
33417 | "\xaa\xbf\x87\x9b\xc5\x41\x9c\x27" | ||
33418 | "\x3f\x2f\xa9\x55\x93\x01\x27\x33" | ||
33419 | "\x43\x99\x4d\x81\x85\xae\x82\x00" | ||
33420 | "\x6c\xd0\xd1\xa3\x57\x18\x06\xcc" | ||
33421 | "\xec\x72\xf7\x8e\x87\x2d\x1f\x5e" | ||
33422 | "\xd7\x5b\x1f\x36\x4c\xfa\xfd\x18" | ||
33423 | "\x89\x76\xd3\x5e\xb5\x5a\xc0\x01" | ||
33424 | "\xd2\xa1\x9a\x50\xe6\x08\xb4\x76" | ||
33425 | "\x56\x4f\x0e\xbc\x54\xfc\x67\xe6" | ||
33426 | "\xb9\xc0\x28\x4b\xb5\xc3\xff\x79" | ||
33427 | "\x52\xea\xa1\x90\xc3\xaf\x08\x70" | ||
33428 | "\x12\x02\x0c\xdb\x94\x00\x38\x95" | ||
33429 | "\xed\xfd\x08\xf7\xe8\x04", | ||
33430 | .psize = 222, | ||
33431 | .digest = "\x41\xfc\xd4\x29\xfe\xe7\x85\x17", | ||
33432 | }, | ||
33433 | { | ||
33434 | .psize = 0, | ||
33435 | .key = "\xb1\x79\x37\x9e\x00\x00\x00\x00", | ||
33436 | .ksize = 8, | ||
33437 | .digest = "\xef\x17\x9b\x92\xa2\xfd\x75\xac", | ||
33438 | }, | ||
33439 | |||
33440 | { | ||
33441 | .plaintext = "\x40", | ||
33442 | .psize = 1, | ||
33443 | .key = "\xb1\x79\x37\x9e\x00\x00\x00\x00", | ||
33444 | .ksize = 8, | ||
33445 | .digest = "\xd1\x70\x4f\x14\x02\xc4\x9e\x71", | ||
33446 | }, | ||
33447 | { | ||
33448 | .plaintext = "\x40\x8b\xb8\x41\xe4\x42\x15\x2d" | ||
33449 | "\x88\xc7\x9a\x09\x1a\x9b", | ||
33450 | .psize = 14, | ||
33451 | .key = "\xb1\x79\x37\x9e\x00\x00\x00\x00", | ||
33452 | .ksize = 8, | ||
33453 | .digest = "\xa4\xcd\xfe\x8e\x37\xe2\x1c\x64" | ||
33454 | }, | ||
33455 | { | ||
33456 | .plaintext = "\x40\x8b\xb8\x41\xe4\x42\x15\x2d" | ||
33457 | "\x88\xc7\x9a\x09\x1a\x9b\x42\xe0" | ||
33458 | "\xd4\x38\xa5\x2a\x26\xa5\x19\x4b" | ||
33459 | "\x57\x65\x7f\xad\xc3\x7d\xca\x40" | ||
33460 | "\x31\x65\x05\xbb\x31\xae\x51\x11" | ||
33461 | "\xa8\xc0\xb3\x28\x42\xeb\x3c\x46" | ||
33462 | "\xc8\xed\xed\x0f\x8d\x0b\xfa\x6e" | ||
33463 | "\xbc\xe3\x88\x53\xca\x8f\xc8\xd9" | ||
33464 | "\x41\x26\x7a\x3d\x21\xdb\x1a\x3c" | ||
33465 | "\x01\x1d\xc9\xe9\xb7\x3a\x78\x67" | ||
33466 | "\x57\x20\x94\xf1\x1e\xfd\xce\x39" | ||
33467 | "\x99\x57\x69\x39\xa5\xd0\x8d\xd9" | ||
33468 | "\x43\xfe\x1d\x66\x04\x3c\x27\x6a" | ||
33469 | "\xe1\x0d\xe7\xc9\xfa\xc9\x07\x56" | ||
33470 | "\xa5\xb3\xec\xd9\x1f\x42\x65\x66" | ||
33471 | "\xaa\xbf\x87\x9b\xc5\x41\x9c\x27" | ||
33472 | "\x3f\x2f\xa9\x55\x93\x01\x27\x33" | ||
33473 | "\x43\x99\x4d\x81\x85\xae\x82\x00" | ||
33474 | "\x6c\xd0\xd1\xa3\x57\x18\x06\xcc" | ||
33475 | "\xec\x72\xf7\x8e\x87\x2d\x1f\x5e" | ||
33476 | "\xd7\x5b\x1f\x36\x4c\xfa\xfd\x18" | ||
33477 | "\x89\x76\xd3\x5e\xb5\x5a\xc0\x01" | ||
33478 | "\xd2\xa1\x9a\x50\xe6\x08\xb4\x76" | ||
33479 | "\x56\x4f\x0e\xbc\x54\xfc\x67\xe6" | ||
33480 | "\xb9\xc0\x28\x4b\xb5\xc3\xff\x79" | ||
33481 | "\x52\xea\xa1\x90\xc3\xaf\x08\x70" | ||
33482 | "\x12\x02\x0c\xdb\x94\x00\x38\x95" | ||
33483 | "\xed\xfd\x08\xf7\xe8\x04", | ||
33484 | .psize = 222, | ||
33485 | .key = "\xb1\x79\x37\x9e\x00\x00\x00\x00", | ||
33486 | .ksize = 8, | ||
33487 | .digest = "\x58\xbc\x55\xf2\x42\x81\x5c\xf0" | ||
33488 | }, | ||
33489 | }; | ||
33490 | |||
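
Since xxhash64-generic is a thin wrapper around lib/xxhash, the vectors above can be cross-checked against xxh64() directly. A hedged sketch: the seed 0x9e3779b1 is the key bytes above read little-endian, and the driver's put_unaligned_le64() means the u64 result matches the .digest bytes in little-endian order::

    #include <linux/xxhash.h>

    static void demo_check_empty_seeded_vector(void)
    {
            static const u8 empty[1];
            /* key "\xb1\x79\x37\x9e\x00\x00\x00\x00" == seed 0x9e3779b1 */
            u64 h = xxh64(empty, 0, 0x9e3779b1ULL);

            /* Stored little-endian, h should equal the .digest bytes
             * "\xef\x17\x9b\x92\xa2\xfd\x75\xac" of the vector above. */
            pr_info("xxh64(\"\", seed) = %016llx\n", h);
    }
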
33385 | static const struct comp_testvec lz4_comp_tv_template[] = { | 33491 | static const struct comp_testvec lz4_comp_tv_template[] = { |
33386 | { | 33492 | { |
33387 | .inlen = 255, | 33493 | .inlen = 255, |
diff --git a/crypto/tgr192.c b/crypto/tgr192.c index 702c2c89c7a1..052648e24909 100644 --- a/crypto/tgr192.c +++ b/crypto/tgr192.c | |||
@@ -630,9 +630,10 @@ static struct shash_alg tgr_algs[3] = { { | |||
630 | .final = tgr192_final, | 630 | .final = tgr192_final, |
631 | .descsize = sizeof(struct tgr192_ctx), | 631 | .descsize = sizeof(struct tgr192_ctx), |
632 | .base = { | 632 | .base = { |
633 | .cra_name = "tgr192", | 633 | .cra_name = "tgr192", |
634 | .cra_blocksize = TGR192_BLOCK_SIZE, | 634 | .cra_driver_name = "tgr192-generic", |
635 | .cra_module = THIS_MODULE, | 635 | .cra_blocksize = TGR192_BLOCK_SIZE, |
636 | .cra_module = THIS_MODULE, | ||
636 | } | 637 | } |
637 | }, { | 638 | }, { |
638 | .digestsize = TGR160_DIGEST_SIZE, | 639 | .digestsize = TGR160_DIGEST_SIZE, |
@@ -641,9 +642,10 @@ static struct shash_alg tgr_algs[3] = { { | |||
641 | .final = tgr160_final, | 642 | .final = tgr160_final, |
642 | .descsize = sizeof(struct tgr192_ctx), | 643 | .descsize = sizeof(struct tgr192_ctx), |
643 | .base = { | 644 | .base = { |
644 | .cra_name = "tgr160", | 645 | .cra_name = "tgr160", |
645 | .cra_blocksize = TGR192_BLOCK_SIZE, | 646 | .cra_driver_name = "tgr160-generic", |
646 | .cra_module = THIS_MODULE, | 647 | .cra_blocksize = TGR192_BLOCK_SIZE, |
648 | .cra_module = THIS_MODULE, | ||
647 | } | 649 | } |
648 | }, { | 650 | }, { |
649 | .digestsize = TGR128_DIGEST_SIZE, | 651 | .digestsize = TGR128_DIGEST_SIZE, |
@@ -652,9 +654,10 @@ static struct shash_alg tgr_algs[3] = { { | |||
652 | .final = tgr128_final, | 654 | .final = tgr128_final, |
653 | .descsize = sizeof(struct tgr192_ctx), | 655 | .descsize = sizeof(struct tgr192_ctx), |
654 | .base = { | 656 | .base = { |
655 | .cra_name = "tgr128", | 657 | .cra_name = "tgr128", |
656 | .cra_blocksize = TGR192_BLOCK_SIZE, | 658 | .cra_driver_name = "tgr128-generic", |
657 | .cra_module = THIS_MODULE, | 659 | .cra_blocksize = TGR192_BLOCK_SIZE, |
660 | .cra_module = THIS_MODULE, | ||
658 | } | 661 | } |
659 | } }; | 662 | } }; |
660 | 663 | ||
diff --git a/crypto/wp512.c b/crypto/wp512.c index 1b8e502d999f..feadc13ccae0 100644 --- a/crypto/wp512.c +++ b/crypto/wp512.c | |||
@@ -1126,9 +1126,10 @@ static struct shash_alg wp_algs[3] = { { | |||
1126 | .final = wp512_final, | 1126 | .final = wp512_final, |
1127 | .descsize = sizeof(struct wp512_ctx), | 1127 | .descsize = sizeof(struct wp512_ctx), |
1128 | .base = { | 1128 | .base = { |
1129 | .cra_name = "wp512", | 1129 | .cra_name = "wp512", |
1130 | .cra_blocksize = WP512_BLOCK_SIZE, | 1130 | .cra_driver_name = "wp512-generic", |
1131 | .cra_module = THIS_MODULE, | 1131 | .cra_blocksize = WP512_BLOCK_SIZE, |
1132 | .cra_module = THIS_MODULE, | ||
1132 | } | 1133 | } |
1133 | }, { | 1134 | }, { |
1134 | .digestsize = WP384_DIGEST_SIZE, | 1135 | .digestsize = WP384_DIGEST_SIZE, |
@@ -1137,9 +1138,10 @@ static struct shash_alg wp_algs[3] = { { | |||
1137 | .final = wp384_final, | 1138 | .final = wp384_final, |
1138 | .descsize = sizeof(struct wp512_ctx), | 1139 | .descsize = sizeof(struct wp512_ctx), |
1139 | .base = { | 1140 | .base = { |
1140 | .cra_name = "wp384", | 1141 | .cra_name = "wp384", |
1141 | .cra_blocksize = WP512_BLOCK_SIZE, | 1142 | .cra_driver_name = "wp384-generic", |
1142 | .cra_module = THIS_MODULE, | 1143 | .cra_blocksize = WP512_BLOCK_SIZE, |
1144 | .cra_module = THIS_MODULE, | ||
1143 | } | 1145 | } |
1144 | }, { | 1146 | }, { |
1145 | .digestsize = WP256_DIGEST_SIZE, | 1147 | .digestsize = WP256_DIGEST_SIZE, |
@@ -1148,9 +1150,10 @@ static struct shash_alg wp_algs[3] = { { | |||
1148 | .final = wp256_final, | 1150 | .final = wp256_final, |
1149 | .descsize = sizeof(struct wp512_ctx), | 1151 | .descsize = sizeof(struct wp512_ctx), |
1150 | .base = { | 1152 | .base = { |
1151 | .cra_name = "wp256", | 1153 | .cra_name = "wp256", |
1152 | .cra_blocksize = WP512_BLOCK_SIZE, | 1154 | .cra_driver_name = "wp256-generic", |
1153 | .cra_module = THIS_MODULE, | 1155 | .cra_blocksize = WP512_BLOCK_SIZE, |
1156 | .cra_module = THIS_MODULE, | ||
1154 | } | 1157 | } |
1155 | } }; | 1158 | } }; |
1156 | 1159 | ||
diff --git a/crypto/xxhash_generic.c b/crypto/xxhash_generic.c new file mode 100644 index 000000000000..4aad2c0f40a9 --- /dev/null +++ b/crypto/xxhash_generic.c | |||
@@ -0,0 +1,108 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | |||
3 | #include <crypto/internal/hash.h> | ||
4 | #include <linux/init.h> | ||
5 | #include <linux/module.h> | ||
6 | #include <linux/xxhash.h> | ||
7 | #include <asm/unaligned.h> | ||
8 | |||
9 | #define XXHASH64_BLOCK_SIZE 32 | ||
10 | #define XXHASH64_DIGEST_SIZE 8 | ||
11 | |||
12 | struct xxhash64_tfm_ctx { | ||
13 | u64 seed; | ||
14 | }; | ||
15 | |||
16 | struct xxhash64_desc_ctx { | ||
17 | struct xxh64_state xxhstate; | ||
18 | }; | ||
19 | |||
20 | static int xxhash64_setkey(struct crypto_shash *tfm, const u8 *key, | ||
21 | unsigned int keylen) | ||
22 | { | ||
23 | struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(tfm); | ||
24 | |||
25 | if (keylen != sizeof(tctx->seed)) { | ||
26 | crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
27 | return -EINVAL; | ||
28 | } | ||
29 | tctx->seed = get_unaligned_le64(key); | ||
30 | return 0; | ||
31 | } | ||
32 | |||
33 | static int xxhash64_init(struct shash_desc *desc) | ||
34 | { | ||
35 | struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); | ||
36 | struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc); | ||
37 | |||
38 | xxh64_reset(&dctx->xxhstate, tctx->seed); | ||
39 | |||
40 | return 0; | ||
41 | } | ||
42 | |||
43 | static int xxhash64_update(struct shash_desc *desc, const u8 *data, | ||
44 | unsigned int length) | ||
45 | { | ||
46 | struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc); | ||
47 | |||
48 | xxh64_update(&dctx->xxhstate, data, length); | ||
49 | |||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | static int xxhash64_final(struct shash_desc *desc, u8 *out) | ||
54 | { | ||
55 | struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc); | ||
56 | |||
57 | put_unaligned_le64(xxh64_digest(&dctx->xxhstate), out); | ||
58 | |||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | static int xxhash64_digest(struct shash_desc *desc, const u8 *data, | ||
63 | unsigned int length, u8 *out) | ||
64 | { | ||
65 | struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); | ||
66 | |||
67 | put_unaligned_le64(xxh64(data, length, tctx->seed), out); | ||
68 | |||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static struct shash_alg alg = { | ||
73 | .digestsize = XXHASH64_DIGEST_SIZE, | ||
74 | .setkey = xxhash64_setkey, | ||
75 | .init = xxhash64_init, | ||
76 | .update = xxhash64_update, | ||
77 | .final = xxhash64_final, | ||
78 | .digest = xxhash64_digest, | ||
79 | .descsize = sizeof(struct xxhash64_desc_ctx), | ||
80 | .base = { | ||
81 | .cra_name = "xxhash64", | ||
82 | .cra_driver_name = "xxhash64-generic", | ||
83 | .cra_priority = 100, | ||
84 | .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, | ||
85 | .cra_blocksize = XXHASH64_BLOCK_SIZE, | ||
86 | .cra_ctxsize = sizeof(struct xxhash64_tfm_ctx), | ||
87 | .cra_module = THIS_MODULE, | ||
88 | } | ||
89 | }; | ||
90 | |||
91 | static int __init xxhash_mod_init(void) | ||
92 | { | ||
93 | return crypto_register_shash(&alg); | ||
94 | } | ||
95 | |||
96 | static void __exit xxhash_mod_fini(void) | ||
97 | { | ||
98 | crypto_unregister_shash(&alg); | ||
99 | } | ||
100 | |||
101 | subsys_initcall(xxhash_mod_init); | ||
102 | module_exit(xxhash_mod_fini); | ||
103 | |||
104 | MODULE_AUTHOR("Nikolay Borisov <nborisov@suse.com>"); | ||
105 | MODULE_DESCRIPTION("xxhash calculations wrapper for lib/xxhash.c"); | ||
106 | MODULE_LICENSE("GPL"); | ||
107 | MODULE_ALIAS_CRYPTO("xxhash64"); | ||
108 | MODULE_ALIAS_CRYPTO("xxhash64-generic"); | ||
diff --git a/crypto/zstd.c b/crypto/zstd.c index f1e4c70c9d24..5a3ff258d8f7 100644 --- a/crypto/zstd.c +++ b/crypto/zstd.c | |||
@@ -206,6 +206,7 @@ static int zstd_sdecompress(struct crypto_scomp *tfm, const u8 *src, | |||
206 | 206 | ||
207 | static struct crypto_alg alg = { | 207 | static struct crypto_alg alg = { |
208 | .cra_name = "zstd", | 208 | .cra_name = "zstd", |
209 | .cra_driver_name = "zstd-generic", | ||
209 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, | 210 | .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, |
210 | .cra_ctxsize = sizeof(struct zstd_ctx), | 211 | .cra_ctxsize = sizeof(struct zstd_ctx), |
211 | .cra_module = THIS_MODULE, | 212 | .cra_module = THIS_MODULE, |
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c index 8b5a20b35293..92be1c0ab99f 100644 --- a/drivers/char/hw_random/iproc-rng200.c +++ b/drivers/char/hw_random/iproc-rng200.c | |||
@@ -220,6 +220,7 @@ static int iproc_rng200_probe(struct platform_device *pdev) | |||
220 | } | 220 | } |
221 | 221 | ||
222 | static const struct of_device_id iproc_rng200_of_match[] = { | 222 | static const struct of_device_id iproc_rng200_of_match[] = { |
223 | { .compatible = "brcm,bcm7211-rng200", }, | ||
223 | { .compatible = "brcm,bcm7278-rng200", }, | 224 | { .compatible = "brcm,bcm7278-rng200", }, |
224 | { .compatible = "brcm,iproc-rng200", }, | 225 | { .compatible = "brcm,iproc-rng200", }, |
225 | {}, | 226 | {}, |
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c index 2e23be802a62..76e693da5dde 100644 --- a/drivers/char/hw_random/meson-rng.c +++ b/drivers/char/hw_random/meson-rng.c | |||
@@ -1,58 +1,8 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause | ||
1 | /* | 2 | /* |
2 | * This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | * redistributing this file, you may do so under either license. | ||
4 | * | ||
5 | * GPL LICENSE SUMMARY | ||
6 | * | ||
7 | * Copyright (c) 2016 BayLibre, SAS. | 3 | * Copyright (c) 2016 BayLibre, SAS. |
8 | * Author: Neil Armstrong <narmstrong@baylibre.com> | 4 | * Author: Neil Armstrong <narmstrong@baylibre.com> |
9 | * Copyright (C) 2014 Amlogic, Inc. | 5 | * Copyright (C) 2014 Amlogic, Inc. |
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of version 2 of the GNU General Public License as | ||
13 | * published by the Free Software Foundation. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but | ||
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, see <http://www.gnu.org/licenses/>. | ||
22 | * The full GNU General Public License is included in this distribution | ||
23 | * in the file called COPYING. | ||
24 | * | ||
25 | * BSD LICENSE | ||
26 | * | ||
27 | * Copyright (c) 2016 BayLibre, SAS. | ||
28 | * Author: Neil Armstrong <narmstrong@baylibre.com> | ||
29 | * Copyright (C) 2014 Amlogic, Inc. | ||
30 | * | ||
31 | * Redistribution and use in source and binary forms, with or without | ||
32 | * modification, are permitted provided that the following conditions | ||
33 | * are met: | ||
34 | * | ||
35 | * * Redistributions of source code must retain the above copyright | ||
36 | * notice, this list of conditions and the following disclaimer. | ||
37 | * * Redistributions in binary form must reproduce the above copyright | ||
38 | * notice, this list of conditions and the following disclaimer in | ||
39 | * the documentation and/or other materials provided with the | ||
40 | * distribution. | ||
41 | * * Neither the name of Intel Corporation nor the names of its | ||
42 | * contributors may be used to endorse or promote products derived | ||
43 | * from this software without specific prior written permission. | ||
44 | * | ||
45 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
46 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
47 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
48 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
49 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
50 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
51 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
52 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
53 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
54 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
55 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
56 | */ | 6 | */ |
57 | #include <linux/err.h> | 7 | #include <linux/err.h> |
58 | #include <linux/module.h> | 8 | #include <linux/module.h> |
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 0af08081e305..603413f28fa3 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
@@ -520,10 +520,13 @@ config CRYPTO_DEV_ATMEL_SHA | |||
520 | To compile this driver as a module, choose M here: the module | 520 | To compile this driver as a module, choose M here: the module |
521 | will be called atmel-sha. | 521 | will be called atmel-sha. |
522 | 522 | ||
523 | config CRYPTO_DEV_ATMEL_I2C | ||
524 | tristate | ||
525 | |||
523 | config CRYPTO_DEV_ATMEL_ECC | 526 | config CRYPTO_DEV_ATMEL_ECC |
524 | tristate "Support for Microchip / Atmel ECC hw accelerator" | 527 | tristate "Support for Microchip / Atmel ECC hw accelerator" |
525 | depends on ARCH_AT91 || COMPILE_TEST | ||
526 | depends on I2C | 528 | depends on I2C |
529 | select CRYPTO_DEV_ATMEL_I2C | ||
527 | select CRYPTO_ECDH | 530 | select CRYPTO_ECDH |
528 | select CRC16 | 531 | select CRC16 |
529 | help | 532 | help |
@@ -534,6 +537,21 @@ config CRYPTO_DEV_ATMEL_ECC | |||
534 | To compile this driver as a module, choose M here: the module | 537 | To compile this driver as a module, choose M here: the module |
535 | will be called atmel-ecc. | 538 | will be called atmel-ecc. |
536 | 539 | ||
540 | config CRYPTO_DEV_ATMEL_SHA204A | ||
541 | tristate "Support for Microchip / Atmel SHA accelerator and RNG" | ||
542 | depends on I2C | ||
543 | select CRYPTO_DEV_ATMEL_I2C | ||
544 | select HW_RANDOM | ||
545 | select CRC16 | ||
546 | help | ||
547 | Microchip / Atmel SHA accelerator and RNG. | ||
548 | Select this if you want to use the Microchip / Atmel SHA204A | ||
549 | module as a random number generator. (Other functions of the | ||
550 | chip are currently not exposed by this driver) | ||
551 | |||
552 | To compile this driver as a module, choose M here: the module | ||
553 | will be called atmel-sha204a. | ||
554 | |||
537 | config CRYPTO_DEV_CCP | 555 | config CRYPTO_DEV_CCP |
538 | bool "Support for AMD Secure Processor" | 556 | bool "Support for AMD Secure Processor" |
539 | depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM | 557 | depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM |
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index a23a7197fcd7..afc4753b5d28 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile | |||
@@ -2,7 +2,9 @@ | |||
2 | obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o | 2 | obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o |
3 | obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o | 3 | obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o |
4 | obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o | 4 | obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o |
5 | obj-$(CONFIG_CRYPTO_DEV_ATMEL_I2C) += atmel-i2c.o | ||
5 | obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o | 6 | obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o |
7 | obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA204A) += atmel-sha204a.o | ||
6 | obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/ | 8 | obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/ |
7 | obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/ | 9 | obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/ |
8 | obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/ | 10 | obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/ |
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index 49f3e0ce242c..cbfc607282f4 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c | |||
@@ -67,12 +67,16 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, | |||
67 | } | 67 | } |
68 | 68 | ||
69 | static inline int crypto4xx_crypt(struct skcipher_request *req, | 69 | static inline int crypto4xx_crypt(struct skcipher_request *req, |
70 | const unsigned int ivlen, bool decrypt) | 70 | const unsigned int ivlen, bool decrypt, |
71 | bool check_blocksize) | ||
71 | { | 72 | { |
72 | struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); | 73 | struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); |
73 | struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher); | 74 | struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher); |
74 | __le32 iv[AES_IV_SIZE]; | 75 | __le32 iv[AES_IV_SIZE]; |
75 | 76 | ||
77 | if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) | ||
78 | return -EINVAL; | ||
79 | |||
76 | if (ivlen) | 80 | if (ivlen) |
77 | crypto4xx_memcpy_to_le32(iv, req->iv, ivlen); | 81 | crypto4xx_memcpy_to_le32(iv, req->iv, ivlen); |
78 | 82 | ||
@@ -81,24 +85,34 @@ static inline int crypto4xx_crypt(struct skcipher_request *req, | |||
81 | ctx->sa_len, 0, NULL); | 85 | ctx->sa_len, 0, NULL); |
82 | } | 86 | } |
83 | 87 | ||
84 | int crypto4xx_encrypt_noiv(struct skcipher_request *req) | 88 | int crypto4xx_encrypt_noiv_block(struct skcipher_request *req) |
89 | { | ||
90 | return crypto4xx_crypt(req, 0, false, true); | ||
91 | } | ||
92 | |||
93 | int crypto4xx_encrypt_iv_stream(struct skcipher_request *req) | ||
94 | { | ||
95 | return crypto4xx_crypt(req, AES_IV_SIZE, false, false); | ||
96 | } | ||
97 | |||
98 | int crypto4xx_decrypt_noiv_block(struct skcipher_request *req) | ||
85 | { | 99 | { |
86 | return crypto4xx_crypt(req, 0, false); | 100 | return crypto4xx_crypt(req, 0, true, true); |
87 | } | 101 | } |
88 | 102 | ||
89 | int crypto4xx_encrypt_iv(struct skcipher_request *req) | 103 | int crypto4xx_decrypt_iv_stream(struct skcipher_request *req) |
90 | { | 104 | { |
91 | return crypto4xx_crypt(req, AES_IV_SIZE, false); | 105 | return crypto4xx_crypt(req, AES_IV_SIZE, true, false); |
92 | } | 106 | } |
93 | 107 | ||
94 | int crypto4xx_decrypt_noiv(struct skcipher_request *req) | 108 | int crypto4xx_encrypt_iv_block(struct skcipher_request *req) |
95 | { | 109 | { |
96 | return crypto4xx_crypt(req, 0, true); | 110 | return crypto4xx_crypt(req, AES_IV_SIZE, false, true); |
97 | } | 111 | } |
98 | 112 | ||
99 | int crypto4xx_decrypt_iv(struct skcipher_request *req) | 113 | int crypto4xx_decrypt_iv_block(struct skcipher_request *req) |
100 | { | 114 | { |
101 | return crypto4xx_crypt(req, AES_IV_SIZE, true); | 115 | return crypto4xx_crypt(req, AES_IV_SIZE, true, true); |
102 | } | 116 | } |
103 | 117 | ||
104 | /** | 118 | /** |
@@ -269,8 +283,8 @@ crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt) | |||
269 | return ret; | 283 | return ret; |
270 | } | 284 | } |
271 | 285 | ||
272 | return encrypt ? crypto4xx_encrypt_iv(req) | 286 | return encrypt ? crypto4xx_encrypt_iv_stream(req) |
273 | : crypto4xx_decrypt_iv(req); | 287 | : crypto4xx_decrypt_iv_stream(req); |
274 | } | 288 | } |
275 | 289 | ||
276 | static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx, | 290 | static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx, |
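
The *_stream/*_block split above pairs with the cra_blocksize changes in crypto4xx_core.c below: CFB, OFB and CTR are stream modes, so they advertise a block size of 1 and skip the IS_ALIGNED() check, while ECB and CBC keep rejecting partial blocks with -EINVAL. A hedged caller-side sketch of what this permits, encrypting a 13-byte buffer in place with a stream mode (buffer and length are illustrative)::

    static int demo_stream_partial_block(struct crypto_skcipher *tfm,
                                         u8 iv[AES_BLOCK_SIZE], u8 *buf)
    {
            DECLARE_CRYPTO_WAIT(wait);
            struct skcipher_request *req;
            struct scatterlist sg;
            int err;

            req = skcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req)
                    return -ENOMEM;

            /* 13 bytes: fine for cfb/ofb/ctr, -EINVAL for ecb/cbc now */
            sg_init_one(&sg, buf, 13);
            skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                          crypto_req_done, &wait);
            skcipher_request_set_crypt(req, &sg, &sg, 13, iv);

            err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

            skcipher_request_free(req);
            return err;
    }
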
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 16d911aaa508..de5e9352e920 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c | |||
@@ -182,7 +182,6 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev) | |||
182 | dev->pdr_pa); | 182 | dev->pdr_pa); |
183 | return -ENOMEM; | 183 | return -ENOMEM; |
184 | } | 184 | } |
185 | memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD); | ||
186 | dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device, | 185 | dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device, |
187 | sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD, | 186 | sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD, |
188 | &dev->shadow_sa_pool_pa, | 187 | &dev->shadow_sa_pool_pa, |
@@ -1210,8 +1209,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { | |||
1210 | .max_keysize = AES_MAX_KEY_SIZE, | 1209 | .max_keysize = AES_MAX_KEY_SIZE, |
1211 | .ivsize = AES_IV_SIZE, | 1210 | .ivsize = AES_IV_SIZE, |
1212 | .setkey = crypto4xx_setkey_aes_cbc, | 1211 | .setkey = crypto4xx_setkey_aes_cbc, |
1213 | .encrypt = crypto4xx_encrypt_iv, | 1212 | .encrypt = crypto4xx_encrypt_iv_block, |
1214 | .decrypt = crypto4xx_decrypt_iv, | 1213 | .decrypt = crypto4xx_decrypt_iv_block, |
1215 | .init = crypto4xx_sk_init, | 1214 | .init = crypto4xx_sk_init, |
1216 | .exit = crypto4xx_sk_exit, | 1215 | .exit = crypto4xx_sk_exit, |
1217 | } }, | 1216 | } }, |
@@ -1222,7 +1221,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { | |||
1222 | .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, | 1221 | .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, |
1223 | .cra_flags = CRYPTO_ALG_ASYNC | | 1222 | .cra_flags = CRYPTO_ALG_ASYNC | |
1224 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1223 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1225 | .cra_blocksize = AES_BLOCK_SIZE, | 1224 | .cra_blocksize = 1, |
1226 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), | 1225 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), |
1227 | .cra_module = THIS_MODULE, | 1226 | .cra_module = THIS_MODULE, |
1228 | }, | 1227 | }, |
@@ -1230,8 +1229,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { | |||
1230 | .max_keysize = AES_MAX_KEY_SIZE, | 1229 | .max_keysize = AES_MAX_KEY_SIZE, |
1231 | .ivsize = AES_IV_SIZE, | 1230 | .ivsize = AES_IV_SIZE, |
1232 | .setkey = crypto4xx_setkey_aes_cfb, | 1231 | .setkey = crypto4xx_setkey_aes_cfb, |
1233 | .encrypt = crypto4xx_encrypt_iv, | 1232 | .encrypt = crypto4xx_encrypt_iv_stream, |
1234 | .decrypt = crypto4xx_decrypt_iv, | 1233 | .decrypt = crypto4xx_decrypt_iv_stream, |
1235 | .init = crypto4xx_sk_init, | 1234 | .init = crypto4xx_sk_init, |
1236 | .exit = crypto4xx_sk_exit, | 1235 | .exit = crypto4xx_sk_exit, |
1237 | } }, | 1236 | } }, |
@@ -1243,7 +1242,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { | |||
1243 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK | | 1242 | .cra_flags = CRYPTO_ALG_NEED_FALLBACK | |
1244 | CRYPTO_ALG_ASYNC | | 1243 | CRYPTO_ALG_ASYNC | |
1245 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1244 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1246 | .cra_blocksize = AES_BLOCK_SIZE, | 1245 | .cra_blocksize = 1, |
1247 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), | 1246 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), |
1248 | .cra_module = THIS_MODULE, | 1247 | .cra_module = THIS_MODULE, |
1249 | }, | 1248 | }, |
@@ -1263,7 +1262,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { | |||
1263 | .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, | 1262 | .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, |
1264 | .cra_flags = CRYPTO_ALG_ASYNC | | 1263 | .cra_flags = CRYPTO_ALG_ASYNC | |
1265 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1264 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1266 | .cra_blocksize = AES_BLOCK_SIZE, | 1265 | .cra_blocksize = 1, |
1267 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), | 1266 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), |
1268 | .cra_module = THIS_MODULE, | 1267 | .cra_module = THIS_MODULE, |
1269 | }, | 1268 | }, |
@@ -1290,8 +1289,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { | |||
1290 | .min_keysize = AES_MIN_KEY_SIZE, | 1289 | .min_keysize = AES_MIN_KEY_SIZE, |
1291 | .max_keysize = AES_MAX_KEY_SIZE, | 1290 | .max_keysize = AES_MAX_KEY_SIZE, |
1292 | .setkey = crypto4xx_setkey_aes_ecb, | 1291 | .setkey = crypto4xx_setkey_aes_ecb, |
1293 | .encrypt = crypto4xx_encrypt_noiv, | 1292 | .encrypt = crypto4xx_encrypt_noiv_block, |
1294 | .decrypt = crypto4xx_decrypt_noiv, | 1293 | .decrypt = crypto4xx_decrypt_noiv_block, |
1295 | .init = crypto4xx_sk_init, | 1294 | .init = crypto4xx_sk_init, |
1296 | .exit = crypto4xx_sk_exit, | 1295 | .exit = crypto4xx_sk_exit, |
1297 | } }, | 1296 | } }, |
@@ -1302,7 +1301,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { | |||
1302 | .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, | 1301 | .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, |
1303 | .cra_flags = CRYPTO_ALG_ASYNC | | 1302 | .cra_flags = CRYPTO_ALG_ASYNC | |
1304 | CRYPTO_ALG_KERN_DRIVER_ONLY, | 1303 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
1305 | .cra_blocksize = AES_BLOCK_SIZE, | 1304 | .cra_blocksize = 1, |
1306 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), | 1305 | .cra_ctxsize = sizeof(struct crypto4xx_ctx), |
1307 | .cra_module = THIS_MODULE, | 1306 | .cra_module = THIS_MODULE, |
1308 | }, | 1307 | }, |
@@ -1310,8 +1309,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { | |||
1310 | .max_keysize = AES_MAX_KEY_SIZE, | 1309 | .max_keysize = AES_MAX_KEY_SIZE, |
1311 | .ivsize = AES_IV_SIZE, | 1310 | .ivsize = AES_IV_SIZE, |
1312 | .setkey = crypto4xx_setkey_aes_ofb, | 1311 | .setkey = crypto4xx_setkey_aes_ofb, |
1313 | .encrypt = crypto4xx_encrypt_iv, | 1312 | .encrypt = crypto4xx_encrypt_iv_stream, |
1314 | .decrypt = crypto4xx_decrypt_iv, | 1313 | .decrypt = crypto4xx_decrypt_iv_stream, |
1315 | .init = crypto4xx_sk_init, | 1314 | .init = crypto4xx_sk_init, |
1316 | .exit = crypto4xx_sk_exit, | 1315 | .exit = crypto4xx_sk_exit, |
1317 | } }, | 1316 | } }, |
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index ca1c25c40c23..6b6841359190 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h | |||
@@ -173,10 +173,12 @@ int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher, | |||
173 | const u8 *key, unsigned int keylen); | 173 | const u8 *key, unsigned int keylen); |
174 | int crypto4xx_encrypt_ctr(struct skcipher_request *req); | 174 | int crypto4xx_encrypt_ctr(struct skcipher_request *req); |
175 | int crypto4xx_decrypt_ctr(struct skcipher_request *req); | 175 | int crypto4xx_decrypt_ctr(struct skcipher_request *req); |
176 | int crypto4xx_encrypt_iv(struct skcipher_request *req); | 176 | int crypto4xx_encrypt_iv_stream(struct skcipher_request *req); |
177 | int crypto4xx_decrypt_iv(struct skcipher_request *req); | 177 | int crypto4xx_decrypt_iv_stream(struct skcipher_request *req); |
178 | int crypto4xx_encrypt_noiv(struct skcipher_request *req); | 178 | int crypto4xx_encrypt_iv_block(struct skcipher_request *req); |
179 | int crypto4xx_decrypt_noiv(struct skcipher_request *req); | 179 | int crypto4xx_decrypt_iv_block(struct skcipher_request *req); |
180 | int crypto4xx_encrypt_noiv_block(struct skcipher_request *req); | ||
181 | int crypto4xx_decrypt_noiv_block(struct skcipher_request *req); | ||
180 | int crypto4xx_rfc3686_encrypt(struct skcipher_request *req); | 182 | int crypto4xx_rfc3686_encrypt(struct skcipher_request *req); |
181 | int crypto4xx_rfc3686_decrypt(struct skcipher_request *req); | 183 | int crypto4xx_rfc3686_decrypt(struct skcipher_request *req); |
182 | int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm); | 184 | int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm); |
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c index ba00e4563ca0..ff02cc05affb 100644 --- a/drivers/crypto/atmel-ecc.c +++ b/drivers/crypto/atmel-ecc.c | |||
@@ -6,8 +6,6 @@ | |||
6 | * Author: Tudor Ambarus <tudor.ambarus@microchip.com> | 6 | * Author: Tudor Ambarus <tudor.ambarus@microchip.com> |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/bitrev.h> | ||
10 | #include <linux/crc16.h> | ||
11 | #include <linux/delay.h> | 9 | #include <linux/delay.h> |
12 | #include <linux/device.h> | 10 | #include <linux/device.h> |
13 | #include <linux/err.h> | 11 | #include <linux/err.h> |
@@ -23,42 +21,11 @@ | |||
23 | #include <crypto/internal/kpp.h> | 21 | #include <crypto/internal/kpp.h> |
24 | #include <crypto/ecdh.h> | 22 | #include <crypto/ecdh.h> |
25 | #include <crypto/kpp.h> | 23 | #include <crypto/kpp.h> |
26 | #include "atmel-ecc.h" | 24 | #include "atmel-i2c.h" |
27 | |||
28 | /* Used for binding tfm objects to i2c clients. */ | ||
29 | struct atmel_ecc_driver_data { | ||
30 | struct list_head i2c_client_list; | ||
31 | spinlock_t i2c_list_lock; | ||
32 | } ____cacheline_aligned; | ||
33 | 25 | ||
34 | static struct atmel_ecc_driver_data driver_data; | 26 | static struct atmel_ecc_driver_data driver_data; |
35 | 27 | ||
36 | /** | 28 | /** |
37 | * atmel_ecc_i2c_client_priv - i2c_client private data | ||
38 | * @client : pointer to i2c client device | ||
39 | * @i2c_client_list_node: part of i2c_client_list | ||
40 | * @lock : lock for sending i2c commands | ||
41 | * @wake_token : wake token array of zeros | ||
42 | * @wake_token_sz : size in bytes of the wake_token | ||
43 | * @tfm_count : number of active crypto transformations on i2c client | ||
44 | * | ||
45 | * Reads and writes from/to the i2c client are sequential. The first byte | ||
46 | * transmitted to the device is treated as the byte size. Any attempt to send | ||
47 | * more than this number of bytes will cause the device to not ACK those bytes. | ||
48 | * After the host writes a single command byte to the input buffer, reads are | ||
49 | * prohibited until after the device completes command execution. Use a mutex | ||
50 | * when sending i2c commands. | ||
51 | */ | ||
52 | struct atmel_ecc_i2c_client_priv { | ||
53 | struct i2c_client *client; | ||
54 | struct list_head i2c_client_list_node; | ||
55 | struct mutex lock; | ||
56 | u8 wake_token[WAKE_TOKEN_MAX_SIZE]; | ||
57 | size_t wake_token_sz; | ||
58 | atomic_t tfm_count ____cacheline_aligned; | ||
59 | }; | ||
60 | |||
61 | /** | ||
62 | * atmel_ecdh_ctx - transformation context | 29 | * atmel_ecdh_ctx - transformation context |
63 | * @client : pointer to i2c client device | 30 | * @client : pointer to i2c client device |
64 | * @fallback : used for unsupported curves or when user wants to use its own | 31 | * @fallback : used for unsupported curves or when user wants to use its own |
@@ -80,188 +47,12 @@ struct atmel_ecdh_ctx { | |||
80 | bool do_fallback; | 47 | bool do_fallback; |
81 | }; | 48 | }; |
82 | 49 | ||
83 | /** | 50 | static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq, |
84 | * atmel_ecc_work_data - data structure representing the work | ||
85 | * @ctx : transformation context. | ||
86 | * @cbk : pointer to a callback function to be invoked upon completion of this | ||
87 | * request. This has the form: | ||
88 | * callback(struct atmel_ecc_work_data *work_data, void *areq, u8 status) | ||
89 | * where: | ||
90 | * @work_data: data structure representing the work | ||
91 | * @areq : optional pointer to an argument passed with the original | ||
92 | * request. | ||
93 | * @status : status returned from the i2c client device or i2c error. | ||
94 | * @areq: optional pointer to a user argument for use at callback time. | ||
95 | * @work: describes the task to be executed. | ||
96 | * @cmd : structure used for communicating with the device. | ||
97 | */ | ||
98 | struct atmel_ecc_work_data { | ||
99 | struct atmel_ecdh_ctx *ctx; | ||
100 | void (*cbk)(struct atmel_ecc_work_data *work_data, void *areq, | ||
101 | int status); | ||
102 | void *areq; | ||
103 | struct work_struct work; | ||
104 | struct atmel_ecc_cmd cmd; | ||
105 | }; | ||
106 | |||
107 | static u16 atmel_ecc_crc16(u16 crc, const u8 *buffer, size_t len) | ||
108 | { | ||
109 | return cpu_to_le16(bitrev16(crc16(crc, buffer, len))); | ||
110 | } | ||
111 | |||
112 | /** | ||
113 | * atmel_ecc_checksum() - Generate 16-bit CRC as required by ATMEL ECC. | ||
114 | * CRC16 verification of the count, opcode, param1, param2 and data bytes. | ||
115 | * The checksum is saved in little-endian format in the least significant | ||
116 | * two bytes of the command. CRC polynomial is 0x8005 and the initial register | ||
117 | * value should be zero. | ||
118 | * | ||
119 | * @cmd : structure used for communicating with the device. | ||
120 | */ | ||
121 | static void atmel_ecc_checksum(struct atmel_ecc_cmd *cmd) | ||
122 | { | ||
123 | u8 *data = &cmd->count; | ||
124 | size_t len = cmd->count - CRC_SIZE; | ||
125 | u16 *crc16 = (u16 *)(data + len); | ||
126 | |||
127 | *crc16 = atmel_ecc_crc16(0, data, len); | ||
128 | } | ||
129 | |||
130 | static void atmel_ecc_init_read_cmd(struct atmel_ecc_cmd *cmd) | ||
131 | { | ||
132 | cmd->word_addr = COMMAND; | ||
133 | cmd->opcode = OPCODE_READ; | ||
134 | /* | ||
135 | * Read the word from Configuration zone that contains the lock bytes | ||
136 | * (UserExtra, Selector, LockValue, LockConfig). | ||
137 | */ | ||
138 | cmd->param1 = CONFIG_ZONE; | ||
139 | cmd->param2 = DEVICE_LOCK_ADDR; | ||
140 | cmd->count = READ_COUNT; | ||
141 | |||
142 | atmel_ecc_checksum(cmd); | ||
143 | |||
144 | cmd->msecs = MAX_EXEC_TIME_READ; | ||
145 | cmd->rxsize = READ_RSP_SIZE; | ||
146 | } | ||
147 | |||
148 | static void atmel_ecc_init_genkey_cmd(struct atmel_ecc_cmd *cmd, u16 keyid) | ||
149 | { | ||
150 | cmd->word_addr = COMMAND; | ||
151 | cmd->count = GENKEY_COUNT; | ||
152 | cmd->opcode = OPCODE_GENKEY; | ||
153 | cmd->param1 = GENKEY_MODE_PRIVATE; | ||
154 | /* a random private key will be generated and stored in slot keyID */ | ||
155 | cmd->param2 = cpu_to_le16(keyid); | ||
156 | |||
157 | atmel_ecc_checksum(cmd); | ||
158 | |||
159 | cmd->msecs = MAX_EXEC_TIME_GENKEY; | ||
160 | cmd->rxsize = GENKEY_RSP_SIZE; | ||
161 | } | ||
162 | |||
163 | static int atmel_ecc_init_ecdh_cmd(struct atmel_ecc_cmd *cmd, | ||
164 | struct scatterlist *pubkey) | ||
165 | { | ||
166 | size_t copied; | ||
167 | |||
168 | cmd->word_addr = COMMAND; | ||
169 | cmd->count = ECDH_COUNT; | ||
170 | cmd->opcode = OPCODE_ECDH; | ||
171 | cmd->param1 = ECDH_PREFIX_MODE; | ||
172 | /* private key slot */ | ||
173 | cmd->param2 = cpu_to_le16(DATA_SLOT_2); | ||
174 | |||
175 | /* | ||
176 | * The device only supports NIST P256 ECC keys. The public key size will | ||
177 | * always be the same. Use a macro for the key size to avoid unnecessary | ||
178 | * computations. | ||
179 | */ | ||
180 | copied = sg_copy_to_buffer(pubkey, | ||
181 | sg_nents_for_len(pubkey, | ||
182 | ATMEL_ECC_PUBKEY_SIZE), | ||
183 | cmd->data, ATMEL_ECC_PUBKEY_SIZE); | ||
184 | if (copied != ATMEL_ECC_PUBKEY_SIZE) | ||
185 | return -EINVAL; | ||
186 | |||
187 | atmel_ecc_checksum(cmd); | ||
188 | |||
189 | cmd->msecs = MAX_EXEC_TIME_ECDH; | ||
190 | cmd->rxsize = ECDH_RSP_SIZE; | ||
191 | |||
192 | return 0; | ||
193 | } | ||
194 | |||
195 | /* | ||
196 | * After wake and after execution of a command, there will be error, status, or | ||
197 | * result bytes in the device's output register that can be retrieved by the | ||
198 | * system. When the length of that group is four bytes, the codes returned are | ||
199 | * detailed in error_list. | ||
200 | */ | ||
201 | static int atmel_ecc_status(struct device *dev, u8 *status) | ||
202 | { | ||
203 | size_t err_list_len = ARRAY_SIZE(error_list); | ||
204 | int i; | ||
205 | u8 err_id = status[1]; | ||
206 | |||
207 | if (*status != STATUS_SIZE) | ||
208 | return 0; | ||
209 | |||
210 | if (err_id == STATUS_WAKE_SUCCESSFUL || err_id == STATUS_NOERR) | ||
211 | return 0; | ||
212 | |||
213 | for (i = 0; i < err_list_len; i++) | ||
214 | if (error_list[i].value == err_id) | ||
215 | break; | ||
216 | |||
217 | /* if err_id is not in the error_list then ignore it */ | ||
218 | if (i != err_list_len) { | ||
219 | dev_err(dev, "%02x: %s:\n", err_id, error_list[i].error_text); | ||
220 | return err_id; | ||
221 | } | ||
222 | |||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | static int atmel_ecc_wakeup(struct i2c_client *client) | ||
227 | { | ||
228 | struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); | ||
229 | u8 status[STATUS_RSP_SIZE]; | ||
230 | int ret; | ||
231 | |||
232 | /* | ||
233 | * The device ignores any levels or transitions on the SCL pin when the | ||
234 | * device is idle, asleep or during waking up. Don't check for error | ||
235 | * when waking up the device. | ||
236 | */ | ||
237 | i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz); | ||
238 | |||
239 | /* | ||
240 | * Wait to wake the device. Typical execution times for ecdh and genkey | ||
241 | * are around tens of milliseconds. Delta is chosen to 50 microseconds. | ||
242 | */ | ||
243 | usleep_range(TWHI_MIN, TWHI_MAX); | ||
244 | |||
245 | ret = i2c_master_recv(client, status, STATUS_SIZE); | ||
246 | if (ret < 0) | ||
247 | return ret; | ||
248 | |||
249 | return atmel_ecc_status(&client->dev, status); | ||
250 | } | ||
251 | |||
252 | static int atmel_ecc_sleep(struct i2c_client *client) | ||
253 | { | ||
254 | u8 sleep = SLEEP_TOKEN; | ||
255 | |||
256 | return i2c_master_send(client, &sleep, 1); | ||
257 | } | ||
258 | |||
259 | static void atmel_ecdh_done(struct atmel_ecc_work_data *work_data, void *areq, | ||
260 | int status) | 51 | int status) |
261 | { | 52 | { |
262 | struct kpp_request *req = areq; | 53 | struct kpp_request *req = areq; |
263 | struct atmel_ecdh_ctx *ctx = work_data->ctx; | 54 | struct atmel_ecdh_ctx *ctx = work_data->ctx; |
264 | struct atmel_ecc_cmd *cmd = &work_data->cmd; | 55 | struct atmel_i2c_cmd *cmd = &work_data->cmd; |
265 | size_t copied, n_sz; | 56 | size_t copied, n_sz; |
266 | 57 | ||
267 | if (status) | 58 | if (status) |
@@ -282,82 +73,6 @@ free_work_data: | |||
282 | kpp_request_complete(req, status); | 73 | kpp_request_complete(req, status); |
283 | } | 74 | } |
284 | 75 | ||
285 | /* | ||
286 | * atmel_ecc_send_receive() - send a command to the device and receive its | ||
287 | * response. | ||
288 | * @client: i2c client device | ||
289 | * @cmd : structure used to communicate with the device | ||
290 | * | ||
291 | * After the device receives a Wake token, a watchdog counter starts within the | ||
292 | * device. After the watchdog timer expires, the device enters sleep mode | ||
293 | * regardless of whether some I/O transmission or command execution is in | ||
294 | * progress. If a command is attempted when insufficient time remains prior to | ||
295 | * watchdog timer execution, the device will return the watchdog timeout error | ||
296 | * code without attempting to execute the command. There is no way to reset the | ||
297 | * counter other than to put the device into sleep or idle mode and then | ||
298 | * wake it up again. | ||
299 | */ | ||
300 | static int atmel_ecc_send_receive(struct i2c_client *client, | ||
301 | struct atmel_ecc_cmd *cmd) | ||
302 | { | ||
303 | struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); | ||
304 | int ret; | ||
305 | |||
306 | mutex_lock(&i2c_priv->lock); | ||
307 | |||
308 | ret = atmel_ecc_wakeup(client); | ||
309 | if (ret) | ||
310 | goto err; | ||
311 | |||
312 | /* send the command */ | ||
313 | ret = i2c_master_send(client, (u8 *)cmd, cmd->count + WORD_ADDR_SIZE); | ||
314 | if (ret < 0) | ||
315 | goto err; | ||
316 | |||
317 | /* delay the appropriate amount of time for command to execute */ | ||
318 | msleep(cmd->msecs); | ||
319 | |||
320 | /* receive the response */ | ||
321 | ret = i2c_master_recv(client, cmd->data, cmd->rxsize); | ||
322 | if (ret < 0) | ||
323 | goto err; | ||
324 | |||
325 | /* put the device into low-power mode */ | ||
326 | ret = atmel_ecc_sleep(client); | ||
327 | if (ret < 0) | ||
328 | goto err; | ||
329 | |||
330 | mutex_unlock(&i2c_priv->lock); | ||
331 | return atmel_ecc_status(&client->dev, cmd->data); | ||
332 | err: | ||
333 | mutex_unlock(&i2c_priv->lock); | ||
334 | return ret; | ||
335 | } | ||
336 | |||
337 | static void atmel_ecc_work_handler(struct work_struct *work) | ||
338 | { | ||
339 | struct atmel_ecc_work_data *work_data = | ||
340 | container_of(work, struct atmel_ecc_work_data, work); | ||
341 | struct atmel_ecc_cmd *cmd = &work_data->cmd; | ||
342 | struct i2c_client *client = work_data->ctx->client; | ||
343 | int status; | ||
344 | |||
345 | status = atmel_ecc_send_receive(client, cmd); | ||
346 | work_data->cbk(work_data, work_data->areq, status); | ||
347 | } | ||
348 | |||
349 | static void atmel_ecc_enqueue(struct atmel_ecc_work_data *work_data, | ||
350 | void (*cbk)(struct atmel_ecc_work_data *work_data, | ||
351 | void *areq, int status), | ||
352 | void *areq) | ||
353 | { | ||
354 | work_data->cbk = (void *)cbk; | ||
355 | work_data->areq = areq; | ||
356 | |||
357 | INIT_WORK(&work_data->work, atmel_ecc_work_handler); | ||
358 | schedule_work(&work_data->work); | ||
359 | } | ||
360 | |||
361 | static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id) | 76 | static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id) |
362 | { | 77 | { |
363 | if (curve_id == ECC_CURVE_NIST_P256) | 78 | if (curve_id == ECC_CURVE_NIST_P256) |
@@ -374,7 +89,7 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, | |||
374 | unsigned int len) | 89 | unsigned int len) |
375 | { | 90 | { |
376 | struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); | 91 | struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); |
377 | struct atmel_ecc_cmd *cmd; | 92 | struct atmel_i2c_cmd *cmd; |
378 | void *public_key; | 93 | void *public_key; |
379 | struct ecdh params; | 94 | struct ecdh params; |
380 | int ret = -ENOMEM; | 95 | int ret = -ENOMEM; |
@@ -412,9 +127,9 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, | |||
412 | ctx->do_fallback = false; | 127 | ctx->do_fallback = false; |
413 | ctx->curve_id = params.curve_id; | 128 | ctx->curve_id = params.curve_id; |
414 | 129 | ||
415 | atmel_ecc_init_genkey_cmd(cmd, DATA_SLOT_2); | 130 | atmel_i2c_init_genkey_cmd(cmd, DATA_SLOT_2); |
416 | 131 | ||
417 | ret = atmel_ecc_send_receive(ctx->client, cmd); | 132 | ret = atmel_i2c_send_receive(ctx->client, cmd); |
418 | if (ret) | 133 | if (ret) |
419 | goto free_public_key; | 134 | goto free_public_key; |
420 | 135 | ||
@@ -444,6 +159,9 @@ static int atmel_ecdh_generate_public_key(struct kpp_request *req) | |||
444 | return crypto_kpp_generate_public_key(req); | 159 | return crypto_kpp_generate_public_key(req); |
445 | } | 160 | } |
446 | 161 | ||
162 | if (!ctx->public_key) | ||
163 | return -EINVAL; | ||
164 | |||
447 | /* might want less than we've got */ | 165 | /* might want less than we've got */ |
448 | nbytes = min_t(size_t, ATMEL_ECC_PUBKEY_SIZE, req->dst_len); | 166 | nbytes = min_t(size_t, ATMEL_ECC_PUBKEY_SIZE, req->dst_len); |
449 | 167 | ||
@@ -461,7 +179,7 @@ static int atmel_ecdh_compute_shared_secret(struct kpp_request *req) | |||
461 | { | 179 | { |
462 | struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); | 180 | struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); |
463 | struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); | 181 | struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); |
464 | struct atmel_ecc_work_data *work_data; | 182 | struct atmel_i2c_work_data *work_data; |
465 | gfp_t gfp; | 183 | gfp_t gfp; |
466 | int ret; | 184 | int ret; |
467 | 185 | ||
@@ -482,12 +200,13 @@ static int atmel_ecdh_compute_shared_secret(struct kpp_request *req) | |||
482 | return -ENOMEM; | 200 | return -ENOMEM; |
483 | 201 | ||
484 | work_data->ctx = ctx; | 202 | work_data->ctx = ctx; |
203 | work_data->client = ctx->client; | ||
485 | 204 | ||
486 | ret = atmel_ecc_init_ecdh_cmd(&work_data->cmd, req->src); | 205 | ret = atmel_i2c_init_ecdh_cmd(&work_data->cmd, req->src); |
487 | if (ret) | 206 | if (ret) |
488 | goto free_work_data; | 207 | goto free_work_data; |
489 | 208 | ||
490 | atmel_ecc_enqueue(work_data, atmel_ecdh_done, req); | 209 | atmel_i2c_enqueue(work_data, atmel_ecdh_done, req); |
491 | 210 | ||
492 | return -EINPROGRESS; | 211 | return -EINPROGRESS; |
493 | 212 | ||
@@ -498,7 +217,7 @@ free_work_data: | |||
498 | 217 | ||
499 | static struct i2c_client *atmel_ecc_i2c_client_alloc(void) | 218 | static struct i2c_client *atmel_ecc_i2c_client_alloc(void) |
500 | { | 219 | { |
501 | struct atmel_ecc_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL; | 220 | struct atmel_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL; |
502 | struct i2c_client *client = ERR_PTR(-ENODEV); | 221 | struct i2c_client *client = ERR_PTR(-ENODEV); |
503 | int min_tfm_cnt = INT_MAX; | 222 | int min_tfm_cnt = INT_MAX; |
504 | int tfm_cnt; | 223 | int tfm_cnt; |
@@ -533,7 +252,7 @@ static struct i2c_client *atmel_ecc_i2c_client_alloc(void) | |||
533 | 252 | ||
534 | static void atmel_ecc_i2c_client_free(struct i2c_client *client) | 253 | static void atmel_ecc_i2c_client_free(struct i2c_client *client) |
535 | { | 254 | { |
536 | struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); | 255 | struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); |
537 | 256 | ||
538 | atomic_dec(&i2c_priv->tfm_count); | 257 | atomic_dec(&i2c_priv->tfm_count); |
539 | } | 258 | } |
@@ -604,96 +323,18 @@ static struct kpp_alg atmel_ecdh = { | |||
604 | }, | 323 | }, |
605 | }; | 324 | }; |
606 | 325 | ||
607 | static inline size_t atmel_ecc_wake_token_sz(u32 bus_clk_rate) | ||
608 | { | ||
609 | u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC); | ||
610 | |||
611 | /* return the size of the wake_token in bytes */ | ||
612 | return DIV_ROUND_UP(no_of_bits, 8); | ||
613 | } | ||
614 | |||
615 | static int device_sanity_check(struct i2c_client *client) | ||
616 | { | ||
617 | struct atmel_ecc_cmd *cmd; | ||
618 | int ret; | ||
619 | |||
620 | cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); | ||
621 | if (!cmd) | ||
622 | return -ENOMEM; | ||
623 | |||
624 | atmel_ecc_init_read_cmd(cmd); | ||
625 | |||
626 | ret = atmel_ecc_send_receive(client, cmd); | ||
627 | if (ret) | ||
628 | goto free_cmd; | ||
629 | |||
630 | /* | ||
631 | * It is vital that the Configuration, Data and OTP zones be locked | ||
632 | * prior to release into the field of the system containing the device. | ||
633 | * Failure to lock these zones may permit modification of any secret | ||
634 | * keys and may lead to other security problems. | ||
635 | */ | ||
636 | if (cmd->data[LOCK_CONFIG_IDX] || cmd->data[LOCK_VALUE_IDX]) { | ||
637 | dev_err(&client->dev, "Configuration or Data and OTP zones are unlocked!\n"); | ||
638 | ret = -ENOTSUPP; | ||
639 | } | ||
640 | |||
641 | /* fall through */ | ||
642 | free_cmd: | ||
643 | kfree(cmd); | ||
644 | return ret; | ||
645 | } | ||
646 | |||
647 | static int atmel_ecc_probe(struct i2c_client *client, | 326 | static int atmel_ecc_probe(struct i2c_client *client, |
648 | const struct i2c_device_id *id) | 327 | const struct i2c_device_id *id) |
649 | { | 328 | { |
650 | struct atmel_ecc_i2c_client_priv *i2c_priv; | 329 | struct atmel_i2c_client_priv *i2c_priv; |
651 | struct device *dev = &client->dev; | ||
652 | int ret; | 330 | int ret; |
653 | u32 bus_clk_rate; | ||
654 | |||
655 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { | ||
656 | dev_err(dev, "I2C_FUNC_I2C not supported\n"); | ||
657 | return -ENODEV; | ||
658 | } | ||
659 | 331 | ||
660 | ret = of_property_read_u32(client->adapter->dev.of_node, | 332 | ret = atmel_i2c_probe(client, id); |
661 | "clock-frequency", &bus_clk_rate); | ||
662 | if (ret) { | ||
663 | dev_err(dev, "of: failed to read clock-frequency property\n"); | ||
664 | return ret; | ||
665 | } | ||
666 | |||
667 | if (bus_clk_rate > 1000000L) { | ||
668 | dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n", | ||
669 | bus_clk_rate); | ||
670 | return -EINVAL; | ||
671 | } | ||
672 | |||
673 | i2c_priv = devm_kmalloc(dev, sizeof(*i2c_priv), GFP_KERNEL); | ||
674 | if (!i2c_priv) | ||
675 | return -ENOMEM; | ||
676 | |||
677 | i2c_priv->client = client; | ||
678 | mutex_init(&i2c_priv->lock); | ||
679 | |||
680 | /* | ||
681 | * WAKE_TOKEN_MAX_SIZE was calculated for the maximum bus_clk_rate - | ||
682 | * 1MHz. The previous bus_clk_rate check ensures us that wake_token_sz | ||
683 | * will always be smaller than or equal to WAKE_TOKEN_MAX_SIZE. | ||
684 | */ | ||
685 | i2c_priv->wake_token_sz = atmel_ecc_wake_token_sz(bus_clk_rate); | ||
686 | |||
687 | memset(i2c_priv->wake_token, 0, sizeof(i2c_priv->wake_token)); | ||
688 | |||
689 | atomic_set(&i2c_priv->tfm_count, 0); | ||
690 | |||
691 | i2c_set_clientdata(client, i2c_priv); | ||
692 | |||
693 | ret = device_sanity_check(client); | ||
694 | if (ret) | 333 | if (ret) |
695 | return ret; | 334 | return ret; |
696 | 335 | ||
336 | i2c_priv = i2c_get_clientdata(client); | ||
337 | |||
697 | spin_lock(&driver_data.i2c_list_lock); | 338 | spin_lock(&driver_data.i2c_list_lock); |
698 | list_add_tail(&i2c_priv->i2c_client_list_node, | 339 | list_add_tail(&i2c_priv->i2c_client_list_node, |
699 | &driver_data.i2c_client_list); | 340 | &driver_data.i2c_client_list); |
@@ -705,10 +346,10 @@ static int atmel_ecc_probe(struct i2c_client *client, | |||
705 | list_del(&i2c_priv->i2c_client_list_node); | 346 | list_del(&i2c_priv->i2c_client_list_node); |
706 | spin_unlock(&driver_data.i2c_list_lock); | 347 | spin_unlock(&driver_data.i2c_list_lock); |
707 | 348 | ||
708 | dev_err(dev, "%s alg registration failed\n", | 349 | dev_err(&client->dev, "%s alg registration failed\n", |
709 | atmel_ecdh.base.cra_driver_name); | 350 | atmel_ecdh.base.cra_driver_name); |
710 | } else { | 351 | } else { |
711 | dev_info(dev, "atmel ecc algorithms registered in /proc/crypto\n"); | 352 | dev_info(&client->dev, "atmel ecc algorithms registered in /proc/crypto\n"); |
712 | } | 353 | } |
713 | 354 | ||
714 | return ret; | 355 | return ret; |
@@ -716,7 +357,7 @@ static int atmel_ecc_probe(struct i2c_client *client, | |||
716 | 357 | ||
717 | static int atmel_ecc_remove(struct i2c_client *client) | 358 | static int atmel_ecc_remove(struct i2c_client *client) |
718 | { | 359 | { |
719 | struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); | 360 | struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); |
720 | 361 | ||
721 | /* Return EBUSY if i2c client already allocated. */ | 362 | /* Return EBUSY if i2c client already allocated. */ |
722 | if (atomic_read(&i2c_priv->tfm_count)) { | 363 | if (atomic_read(&i2c_priv->tfm_count)) { |
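With the i2c transport factored out, atmel-ecc.c only builds a command and hands it to the shared helpers; completion is asynchronous via the callback passed to atmel_i2c_enqueue(). A condensed kernel-context sketch of that contract — the example_* names are hypothetical, and the random command stands in for any atmel_i2c_cmd::

    /* Runs in workqueue context after the i2c round trip completes;
     * status is 0 on success or a device/i2c error code. */
    static void example_done(struct atmel_i2c_work_data *work_data,
                             void *areq, int status)
    {
            kfree(work_data);
    }

    static int example_submit(struct atmel_i2c_client_priv *i2c_priv)
    {
            struct atmel_i2c_work_data *work_data;

            work_data = kzalloc(sizeof(*work_data), GFP_KERNEL);
            if (!work_data)
                    return -ENOMEM;

            work_data->ctx = i2c_priv;
            work_data->client = i2c_priv->client;

            atmel_i2c_init_random_cmd(&work_data->cmd);
            atmel_i2c_enqueue(work_data, example_done, NULL);

            /* the outcome is reported only through example_done() */
            return -EINPROGRESS;
    }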
diff --git a/drivers/crypto/atmel-ecc.h b/drivers/crypto/atmel-ecc.h deleted file mode 100644 index 643a3b947338..000000000000 --- a/drivers/crypto/atmel-ecc.h +++ /dev/null | |||
@@ -1,116 +0,0 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* | ||
3 | * Copyright (c) 2017, Microchip Technology Inc. | ||
4 | * Author: Tudor Ambarus <tudor.ambarus@microchip.com> | ||
5 | */ | ||
6 | |||
7 | #ifndef __ATMEL_ECC_H__ | ||
8 | #define __ATMEL_ECC_H__ | ||
9 | |||
10 | #define ATMEL_ECC_PRIORITY 300 | ||
11 | |||
12 | #define COMMAND 0x03 /* packet function */ | ||
13 | #define SLEEP_TOKEN 0x01 | ||
14 | #define WAKE_TOKEN_MAX_SIZE 8 | ||
15 | |||
16 | /* Definitions of Data and Command sizes */ | ||
17 | #define WORD_ADDR_SIZE 1 | ||
18 | #define COUNT_SIZE 1 | ||
19 | #define CRC_SIZE 2 | ||
20 | #define CMD_OVERHEAD_SIZE (COUNT_SIZE + CRC_SIZE) | ||
21 | |||
22 | /* size in bytes of the n prime */ | ||
23 | #define ATMEL_ECC_NIST_P256_N_SIZE 32 | ||
24 | #define ATMEL_ECC_PUBKEY_SIZE (2 * ATMEL_ECC_NIST_P256_N_SIZE) | ||
25 | |||
26 | #define STATUS_RSP_SIZE 4 | ||
27 | #define ECDH_RSP_SIZE (32 + CMD_OVERHEAD_SIZE) | ||
28 | #define GENKEY_RSP_SIZE (ATMEL_ECC_PUBKEY_SIZE + \ | ||
29 | CMD_OVERHEAD_SIZE) | ||
30 | #define READ_RSP_SIZE (4 + CMD_OVERHEAD_SIZE) | ||
31 | #define MAX_RSP_SIZE GENKEY_RSP_SIZE | ||
32 | |||
33 | /** | ||
34 | * atmel_ecc_cmd - structure used for communicating with the device. | ||
35 | * @word_addr: indicates the function of the packet sent to the device. This | ||
36 | * byte should have a value of COMMAND for normal operation. | ||
37 | * @count : number of bytes to be transferred to (or from) the device. | ||
38 | * @opcode : the command code. | ||
39 | * @param1 : the first parameter; always present. | ||
40 | * @param2 : the second parameter; always present. | ||
41 | * @data : optional remaining input data. Includes a 2-byte CRC. | ||
42 | * @rxsize : size of the data received from i2c client. | ||
43 | * @msecs : command execution time in milliseconds | ||
44 | */ | ||
45 | struct atmel_ecc_cmd { | ||
46 | u8 word_addr; | ||
47 | u8 count; | ||
48 | u8 opcode; | ||
49 | u8 param1; | ||
50 | u16 param2; | ||
51 | u8 data[MAX_RSP_SIZE]; | ||
52 | u8 msecs; | ||
53 | u16 rxsize; | ||
54 | } __packed; | ||
55 | |||
56 | /* Status/Error codes */ | ||
57 | #define STATUS_SIZE 0x04 | ||
58 | #define STATUS_NOERR 0x00 | ||
59 | #define STATUS_WAKE_SUCCESSFUL 0x11 | ||
60 | |||
61 | static const struct { | ||
62 | u8 value; | ||
63 | const char *error_text; | ||
64 | } error_list[] = { | ||
65 | { 0x01, "CheckMac or Verify miscompare" }, | ||
66 | { 0x03, "Parse Error" }, | ||
67 | { 0x05, "ECC Fault" }, | ||
68 | { 0x0F, "Execution Error" }, | ||
69 | { 0xEE, "Watchdog about to expire" }, | ||
70 | { 0xFF, "CRC or other communication error" }, | ||
71 | }; | ||
72 | |||
73 | /* Definitions for eeprom organization */ | ||
74 | #define CONFIG_ZONE 0 | ||
75 | |||
76 | /* Definitions for Indexes common to all commands */ | ||
77 | #define RSP_DATA_IDX 1 /* buffer index of data in response */ | ||
78 | #define DATA_SLOT_2 2 /* used for ECDH private key */ | ||
79 | |||
80 | /* Definitions for the device lock state */ | ||
81 | #define DEVICE_LOCK_ADDR 0x15 | ||
82 | #define LOCK_VALUE_IDX (RSP_DATA_IDX + 2) | ||
83 | #define LOCK_CONFIG_IDX (RSP_DATA_IDX + 3) | ||
84 | |||
85 | /* | ||
86 | * Wake High delay to data communication (microseconds). SDA should be stable | ||
87 | * high for this entire duration. | ||
88 | */ | ||
89 | #define TWHI_MIN 1500 | ||
90 | #define TWHI_MAX 1550 | ||
91 | |||
92 | /* Wake Low duration */ | ||
93 | #define TWLO_USEC 60 | ||
94 | |||
95 | /* Command execution time (milliseconds) */ | ||
96 | #define MAX_EXEC_TIME_ECDH 58 | ||
97 | #define MAX_EXEC_TIME_GENKEY 115 | ||
98 | #define MAX_EXEC_TIME_READ 1 | ||
99 | |||
100 | /* Command opcode */ | ||
101 | #define OPCODE_ECDH 0x43 | ||
102 | #define OPCODE_GENKEY 0x40 | ||
103 | #define OPCODE_READ 0x02 | ||
104 | |||
105 | /* Definitions for the READ Command */ | ||
106 | #define READ_COUNT 7 | ||
107 | |||
108 | /* Definitions for the GenKey Command */ | ||
109 | #define GENKEY_COUNT 7 | ||
110 | #define GENKEY_MODE_PRIVATE 0x04 | ||
111 | |||
112 | /* Definitions for the ECDH Command */ | ||
113 | #define ECDH_COUNT 71 | ||
114 | #define ECDH_PREFIX_MODE 0x00 | ||
115 | |||
116 | #endif /* __ATMEL_ECC_H__ */ | ||
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c new file mode 100644 index 000000000000..dc876fab2882 --- /dev/null +++ b/drivers/crypto/atmel-i2c.c | |||
@@ -0,0 +1,364 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Microchip / Atmel ECC (I2C) driver. | ||
4 | * | ||
5 | * Copyright (c) 2017, Microchip Technology Inc. | ||
6 | * Author: Tudor Ambarus <tudor.ambarus@microchip.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/bitrev.h> | ||
10 | #include <linux/crc16.h> | ||
11 | #include <linux/delay.h> | ||
12 | #include <linux/device.h> | ||
13 | #include <linux/err.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/i2c.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/scatterlist.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/workqueue.h> | ||
22 | #include "atmel-i2c.h" | ||
23 | |||
24 | /** | ||
25 | * atmel_i2c_checksum() - Generate 16-bit CRC as required by ATMEL ECC. | ||
26 | * CRC16 verification of the count, opcode, param1, param2 and data bytes. | ||
27 | * The checksum is saved in little-endian format in the least significant | ||
28 | * two bytes of the command. CRC polynomial is 0x8005 and the initial register | ||
29 | * value should be zero. | ||
30 | * | ||
31 | * @cmd : structure used for communicating with the device. | ||
32 | */ | ||
33 | static void atmel_i2c_checksum(struct atmel_i2c_cmd *cmd) | ||
34 | { | ||
35 | u8 *data = &cmd->count; | ||
36 | size_t len = cmd->count - CRC_SIZE; | ||
37 | __le16 *__crc16 = (__le16 *)(data + len); | ||
38 | |||
39 | *__crc16 = cpu_to_le16(bitrev16(crc16(0, data, len))); | ||
40 | } | ||
41 | |||
42 | void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd) | ||
43 | { | ||
44 | cmd->word_addr = COMMAND; | ||
45 | cmd->opcode = OPCODE_READ; | ||
46 | /* | ||
47 | * Read the word from Configuration zone that contains the lock bytes | ||
48 | * (UserExtra, Selector, LockValue, LockConfig). | ||
49 | */ | ||
50 | cmd->param1 = CONFIG_ZONE; | ||
51 | cmd->param2 = cpu_to_le16(DEVICE_LOCK_ADDR); | ||
52 | cmd->count = READ_COUNT; | ||
53 | |||
54 | atmel_i2c_checksum(cmd); | ||
55 | |||
56 | cmd->msecs = MAX_EXEC_TIME_READ; | ||
57 | cmd->rxsize = READ_RSP_SIZE; | ||
58 | } | ||
59 | EXPORT_SYMBOL(atmel_i2c_init_read_cmd); | ||
60 | |||
61 | void atmel_i2c_init_random_cmd(struct atmel_i2c_cmd *cmd) | ||
62 | { | ||
63 | cmd->word_addr = COMMAND; | ||
64 | cmd->opcode = OPCODE_RANDOM; | ||
65 | cmd->param1 = 0; | ||
66 | cmd->param2 = 0; | ||
67 | cmd->count = RANDOM_COUNT; | ||
68 | |||
69 | atmel_i2c_checksum(cmd); | ||
70 | |||
71 | cmd->msecs = MAX_EXEC_TIME_RANDOM; | ||
72 | cmd->rxsize = RANDOM_RSP_SIZE; | ||
73 | } | ||
74 | EXPORT_SYMBOL(atmel_i2c_init_random_cmd); | ||
75 | |||
76 | void atmel_i2c_init_genkey_cmd(struct atmel_i2c_cmd *cmd, u16 keyid) | ||
77 | { | ||
78 | cmd->word_addr = COMMAND; | ||
79 | cmd->count = GENKEY_COUNT; | ||
80 | cmd->opcode = OPCODE_GENKEY; | ||
81 | cmd->param1 = GENKEY_MODE_PRIVATE; | ||
82 | /* a random private key will be generated and stored in slot keyID */ | ||
83 | cmd->param2 = cpu_to_le16(keyid); | ||
84 | |||
85 | atmel_i2c_checksum(cmd); | ||
86 | |||
87 | cmd->msecs = MAX_EXEC_TIME_GENKEY; | ||
88 | cmd->rxsize = GENKEY_RSP_SIZE; | ||
89 | } | ||
90 | EXPORT_SYMBOL(atmel_i2c_init_genkey_cmd); | ||
91 | |||
92 | int atmel_i2c_init_ecdh_cmd(struct atmel_i2c_cmd *cmd, | ||
93 | struct scatterlist *pubkey) | ||
94 | { | ||
95 | size_t copied; | ||
96 | |||
97 | cmd->word_addr = COMMAND; | ||
98 | cmd->count = ECDH_COUNT; | ||
99 | cmd->opcode = OPCODE_ECDH; | ||
100 | cmd->param1 = ECDH_PREFIX_MODE; | ||
101 | /* private key slot */ | ||
102 | cmd->param2 = cpu_to_le16(DATA_SLOT_2); | ||
103 | |||
104 | /* | ||
105 | * The device only supports NIST P256 ECC keys. The public key size will | ||
106 | * always be the same. Use a macro for the key size to avoid unnecessary | ||
107 | * computations. | ||
108 | */ | ||
109 | copied = sg_copy_to_buffer(pubkey, | ||
110 | sg_nents_for_len(pubkey, | ||
111 | ATMEL_ECC_PUBKEY_SIZE), | ||
112 | cmd->data, ATMEL_ECC_PUBKEY_SIZE); | ||
113 | if (copied != ATMEL_ECC_PUBKEY_SIZE) | ||
114 | return -EINVAL; | ||
115 | |||
116 | atmel_i2c_checksum(cmd); | ||
117 | |||
118 | cmd->msecs = MAX_EXEC_TIME_ECDH; | ||
119 | cmd->rxsize = ECDH_RSP_SIZE; | ||
120 | |||
121 | return 0; | ||
122 | } | ||
123 | EXPORT_SYMBOL(atmel_i2c_init_ecdh_cmd); | ||
124 | |||
125 | /* | ||
126 | * After wake and after execution of a command, there will be error, status, or | ||
127 | * result bytes in the device's output register that can be retrieved by the | ||
128 | * system. When the length of that group is four bytes, the codes returned are | ||
129 | * detailed in error_list. | ||
130 | */ | ||
131 | static int atmel_i2c_status(struct device *dev, u8 *status) | ||
132 | { | ||
133 | size_t err_list_len = ARRAY_SIZE(error_list); | ||
134 | int i; | ||
135 | u8 err_id = status[1]; | ||
136 | |||
137 | if (*status != STATUS_SIZE) | ||
138 | return 0; | ||
139 | |||
140 | if (err_id == STATUS_WAKE_SUCCESSFUL || err_id == STATUS_NOERR) | ||
141 | return 0; | ||
142 | |||
143 | for (i = 0; i < err_list_len; i++) | ||
144 | if (error_list[i].value == err_id) | ||
145 | break; | ||
146 | |||
147 | /* if err_id is not in the error_list then ignore it */ | ||
148 | if (i != err_list_len) { | ||
149 | dev_err(dev, "%02x: %s\n", err_id, error_list[i].error_text); | ||
150 | return err_id; | ||
151 | } | ||
152 | |||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | static int atmel_i2c_wakeup(struct i2c_client *client) | ||
157 | { | ||
158 | struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); | ||
159 | u8 status[STATUS_RSP_SIZE]; | ||
160 | int ret; | ||
161 | |||
162 | /* | ||
163 | * The device ignores any levels or transitions on the SCL pin when the | ||
164 | * device is idle, asleep or during waking up. Don't check for error | ||
165 | * when waking up the device. | ||
166 | */ | ||
167 | i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz); | ||
168 | |||
169 | /* | ||
170 | * Wait for the device to wake up. Typical ecdh and genkey execution | ||
171 | * times are tens of milliseconds; the 50 microsecond usleep_range() slack is negligible. | ||
172 | */ | ||
173 | usleep_range(TWHI_MIN, TWHI_MAX); | ||
174 | |||
175 | ret = i2c_master_recv(client, status, STATUS_SIZE); | ||
176 | if (ret < 0) | ||
177 | return ret; | ||
178 | |||
179 | return atmel_i2c_status(&client->dev, status); | ||
180 | } | ||
181 | |||
182 | static int atmel_i2c_sleep(struct i2c_client *client) | ||
183 | { | ||
184 | u8 sleep = SLEEP_TOKEN; | ||
185 | |||
186 | return i2c_master_send(client, &sleep, 1); | ||
187 | } | ||
188 | |||
189 | /* | ||
190 | * atmel_i2c_send_receive() - send a command to the device and receive its | ||
191 | * response. | ||
192 | * @client: i2c client device | ||
193 | * @cmd : structure used to communicate with the device | ||
194 | * | ||
195 | * After the device receives a Wake token, a watchdog counter starts within the | ||
196 | * device. After the watchdog timer expires, the device enters sleep mode | ||
197 | * regardless of whether some I/O transmission or command execution is in | ||
198 | * progress. If a command is attempted when insufficient time remains prior to | ||
199 | * watchdog timer execution, the device will return the watchdog timeout error | ||
200 | * code without attempting to execute the command. There is no way to reset the | ||
201 | * counter other than to put the device into sleep or idle mode and then | ||
202 | * wake it up again. | ||
203 | */ | ||
204 | int atmel_i2c_send_receive(struct i2c_client *client, struct atmel_i2c_cmd *cmd) | ||
205 | { | ||
206 | struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); | ||
207 | int ret; | ||
208 | |||
209 | mutex_lock(&i2c_priv->lock); | ||
210 | |||
211 | ret = atmel_i2c_wakeup(client); | ||
212 | if (ret) | ||
213 | goto err; | ||
214 | |||
215 | /* send the command */ | ||
216 | ret = i2c_master_send(client, (u8 *)cmd, cmd->count + WORD_ADDR_SIZE); | ||
217 | if (ret < 0) | ||
218 | goto err; | ||
219 | |||
220 | /* delay the appropriate amount of time for command to execute */ | ||
221 | msleep(cmd->msecs); | ||
222 | |||
223 | /* receive the response */ | ||
224 | ret = i2c_master_recv(client, cmd->data, cmd->rxsize); | ||
225 | if (ret < 0) | ||
226 | goto err; | ||
227 | |||
228 | /* put the device into low-power mode */ | ||
229 | ret = atmel_i2c_sleep(client); | ||
230 | if (ret < 0) | ||
231 | goto err; | ||
232 | |||
233 | mutex_unlock(&i2c_priv->lock); | ||
234 | return atmel_i2c_status(&client->dev, cmd->data); | ||
235 | err: | ||
236 | mutex_unlock(&i2c_priv->lock); | ||
237 | return ret; | ||
238 | } | ||
239 | EXPORT_SYMBOL(atmel_i2c_send_receive); | ||
240 | |||
241 | static void atmel_i2c_work_handler(struct work_struct *work) | ||
242 | { | ||
243 | struct atmel_i2c_work_data *work_data = | ||
244 | container_of(work, struct atmel_i2c_work_data, work); | ||
245 | struct atmel_i2c_cmd *cmd = &work_data->cmd; | ||
246 | struct i2c_client *client = work_data->client; | ||
247 | int status; | ||
248 | |||
249 | status = atmel_i2c_send_receive(client, cmd); | ||
250 | work_data->cbk(work_data, work_data->areq, status); | ||
251 | } | ||
252 | |||
253 | void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data, | ||
254 | void (*cbk)(struct atmel_i2c_work_data *work_data, | ||
255 | void *areq, int status), | ||
256 | void *areq) | ||
257 | { | ||
258 | work_data->cbk = (void *)cbk; | ||
259 | work_data->areq = areq; | ||
260 | |||
261 | INIT_WORK(&work_data->work, atmel_i2c_work_handler); | ||
262 | schedule_work(&work_data->work); | ||
263 | } | ||
264 | EXPORT_SYMBOL(atmel_i2c_enqueue); | ||
265 | |||
266 | static inline size_t atmel_i2c_wake_token_sz(u32 bus_clk_rate) | ||
267 | { | ||
268 | u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC); | ||
269 | |||
270 | /* return the size of the wake_token in bytes */ | ||
271 | return DIV_ROUND_UP(no_of_bits, 8); | ||
272 | } | ||
273 | |||
274 | static int device_sanity_check(struct i2c_client *client) | ||
275 | { | ||
276 | struct atmel_i2c_cmd *cmd; | ||
277 | int ret; | ||
278 | |||
279 | cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); | ||
280 | if (!cmd) | ||
281 | return -ENOMEM; | ||
282 | |||
283 | atmel_i2c_init_read_cmd(cmd); | ||
284 | |||
285 | ret = atmel_i2c_send_receive(client, cmd); | ||
286 | if (ret) | ||
287 | goto free_cmd; | ||
288 | |||
289 | /* | ||
290 | * It is vital that the Configuration, Data and OTP zones be locked | ||
291 | * prior to release into the field of the system containing the device. | ||
292 | * Failure to lock these zones may permit modification of any secret | ||
293 | * keys and may lead to other security problems. | ||
294 | */ | ||
295 | if (cmd->data[LOCK_CONFIG_IDX] || cmd->data[LOCK_VALUE_IDX]) { | ||
296 | dev_err(&client->dev, "Configuration or Data and OTP zones are unlocked!\n"); | ||
297 | ret = -ENOTSUPP; | ||
298 | } | ||
299 | |||
300 | /* fall through */ | ||
301 | free_cmd: | ||
302 | kfree(cmd); | ||
303 | return ret; | ||
304 | } | ||
305 | |||
306 | int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) | ||
307 | { | ||
308 | struct atmel_i2c_client_priv *i2c_priv; | ||
309 | struct device *dev = &client->dev; | ||
310 | int ret; | ||
311 | u32 bus_clk_rate; | ||
312 | |||
313 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { | ||
314 | dev_err(dev, "I2C_FUNC_I2C not supported\n"); | ||
315 | return -ENODEV; | ||
316 | } | ||
317 | |||
318 | bus_clk_rate = i2c_acpi_find_bus_speed(&client->adapter->dev); | ||
319 | if (!bus_clk_rate) { | ||
320 | ret = device_property_read_u32(&client->adapter->dev, | ||
321 | "clock-frequency", &bus_clk_rate); | ||
322 | if (ret) { | ||
323 | dev_err(dev, "failed to read clock-frequency property\n"); | ||
324 | return ret; | ||
325 | } | ||
326 | } | ||
327 | |||
328 | if (bus_clk_rate > 1000000L) { | ||
329 | dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n", | ||
330 | bus_clk_rate); | ||
331 | return -EINVAL; | ||
332 | } | ||
333 | |||
334 | i2c_priv = devm_kmalloc(dev, sizeof(*i2c_priv), GFP_KERNEL); | ||
335 | if (!i2c_priv) | ||
336 | return -ENOMEM; | ||
337 | |||
338 | i2c_priv->client = client; | ||
339 | mutex_init(&i2c_priv->lock); | ||
340 | |||
341 | /* | ||
342 | * WAKE_TOKEN_MAX_SIZE was calculated for the maximum bus_clk_rate of | ||
343 | * 1 MHz. The bus_clk_rate check above ensures that wake_token_sz is | ||
344 | * always less than or equal to WAKE_TOKEN_MAX_SIZE. | ||
345 | */ | ||
346 | i2c_priv->wake_token_sz = atmel_i2c_wake_token_sz(bus_clk_rate); | ||
347 | |||
348 | memset(i2c_priv->wake_token, 0, sizeof(i2c_priv->wake_token)); | ||
349 | |||
350 | atomic_set(&i2c_priv->tfm_count, 0); | ||
351 | |||
352 | i2c_set_clientdata(client, i2c_priv); | ||
353 | |||
354 | ret = device_sanity_check(client); | ||
355 | if (ret) | ||
356 | return ret; | ||
357 | |||
358 | return 0; | ||
359 | } | ||
360 | EXPORT_SYMBOL(atmel_i2c_probe); | ||
361 | |||
362 | MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>"); | ||
363 | MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver"); | ||
364 | MODULE_LICENSE("GPL v2"); | ||
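atmel_i2c_checksum() fixes the wire framing: a CRC-16 with polynomial 0x8005 is computed over the bytes from count up to (but excluding) the two CRC bytes, bit-reversed, and stored little-endian at the end of the command. A standalone host-side replica for cross-checking command buffers — plain C with no kernel dependencies, offered as a sketch rather than a reference implementation::

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Reflected CRC-16 (polynomial 0x8005 processed LSB-first as 0xA001),
     * equivalent to the kernel's crc16() with an initial value of zero. */
    static uint16_t crc16_arc(const uint8_t *buf, size_t len)
    {
            uint16_t crc = 0;

            while (len--) {
                    crc ^= *buf++;
                    for (int i = 0; i < 8; i++)
                            crc = (crc >> 1) ^ ((crc & 1) ? 0xA001 : 0);
            }
            return crc;
    }

    static uint16_t bitrev16(uint16_t x)
    {
            uint16_t r = 0;

            for (int i = 0; i < 16; i++)
                    r |= ((x >> i) & 1) << (15 - i);
            return r;
    }

    int main(void)
    {
            /* READ command body, from the count byte onward:
             * count=7, opcode=0x02 (READ), param1=0 (CONFIG_ZONE),
             * param2=0x0015 little-endian (DEVICE_LOCK_ADDR). */
            uint8_t cmd[7] = { 0x07, 0x02, 0x00, 0x15, 0x00 };
            uint16_t crc = bitrev16(crc16_arc(cmd, sizeof(cmd) - 2));

            cmd[5] = crc & 0xff;   /* little-endian, as in the driver */
            cmd[6] = crc >> 8;
            printf("crc = %02x %02x\n", cmd[5], cmd[6]);
            return 0;
    }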
diff --git a/drivers/crypto/atmel-i2c.h b/drivers/crypto/atmel-i2c.h new file mode 100644 index 000000000000..21860b99c3e3 --- /dev/null +++ b/drivers/crypto/atmel-i2c.h | |||
@@ -0,0 +1,197 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* | ||
3 | * Copyright (c) 2017, Microchip Technology Inc. | ||
4 | * Author: Tudor Ambarus <tudor.ambarus@microchip.com> | ||
5 | */ | ||
6 | |||
7 | #ifndef __ATMEL_I2C_H__ | ||
8 | #define __ATMEL_I2C_H__ | ||
9 | |||
10 | #include <linux/hw_random.h> | ||
11 | #include <linux/types.h> | ||
12 | |||
13 | #define ATMEL_ECC_PRIORITY 300 | ||
14 | |||
15 | #define COMMAND 0x03 /* packet function */ | ||
16 | #define SLEEP_TOKEN 0x01 | ||
17 | #define WAKE_TOKEN_MAX_SIZE 8 | ||
18 | |||
19 | /* Definitions of Data and Command sizes */ | ||
20 | #define WORD_ADDR_SIZE 1 | ||
21 | #define COUNT_SIZE 1 | ||
22 | #define CRC_SIZE 2 | ||
23 | #define CMD_OVERHEAD_SIZE (COUNT_SIZE + CRC_SIZE) | ||
24 | |||
25 | /* size in bytes of the n prime */ | ||
26 | #define ATMEL_ECC_NIST_P256_N_SIZE 32 | ||
27 | #define ATMEL_ECC_PUBKEY_SIZE (2 * ATMEL_ECC_NIST_P256_N_SIZE) | ||
28 | |||
29 | #define STATUS_RSP_SIZE 4 | ||
30 | #define ECDH_RSP_SIZE (32 + CMD_OVERHEAD_SIZE) | ||
31 | #define GENKEY_RSP_SIZE (ATMEL_ECC_PUBKEY_SIZE + \ | ||
32 | CMD_OVERHEAD_SIZE) | ||
33 | #define READ_RSP_SIZE (4 + CMD_OVERHEAD_SIZE) | ||
34 | #define RANDOM_RSP_SIZE (32 + CMD_OVERHEAD_SIZE) | ||
35 | #define MAX_RSP_SIZE GENKEY_RSP_SIZE | ||
36 | |||
37 | /** | ||
38 | * atmel_i2c_cmd - structure used for communicating with the device. | ||
39 | * @word_addr: indicates the function of the packet sent to the device. This | ||
40 | * byte should have a value of COMMAND for normal operation. | ||
41 | * @count : number of bytes to be transferred to (or from) the device. | ||
42 | * @opcode : the command code. | ||
43 | * @param1 : the first parameter; always present. | ||
44 | * @param2 : the second parameter; always present. | ||
45 | * @data : optional remaining input data. Includes a 2-byte CRC. | ||
46 | * @rxsize : size of the data received from i2c client. | ||
47 | * @msecs : command execution time in milliseconds | ||
48 | */ | ||
49 | struct atmel_i2c_cmd { | ||
50 | u8 word_addr; | ||
51 | u8 count; | ||
52 | u8 opcode; | ||
53 | u8 param1; | ||
54 | __le16 param2; | ||
55 | u8 data[MAX_RSP_SIZE]; | ||
56 | u8 msecs; | ||
57 | u16 rxsize; | ||
58 | } __packed; | ||
59 | |||
60 | /* Status/Error codes */ | ||
61 | #define STATUS_SIZE 0x04 | ||
62 | #define STATUS_NOERR 0x00 | ||
63 | #define STATUS_WAKE_SUCCESSFUL 0x11 | ||
64 | |||
65 | static const struct { | ||
66 | u8 value; | ||
67 | const char *error_text; | ||
68 | } error_list[] = { | ||
69 | { 0x01, "CheckMac or Verify miscompare" }, | ||
70 | { 0x03, "Parse Error" }, | ||
71 | { 0x05, "ECC Fault" }, | ||
72 | { 0x0F, "Execution Error" }, | ||
73 | { 0xEE, "Watchdog about to expire" }, | ||
74 | { 0xFF, "CRC or other communication error" }, | ||
75 | }; | ||
76 | |||
77 | /* Definitions for eeprom organization */ | ||
78 | #define CONFIG_ZONE 0 | ||
79 | |||
80 | /* Definitions for Indexes common to all commands */ | ||
81 | #define RSP_DATA_IDX 1 /* buffer index of data in response */ | ||
82 | #define DATA_SLOT_2 2 /* used for ECDH private key */ | ||
83 | |||
84 | /* Definitions for the device lock state */ | ||
85 | #define DEVICE_LOCK_ADDR 0x15 | ||
86 | #define LOCK_VALUE_IDX (RSP_DATA_IDX + 2) | ||
87 | #define LOCK_CONFIG_IDX (RSP_DATA_IDX + 3) | ||
88 | |||
89 | /* | ||
90 | * Wake High delay to data communication (microseconds). SDA should be stable | ||
91 | * high for this entire duration. | ||
92 | */ | ||
93 | #define TWHI_MIN 1500 | ||
94 | #define TWHI_MAX 1550 | ||
95 | |||
96 | /* Wake Low duration */ | ||
97 | #define TWLO_USEC 60 | ||
98 | |||
99 | /* Command execution time (milliseconds) */ | ||
100 | #define MAX_EXEC_TIME_ECDH 58 | ||
101 | #define MAX_EXEC_TIME_GENKEY 115 | ||
102 | #define MAX_EXEC_TIME_READ 1 | ||
103 | #define MAX_EXEC_TIME_RANDOM 50 | ||
104 | |||
105 | /* Command opcode */ | ||
106 | #define OPCODE_ECDH 0x43 | ||
107 | #define OPCODE_GENKEY 0x40 | ||
108 | #define OPCODE_READ 0x02 | ||
109 | #define OPCODE_RANDOM 0x1b | ||
110 | |||
111 | /* Definitions for the READ Command */ | ||
112 | #define READ_COUNT 7 | ||
113 | |||
114 | /* Definitions for the RANDOM Command */ | ||
115 | #define RANDOM_COUNT 7 | ||
116 | |||
117 | /* Definitions for the GenKey Command */ | ||
118 | #define GENKEY_COUNT 7 | ||
119 | #define GENKEY_MODE_PRIVATE 0x04 | ||
120 | |||
121 | /* Definitions for the ECDH Command */ | ||
122 | #define ECDH_COUNT 71 | ||
123 | #define ECDH_PREFIX_MODE 0x00 | ||
124 | |||
125 | /* Used for binding tfm objects to i2c clients. */ | ||
126 | struct atmel_ecc_driver_data { | ||
127 | struct list_head i2c_client_list; | ||
128 | spinlock_t i2c_list_lock; | ||
129 | } ____cacheline_aligned; | ||
130 | |||
131 | /** | ||
132 | * atmel_i2c_client_priv - i2c_client private data | ||
133 | * @client : pointer to i2c client device | ||
134 | * @i2c_client_list_node: part of i2c_client_list | ||
135 | * @lock : lock for sending i2c commands | ||
136 | * @wake_token : wake token array of zeros | ||
137 | * @wake_token_sz : size in bytes of the wake_token | ||
138 | * @tfm_count : number of active crypto transformations on i2c client | ||
139 | * | ||
140 | * Reads and writes from/to the i2c client are sequential. The first byte | ||
141 | * transmitted to the device is treated as the transfer byte count. Any | ||
142 | * attempt to send more than this number of bytes causes the device to NAK them. | ||
143 | * After the host writes a single command byte to the input buffer, reads are | ||
144 | * prohibited until after the device completes command execution. Use a mutex | ||
145 | * when sending i2c commands. | ||
146 | */ | ||
147 | struct atmel_i2c_client_priv { | ||
148 | struct i2c_client *client; | ||
149 | struct list_head i2c_client_list_node; | ||
150 | struct mutex lock; | ||
151 | u8 wake_token[WAKE_TOKEN_MAX_SIZE]; | ||
152 | size_t wake_token_sz; | ||
153 | atomic_t tfm_count ____cacheline_aligned; | ||
154 | struct hwrng hwrng; | ||
155 | }; | ||
156 | |||
157 | /** | ||
158 | * atmel_i2c_work_data - data structure representing the work | ||
159 | * @ctx : caller context (e.g. the transformation context). | ||
160 | * @cbk : pointer to a callback function to be invoked upon completion of this | ||
161 | * request. This has the form: | ||
162 | * callback(struct atmel_i2c_work_data *work_data, void *areq, int status) | ||
163 | * where: | ||
164 | * @work_data: data structure representing the work | ||
165 | * @areq : optional pointer to an argument passed with the original | ||
166 | * request. | ||
167 | * @status : status returned from the i2c client device or i2c error. | ||
168 | * @areq: optional pointer to a user argument for use at callback time. | ||
169 | * @work: describes the task to be executed. | ||
170 | * @cmd : structure used for communicating with the device. | ||
171 | */ | ||
172 | struct atmel_i2c_work_data { | ||
173 | void *ctx; | ||
174 | struct i2c_client *client; | ||
175 | void (*cbk)(struct atmel_i2c_work_data *work_data, void *areq, | ||
176 | int status); | ||
177 | void *areq; | ||
178 | struct work_struct work; | ||
179 | struct atmel_i2c_cmd cmd; | ||
180 | }; | ||
181 | |||
182 | int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id); | ||
183 | |||
184 | void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data, | ||
185 | void (*cbk)(struct atmel_i2c_work_data *work_data, | ||
186 | void *areq, int status), | ||
187 | void *areq); | ||
188 | |||
189 | int atmel_i2c_send_receive(struct i2c_client *client, struct atmel_i2c_cmd *cmd); | ||
190 | |||
191 | void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd); | ||
192 | void atmel_i2c_init_random_cmd(struct atmel_i2c_cmd *cmd); | ||
193 | void atmel_i2c_init_genkey_cmd(struct atmel_i2c_cmd *cmd, u16 keyid); | ||
194 | int atmel_i2c_init_ecdh_cmd(struct atmel_i2c_cmd *cmd, | ||
195 | struct scatterlist *pubkey); | ||
196 | |||
197 | #endif /* __ATMEL_I2C_H__ */ | ||
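The wake token is simply a run of zero bytes long enough to hold SDA low for TWLO_USEC at the configured bus speed, which is what bounds WAKE_TOKEN_MAX_SIZE at 8. A small host-side replica of the sizing arithmetic for sanity-checking the constants — illustrative, not kernel code::

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define TWLO_USEC          60
    #define USEC_PER_SEC       1000000ULL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static size_t wake_token_sz(uint32_t bus_clk_rate)
    {
            uint64_t bits = DIV_ROUND_UP((uint64_t)TWLO_USEC * bus_clk_rate,
                                         USEC_PER_SEC);

            return DIV_ROUND_UP(bits, 8);
    }

    int main(void)
    {
            /* 60 us * 1 MHz = 60 bits -> 8 bytes == WAKE_TOKEN_MAX_SIZE */
            assert(wake_token_sz(1000000) == 8);
            /* at 100 kHz only 6 bit times are needed -> one zero byte */
            assert(wake_token_sz(100000) == 1);
            return 0;
    }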
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c new file mode 100644 index 000000000000..ea0d2068ea4f --- /dev/null +++ b/drivers/crypto/atmel-sha204a.c | |||
@@ -0,0 +1,171 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Microchip / Atmel SHA204A (I2C) driver. | ||
4 | * | ||
5 | * Copyright (c) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org> | ||
6 | */ | ||
7 | |||
8 | #include <linux/delay.h> | ||
9 | #include <linux/device.h> | ||
10 | #include <linux/err.h> | ||
11 | #include <linux/errno.h> | ||
12 | #include <linux/i2c.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/scatterlist.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/workqueue.h> | ||
19 | #include "atmel-i2c.h" | ||
20 | |||
21 | static void atmel_sha204a_rng_done(struct atmel_i2c_work_data *work_data, | ||
22 | void *areq, int status) | ||
23 | { | ||
24 | struct atmel_i2c_client_priv *i2c_priv = work_data->ctx; | ||
25 | struct hwrng *rng = areq; | ||
26 | |||
27 | if (status) | ||
28 | dev_warn_ratelimited(&i2c_priv->client->dev, | ||
29 | "i2c transaction failed (%d)\n", | ||
30 | status); | ||
31 | |||
32 | rng->priv = (unsigned long)work_data; | ||
33 | atomic_dec(&i2c_priv->tfm_count); | ||
34 | } | ||
35 | |||
36 | static int atmel_sha204a_rng_read_nonblocking(struct hwrng *rng, void *data, | ||
37 | size_t max) | ||
38 | { | ||
39 | struct atmel_i2c_client_priv *i2c_priv; | ||
40 | struct atmel_i2c_work_data *work_data; | ||
41 | |||
42 | i2c_priv = container_of(rng, struct atmel_i2c_client_priv, hwrng); | ||
43 | |||
44 | /* keep at most one asynchronous read in flight at any time */ | ||
45 | if (!atomic_add_unless(&i2c_priv->tfm_count, 1, 1)) | ||
46 | return 0; | ||
47 | |||
48 | if (rng->priv) { | ||
49 | work_data = (struct atmel_i2c_work_data *)rng->priv; | ||
50 | max = min(sizeof(work_data->cmd.data), max); | ||
51 | memcpy(data, &work_data->cmd.data, max); | ||
52 | rng->priv = 0; | ||
53 | } else { | ||
54 | work_data = kmalloc(sizeof(*work_data), GFP_ATOMIC); | ||
55 | if (!work_data) | ||
56 | return -ENOMEM; | ||
57 | |||
58 | work_data->ctx = i2c_priv; | ||
59 | work_data->client = i2c_priv->client; | ||
60 | |||
61 | max = 0; | ||
62 | } | ||
63 | |||
64 | atmel_i2c_init_random_cmd(&work_data->cmd); | ||
65 | atmel_i2c_enqueue(work_data, atmel_sha204a_rng_done, rng); | ||
66 | |||
67 | return max; | ||
68 | } | ||
69 | |||
70 | static int atmel_sha204a_rng_read(struct hwrng *rng, void *data, size_t max, | ||
71 | bool wait) | ||
72 | { | ||
73 | struct atmel_i2c_client_priv *i2c_priv; | ||
74 | struct atmel_i2c_cmd cmd; | ||
75 | int ret; | ||
76 | |||
77 | if (!wait) | ||
78 | return atmel_sha204a_rng_read_nonblocking(rng, data, max); | ||
79 | |||
80 | i2c_priv = container_of(rng, struct atmel_i2c_client_priv, hwrng); | ||
81 | |||
82 | atmel_i2c_init_random_cmd(&cmd); | ||
83 | |||
84 | ret = atmel_i2c_send_receive(i2c_priv->client, &cmd); | ||
85 | if (ret) | ||
86 | return ret; | ||
87 | |||
88 | max = min(sizeof(cmd.data), max); | ||
89 | memcpy(data, cmd.data, max); | ||
90 | |||
91 | return max; | ||
92 | } | ||
93 | |||
94 | static int atmel_sha204a_probe(struct i2c_client *client, | ||
95 | const struct i2c_device_id *id) | ||
96 | { | ||
97 | struct atmel_i2c_client_priv *i2c_priv; | ||
98 | int ret; | ||
99 | |||
100 | ret = atmel_i2c_probe(client, id); | ||
101 | if (ret) | ||
102 | return ret; | ||
103 | |||
104 | i2c_priv = i2c_get_clientdata(client); | ||
105 | |||
106 | memset(&i2c_priv->hwrng, 0, sizeof(i2c_priv->hwrng)); | ||
107 | |||
108 | i2c_priv->hwrng.name = dev_name(&client->dev); | ||
109 | i2c_priv->hwrng.read = atmel_sha204a_rng_read; | ||
110 | i2c_priv->hwrng.quality = 1024; | ||
111 | |||
112 | ret = hwrng_register(&i2c_priv->hwrng); | ||
113 | if (ret) | ||
114 | dev_warn(&client->dev, "failed to register RNG (%d)\n", ret); | ||
115 | |||
116 | return ret; | ||
117 | } | ||
118 | |||
119 | static int atmel_sha204a_remove(struct i2c_client *client) | ||
120 | { | ||
121 | struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); | ||
122 | |||
123 | if (atomic_read(&i2c_priv->tfm_count)) { | ||
124 | dev_err(&client->dev, "Device is busy\n"); | ||
125 | return -EBUSY; | ||
126 | } | ||
127 | |||
128 | if (i2c_priv->hwrng.priv) | ||
129 | kfree((void *)i2c_priv->hwrng.priv); | ||
130 | hwrng_unregister(&i2c_priv->hwrng); | ||
131 | |||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | static const struct of_device_id atmel_sha204a_dt_ids[] = { | ||
136 | { .compatible = "atmel,atsha204a", }, | ||
137 | { /* sentinel */ } | ||
138 | }; | ||
139 | MODULE_DEVICE_TABLE(of, atmel_sha204a_dt_ids); | ||
140 | |||
141 | static const struct i2c_device_id atmel_sha204a_id[] = { | ||
142 | { "atsha204a", 0 }, | ||
143 | { /* sentinel */ } | ||
144 | }; | ||
145 | MODULE_DEVICE_TABLE(i2c, atmel_sha204a_id); | ||
146 | |||
147 | static struct i2c_driver atmel_sha204a_driver = { | ||
148 | .probe = atmel_sha204a_probe, | ||
149 | .remove = atmel_sha204a_remove, | ||
150 | .id_table = atmel_sha204a_id, | ||
151 | |||
152 | .driver.name = "atmel-sha204a", | ||
153 | .driver.of_match_table = of_match_ptr(atmel_sha204a_dt_ids), | ||
154 | }; | ||
155 | |||
156 | static int __init atmel_sha204a_init(void) | ||
157 | { | ||
158 | return i2c_add_driver(&atmel_sha204a_driver); | ||
159 | } | ||
160 | |||
161 | static void __exit atmel_sha204a_exit(void) | ||
162 | { | ||
163 | flush_scheduled_work(); | ||
164 | i2c_del_driver(&atmel_sha204a_driver); | ||
165 | } | ||
166 | |||
167 | module_init(atmel_sha204a_init); | ||
168 | module_exit(atmel_sha204a_exit); | ||
169 | |||
170 | MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); | ||
171 | MODULE_LICENSE("GPL v2"); | ||
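The SHA204A read callback implements both halves of the hwrng contract: with wait=true it performs a full blocking i2c transaction, and with wait=false it returns whatever the previous asynchronous read buffered in rng->priv (possibly nothing) while kicking off the next one, keeping at most one request in flight. A kernel-context sketch of the blocking consumer side, modelled loosely on what the hwrng core does — not the core's actual implementation::

    /* Illustrative consumer: call the driver's read callback until len
     * bytes are gathered; a short read just means another transaction. */
    static int example_fill(struct hwrng *rng, u8 *buf, size_t len)
    {
            size_t off = 0;

            while (off < len) {
                    int n = rng->read(rng, buf + off, len - off, true);

                    if (n < 0)
                            return n;
                    off += n;
            }
            return 0;
    }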
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 18410c9e7b29..869602fcfd96 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c | |||
@@ -85,7 +85,7 @@ MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos"); | |||
85 | * 0x70 - ring 2 | 85 | * 0x70 - ring 2 |
86 | * 0x78 - ring 3 | 86 | * 0x78 - ring 3 |
87 | */ | 87 | */ |
88 | char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 }; | 88 | static char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 }; |
89 | /* | 89 | /* |
90 | * Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN | 90 | * Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN |
91 | * is set dynamically after reading SPU type from device tree. | 91 | * is set dynamically after reading SPU type from device tree. |
@@ -2083,7 +2083,7 @@ static int __ahash_init(struct ahash_request *req) | |||
2083 | * Return: true if incremental hashing is not supported | 2083 | * Return: true if incremental hashing is not supported |
2084 | * false otherwise | 2084 | * false otherwise |
2085 | */ | 2085 | */ |
2086 | bool spu_no_incr_hash(struct iproc_ctx_s *ctx) | 2086 | static bool spu_no_incr_hash(struct iproc_ctx_s *ctx) |
2087 | { | 2087 | { |
2088 | struct spu_hw *spu = &iproc_priv.spu; | 2088 | struct spu_hw *spu = &iproc_priv.spu; |
2089 | 2089 | ||
@@ -4809,7 +4809,7 @@ static int spu_dt_read(struct platform_device *pdev) | |||
4809 | return 0; | 4809 | return 0; |
4810 | } | 4810 | } |
4811 | 4811 | ||
4812 | int bcm_spu_probe(struct platform_device *pdev) | 4812 | static int bcm_spu_probe(struct platform_device *pdev) |
4813 | { | 4813 | { |
4814 | struct device *dev = &pdev->dev; | 4814 | struct device *dev = &pdev->dev; |
4815 | struct spu_hw *spu = &iproc_priv.spu; | 4815 | struct spu_hw *spu = &iproc_priv.spu; |
@@ -4853,7 +4853,7 @@ failure: | |||
4853 | return err; | 4853 | return err; |
4854 | } | 4854 | } |
4855 | 4855 | ||
4856 | int bcm_spu_remove(struct platform_device *pdev) | 4856 | static int bcm_spu_remove(struct platform_device *pdev) |
4857 | { | 4857 | { |
4858 | int i; | 4858 | int i; |
4859 | struct device *dev = &pdev->dev; | 4859 | struct device *dev = &pdev->dev; |
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c index cb477259a2e2..2add51024575 100644 --- a/drivers/crypto/bcm/spu2.c +++ b/drivers/crypto/bcm/spu2.c | |||
@@ -38,21 +38,21 @@ enum spu2_proto_sel { | |||
38 | SPU2_DTLS_AEAD = 10 | 38 | SPU2_DTLS_AEAD = 10 |
39 | }; | 39 | }; |
40 | 40 | ||
41 | char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256", | 41 | static char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256", |
42 | "DES", "3DES" | 42 | "DES", "3DES" |
43 | }; | 43 | }; |
44 | 44 | ||
45 | char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB", "XTS", | 45 | static char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB", |
46 | "CCM", "GCM" | 46 | "XTS", "CCM", "GCM" |
47 | }; | 47 | }; |
48 | 48 | ||
49 | char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256", | 49 | static char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256", |
50 | "Reserved", "Reserved", "MD5", "SHA1", "SHA224", "SHA256", "SHA384", | 50 | "Reserved", "Reserved", "MD5", "SHA1", "SHA224", "SHA256", "SHA384", |
51 | "SHA512", "SHA512/224", "SHA512/256", "SHA3-224", "SHA3-256", | 51 | "SHA512", "SHA512/224", "SHA512/256", "SHA3-224", "SHA3-256", |
52 | "SHA3-384", "SHA3-512" | 52 | "SHA3-384", "SHA3-512" |
53 | }; | 53 | }; |
54 | 54 | ||
55 | char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC", | 55 | static char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC", |
56 | "Rabin", "CCM", "GCM", "Reserved" | 56 | "Rabin", "CCM", "GCM", "Reserved" |
57 | }; | 57 | }; |
58 | 58 | ||
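The spu2.c hunk only adds static to these name tables. A further tightening such tables commonly get — an editorial aside, not part of this patch — is full const-qualification, so both the pointer array and the strings land in read-only data::

    /* 'static const char * const': neither the array slots nor the
     * strings they point to can be modified at runtime. */
    static const char * const example_mode_names[] = {
            "ECB", "CBC", "CTR", "CFB", "OFB", "XTS", "CCM", "GCM",
    };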
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index 577c9844b322..3720ddabb507 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig | |||
@@ -2,6 +2,12 @@ | |||
2 | config CRYPTO_DEV_FSL_CAAM_COMMON | 2 | config CRYPTO_DEV_FSL_CAAM_COMMON |
3 | tristate | 3 | tristate |
4 | 4 | ||
5 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC | ||
6 | tristate | ||
7 | |||
8 | config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC | ||
9 | tristate | ||
10 | |||
5 | config CRYPTO_DEV_FSL_CAAM | 11 | config CRYPTO_DEV_FSL_CAAM |
6 | tristate "Freescale CAAM-Multicore platform driver backend" | 12 | tristate "Freescale CAAM-Multicore platform driver backend" |
7 | depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE | 13 | depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE |
@@ -25,7 +31,7 @@ config CRYPTO_DEV_FSL_CAAM_DEBUG | |||
25 | Selecting this will enable printing of various debug | 31 | Selecting this will enable printing of various debug |
26 | information in the CAAM driver. | 32 | information in the CAAM driver. |
27 | 33 | ||
28 | config CRYPTO_DEV_FSL_CAAM_JR | 34 | menuconfig CRYPTO_DEV_FSL_CAAM_JR |
29 | tristate "Freescale CAAM Job Ring driver backend" | 35 | tristate "Freescale CAAM Job Ring driver backend" |
30 | default y | 36 | default y |
31 | help | 37 | help |
@@ -86,8 +92,9 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD | |||
86 | threshold. Range is 1-65535. | 92 | threshold. Range is 1-65535. |
87 | 93 | ||
88 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API | 94 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API |
89 | tristate "Register algorithm implementations with the Crypto API" | 95 | bool "Register algorithm implementations with the Crypto API" |
90 | default y | 96 | default y |
97 | select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC | ||
91 | select CRYPTO_AEAD | 98 | select CRYPTO_AEAD |
92 | select CRYPTO_AUTHENC | 99 | select CRYPTO_AUTHENC |
93 | select CRYPTO_BLKCIPHER | 100 | select CRYPTO_BLKCIPHER |
@@ -97,13 +104,11 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API | |||
97 | scatterlist crypto API (such as the linux native IPSec | 104 | scatterlist crypto API (such as the linux native IPSec |
98 | stack) to the SEC4 via job ring. | 105 | stack) to the SEC4 via job ring. |
99 | 106 | ||
100 | To compile this as a module, choose M here: the module | ||
101 | will be called caamalg. | ||
102 | |||
103 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI | 107 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI |
104 | tristate "Queue Interface as Crypto API backend" | 108 | bool "Queue Interface as Crypto API backend" |
105 | depends on FSL_DPAA && NET | 109 | depends on FSL_DPAA && NET |
106 | default y | 110 | default y |
111 | select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC | ||
107 | select CRYPTO_AUTHENC | 112 | select CRYPTO_AUTHENC |
108 | select CRYPTO_BLKCIPHER | 113 | select CRYPTO_BLKCIPHER |
109 | help | 114 | help |
@@ -114,33 +119,26 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI | |||
114 | assigned to the kernel should also be more than the number of | 119 | assigned to the kernel should also be more than the number of |
115 | job rings. | 120 | job rings. |
116 | 121 | ||
117 | To compile this as a module, choose M here: the module | ||
118 | will be called caamalg_qi. | ||
119 | |||
120 | config CRYPTO_DEV_FSL_CAAM_AHASH_API | 122 | config CRYPTO_DEV_FSL_CAAM_AHASH_API |
121 | tristate "Register hash algorithm implementations with Crypto API" | 123 | bool "Register hash algorithm implementations with Crypto API" |
122 | default y | 124 | default y |
125 | select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC | ||
123 | select CRYPTO_HASH | 126 | select CRYPTO_HASH |
124 | help | 127 | help |
125 | Selecting this will offload ahash for users of the | 128 | Selecting this will offload ahash for users of the |
126 | scatterlist crypto API to the SEC4 via job ring. | 129 | scatterlist crypto API to the SEC4 via job ring. |
127 | 130 | ||
128 | To compile this as a module, choose M here: the module | ||
129 | will be called caamhash. | ||
130 | |||
131 | config CRYPTO_DEV_FSL_CAAM_PKC_API | 131 | config CRYPTO_DEV_FSL_CAAM_PKC_API |
132 | tristate "Register public key cryptography implementations with Crypto API" | 132 | bool "Register public key cryptography implementations with Crypto API" |
133 | default y | 133 | default y |
134 | select CRYPTO_RSA | 134 | select CRYPTO_RSA |
135 | help | 135 | help |
136 | Selecting this will allow SEC Public key support for RSA. | 136 | Selecting this will allow SEC Public key support for RSA. |
137 | Supported cryptographic primitives: encryption, decryption, | 137 | Supported cryptographic primitives: encryption, decryption, |
138 | signature and verification. | 138 | signature and verification. |
139 | To compile this as a module, choose M here: the module | ||
140 | will be called caam_pkc. | ||
141 | 139 | ||
142 | config CRYPTO_DEV_FSL_CAAM_RNG_API | 140 | config CRYPTO_DEV_FSL_CAAM_RNG_API |
143 | tristate "Register caam device for hwrng API" | 141 | bool "Register caam device for hwrng API" |
144 | default y | 142 | default y |
145 | select CRYPTO_RNG | 143 | select CRYPTO_RNG |
146 | select HW_RANDOM | 144 | select HW_RANDOM |
@@ -148,9 +146,6 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API | |||
148 | Selecting this will register the SEC4 hardware rng to | 146 | Selecting this will register the SEC4 hardware rng to |
149 | the hw_random API for supplying the kernel entropy pool. | 147 | the hw_random API for supplying the kernel entropy pool. |
150 | 148 | ||
151 | To compile this as a module, choose M here: the module | ||
152 | will be called caamrng. | ||
153 | |||
154 | endif # CRYPTO_DEV_FSL_CAAM_JR | 149 | endif # CRYPTO_DEV_FSL_CAAM_JR |
155 | 150 | ||
156 | endif # CRYPTO_DEV_FSL_CAAM | 151 | endif # CRYPTO_DEV_FSL_CAAM |
@@ -160,6 +155,8 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM | |||
160 | depends on FSL_MC_DPIO | 155 | depends on FSL_MC_DPIO |
161 | depends on NETDEVICES | 156 | depends on NETDEVICES |
162 | select CRYPTO_DEV_FSL_CAAM_COMMON | 157 | select CRYPTO_DEV_FSL_CAAM_COMMON |
158 | select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC | ||
159 | select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC | ||
163 | select CRYPTO_BLKCIPHER | 160 | select CRYPTO_BLKCIPHER |
164 | select CRYPTO_AUTHENC | 161 | select CRYPTO_AUTHENC |
165 | select CRYPTO_AEAD | 162 | select CRYPTO_AEAD |
@@ -171,12 +168,3 @@ config CRYPTO_DEV_FSL_DPAA2_CAAM | |||
171 | 168 | ||
172 | To compile this as a module, choose M here: the module | 169 | To compile this as a module, choose M here: the module |
173 | will be called dpaa2_caam. | 170 | will be called dpaa2_caam. |
174 | |||
175 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC | ||
176 | def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \ | ||
177 | CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \ | ||
178 | CRYPTO_DEV_FSL_DPAA2_CAAM) | ||
179 | |||
180 | config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC | ||
181 | def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \ | ||
182 | CRYPTO_DEV_FSL_DPAA2_CAAM) | ||
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile index 7bbfd06a11ff..9ab4e81ea21e 100644 --- a/drivers/crypto/caam/Makefile +++ b/drivers/crypto/caam/Makefile | |||
@@ -11,20 +11,20 @@ ccflags-y += -DVERSION=\"\" | |||
11 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o | 11 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o |
12 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o | 12 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o |
13 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o | 13 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o |
14 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o | ||
15 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o | ||
16 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o | 14 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o |
17 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o | ||
18 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o | 15 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o |
19 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o | ||
20 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o | ||
21 | 16 | ||
22 | caam-objs := ctrl.o | 17 | caam-y := ctrl.o |
23 | caam_jr-objs := jr.o key_gen.o | 18 | caam_jr-y := jr.o key_gen.o |
24 | caam_pkc-y := caampkc.o pkc_desc.o | 19 | caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o |
20 | caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o | ||
21 | caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o | ||
22 | caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o | ||
23 | caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o | ||
24 | |||
25 | caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o | ||
25 | ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),) | 26 | ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),) |
26 | ccflags-y += -DCONFIG_CAAM_QI | 27 | ccflags-y += -DCONFIG_CAAM_QI |
27 | caam-objs += qi.o | ||
28 | endif | 28 | endif |
29 | 29 | ||
30 | obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o | 30 | obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o |
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index c0ece44f303b..43f18253e5b6 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
@@ -77,13 +77,6 @@ | |||
77 | #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) | 77 | #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) |
78 | #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) | 78 | #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) |
79 | 79 | ||
80 | #ifdef DEBUG | ||
81 | /* for print_hex_dumps with line references */ | ||
82 | #define debug(format, arg...) printk(format, arg) | ||
83 | #else | ||
84 | #define debug(format, arg...) | ||
85 | #endif | ||
86 | |||
87 | struct caam_alg_entry { | 80 | struct caam_alg_entry { |
88 | int class1_alg_type; | 81 | int class1_alg_type; |
89 | int class2_alg_type; | 82 | int class2_alg_type; |
@@ -583,13 +576,11 @@ static int aead_setkey(struct crypto_aead *aead, | |||
583 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) | 576 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) |
584 | goto badkey; | 577 | goto badkey; |
585 | 578 | ||
586 | #ifdef DEBUG | 579 | dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n", |
587 | printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n", | ||
588 | keys.authkeylen + keys.enckeylen, keys.enckeylen, | 580 | keys.authkeylen + keys.enckeylen, keys.enckeylen, |
589 | keys.authkeylen); | 581 | keys.authkeylen); |
590 | print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", | 582 | print_hex_dump_debug("key in @"__stringify(__LINE__)": ", |
591 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 583 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
592 | #endif | ||
593 | 584 | ||
594 | /* | 585 | /* |
595 | * If DKP is supported, use it in the shared descriptor to generate | 586 | * If DKP is supported, use it in the shared descriptor to generate |
@@ -623,11 +614,10 @@ static int aead_setkey(struct crypto_aead *aead, | |||
623 | memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); | 614 | memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); |
624 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + | 615 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + |
625 | keys.enckeylen, ctx->dir); | 616 | keys.enckeylen, ctx->dir); |
626 | #ifdef DEBUG | 617 | |
627 | print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", | 618 | print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ", |
628 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | 619 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, |
629 | ctx->adata.keylen_pad + keys.enckeylen, 1); | 620 | ctx->adata.keylen_pad + keys.enckeylen, 1); |
630 | #endif | ||
631 | 621 | ||
632 | skip_split_key: | 622 | skip_split_key: |
633 | ctx->cdata.keylen = keys.enckeylen; | 623 | ctx->cdata.keylen = keys.enckeylen; |
@@ -678,10 +668,8 @@ static int gcm_setkey(struct crypto_aead *aead, | |||
678 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 668 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
679 | struct device *jrdev = ctx->jrdev; | 669 | struct device *jrdev = ctx->jrdev; |
680 | 670 | ||
681 | #ifdef DEBUG | 671 | print_hex_dump_debug("key in @"__stringify(__LINE__)": ", |
682 | print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", | 672 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
683 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | ||
684 | #endif | ||
685 | 673 | ||
686 | memcpy(ctx->key, key, keylen); | 674 | memcpy(ctx->key, key, keylen); |
687 | dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir); | 675 | dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir); |
@@ -699,10 +687,8 @@ static int rfc4106_setkey(struct crypto_aead *aead, | |||
699 | if (keylen < 4) | 687 | if (keylen < 4) |
700 | return -EINVAL; | 688 | return -EINVAL; |
701 | 689 | ||
702 | #ifdef DEBUG | 690 | print_hex_dump_debug("key in @"__stringify(__LINE__)": ", |
703 | print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", | 691 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
704 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | ||
705 | #endif | ||
706 | 692 | ||
707 | memcpy(ctx->key, key, keylen); | 693 | memcpy(ctx->key, key, keylen); |
708 | 694 | ||
@@ -725,10 +711,8 @@ static int rfc4543_setkey(struct crypto_aead *aead, | |||
725 | if (keylen < 4) | 711 | if (keylen < 4) |
726 | return -EINVAL; | 712 | return -EINVAL; |
727 | 713 | ||
728 | #ifdef DEBUG | 714 | print_hex_dump_debug("key in @"__stringify(__LINE__)": ", |
729 | print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", | 715 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
730 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | ||
731 | #endif | ||
732 | 716 | ||
733 | memcpy(ctx->key, key, keylen); | 717 | memcpy(ctx->key, key, keylen); |
734 | 718 | ||
@@ -757,10 +741,8 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, | |||
757 | OP_ALG_AAI_CTR_MOD128); | 741 | OP_ALG_AAI_CTR_MOD128); |
758 | const bool is_rfc3686 = alg->caam.rfc3686; | 742 | const bool is_rfc3686 = alg->caam.rfc3686; |
759 | 743 | ||
760 | #ifdef DEBUG | 744 | print_hex_dump_debug("key in @"__stringify(__LINE__)": ", |
761 | print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", | 745 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
762 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | ||
763 | #endif | ||
764 | /* | 746 | /* |
765 | * AES-CTR needs to load IV in CONTEXT1 reg | 747 | * AES-CTR needs to load IV in CONTEXT1 reg |
766 | * at an offset of 128bits (16bytes) | 748 | * at an offset of 128bits (16bytes) |
@@ -916,7 +898,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src, | |||
916 | } | 898 | } |
917 | 899 | ||
918 | if (iv_dma) | 900 | if (iv_dma) |
919 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); | 901 | dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL); |
920 | if (sec4_sg_bytes) | 902 | if (sec4_sg_bytes) |
921 | dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes, | 903 | dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes, |
922 | DMA_TO_DEVICE); | 904 | DMA_TO_DEVICE); |
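The direction change when unmapping the IV is tied to the IV write-back introduced in this patch: the descriptors now store the updated IV into the same DMA buffer, so the device both reads and writes it, and the map/unmap pair must agree on DMA_BIDIRECTIONAL (the matching dma_map_single() change appears in skcipher_edesc_alloc() below). In sketch form:

	/* The accelerator writes the updated IV back into this buffer,
	 * so both map and unmap must use the same bidirectional direction.
	 */
	iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
	/* ... job runs; hardware updates the IV in place ... */
	dma_unmap_single(jrdev, iv_dma, ivsize, DMA_BIDIRECTIONAL);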
@@ -949,9 +931,7 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
949 | struct aead_request *req = context; | 931 | struct aead_request *req = context; |
950 | struct aead_edesc *edesc; | 932 | struct aead_edesc *edesc; |
951 | 933 | ||
952 | #ifdef DEBUG | 934 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
953 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
954 | #endif | ||
955 | 935 | ||
956 | edesc = container_of(desc, struct aead_edesc, hw_desc[0]); | 936 | edesc = container_of(desc, struct aead_edesc, hw_desc[0]); |
957 | 937 | ||
@@ -971,9 +951,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
971 | struct aead_request *req = context; | 951 | struct aead_request *req = context; |
972 | struct aead_edesc *edesc; | 952 | struct aead_edesc *edesc; |
973 | 953 | ||
974 | #ifdef DEBUG | 954 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
975 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
976 | #endif | ||
977 | 955 | ||
978 | edesc = container_of(desc, struct aead_edesc, hw_desc[0]); | 956 | edesc = container_of(desc, struct aead_edesc, hw_desc[0]); |
979 | 957 | ||
@@ -1001,33 +979,32 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
1001 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); | 979 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
1002 | int ivsize = crypto_skcipher_ivsize(skcipher); | 980 | int ivsize = crypto_skcipher_ivsize(skcipher); |
1003 | 981 | ||
1004 | #ifdef DEBUG | 982 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
1005 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
1006 | #endif | ||
1007 | 983 | ||
1008 | edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); | 984 | edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); |
1009 | 985 | ||
1010 | if (err) | 986 | if (err) |
1011 | caam_jr_strstatus(jrdev, err); | 987 | caam_jr_strstatus(jrdev, err); |
1012 | 988 | ||
1013 | #ifdef DEBUG | ||
1014 | print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", | ||
1015 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | ||
1016 | edesc->src_nents > 1 ? 100 : ivsize, 1); | ||
1017 | #endif | ||
1018 | caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", | ||
1019 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, | ||
1020 | edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); | ||
1021 | |||
1022 | skcipher_unmap(jrdev, edesc, req); | 989 | skcipher_unmap(jrdev, edesc, req); |
1023 | 990 | ||
1024 | /* | 991 | /* |
1025 | * The crypto API expects us to set the IV (req->iv) to the last | 992 | * The crypto API expects us to set the IV (req->iv) to the last |
1026 | * ciphertext block. This is used e.g. by the CTS mode. | 993 | * ciphertext block (CBC mode) or last counter (CTR mode). |
994 | * This is used e.g. by the CTS mode. | ||
1027 | */ | 995 | */ |
1028 | if (ivsize) | 996 | if (ivsize) { |
1029 | scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - | 997 | memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes, |
1030 | ivsize, ivsize, 0); | 998 | ivsize); |
999 | |||
1000 | print_hex_dump_debug("dstiv @"__stringify(__LINE__)": ", | ||
1001 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | ||
1002 | edesc->src_nents > 1 ? 100 : ivsize, 1); | ||
1003 | } | ||
1004 | |||
1005 | caam_dump_sg("dst @" __stringify(__LINE__)": ", | ||
1006 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, | ||
1007 | edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); | ||
1031 | 1008 | ||
1032 | kfree(edesc); | 1009 | kfree(edesc); |
1033 | 1010 | ||
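The write-back contract matters to callers that chain requests: after completion, req->iv must hold the next IV (the last ciphertext block for CBC, the next counter for CTR), so a long message can be split across several requests. A minimal caller sketch against the standard skcipher API, assuming the transform, request, wait object and scatterlists have already been set up:

	/* Encrypt one stream as two chunks; correctness relies on the
	 * driver updating iv[] when each request completes.
	 */
	skcipher_request_set_crypt(req, sg_part1, sg_part1, len1, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	/* iv[] now holds the last ciphertext block of part 1 (CBC) */
	skcipher_request_set_crypt(req, sg_part2, sg_part2, len2, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);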
@@ -1039,26 +1016,35 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, | |||
1039 | { | 1016 | { |
1040 | struct skcipher_request *req = context; | 1017 | struct skcipher_request *req = context; |
1041 | struct skcipher_edesc *edesc; | 1018 | struct skcipher_edesc *edesc; |
1042 | #ifdef DEBUG | ||
1043 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); | 1019 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
1044 | int ivsize = crypto_skcipher_ivsize(skcipher); | 1020 | int ivsize = crypto_skcipher_ivsize(skcipher); |
1045 | 1021 | ||
1046 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 1022 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
1047 | #endif | ||
1048 | 1023 | ||
1049 | edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); | 1024 | edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]); |
1050 | if (err) | 1025 | if (err) |
1051 | caam_jr_strstatus(jrdev, err); | 1026 | caam_jr_strstatus(jrdev, err); |
1052 | 1027 | ||
1053 | #ifdef DEBUG | 1028 | skcipher_unmap(jrdev, edesc, req); |
1054 | print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", | 1029 | |
1055 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); | 1030 | /* |
1056 | #endif | 1031 | * The crypto API expects us to set the IV (req->iv) to the last |
1057 | caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", | 1032 | * ciphertext block (CBC mode) or last counter (CTR mode). |
1033 | * This is used e.g. by the CTS mode. | ||
1034 | */ | ||
1035 | if (ivsize) { | ||
1036 | memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes, | ||
1037 | ivsize); | ||
1038 | |||
1039 | print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", | ||
1040 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | ||
1041 | ivsize, 1); | ||
1042 | } | ||
1043 | |||
1044 | caam_dump_sg("dst @" __stringify(__LINE__)": ", | ||
1058 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, | 1045 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, |
1059 | edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); | 1046 | edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); |
1060 | 1047 | ||
1061 | skcipher_unmap(jrdev, edesc, req); | ||
1062 | kfree(edesc); | 1048 | kfree(edesc); |
1063 | 1049 | ||
1064 | skcipher_request_complete(req, err); | 1050 | skcipher_request_complete(req, err); |
@@ -1106,6 +1092,7 @@ static void init_aead_job(struct aead_request *req, | |||
1106 | if (unlikely(req->src != req->dst)) { | 1092 | if (unlikely(req->src != req->dst)) { |
1107 | if (!edesc->mapped_dst_nents) { | 1093 | if (!edesc->mapped_dst_nents) { |
1108 | dst_dma = 0; | 1094 | dst_dma = 0; |
1095 | out_options = 0; | ||
1109 | } else if (edesc->mapped_dst_nents == 1) { | 1096 | } else if (edesc->mapped_dst_nents == 1) { |
1110 | dst_dma = sg_dma_address(req->dst); | 1097 | dst_dma = sg_dma_address(req->dst); |
1111 | out_options = 0; | 1098 | out_options = 0; |
@@ -1249,6 +1236,7 @@ static void init_skcipher_job(struct skcipher_request *req, | |||
1249 | { | 1236 | { |
1250 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); | 1237 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
1251 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); | 1238 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); |
1239 | struct device *jrdev = ctx->jrdev; | ||
1252 | int ivsize = crypto_skcipher_ivsize(skcipher); | 1240 | int ivsize = crypto_skcipher_ivsize(skcipher); |
1253 | u32 *desc = edesc->hw_desc; | 1241 | u32 *desc = edesc->hw_desc; |
1254 | u32 *sh_desc; | 1242 | u32 *sh_desc; |
@@ -1256,13 +1244,12 @@ static void init_skcipher_job(struct skcipher_request *req, | |||
1256 | dma_addr_t src_dma, dst_dma, ptr; | 1244 | dma_addr_t src_dma, dst_dma, ptr; |
1257 | int len, sec4_sg_index = 0; | 1245 | int len, sec4_sg_index = 0; |
1258 | 1246 | ||
1259 | #ifdef DEBUG | 1247 | print_hex_dump_debug("presciv@"__stringify(__LINE__)": ", |
1260 | print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", | 1248 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); |
1261 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); | 1249 | dev_dbg(jrdev, "asked=%d, cryptlen %d\n", |
1262 | pr_err("asked=%d, cryptlen %d\n", | ||
1263 | (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen); | 1250 | (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen); |
1264 | #endif | 1251 | |
1265 | caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ", | 1252 | caam_dump_sg("src @" __stringify(__LINE__)": ", |
1266 | DUMP_PREFIX_ADDRESS, 16, 4, req->src, | 1253 | DUMP_PREFIX_ADDRESS, 16, 4, req->src, |
1267 | edesc->src_nents > 1 ? 100 : req->cryptlen, 1); | 1254 | edesc->src_nents > 1 ? 100 : req->cryptlen, 1); |
1268 | 1255 | ||
@@ -1285,7 +1272,7 @@ static void init_skcipher_job(struct skcipher_request *req, | |||
1285 | if (likely(req->src == req->dst)) { | 1272 | if (likely(req->src == req->dst)) { |
1286 | dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry); | 1273 | dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry); |
1287 | out_options = in_options; | 1274 | out_options = in_options; |
1288 | } else if (edesc->mapped_dst_nents == 1) { | 1275 | } else if (!ivsize && edesc->mapped_dst_nents == 1) { |
1289 | dst_dma = sg_dma_address(req->dst); | 1276 | dst_dma = sg_dma_address(req->dst); |
1290 | } else { | 1277 | } else { |
1291 | dst_dma = edesc->sec4_sg_dma + sec4_sg_index * | 1278 | dst_dma = edesc->sec4_sg_dma + sec4_sg_index * |
@@ -1293,7 +1280,7 @@ static void init_skcipher_job(struct skcipher_request *req, | |||
1293 | out_options = LDST_SGF; | 1280 | out_options = LDST_SGF; |
1294 | } | 1281 | } |
1295 | 1282 | ||
1296 | append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options); | 1283 | append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options); |
1297 | } | 1284 | } |
1298 | 1285 | ||
1299 | /* | 1286 | /* |
@@ -1309,37 +1296,36 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
1309 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | 1296 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
1310 | GFP_KERNEL : GFP_ATOMIC; | 1297 | GFP_KERNEL : GFP_ATOMIC; |
1311 | int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; | 1298 | int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; |
1299 | int src_len, dst_len = 0; | ||
1312 | struct aead_edesc *edesc; | 1300 | struct aead_edesc *edesc; |
1313 | int sec4_sg_index, sec4_sg_len, sec4_sg_bytes; | 1301 | int sec4_sg_index, sec4_sg_len, sec4_sg_bytes; |
1314 | unsigned int authsize = ctx->authsize; | 1302 | unsigned int authsize = ctx->authsize; |
1315 | 1303 | ||
1316 | if (unlikely(req->dst != req->src)) { | 1304 | if (unlikely(req->dst != req->src)) { |
1317 | src_nents = sg_nents_for_len(req->src, req->assoclen + | 1305 | src_len = req->assoclen + req->cryptlen; |
1318 | req->cryptlen); | 1306 | dst_len = src_len + (encrypt ? authsize : (-authsize)); |
1307 | |||
1308 | src_nents = sg_nents_for_len(req->src, src_len); | ||
1319 | if (unlikely(src_nents < 0)) { | 1309 | if (unlikely(src_nents < 0)) { |
1320 | dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", | 1310 | dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", |
1321 | req->assoclen + req->cryptlen); | 1311 | src_len); |
1322 | return ERR_PTR(src_nents); | 1312 | return ERR_PTR(src_nents); |
1323 | } | 1313 | } |
1324 | 1314 | ||
1325 | dst_nents = sg_nents_for_len(req->dst, req->assoclen + | 1315 | dst_nents = sg_nents_for_len(req->dst, dst_len); |
1326 | req->cryptlen + | ||
1327 | (encrypt ? authsize : | ||
1328 | (-authsize))); | ||
1329 | if (unlikely(dst_nents < 0)) { | 1316 | if (unlikely(dst_nents < 0)) { |
1330 | dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n", | 1317 | dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n", |
1331 | req->assoclen + req->cryptlen + | 1318 | dst_len); |
1332 | (encrypt ? authsize : (-authsize))); | ||
1333 | return ERR_PTR(dst_nents); | 1319 | return ERR_PTR(dst_nents); |
1334 | } | 1320 | } |
1335 | } else { | 1321 | } else { |
1336 | src_nents = sg_nents_for_len(req->src, req->assoclen + | 1322 | src_len = req->assoclen + req->cryptlen + |
1337 | req->cryptlen + | 1323 | (encrypt ? authsize : 0); |
1338 | (encrypt ? authsize : 0)); | 1324 | |
1325 | src_nents = sg_nents_for_len(req->src, src_len); | ||
1339 | if (unlikely(src_nents < 0)) { | 1326 | if (unlikely(src_nents < 0)) { |
1340 | dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", | 1327 | dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", |
1341 | req->assoclen + req->cryptlen + | 1328 | src_len); |
1342 | (encrypt ? authsize : 0)); | ||
1343 | return ERR_PTR(src_nents); | 1329 | return ERR_PTR(src_nents); |
1344 | } | 1330 | } |
1345 | } | 1331 | } |
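The refactor computes each S/G length once instead of repeating the expression in every error path. Concretely, for an AEAD encrypt with illustrative numbers assoclen = 16, cryptlen = 64 and authsize = 16:

	/* req->src != req->dst, encrypt:
	 *   src_len = assoclen + cryptlen = 16 + 64 = 80
	 *   dst_len = src_len + authsize  = 80 + 16 = 96
	 * decrypt instead consumes the ICV from the source:
	 *   dst_len = src_len - authsize
	 */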
@@ -1380,8 +1366,16 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
1380 | } | 1366 | } |
1381 | } | 1367 | } |
1382 | 1368 | ||
1369 | /* | ||
1370 | * HW reads 4 S/G entries at a time; make sure the reads don't go beyond | ||
1371 | * the end of the table by allocating more S/G entries. | ||
1372 | */ | ||
1383 | sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0; | 1373 | sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0; |
1384 | sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0; | 1374 | if (mapped_dst_nents > 1) |
1375 | sec4_sg_len += pad_sg_nents(mapped_dst_nents); | ||
1376 | else | ||
1377 | sec4_sg_len = pad_sg_nents(sec4_sg_len); | ||
1378 | |||
1385 | sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); | 1379 | sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); |
1386 | 1380 | ||
1387 | /* allocate space for base edesc and hw desc commands, link tables */ | 1381 | /* allocate space for base edesc and hw desc commands, link tables */ |
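pad_sg_nents() is the helper the new comment refers to. A sketch consistent with "HW reads 4 S/G entries at a time" rounds the count up to a multiple of 4; the real helper lives in the caam headers, so treat this as an assumption matching the stated semantics:

	/* Round the S/G entry count up so the hardware's 4-entry burst
	 * reads never run past the end of the table.
	 */
	static inline int pad_sg_nents(int sg_nents)
	{
		return ALIGN(sg_nents, 4);
	}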
@@ -1403,12 +1397,12 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
1403 | 1397 | ||
1404 | sec4_sg_index = 0; | 1398 | sec4_sg_index = 0; |
1405 | if (mapped_src_nents > 1) { | 1399 | if (mapped_src_nents > 1) { |
1406 | sg_to_sec4_sg_last(req->src, mapped_src_nents, | 1400 | sg_to_sec4_sg_last(req->src, src_len, |
1407 | edesc->sec4_sg + sec4_sg_index, 0); | 1401 | edesc->sec4_sg + sec4_sg_index, 0); |
1408 | sec4_sg_index += mapped_src_nents; | 1402 | sec4_sg_index += mapped_src_nents; |
1409 | } | 1403 | } |
1410 | if (mapped_dst_nents > 1) { | 1404 | if (mapped_dst_nents > 1) { |
1411 | sg_to_sec4_sg_last(req->dst, mapped_dst_nents, | 1405 | sg_to_sec4_sg_last(req->dst, dst_len, |
1412 | edesc->sec4_sg + sec4_sg_index, 0); | 1406 | edesc->sec4_sg + sec4_sg_index, 0); |
1413 | } | 1407 | } |
1414 | 1408 | ||
@@ -1446,11 +1440,10 @@ static int gcm_encrypt(struct aead_request *req) | |||
1446 | 1440 | ||
1447 | /* Create and submit job descriptor */ | 1441 | /* Create and submit job descriptor */ |
1448 | init_gcm_job(req, edesc, all_contig, true); | 1442 | init_gcm_job(req, edesc, all_contig, true); |
1449 | #ifdef DEBUG | 1443 | |
1450 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", | 1444 | print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ", |
1451 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 1445 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
1452 | desc_bytes(edesc->hw_desc), 1); | 1446 | desc_bytes(edesc->hw_desc), 1); |
1453 | #endif | ||
1454 | 1447 | ||
1455 | desc = edesc->hw_desc; | 1448 | desc = edesc->hw_desc; |
1456 | ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); | 1449 | ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); |
@@ -1556,11 +1549,10 @@ static int aead_encrypt(struct aead_request *req) | |||
1556 | 1549 | ||
1557 | /* Create and submit job descriptor */ | 1550 | /* Create and submit job descriptor */ |
1558 | init_authenc_job(req, edesc, all_contig, true); | 1551 | init_authenc_job(req, edesc, all_contig, true); |
1559 | #ifdef DEBUG | 1552 | |
1560 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", | 1553 | print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ", |
1561 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 1554 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
1562 | desc_bytes(edesc->hw_desc), 1); | 1555 | desc_bytes(edesc->hw_desc), 1); |
1563 | #endif | ||
1564 | 1556 | ||
1565 | desc = edesc->hw_desc; | 1557 | desc = edesc->hw_desc; |
1566 | ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); | 1558 | ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); |
@@ -1591,11 +1583,10 @@ static int gcm_decrypt(struct aead_request *req) | |||
1591 | 1583 | ||
1592 | /* Create and submit job descriptor*/ | 1584 | /* Create and submit job descriptor*/ |
1593 | init_gcm_job(req, edesc, all_contig, false); | 1585 | init_gcm_job(req, edesc, all_contig, false); |
1594 | #ifdef DEBUG | 1586 | |
1595 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", | 1587 | print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ", |
1596 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 1588 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
1597 | desc_bytes(edesc->hw_desc), 1); | 1589 | desc_bytes(edesc->hw_desc), 1); |
1598 | #endif | ||
1599 | 1590 | ||
1600 | desc = edesc->hw_desc; | 1591 | desc = edesc->hw_desc; |
1601 | ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); | 1592 | ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); |
@@ -1627,7 +1618,7 @@ static int aead_decrypt(struct aead_request *req) | |||
1627 | u32 *desc; | 1618 | u32 *desc; |
1628 | int ret = 0; | 1619 | int ret = 0; |
1629 | 1620 | ||
1630 | caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ", | 1621 | caam_dump_sg("dec src@" __stringify(__LINE__)": ", |
1631 | DUMP_PREFIX_ADDRESS, 16, 4, req->src, | 1622 | DUMP_PREFIX_ADDRESS, 16, 4, req->src, |
1632 | req->assoclen + req->cryptlen, 1); | 1623 | req->assoclen + req->cryptlen, 1); |
1633 | 1624 | ||
@@ -1639,11 +1630,10 @@ static int aead_decrypt(struct aead_request *req) | |||
1639 | 1630 | ||
1640 | /* Create and submit job descriptor*/ | 1631 | /* Create and submit job descriptor*/ |
1641 | init_authenc_job(req, edesc, all_contig, false); | 1632 | init_authenc_job(req, edesc, all_contig, false); |
1642 | #ifdef DEBUG | 1633 | |
1643 | print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", | 1634 | print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ", |
1644 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 1635 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
1645 | desc_bytes(edesc->hw_desc), 1); | 1636 | desc_bytes(edesc->hw_desc), 1); |
1646 | #endif | ||
1647 | 1637 | ||
1648 | desc = edesc->hw_desc; | 1638 | desc = edesc->hw_desc; |
1649 | ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); | 1639 | ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); |
@@ -1719,7 +1709,29 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, | |||
1719 | else | 1709 | else |
1720 | sec4_sg_ents = mapped_src_nents + !!ivsize; | 1710 | sec4_sg_ents = mapped_src_nents + !!ivsize; |
1721 | dst_sg_idx = sec4_sg_ents; | 1711 | dst_sg_idx = sec4_sg_ents; |
1722 | sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; | 1712 | |
1713 | /* | ||
1714 | * Input, output HW S/G tables: [IV, src][dst, IV] | ||
1715 | * IV entries point to the same buffer | ||
1716 | * If src == dst, S/G entries are reused (S/G tables overlap) | ||
1717 | * | ||
1718 | * HW reads 4 S/G entries at a time; make sure the reads don't go beyond | ||
1719 | * the end of the table by allocating more S/G entries. Logic: | ||
1720 | * if (output S/G) | ||
1721 | * pad output S/G, if needed | ||
1722 | * else if (input S/G) ... | ||
1723 | * pad input S/G, if needed | ||
1724 | */ | ||
1725 | if (ivsize || mapped_dst_nents > 1) { | ||
1726 | if (req->src == req->dst) | ||
1727 | sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents); | ||
1728 | else | ||
1729 | sec4_sg_ents += pad_sg_nents(mapped_dst_nents + | ||
1730 | !!ivsize); | ||
1731 | } else { | ||
1732 | sec4_sg_ents = pad_sg_nents(sec4_sg_ents); | ||
1733 | } | ||
1734 | |||
1723 | sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry); | 1735 | sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry); |
1724 | 1736 | ||
1725 | /* | 1737 | /* |
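Working one case through the logic above (assuming pad_sg_nents() rounds up to a multiple of 4, per the sketch earlier): an in-place request with an IV and two mapped source entries:

	/* req->src == req->dst, ivsize != 0, mapped_src_nents = 2:
	 *   before padding: [IV, src0, src1]            -> 3 entries
	 *   after:  !!ivsize + pad_sg_nents(3) = 1 + 4   -> 5 entries
	 * (one trailing entry for the output IV, plus slack so the HW's
	 *  4-entry reads stay inside the table)
	 */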
@@ -1744,10 +1756,10 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, | |||
1744 | 1756 | ||
1745 | /* Make sure IV is located in a DMAable area */ | 1757 | /* Make sure IV is located in a DMAable area */ |
1746 | if (ivsize) { | 1758 | if (ivsize) { |
1747 | iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes; | 1759 | iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes; |
1748 | memcpy(iv, req->iv, ivsize); | 1760 | memcpy(iv, req->iv, ivsize); |
1749 | 1761 | ||
1750 | iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE); | 1762 | iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL); |
1751 | if (dma_mapping_error(jrdev, iv_dma)) { | 1763 | if (dma_mapping_error(jrdev, iv_dma)) { |
1752 | dev_err(jrdev, "unable to map IV\n"); | 1764 | dev_err(jrdev, "unable to map IV\n"); |
1753 | caam_unmap(jrdev, req->src, req->dst, src_nents, | 1765 | caam_unmap(jrdev, req->src, req->dst, src_nents, |
@@ -1759,13 +1771,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, | |||
1759 | dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); | 1771 | dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); |
1760 | } | 1772 | } |
1761 | if (dst_sg_idx) | 1773 | if (dst_sg_idx) |
1762 | sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + | 1774 | sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg + |
1763 | !!ivsize, 0); | 1775 | !!ivsize, 0); |
1764 | 1776 | ||
1765 | if (mapped_dst_nents > 1) { | 1777 | if (req->src != req->dst && (ivsize || mapped_dst_nents > 1)) |
1766 | sg_to_sec4_sg_last(req->dst, mapped_dst_nents, | 1778 | sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg + |
1767 | edesc->sec4_sg + dst_sg_idx, 0); | 1779 | dst_sg_idx, 0); |
1768 | } | 1780 | |
1781 | if (ivsize) | ||
1782 | dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx + | ||
1783 | mapped_dst_nents, iv_dma, ivsize, 0); | ||
1784 | |||
1785 | if (ivsize || mapped_dst_nents > 1) | ||
1786 | sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx + | ||
1787 | mapped_dst_nents); | ||
1769 | 1788 | ||
1770 | if (sec4_sg_bytes) { | 1789 | if (sec4_sg_bytes) { |
1771 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | 1790 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, |
@@ -1782,11 +1801,9 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, | |||
1782 | 1801 | ||
1783 | edesc->iv_dma = iv_dma; | 1802 | edesc->iv_dma = iv_dma; |
1784 | 1803 | ||
1785 | #ifdef DEBUG | 1804 | print_hex_dump_debug("skcipher sec4_sg@" __stringify(__LINE__)": ", |
1786 | print_hex_dump(KERN_ERR, "skcipher sec4_sg@" __stringify(__LINE__)": ", | 1805 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, |
1787 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, | 1806 | sec4_sg_bytes, 1); |
1788 | sec4_sg_bytes, 1); | ||
1789 | #endif | ||
1790 | 1807 | ||
1791 | return edesc; | 1808 | return edesc; |
1792 | } | 1809 | } |
@@ -1807,11 +1824,11 @@ static int skcipher_encrypt(struct skcipher_request *req) | |||
1807 | 1824 | ||
1808 | /* Create and submit job descriptor*/ | 1825 | /* Create and submit job descriptor*/ |
1809 | init_skcipher_job(req, edesc, true); | 1826 | init_skcipher_job(req, edesc, true); |
1810 | #ifdef DEBUG | 1827 | |
1811 | print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ", | 1828 | print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ", |
1812 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 1829 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
1813 | desc_bytes(edesc->hw_desc), 1); | 1830 | desc_bytes(edesc->hw_desc), 1); |
1814 | #endif | 1831 | |
1815 | desc = edesc->hw_desc; | 1832 | desc = edesc->hw_desc; |
1816 | ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req); | 1833 | ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req); |
1817 | 1834 | ||
@@ -1830,7 +1847,6 @@ static int skcipher_decrypt(struct skcipher_request *req) | |||
1830 | struct skcipher_edesc *edesc; | 1847 | struct skcipher_edesc *edesc; |
1831 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); | 1848 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
1832 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); | 1849 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); |
1833 | int ivsize = crypto_skcipher_ivsize(skcipher); | ||
1834 | struct device *jrdev = ctx->jrdev; | 1850 | struct device *jrdev = ctx->jrdev; |
1835 | u32 *desc; | 1851 | u32 *desc; |
1836 | int ret = 0; | 1852 | int ret = 0; |
@@ -1840,22 +1856,13 @@ static int skcipher_decrypt(struct skcipher_request *req) | |||
1840 | if (IS_ERR(edesc)) | 1856 | if (IS_ERR(edesc)) |
1841 | return PTR_ERR(edesc); | 1857 | return PTR_ERR(edesc); |
1842 | 1858 | ||
1843 | /* | ||
1844 | * The crypto API expects us to set the IV (req->iv) to the last | ||
1845 | * ciphertext block. | ||
1846 | */ | ||
1847 | if (ivsize) | ||
1848 | scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - | ||
1849 | ivsize, ivsize, 0); | ||
1850 | |||
1851 | /* Create and submit job descriptor*/ | 1859 | /* Create and submit job descriptor*/ |
1852 | init_skcipher_job(req, edesc, false); | 1860 | init_skcipher_job(req, edesc, false); |
1853 | desc = edesc->hw_desc; | 1861 | desc = edesc->hw_desc; |
1854 | #ifdef DEBUG | 1862 | |
1855 | print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ", | 1863 | print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ", |
1856 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, | 1864 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, |
1857 | desc_bytes(edesc->hw_desc), 1); | 1865 | desc_bytes(edesc->hw_desc), 1); |
1858 | #endif | ||
1859 | 1866 | ||
1860 | ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req); | 1867 | ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req); |
1861 | if (!ret) { | 1868 | if (!ret) { |
@@ -3444,7 +3451,7 @@ static void caam_aead_exit(struct crypto_aead *tfm) | |||
3444 | caam_exit_common(crypto_aead_ctx(tfm)); | 3451 | caam_exit_common(crypto_aead_ctx(tfm)); |
3445 | } | 3452 | } |
3446 | 3453 | ||
3447 | static void __exit caam_algapi_exit(void) | 3454 | void caam_algapi_exit(void) |
3448 | { | 3455 | { |
3449 | int i; | 3456 | int i; |
3450 | 3457 | ||
@@ -3489,43 +3496,15 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg) | |||
3489 | alg->exit = caam_aead_exit; | 3496 | alg->exit = caam_aead_exit; |
3490 | } | 3497 | } |
3491 | 3498 | ||
3492 | static int __init caam_algapi_init(void) | 3499 | int caam_algapi_init(struct device *ctrldev) |
3493 | { | 3500 | { |
3494 | struct device_node *dev_node; | 3501 | struct caam_drv_private *priv = dev_get_drvdata(ctrldev); |
3495 | struct platform_device *pdev; | ||
3496 | struct caam_drv_private *priv; | ||
3497 | int i = 0, err = 0; | 3502 | int i = 0, err = 0; |
3498 | u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst; | 3503 | u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst; |
3499 | u32 arc4_inst; | 3504 | u32 arc4_inst; |
3500 | unsigned int md_limit = SHA512_DIGEST_SIZE; | 3505 | unsigned int md_limit = SHA512_DIGEST_SIZE; |
3501 | bool registered = false, gcm_support; | 3506 | bool registered = false, gcm_support; |
3502 | 3507 | ||
3503 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
3504 | if (!dev_node) { | ||
3505 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
3506 | if (!dev_node) | ||
3507 | return -ENODEV; | ||
3508 | } | ||
3509 | |||
3510 | pdev = of_find_device_by_node(dev_node); | ||
3511 | if (!pdev) { | ||
3512 | of_node_put(dev_node); | ||
3513 | return -ENODEV; | ||
3514 | } | ||
3515 | |||
3516 | priv = dev_get_drvdata(&pdev->dev); | ||
3517 | of_node_put(dev_node); | ||
3518 | |||
3519 | /* | ||
3520 | * If priv is NULL, it's probably because the caam driver wasn't | ||
3521 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. | ||
3522 | */ | ||
3523 | if (!priv) { | ||
3524 | err = -ENODEV; | ||
3525 | goto out_put_dev; | ||
3526 | } | ||
3527 | |||
3528 | |||
3529 | /* | 3508 | /* |
3530 | * Register crypto algorithms the device supports. | 3509 | * Register crypto algorithms the device supports. |
3531 | * First, detect presence and attributes of DES, AES, and MD blocks. | 3510 | * First, detect presence and attributes of DES, AES, and MD blocks. |
@@ -3668,14 +3647,5 @@ static int __init caam_algapi_init(void) | |||
3668 | if (registered) | 3647 | if (registered) |
3669 | pr_info("caam algorithms registered in /proc/crypto\n"); | 3648 | pr_info("caam algorithms registered in /proc/crypto\n"); |
3670 | 3649 | ||
3671 | out_put_dev: | ||
3672 | put_device(&pdev->dev); | ||
3673 | return err; | 3650 | return err; |
3674 | } | 3651 | } |
3675 | |||
3676 | module_init(caam_algapi_init); | ||
3677 | module_exit(caam_algapi_exit); | ||
3678 | |||
3679 | MODULE_LICENSE("GPL"); | ||
3680 | MODULE_DESCRIPTION("FSL CAAM support for crypto API"); | ||
3681 | MODULE_AUTHOR("Freescale Semiconductor - NMG/STC"); | ||
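With the module_init()/module_exit() boilerplate dropped, caamalg becomes a library object linked into caam_jr (see the Makefile change above), and registration is expected to be driven from the Job Ring code once rings are available. The exact call sites are not part of this hunk, so the following is only a sketch of the assumed pattern:

	/* Assumed call pattern from the Job Ring driver: */
	err = caam_algapi_init(ctrldev);	/* after the first ring probes */
	if (err)
		dev_warn(ctrldev, "algapi registration failed: %d\n", err);
	/* ... and on teardown of the last ring: */
	caam_algapi_exit();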
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c index 1e1a376edc2f..72531837571e 100644 --- a/drivers/crypto/caam/caamalg_desc.c +++ b/drivers/crypto/caam/caamalg_desc.c | |||
@@ -33,12 +33,11 @@ static inline void append_dec_op1(u32 *desc, u32 type) | |||
33 | } | 33 | } |
34 | 34 | ||
35 | jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); | 35 | jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); |
36 | append_operation(desc, type | OP_ALG_AS_INITFINAL | | 36 | append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT); |
37 | OP_ALG_DECRYPT); | ||
38 | uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL); | 37 | uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL); |
39 | set_jump_tgt_here(desc, jump_cmd); | 38 | set_jump_tgt_here(desc, jump_cmd); |
40 | append_operation(desc, type | OP_ALG_AS_INITFINAL | | 39 | append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT | |
41 | OP_ALG_DECRYPT | OP_ALG_AAI_DK); | 40 | OP_ALG_AAI_DK); |
42 | set_jump_tgt_here(desc, uncond_jump_cmd); | 41 | set_jump_tgt_here(desc, uncond_jump_cmd); |
43 | } | 42 | } |
44 | 43 | ||
@@ -115,11 +114,9 @@ void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, | |||
115 | append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | | 114 | append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | |
116 | LDST_SRCDST_BYTE_CONTEXT); | 115 | LDST_SRCDST_BYTE_CONTEXT); |
117 | 116 | ||
118 | #ifdef DEBUG | 117 | print_hex_dump_debug("aead null enc shdesc@" __stringify(__LINE__)": ", |
119 | print_hex_dump(KERN_ERR, | 118 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
120 | "aead null enc shdesc@" __stringify(__LINE__)": ", | 119 | 1); |
121 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
122 | #endif | ||
123 | } | 120 | } |
124 | EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap); | 121 | EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap); |
125 | 122 | ||
@@ -204,11 +201,9 @@ void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, | |||
204 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | | 201 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | |
205 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); | 202 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); |
206 | 203 | ||
207 | #ifdef DEBUG | 204 | print_hex_dump_debug("aead null dec shdesc@" __stringify(__LINE__)": ", |
208 | print_hex_dump(KERN_ERR, | 205 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
209 | "aead null dec shdesc@" __stringify(__LINE__)": ", | 206 | 1); |
210 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
211 | #endif | ||
212 | } | 207 | } |
213 | EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap); | 208 | EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap); |
214 | 209 | ||
@@ -358,10 +353,9 @@ void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, | |||
358 | append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | | 353 | append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | |
359 | LDST_SRCDST_BYTE_CONTEXT); | 354 | LDST_SRCDST_BYTE_CONTEXT); |
360 | 355 | ||
361 | #ifdef DEBUG | 356 | print_hex_dump_debug("aead enc shdesc@" __stringify(__LINE__)": ", |
362 | print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ", | 357 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
363 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 358 | 1); |
364 | #endif | ||
365 | } | 359 | } |
366 | EXPORT_SYMBOL(cnstr_shdsc_aead_encap); | 360 | EXPORT_SYMBOL(cnstr_shdsc_aead_encap); |
367 | 361 | ||
@@ -475,10 +469,9 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, | |||
475 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | | 469 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | |
476 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); | 470 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); |
477 | 471 | ||
478 | #ifdef DEBUG | 472 | print_hex_dump_debug("aead dec shdesc@" __stringify(__LINE__)": ", |
479 | print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ", | 473 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
480 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 474 | 1); |
481 | #endif | ||
482 | } | 475 | } |
483 | EXPORT_SYMBOL(cnstr_shdsc_aead_decap); | 476 | EXPORT_SYMBOL(cnstr_shdsc_aead_decap); |
484 | 477 | ||
@@ -613,11 +606,9 @@ copy_iv: | |||
613 | append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | | 606 | append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | |
614 | LDST_SRCDST_BYTE_CONTEXT); | 607 | LDST_SRCDST_BYTE_CONTEXT); |
615 | 608 | ||
616 | #ifdef DEBUG | 609 | print_hex_dump_debug("aead givenc shdesc@" __stringify(__LINE__)": ", |
617 | print_hex_dump(KERN_ERR, | 610 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
618 | "aead givenc shdesc@" __stringify(__LINE__)": ", | 611 | 1); |
619 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
620 | #endif | ||
621 | } | 612 | } |
622 | EXPORT_SYMBOL(cnstr_shdsc_aead_givencap); | 613 | EXPORT_SYMBOL(cnstr_shdsc_aead_givencap); |
623 | 614 | ||
@@ -742,10 +733,9 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, | |||
742 | append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | | 733 | append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | |
743 | LDST_SRCDST_BYTE_CONTEXT); | 734 | LDST_SRCDST_BYTE_CONTEXT); |
744 | 735 | ||
745 | #ifdef DEBUG | 736 | print_hex_dump_debug("gcm enc shdesc@" __stringify(__LINE__)": ", |
746 | print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ", | 737 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
747 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 738 | 1); |
748 | #endif | ||
749 | } | 739 | } |
750 | EXPORT_SYMBOL(cnstr_shdsc_gcm_encap); | 740 | EXPORT_SYMBOL(cnstr_shdsc_gcm_encap); |
751 | 741 | ||
@@ -838,10 +828,9 @@ void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata, | |||
838 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | | 828 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | |
839 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); | 829 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); |
840 | 830 | ||
841 | #ifdef DEBUG | 831 | print_hex_dump_debug("gcm dec shdesc@" __stringify(__LINE__)": ", |
842 | print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ", | 832 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
843 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 833 | 1); |
844 | #endif | ||
845 | } | 834 | } |
846 | EXPORT_SYMBOL(cnstr_shdsc_gcm_decap); | 835 | EXPORT_SYMBOL(cnstr_shdsc_gcm_decap); |
847 | 836 | ||
@@ -933,11 +922,9 @@ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata, | |||
933 | append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | | 922 | append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | |
934 | LDST_SRCDST_BYTE_CONTEXT); | 923 | LDST_SRCDST_BYTE_CONTEXT); |
935 | 924 | ||
936 | #ifdef DEBUG | 925 | print_hex_dump_debug("rfc4106 enc shdesc@" __stringify(__LINE__)": ", |
937 | print_hex_dump(KERN_ERR, | 926 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
938 | "rfc4106 enc shdesc@" __stringify(__LINE__)": ", | 927 | 1); |
939 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
940 | #endif | ||
941 | } | 928 | } |
942 | EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap); | 929 | EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap); |
943 | 930 | ||
@@ -1030,11 +1017,9 @@ void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata, | |||
1030 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | | 1017 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | |
1031 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); | 1018 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); |
1032 | 1019 | ||
1033 | #ifdef DEBUG | 1020 | print_hex_dump_debug("rfc4106 dec shdesc@" __stringify(__LINE__)": ", |
1034 | print_hex_dump(KERN_ERR, | 1021 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
1035 | "rfc4106 dec shdesc@" __stringify(__LINE__)": ", | 1022 | 1); |
1036 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
1037 | #endif | ||
1038 | } | 1023 | } |
1039 | EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap); | 1024 | EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap); |
1040 | 1025 | ||
@@ -1115,11 +1100,9 @@ void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata, | |||
1115 | append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | | 1100 | append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | |
1116 | LDST_SRCDST_BYTE_CONTEXT); | 1101 | LDST_SRCDST_BYTE_CONTEXT); |
1117 | 1102 | ||
1118 | #ifdef DEBUG | 1103 | print_hex_dump_debug("rfc4543 enc shdesc@" __stringify(__LINE__)": ", |
1119 | print_hex_dump(KERN_ERR, | 1104 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
1120 | "rfc4543 enc shdesc@" __stringify(__LINE__)": ", | 1105 | 1); |
1121 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
1122 | #endif | ||
1123 | } | 1106 | } |
1124 | EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap); | 1107 | EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap); |
1125 | 1108 | ||
@@ -1205,11 +1188,9 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata, | |||
1205 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | | 1188 | append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | |
1206 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); | 1189 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); |
1207 | 1190 | ||
1208 | #ifdef DEBUG | 1191 | print_hex_dump_debug("rfc4543 dec shdesc@" __stringify(__LINE__)": ", |
1209 | print_hex_dump(KERN_ERR, | 1192 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
1210 | "rfc4543 dec shdesc@" __stringify(__LINE__)": ", | 1193 | 1); |
1211 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
1212 | #endif | ||
1213 | } | 1194 | } |
1214 | EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap); | 1195 | EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap); |
1215 | 1196 | ||
@@ -1410,17 +1391,21 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata, | |||
1410 | LDST_OFFSET_SHIFT)); | 1391 | LDST_OFFSET_SHIFT)); |
1411 | 1392 | ||
1412 | /* Load operation */ | 1393 | /* Load operation */ |
1413 | append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | | 1394 | append_operation(desc, cdata->algtype | OP_ALG_AS_INIT | |
1414 | OP_ALG_ENCRYPT); | 1395 | OP_ALG_ENCRYPT); |
1415 | 1396 | ||
1416 | /* Perform operation */ | 1397 | /* Perform operation */ |
1417 | skcipher_append_src_dst(desc); | 1398 | skcipher_append_src_dst(desc); |
1418 | 1399 | ||
1419 | #ifdef DEBUG | 1400 | /* Store IV */ |
1420 | print_hex_dump(KERN_ERR, | 1401 | if (ivsize) |
1421 | "skcipher enc shdesc@" __stringify(__LINE__)": ", | 1402 | append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | |
1422 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 1403 | LDST_CLASS_1_CCB | (ctx1_iv_off << |
1423 | #endif | 1404 | LDST_OFFSET_SHIFT)); |
1405 | |||
1406 | print_hex_dump_debug("skcipher enc shdesc@" __stringify(__LINE__)": ", | ||
1407 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), | ||
1408 | 1); | ||
1424 | } | 1409 | } |
1425 | EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap); | 1410 | EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap); |
1426 | 1411 | ||
@@ -1479,7 +1464,7 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata, | |||
1479 | 1464 | ||
1480 | /* Choose operation */ | 1465 | /* Choose operation */ |
1481 | if (ctx1_iv_off) | 1466 | if (ctx1_iv_off) |
1482 | append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | | 1467 | append_operation(desc, cdata->algtype | OP_ALG_AS_INIT | |
1483 | OP_ALG_DECRYPT); | 1468 | OP_ALG_DECRYPT); |
1484 | else | 1469 | else |
1485 | append_dec_op1(desc, cdata->algtype); | 1470 | append_dec_op1(desc, cdata->algtype); |
@@ -1487,11 +1472,15 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata, | |||
1487 | /* Perform operation */ | 1472 | /* Perform operation */ |
1488 | skcipher_append_src_dst(desc); | 1473 | skcipher_append_src_dst(desc); |
1489 | 1474 | ||
1490 | #ifdef DEBUG | 1475 | /* Store IV */ |
1491 | print_hex_dump(KERN_ERR, | 1476 | if (ivsize) |
1492 | "skcipher dec shdesc@" __stringify(__LINE__)": ", | 1477 | append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | |
1493 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 1478 | LDST_CLASS_1_CCB | (ctx1_iv_off << |
1494 | #endif | 1479 | LDST_OFFSET_SHIFT)); |
1480 | |||
1481 | print_hex_dump_debug("skcipher dec shdesc@" __stringify(__LINE__)": ", | ||
1482 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), | ||
1483 | 1); | ||
1495 | } | 1484 | } |
1496 | EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap); | 1485 | EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap); |
1497 | 1486 | ||
@@ -1538,11 +1527,13 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata) | |||
1538 | /* Perform operation */ | 1527 | /* Perform operation */ |
1539 | skcipher_append_src_dst(desc); | 1528 | skcipher_append_src_dst(desc); |
1540 | 1529 | ||
1541 | #ifdef DEBUG | 1530 | /* Store upper 8B of IV */ |
1542 | print_hex_dump(KERN_ERR, | 1531 | append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | |
1543 | "xts skcipher enc shdesc@" __stringify(__LINE__) ": ", | 1532 | (0x20 << LDST_OFFSET_SHIFT)); |
1544 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 1533 | |
1545 | #endif | 1534 | print_hex_dump_debug("xts skcipher enc shdesc@" __stringify(__LINE__) |
1535 | ": ", DUMP_PREFIX_ADDRESS, 16, 4, | ||
1536 | desc, desc_bytes(desc), 1); | ||
1546 | } | 1537 | } |
1547 | EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap); | 1538 | EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap); |
1548 | 1539 | ||
@@ -1588,11 +1579,13 @@ void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata) | |||
1588 | /* Perform operation */ | 1579 | /* Perform operation */ |
1589 | skcipher_append_src_dst(desc); | 1580 | skcipher_append_src_dst(desc); |
1590 | 1581 | ||
1591 | #ifdef DEBUG | 1582 | /* Store upper 8B of IV */ |
1592 | print_hex_dump(KERN_ERR, | 1583 | append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | |
1593 | "xts skcipher dec shdesc@" __stringify(__LINE__) ": ", | 1584 | (0x20 << LDST_OFFSET_SHIFT)); |
1594 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 1585 | |
1595 | #endif | 1586 | print_hex_dump_debug("xts skcipher dec shdesc@" __stringify(__LINE__) |
1587 | ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, | ||
1588 | desc_bytes(desc), 1); | ||
1596 | } | 1589 | } |
1597 | EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_decap); | 1590 | EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_decap); |
1598 | 1591 | ||
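Note: besides the logging cleanup, the skcipher and XTS hunks above share one functional change — each shared descriptor now ends with a SEQ STORE command that writes the (updated) IV from the class 1 context register back into the output sequence. Condensed from the hunks, with desc/ivsize/ctx1_iv_off in scope as in the diff:

    /* generic skcipher tail: store the full IV at its context offset */
    if (ivsize)
            append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
                             LDST_CLASS_1_CCB |
                             (ctx1_iv_off << LDST_OFFSET_SHIFT));

    /* XTS tail: only the upper 8 bytes, at context offset 0x20 */
    append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
                     (0x20 << LDST_OFFSET_SHIFT));

This extra command is what the one-CAAM_CMD_SZ growth of DESC_SKCIPHER_{ENC,DEC}_LEN in the header hunk below accounts for, and it is what lets the completion paths read the IV back instead of copying the last ciphertext block in software.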
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h index d5ca42ff961a..da4a4ee60c80 100644 --- a/drivers/crypto/caam/caamalg_desc.h +++ b/drivers/crypto/caam/caamalg_desc.h | |||
@@ -44,9 +44,9 @@ | |||
44 | 44 | ||
45 | #define DESC_SKCIPHER_BASE (3 * CAAM_CMD_SZ) | 45 | #define DESC_SKCIPHER_BASE (3 * CAAM_CMD_SZ) |
46 | #define DESC_SKCIPHER_ENC_LEN (DESC_SKCIPHER_BASE + \ | 46 | #define DESC_SKCIPHER_ENC_LEN (DESC_SKCIPHER_BASE + \ |
47 | 20 * CAAM_CMD_SZ) | 47 | 21 * CAAM_CMD_SZ) |
48 | #define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \ | 48 | #define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \ |
49 | 15 * CAAM_CMD_SZ) | 49 | 16 * CAAM_CMD_SZ) |
50 | 50 | ||
51 | void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, | 51 | void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, |
52 | unsigned int icvsize, int era); | 52 | unsigned int icvsize, int era); |
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index d290d6b41825..32f0f8a72067 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * Based on caamalg.c | 4 | * Based on caamalg.c |
5 | * | 5 | * |
6 | * Copyright 2013-2016 Freescale Semiconductor, Inc. | 6 | * Copyright 2013-2016 Freescale Semiconductor, Inc. |
7 | * Copyright 2016-2018 NXP | 7 | * Copyright 2016-2019 NXP |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include "compat.h" | 10 | #include "compat.h" |
@@ -214,13 +214,11 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key, | |||
214 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) | 214 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) |
215 | goto badkey; | 215 | goto badkey; |
216 | 216 | ||
217 | #ifdef DEBUG | 217 | dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n", |
218 | dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n", | ||
219 | keys.authkeylen + keys.enckeylen, keys.enckeylen, | 218 | keys.authkeylen + keys.enckeylen, keys.enckeylen, |
220 | keys.authkeylen); | 219 | keys.authkeylen); |
221 | print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", | 220 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", |
222 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 221 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
223 | #endif | ||
224 | 222 | ||
225 | /* | 223 | /* |
226 | * If DKP is supported, use it in the shared descriptor to generate | 224 | * If DKP is supported, use it in the shared descriptor to generate |
@@ -237,7 +235,7 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key, | |||
237 | memcpy(ctx->key, keys.authkey, keys.authkeylen); | 235 | memcpy(ctx->key, keys.authkey, keys.authkeylen); |
238 | memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, | 236 | memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, |
239 | keys.enckeylen); | 237 | keys.enckeylen); |
240 | dma_sync_single_for_device(jrdev, ctx->key_dma, | 238 | dma_sync_single_for_device(jrdev->parent, ctx->key_dma, |
241 | ctx->adata.keylen_pad + | 239 | ctx->adata.keylen_pad + |
242 | keys.enckeylen, ctx->dir); | 240 | keys.enckeylen, ctx->dir); |
243 | goto skip_split_key; | 241 | goto skip_split_key; |
@@ -251,8 +249,9 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key, | |||
251 | 249 | ||
252 | /* postpend encryption key to auth split key */ | 250 | /* postpend encryption key to auth split key */ |
253 | memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); | 251 | memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); |
254 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + | 252 | dma_sync_single_for_device(jrdev->parent, ctx->key_dma, |
255 | keys.enckeylen, ctx->dir); | 253 | ctx->adata.keylen_pad + keys.enckeylen, |
254 | ctx->dir); | ||
256 | #ifdef DEBUG | 255 | #ifdef DEBUG |
257 | print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", | 256 | print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", |
258 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, | 257 | DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, |
@@ -386,13 +385,12 @@ static int gcm_setkey(struct crypto_aead *aead, | |||
386 | struct device *jrdev = ctx->jrdev; | 385 | struct device *jrdev = ctx->jrdev; |
387 | int ret; | 386 | int ret; |
388 | 387 | ||
389 | #ifdef DEBUG | 388 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", |
390 | print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", | 389 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
391 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | ||
392 | #endif | ||
393 | 390 | ||
394 | memcpy(ctx->key, key, keylen); | 391 | memcpy(ctx->key, key, keylen); |
395 | dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir); | 392 | dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen, |
393 | ctx->dir); | ||
396 | ctx->cdata.keylen = keylen; | 394 | ctx->cdata.keylen = keylen; |
397 | 395 | ||
398 | ret = gcm_set_sh_desc(aead); | 396 | ret = gcm_set_sh_desc(aead); |
@@ -485,10 +483,8 @@ static int rfc4106_setkey(struct crypto_aead *aead, | |||
485 | if (keylen < 4) | 483 | if (keylen < 4) |
486 | return -EINVAL; | 484 | return -EINVAL; |
487 | 485 | ||
488 | #ifdef DEBUG | 486 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", |
489 | print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", | 487 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
490 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | ||
491 | #endif | ||
492 | 488 | ||
493 | memcpy(ctx->key, key, keylen); | 489 | memcpy(ctx->key, key, keylen); |
494 | /* | 490 | /* |
@@ -496,8 +492,8 @@ static int rfc4106_setkey(struct crypto_aead *aead, | |||
496 | * in the nonce. Update the AES key length. | 492 | * in the nonce. Update the AES key length. |
497 | */ | 493 | */ |
498 | ctx->cdata.keylen = keylen - 4; | 494 | ctx->cdata.keylen = keylen - 4; |
499 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, | 495 | dma_sync_single_for_device(jrdev->parent, ctx->key_dma, |
500 | ctx->dir); | 496 | ctx->cdata.keylen, ctx->dir); |
501 | 497 | ||
502 | ret = rfc4106_set_sh_desc(aead); | 498 | ret = rfc4106_set_sh_desc(aead); |
503 | if (ret) | 499 | if (ret) |
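Note: for rfc4106 (and rfc4543 in the next hunk) the last four bytes of the user-supplied key are the implicit salt/nonce rather than AES key material, hence keylen - 4. A worked example, assuming the usual rfc4106(gcm(aes)) keying convention (key bytes are illustrative):

    /* a 20-byte blob = 16-byte AES-128 key + 4-byte salt; a 36-byte
     * blob would likewise be AES-256 + salt */
    static const u8 rfc4106_key[20] = {
            0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* AES key.. */
            0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* ..16 bytes */
            0xca, 0xfe, 0xba, 0xbe,                          /* 4-byte salt */
    };

    memcpy(ctx->key, rfc4106_key, sizeof(rfc4106_key)); /* key || salt */
    ctx->cdata.keylen = sizeof(rfc4106_key) - 4;        /* = 16: AES-128 */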
@@ -589,10 +585,8 @@ static int rfc4543_setkey(struct crypto_aead *aead, | |||
589 | if (keylen < 4) | 585 | if (keylen < 4) |
590 | return -EINVAL; | 586 | return -EINVAL; |
591 | 587 | ||
592 | #ifdef DEBUG | 588 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", |
593 | print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", | 589 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
594 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | ||
595 | #endif | ||
596 | 590 | ||
597 | memcpy(ctx->key, key, keylen); | 591 | memcpy(ctx->key, key, keylen); |
598 | /* | 592 | /* |
@@ -600,8 +594,8 @@ static int rfc4543_setkey(struct crypto_aead *aead, | |||
600 | * in the nonce. Update the AES key length. | 594 | * in the nonce. Update the AES key length. |
601 | */ | 595 | */ |
602 | ctx->cdata.keylen = keylen - 4; | 596 | ctx->cdata.keylen = keylen - 4; |
603 | dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, | 597 | dma_sync_single_for_device(jrdev->parent, ctx->key_dma, |
604 | ctx->dir); | 598 | ctx->cdata.keylen, ctx->dir); |
605 | 599 | ||
606 | ret = rfc4543_set_sh_desc(aead); | 600 | ret = rfc4543_set_sh_desc(aead); |
607 | if (ret) | 601 | if (ret) |
@@ -644,10 +638,9 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, | |||
644 | const bool is_rfc3686 = alg->caam.rfc3686; | 638 | const bool is_rfc3686 = alg->caam.rfc3686; |
645 | int ret = 0; | 639 | int ret = 0; |
646 | 640 | ||
647 | #ifdef DEBUG | 641 | print_hex_dump_debug("key in @" __stringify(__LINE__)": ", |
648 | print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", | 642 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); |
649 | DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); | 643 | |
650 | #endif | ||
651 | /* | 644 | /* |
652 | * AES-CTR needs to load IV in CONTEXT1 reg | 645 | * AES-CTR needs to load IV in CONTEXT1 reg |
653 | * at an offset of 128bits (16bytes) | 646 | * at an offset of 128bits (16bytes) |
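Note: the CONTEXT1 offset the comment describes is mode-dependent. A sketch of how ctx1_iv_off is typically derived in this driver family (hedged — the constants mirror the caamalg convention, with CTR_RFC3686_NONCE_SIZE = 4):

    u32 ctx1_iv_off = 0;

    if (ctr_mode)
            /* plain AES-CTR: the counter block sits 128 bits (16 bytes)
             * into the CONTEXT1 register */
            ctx1_iv_off = 16;
    if (is_rfc3686)
            /* rfc3686: the 4-byte nonce occupies the slot just before
             * the counter block */
            ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;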
@@ -838,7 +831,8 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx, | |||
838 | static void caam_unmap(struct device *dev, struct scatterlist *src, | 831 | static void caam_unmap(struct device *dev, struct scatterlist *src, |
839 | struct scatterlist *dst, int src_nents, | 832 | struct scatterlist *dst, int src_nents, |
840 | int dst_nents, dma_addr_t iv_dma, int ivsize, | 833 | int dst_nents, dma_addr_t iv_dma, int ivsize, |
841 | dma_addr_t qm_sg_dma, int qm_sg_bytes) | 834 | enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma, |
835 | int qm_sg_bytes) | ||
842 | { | 836 | { |
843 | if (dst != src) { | 837 | if (dst != src) { |
844 | if (src_nents) | 838 | if (src_nents) |
@@ -850,7 +844,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src, | |||
850 | } | 844 | } |
851 | 845 | ||
852 | if (iv_dma) | 846 | if (iv_dma) |
853 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); | 847 | dma_unmap_single(dev, iv_dma, ivsize, iv_dir); |
854 | if (qm_sg_bytes) | 848 | if (qm_sg_bytes) |
855 | dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); | 849 | dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); |
856 | } | 850 | } |
@@ -863,7 +857,8 @@ static void aead_unmap(struct device *dev, | |||
863 | int ivsize = crypto_aead_ivsize(aead); | 857 | int ivsize = crypto_aead_ivsize(aead); |
864 | 858 | ||
865 | caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, | 859 | caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, |
866 | edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes); | 860 | edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma, |
861 | edesc->qm_sg_bytes); | ||
867 | dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); | 862 | dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); |
868 | } | 863 | } |
869 | 864 | ||
@@ -874,7 +869,8 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc, | |||
874 | int ivsize = crypto_skcipher_ivsize(skcipher); | 869 | int ivsize = crypto_skcipher_ivsize(skcipher); |
875 | 870 | ||
876 | caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, | 871 | caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, |
877 | edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes); | 872 | edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma, |
873 | edesc->qm_sg_bytes); | ||
878 | } | 874 | } |
879 | 875 | ||
880 | static void aead_done(struct caam_drv_req *drv_req, u32 status) | 876 | static void aead_done(struct caam_drv_req *drv_req, u32 status) |
@@ -924,6 +920,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
924 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | 920 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
925 | GFP_KERNEL : GFP_ATOMIC; | 921 | GFP_KERNEL : GFP_ATOMIC; |
926 | int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; | 922 | int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; |
923 | int src_len, dst_len = 0; | ||
927 | struct aead_edesc *edesc; | 924 | struct aead_edesc *edesc; |
928 | dma_addr_t qm_sg_dma, iv_dma = 0; | 925 | dma_addr_t qm_sg_dma, iv_dma = 0; |
929 | int ivsize = 0; | 926 | int ivsize = 0; |
@@ -945,13 +942,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
945 | } | 942 | } |
946 | 943 | ||
947 | if (likely(req->src == req->dst)) { | 944 | if (likely(req->src == req->dst)) { |
948 | src_nents = sg_nents_for_len(req->src, req->assoclen + | 945 | src_len = req->assoclen + req->cryptlen + |
949 | req->cryptlen + | 946 | (encrypt ? authsize : 0); |
950 | (encrypt ? authsize : 0)); | 947 | |
948 | src_nents = sg_nents_for_len(req->src, src_len); | ||
951 | if (unlikely(src_nents < 0)) { | 949 | if (unlikely(src_nents < 0)) { |
952 | dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", | 950 | dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", |
953 | req->assoclen + req->cryptlen + | 951 | src_len); |
954 | (encrypt ? authsize : 0)); | ||
955 | qi_cache_free(edesc); | 952 | qi_cache_free(edesc); |
956 | return ERR_PTR(src_nents); | 953 | return ERR_PTR(src_nents); |
957 | } | 954 | } |
@@ -964,23 +961,21 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
964 | return ERR_PTR(-ENOMEM); | 961 | return ERR_PTR(-ENOMEM); |
965 | } | 962 | } |
966 | } else { | 963 | } else { |
967 | src_nents = sg_nents_for_len(req->src, req->assoclen + | 964 | src_len = req->assoclen + req->cryptlen; |
968 | req->cryptlen); | 965 | dst_len = src_len + (encrypt ? authsize : (-authsize)); |
966 | |||
967 | src_nents = sg_nents_for_len(req->src, src_len); | ||
969 | if (unlikely(src_nents < 0)) { | 968 | if (unlikely(src_nents < 0)) { |
970 | dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", | 969 | dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", |
971 | req->assoclen + req->cryptlen); | 970 | src_len); |
972 | qi_cache_free(edesc); | 971 | qi_cache_free(edesc); |
973 | return ERR_PTR(src_nents); | 972 | return ERR_PTR(src_nents); |
974 | } | 973 | } |
975 | 974 | ||
976 | dst_nents = sg_nents_for_len(req->dst, req->assoclen + | 975 | dst_nents = sg_nents_for_len(req->dst, dst_len); |
977 | req->cryptlen + | ||
978 | (encrypt ? authsize : | ||
979 | (-authsize))); | ||
980 | if (unlikely(dst_nents < 0)) { | 976 | if (unlikely(dst_nents < 0)) { |
981 | dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", | 977 | dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", |
982 | req->assoclen + req->cryptlen + | 978 | dst_len); |
983 | (encrypt ? authsize : (-authsize))); | ||
984 | qi_cache_free(edesc); | 979 | qi_cache_free(edesc); |
985 | return ERR_PTR(dst_nents); | 980 | return ERR_PTR(dst_nents); |
986 | } | 981 | } |
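Note: the refactor above hoists the repeated length expressions into src_len/dst_len. A worked example for the src != dst branch (sizes illustrative):

    /* AEAD encrypt: assoclen = 16, cryptlen = 64 (plaintext), authsize = 16
     *   src_len = 16 + 64 = 80      (AAD + plaintext)
     *   dst_len = 80 + 16 = 96      (AAD + ciphertext + ICV)
     * AEAD decrypt of the result (cryptlen = 80 now includes the ICV):
     *   src_len = 16 + 80 = 96
     *   dst_len = 96 - 16 = 80
     */
    int src_len = req->assoclen + req->cryptlen;
    int dst_len = src_len + (encrypt ? authsize : (-authsize));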
@@ -1019,9 +1014,24 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
1019 | /* | 1014 | /* |
1020 | * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. | 1015 | * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. |
1021 | * Input is not contiguous. | 1016 | * Input is not contiguous. |
1017 | * HW reads 4 S/G entries at a time; make sure the reads don't go beyond | ||
1018 | * the end of the table by allocating more S/G entries. Logic: | ||
1019 | * if (src != dst && output S/G) | ||
1020 | * pad output S/G, if needed | ||
1021 | * else if (src == dst && S/G) | ||
1022 | * overlapping S/Gs; pad one of them | ||
1023 | * else if (input S/G) ... | ||
1024 | * pad input S/G, if needed | ||
1022 | */ | 1025 | */ |
1023 | qm_sg_ents = 1 + !!ivsize + mapped_src_nents + | 1026 | qm_sg_ents = 1 + !!ivsize + mapped_src_nents; |
1024 | (mapped_dst_nents > 1 ? mapped_dst_nents : 0); | 1027 | if (mapped_dst_nents > 1) |
1028 | qm_sg_ents += pad_sg_nents(mapped_dst_nents); | ||
1029 | else if ((req->src == req->dst) && (mapped_src_nents > 1)) | ||
1030 | qm_sg_ents = max(pad_sg_nents(qm_sg_ents), | ||
1031 | 1 + !!ivsize + pad_sg_nents(mapped_src_nents)); | ||
1032 | else | ||
1033 | qm_sg_ents = pad_sg_nents(qm_sg_ents); | ||
1034 | |||
1025 | sg_table = &edesc->sgt[0]; | 1035 | sg_table = &edesc->sgt[0]; |
1026 | qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); | 1036 | qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); |
1027 | if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > | 1037 | if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > |
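Note: pad_sg_nents() is the helper doing the rounding the new comment describes. A minimal sketch, assuming it rounds the entry count up to the 4-entry granule the hardware prefetches (consistent with how it is used in these hunks):

    #include <linux/kernel.h>       /* ALIGN() */

    static inline int pad_sg_nents(int sg_nents)
    {
            /* HW reads 4 S/G entries at a time; round up so a prefetch
             * never runs past the end of the table */
            return ALIGN(sg_nents, 4);
    }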
@@ -1029,7 +1039,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
1029 | dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", | 1039 | dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", |
1030 | qm_sg_ents, ivsize); | 1040 | qm_sg_ents, ivsize); |
1031 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, | 1041 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, |
1032 | 0, 0, 0); | 1042 | 0, DMA_NONE, 0, 0); |
1033 | qi_cache_free(edesc); | 1043 | qi_cache_free(edesc); |
1034 | return ERR_PTR(-ENOMEM); | 1044 | return ERR_PTR(-ENOMEM); |
1035 | } | 1045 | } |
@@ -1044,7 +1054,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
1044 | if (dma_mapping_error(qidev, iv_dma)) { | 1054 | if (dma_mapping_error(qidev, iv_dma)) { |
1045 | dev_err(qidev, "unable to map IV\n"); | 1055 | dev_err(qidev, "unable to map IV\n"); |
1046 | caam_unmap(qidev, req->src, req->dst, src_nents, | 1056 | caam_unmap(qidev, req->src, req->dst, src_nents, |
1047 | dst_nents, 0, 0, 0, 0); | 1057 | dst_nents, 0, 0, DMA_NONE, 0, 0); |
1048 | qi_cache_free(edesc); | 1058 | qi_cache_free(edesc); |
1049 | return ERR_PTR(-ENOMEM); | 1059 | return ERR_PTR(-ENOMEM); |
1050 | } | 1060 | } |
@@ -1063,7 +1073,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
1063 | if (dma_mapping_error(qidev, edesc->assoclen_dma)) { | 1073 | if (dma_mapping_error(qidev, edesc->assoclen_dma)) { |
1064 | dev_err(qidev, "unable to map assoclen\n"); | 1074 | dev_err(qidev, "unable to map assoclen\n"); |
1065 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, | 1075 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, |
1066 | iv_dma, ivsize, 0, 0); | 1076 | iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); |
1067 | qi_cache_free(edesc); | 1077 | qi_cache_free(edesc); |
1068 | return ERR_PTR(-ENOMEM); | 1078 | return ERR_PTR(-ENOMEM); |
1069 | } | 1079 | } |
@@ -1074,19 +1084,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
1074 | dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); | 1084 | dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); |
1075 | qm_sg_index++; | 1085 | qm_sg_index++; |
1076 | } | 1086 | } |
1077 | sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0); | 1087 | sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0); |
1078 | qm_sg_index += mapped_src_nents; | 1088 | qm_sg_index += mapped_src_nents; |
1079 | 1089 | ||
1080 | if (mapped_dst_nents > 1) | 1090 | if (mapped_dst_nents > 1) |
1081 | sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + | 1091 | sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0); |
1082 | qm_sg_index, 0); | ||
1083 | 1092 | ||
1084 | qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); | 1093 | qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); |
1085 | if (dma_mapping_error(qidev, qm_sg_dma)) { | 1094 | if (dma_mapping_error(qidev, qm_sg_dma)) { |
1086 | dev_err(qidev, "unable to map S/G table\n"); | 1095 | dev_err(qidev, "unable to map S/G table\n"); |
1087 | dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); | 1096 | dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); |
1088 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, | 1097 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, |
1089 | iv_dma, ivsize, 0, 0); | 1098 | iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); |
1090 | qi_cache_free(edesc); | 1099 | qi_cache_free(edesc); |
1091 | return ERR_PTR(-ENOMEM); | 1100 | return ERR_PTR(-ENOMEM); |
1092 | } | 1101 | } |
@@ -1109,7 +1118,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
1109 | dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + | 1118 | dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + |
1110 | (1 + !!ivsize) * sizeof(*sg_table), | 1119 | (1 + !!ivsize) * sizeof(*sg_table), |
1111 | out_len, 0); | 1120 | out_len, 0); |
1112 | } else if (mapped_dst_nents == 1) { | 1121 | } else if (mapped_dst_nents <= 1) { |
1113 | dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len, | 1122 | dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len, |
1114 | 0); | 1123 | 0); |
1115 | } else { | 1124 | } else { |
@@ -1182,33 +1191,28 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status) | |||
1182 | struct device *qidev = caam_ctx->qidev; | 1191 | struct device *qidev = caam_ctx->qidev; |
1183 | int ivsize = crypto_skcipher_ivsize(skcipher); | 1192 | int ivsize = crypto_skcipher_ivsize(skcipher); |
1184 | 1193 | ||
1185 | #ifdef DEBUG | 1194 | dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); |
1186 | dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); | ||
1187 | #endif | ||
1188 | 1195 | ||
1189 | edesc = container_of(drv_req, typeof(*edesc), drv_req); | 1196 | edesc = container_of(drv_req, typeof(*edesc), drv_req); |
1190 | 1197 | ||
1191 | if (status) | 1198 | if (status) |
1192 | caam_jr_strstatus(qidev, status); | 1199 | caam_jr_strstatus(qidev, status); |
1193 | 1200 | ||
1194 | #ifdef DEBUG | 1201 | print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", |
1195 | print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ", | 1202 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, |
1196 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | 1203 | edesc->src_nents > 1 ? 100 : ivsize, 1); |
1197 | edesc->src_nents > 1 ? 100 : ivsize, 1); | 1204 | caam_dump_sg("dst @" __stringify(__LINE__)": ", |
1198 | caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", | ||
1199 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, | 1205 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, |
1200 | edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); | 1206 | edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); |
1201 | #endif | ||
1202 | 1207 | ||
1203 | skcipher_unmap(qidev, edesc, req); | 1208 | skcipher_unmap(qidev, edesc, req); |
1204 | 1209 | ||
1205 | /* | 1210 | /* |
1206 | * The crypto API expects us to set the IV (req->iv) to the last | 1211 | * The crypto API expects us to set the IV (req->iv) to the last |
1207 | * ciphertext block. This is used e.g. by the CTS mode. | 1212 | * ciphertext block (CBC mode) or last counter (CTR mode). |
1213 | * This is used e.g. by the CTS mode. | ||
1208 | */ | 1214 | */ |
1209 | if (edesc->drv_req.drv_ctx->op_type == ENCRYPT) | 1215 | memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); |
1210 | scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - | ||
1211 | ivsize, ivsize, 0); | ||
1212 | 1216 | ||
1213 | qi_cache_free(edesc); | 1217 | qi_cache_free(edesc); |
1214 | skcipher_request_complete(req, status); | 1218 | skcipher_request_complete(req, status); |
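Note: the memcpy() that replaces scatterwalk_map_and_copy() relies on the new extended-descriptor layout (see the [IV, src][dst, IV] comment in the next hunk): the IV buffer is carved out right after the hardware S/G table and the last output S/G entry points back at it, so by completion time the engine has already written the updated IV — next CBC ciphertext block or next CTR counter — into that buffer. The addressing, using the fields from the diff:

    /* qi_cache memory for one skcipher job after this patch:
     *
     *   struct skcipher_edesc
     *   +---------------------------+
     *   | bookkeeping fields        |
     *   | sgt[qm_sg_ents]           |  <- [IV, src...][dst..., IV]
     *   +---------------------------+
     *   | iv[ivsize]                |  <- both IV entries point here
     *   +---------------------------+
     */
    u8 *iv = (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes;

    memcpy(req->iv, iv, ivsize);    /* engine already stored the new IV */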
@@ -1276,14 +1280,26 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, | |||
1276 | qm_sg_ents = 1 + mapped_src_nents; | 1280 | qm_sg_ents = 1 + mapped_src_nents; |
1277 | dst_sg_idx = qm_sg_ents; | 1281 | dst_sg_idx = qm_sg_ents; |
1278 | 1282 | ||
1279 | qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; | 1283 | /* |
1284 | * Input, output HW S/G tables: [IV, src][dst, IV] | ||
1285 | * IV entries point to the same buffer | ||
1286 | * If src == dst, S/G entries are reused (S/G tables overlap) | ||
1287 | * | ||
1288 | * HW reads 4 S/G entries at a time; make sure the reads don't go beyond | ||
1289 | * the end of the table by allocating more S/G entries. | ||
1290 | */ | ||
1291 | if (req->src != req->dst) | ||
1292 | qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1); | ||
1293 | else | ||
1294 | qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents); | ||
1295 | |||
1280 | qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); | 1296 | qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); |
1281 | if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + | 1297 | if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + |
1282 | ivsize > CAAM_QI_MEMCACHE_SIZE)) { | 1298 | ivsize > CAAM_QI_MEMCACHE_SIZE)) { |
1283 | dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", | 1299 | dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", |
1284 | qm_sg_ents, ivsize); | 1300 | qm_sg_ents, ivsize); |
1285 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, | 1301 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, |
1286 | 0, 0, 0); | 1302 | 0, DMA_NONE, 0, 0); |
1287 | return ERR_PTR(-ENOMEM); | 1303 | return ERR_PTR(-ENOMEM); |
1288 | } | 1304 | } |
1289 | 1305 | ||
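Note: putting the new sizing rule into numbers (illustrative; pad_sg_nents() as sketched earlier):

    /* src != dst, mapped_src_nents = 3, mapped_dst_nents = 2:
     *   input table : [IV][src0][src1][src2]         -> 1 + 3 = 4 entries
     *   output table: [dst0][dst1][IV], padded to 4  -> pad_sg_nents(2 + 1) = 4
     *   qm_sg_ents = 4 + 4 = 8; output starts at dst_sg_idx = 4
     *
     * src == dst, mapped_src_nents = 3 (tables overlap):
     *   [IV][sg0][sg1][sg2][IV]; the output table is the same entries
     *   starting at index 1, so only the slice past the leading IV
     *   entry needs padding: qm_sg_ents = 1 + pad_sg_nents(1 + 3) = 5
     */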
@@ -1292,7 +1308,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, | |||
1292 | if (unlikely(!edesc)) { | 1308 | if (unlikely(!edesc)) { |
1293 | dev_err(qidev, "could not allocate extended descriptor\n"); | 1309 | dev_err(qidev, "could not allocate extended descriptor\n"); |
1294 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, | 1310 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, |
1295 | 0, 0, 0); | 1311 | 0, DMA_NONE, 0, 0); |
1296 | return ERR_PTR(-ENOMEM); | 1312 | return ERR_PTR(-ENOMEM); |
1297 | } | 1313 | } |
1298 | 1314 | ||
@@ -1301,11 +1317,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, | |||
1301 | iv = (u8 *)(sg_table + qm_sg_ents); | 1317 | iv = (u8 *)(sg_table + qm_sg_ents); |
1302 | memcpy(iv, req->iv, ivsize); | 1318 | memcpy(iv, req->iv, ivsize); |
1303 | 1319 | ||
1304 | iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); | 1320 | iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL); |
1305 | if (dma_mapping_error(qidev, iv_dma)) { | 1321 | if (dma_mapping_error(qidev, iv_dma)) { |
1306 | dev_err(qidev, "unable to map IV\n"); | 1322 | dev_err(qidev, "unable to map IV\n"); |
1307 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, | 1323 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, |
1308 | 0, 0, 0); | 1324 | 0, DMA_NONE, 0, 0); |
1309 | qi_cache_free(edesc); | 1325 | qi_cache_free(edesc); |
1310 | return ERR_PTR(-ENOMEM); | 1326 | return ERR_PTR(-ENOMEM); |
1311 | } | 1327 | } |
@@ -1319,18 +1335,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, | |||
1319 | edesc->drv_req.drv_ctx = drv_ctx; | 1335 | edesc->drv_req.drv_ctx = drv_ctx; |
1320 | 1336 | ||
1321 | dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); | 1337 | dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); |
1322 | sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0); | 1338 | sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0); |
1323 | 1339 | ||
1324 | if (mapped_dst_nents > 1) | 1340 | if (req->src != req->dst) |
1325 | sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + | 1341 | sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0); |
1326 | dst_sg_idx, 0); | 1342 | |
1343 | dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma, | ||
1344 | ivsize, 0); | ||
1327 | 1345 | ||
1328 | edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, | 1346 | edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, |
1329 | DMA_TO_DEVICE); | 1347 | DMA_TO_DEVICE); |
1330 | if (dma_mapping_error(qidev, edesc->qm_sg_dma)) { | 1348 | if (dma_mapping_error(qidev, edesc->qm_sg_dma)) { |
1331 | dev_err(qidev, "unable to map S/G table\n"); | 1349 | dev_err(qidev, "unable to map S/G table\n"); |
1332 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, | 1350 | caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, |
1333 | iv_dma, ivsize, 0, 0); | 1351 | iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0); |
1334 | qi_cache_free(edesc); | 1352 | qi_cache_free(edesc); |
1335 | return ERR_PTR(-ENOMEM); | 1353 | return ERR_PTR(-ENOMEM); |
1336 | } | 1354 | } |
@@ -1340,16 +1358,14 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, | |||
1340 | dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, | 1358 | dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, |
1341 | ivsize + req->cryptlen, 0); | 1359 | ivsize + req->cryptlen, 0); |
1342 | 1360 | ||
1343 | if (req->src == req->dst) { | 1361 | if (req->src == req->dst) |
1344 | dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + | 1362 | dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + |
1345 | sizeof(*sg_table), req->cryptlen, 0); | 1363 | sizeof(*sg_table), req->cryptlen + ivsize, |
1346 | } else if (mapped_dst_nents > 1) { | 1364 | 0); |
1365 | else | ||
1347 | dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * | 1366 | dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * |
1348 | sizeof(*sg_table), req->cryptlen, 0); | 1367 | sizeof(*sg_table), req->cryptlen + ivsize, |
1349 | } else { | 1368 | 0); |
1350 | dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), | ||
1351 | req->cryptlen, 0); | ||
1352 | } | ||
1353 | 1369 | ||
1354 | return edesc; | 1370 | return edesc; |
1355 | } | 1371 | } |
@@ -1359,7 +1375,6 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) | |||
1359 | struct skcipher_edesc *edesc; | 1375 | struct skcipher_edesc *edesc; |
1360 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); | 1376 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
1361 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); | 1377 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); |
1362 | int ivsize = crypto_skcipher_ivsize(skcipher); | ||
1363 | int ret; | 1378 | int ret; |
1364 | 1379 | ||
1365 | if (unlikely(caam_congested)) | 1380 | if (unlikely(caam_congested)) |
@@ -1370,14 +1385,6 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) | |||
1370 | if (IS_ERR(edesc)) | 1385 | if (IS_ERR(edesc)) |
1371 | return PTR_ERR(edesc); | 1386 | return PTR_ERR(edesc); |
1372 | 1387 | ||
1373 | /* | ||
1374 | * The crypto API expects us to set the IV (req->iv) to the last | ||
1375 | * ciphertext block. | ||
1376 | */ | ||
1377 | if (!encrypt) | ||
1378 | scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - | ||
1379 | ivsize, ivsize, 0); | ||
1380 | |||
1381 | ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); | 1388 | ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); |
1382 | if (!ret) { | 1389 | if (!ret) { |
1383 | ret = -EINPROGRESS; | 1390 | ret = -EINPROGRESS; |
@@ -2382,6 +2389,7 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, | |||
2382 | bool uses_dkp) | 2389 | bool uses_dkp) |
2383 | { | 2390 | { |
2384 | struct caam_drv_private *priv; | 2391 | struct caam_drv_private *priv; |
2392 | struct device *dev; | ||
2385 | 2393 | ||
2386 | /* | 2394 | /* |
2387 | * distribute tfms across job rings to ensure in-order | 2395 | * distribute tfms across job rings to ensure in-order |
@@ -2393,16 +2401,17 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, | |||
2393 | return PTR_ERR(ctx->jrdev); | 2401 | return PTR_ERR(ctx->jrdev); |
2394 | } | 2402 | } |
2395 | 2403 | ||
2396 | priv = dev_get_drvdata(ctx->jrdev->parent); | 2404 | dev = ctx->jrdev->parent; |
2405 | priv = dev_get_drvdata(dev); | ||
2397 | if (priv->era >= 6 && uses_dkp) | 2406 | if (priv->era >= 6 && uses_dkp) |
2398 | ctx->dir = DMA_BIDIRECTIONAL; | 2407 | ctx->dir = DMA_BIDIRECTIONAL; |
2399 | else | 2408 | else |
2400 | ctx->dir = DMA_TO_DEVICE; | 2409 | ctx->dir = DMA_TO_DEVICE; |
2401 | 2410 | ||
2402 | ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key), | 2411 | ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key), |
2403 | ctx->dir); | 2412 | ctx->dir); |
2404 | if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) { | 2413 | if (dma_mapping_error(dev, ctx->key_dma)) { |
2405 | dev_err(ctx->jrdev, "unable to map key\n"); | 2414 | dev_err(dev, "unable to map key\n"); |
2406 | caam_jr_free(ctx->jrdev); | 2415 | caam_jr_free(ctx->jrdev); |
2407 | return -ENOMEM; | 2416 | return -ENOMEM; |
2408 | } | 2417 | } |
@@ -2411,7 +2420,7 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, | |||
2411 | ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; | 2420 | ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; |
2412 | ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; | 2421 | ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; |
2413 | 2422 | ||
2414 | ctx->qidev = priv->qidev; | 2423 | ctx->qidev = dev; |
2415 | 2424 | ||
2416 | spin_lock_init(&ctx->lock); | 2425 | spin_lock_init(&ctx->lock); |
2417 | ctx->drv_ctx[ENCRYPT] = NULL; | 2426 | ctx->drv_ctx[ENCRYPT] = NULL; |
@@ -2445,7 +2454,8 @@ static void caam_exit_common(struct caam_ctx *ctx) | |||
2445 | caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]); | 2454 | caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]); |
2446 | caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); | 2455 | caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); |
2447 | 2456 | ||
2448 | dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir); | 2457 | dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key), |
2458 | ctx->dir); | ||
2449 | 2459 | ||
2450 | caam_jr_free(ctx->jrdev); | 2460 | caam_jr_free(ctx->jrdev); |
2451 | } | 2461 | } |
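Note: the switch from ctx->jrdev to ctx->jrdev->parent in the setkey paths, caam_init_common() and caam_exit_common() preserves one invariant: a DMA mapping must be synced and unmapped against the same struct device it was created with — here the job ring's parent, i.e. the CAAM controller. Condensed from the hunks above:

    struct device *dev = ctx->jrdev->parent;    /* the CAAM controller */

    /* map once at init time ... */
    ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key), ctx->dir);
    if (dma_mapping_error(dev, ctx->key_dma))
            return -ENOMEM;

    /* ... sync against the same device whenever the key changes ... */
    dma_sync_single_for_device(dev, ctx->key_dma,
                               ctx->adata.keylen_pad + keys.enckeylen,
                               ctx->dir);

    /* ... and unmap with it at exit time */
    dma_unmap_single(dev, ctx->key_dma, sizeof(ctx->key), ctx->dir);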
@@ -2460,7 +2470,7 @@ static void caam_aead_exit(struct crypto_aead *tfm) | |||
2460 | caam_exit_common(crypto_aead_ctx(tfm)); | 2470 | caam_exit_common(crypto_aead_ctx(tfm)); |
2461 | } | 2471 | } |
2462 | 2472 | ||
2463 | static void __exit caam_qi_algapi_exit(void) | 2473 | void caam_qi_algapi_exit(void) |
2464 | { | 2474 | { |
2465 | int i; | 2475 | int i; |
2466 | 2476 | ||
@@ -2505,45 +2515,17 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg) | |||
2505 | alg->exit = caam_aead_exit; | 2515 | alg->exit = caam_aead_exit; |
2506 | } | 2516 | } |
2507 | 2517 | ||
2508 | static int __init caam_qi_algapi_init(void) | 2518 | int caam_qi_algapi_init(struct device *ctrldev) |
2509 | { | 2519 | { |
2510 | struct device_node *dev_node; | 2520 | struct caam_drv_private *priv = dev_get_drvdata(ctrldev); |
2511 | struct platform_device *pdev; | ||
2512 | struct device *ctrldev; | ||
2513 | struct caam_drv_private *priv; | ||
2514 | int i = 0, err = 0; | 2521 | int i = 0, err = 0; |
2515 | u32 aes_vid, aes_inst, des_inst, md_vid, md_inst; | 2522 | u32 aes_vid, aes_inst, des_inst, md_vid, md_inst; |
2516 | unsigned int md_limit = SHA512_DIGEST_SIZE; | 2523 | unsigned int md_limit = SHA512_DIGEST_SIZE; |
2517 | bool registered = false; | 2524 | bool registered = false; |
2518 | 2525 | ||
2519 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
2520 | if (!dev_node) { | ||
2521 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
2522 | if (!dev_node) | ||
2523 | return -ENODEV; | ||
2524 | } | ||
2525 | |||
2526 | pdev = of_find_device_by_node(dev_node); | ||
2527 | of_node_put(dev_node); | ||
2528 | if (!pdev) | ||
2529 | return -ENODEV; | ||
2530 | |||
2531 | ctrldev = &pdev->dev; | ||
2532 | priv = dev_get_drvdata(ctrldev); | ||
2533 | |||
2534 | /* | ||
2535 | * If priv is NULL, it's probably because the caam driver wasn't | ||
2536 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. | ||
2537 | */ | ||
2538 | if (!priv || !priv->qi_present) { | ||
2539 | err = -ENODEV; | ||
2540 | goto out_put_dev; | ||
2541 | } | ||
2542 | |||
2543 | if (caam_dpaa2) { | 2526 | if (caam_dpaa2) { |
2544 | dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n"); | 2527 | dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n"); |
2545 | err = -ENODEV; | 2528 | return -ENODEV; |
2546 | goto out_put_dev; | ||
2547 | } | 2529 | } |
2548 | 2530 | ||
2549 | /* | 2531 | /* |
@@ -2598,7 +2580,7 @@ static int __init caam_qi_algapi_init(void) | |||
2598 | 2580 | ||
2599 | err = crypto_register_skcipher(&t_alg->skcipher); | 2581 | err = crypto_register_skcipher(&t_alg->skcipher); |
2600 | if (err) { | 2582 | if (err) { |
2601 | dev_warn(priv->qidev, "%s alg registration failed\n", | 2583 | dev_warn(ctrldev, "%s alg registration failed\n", |
2602 | t_alg->skcipher.base.cra_driver_name); | 2584 | t_alg->skcipher.base.cra_driver_name); |
2603 | continue; | 2585 | continue; |
2604 | } | 2586 | } |
@@ -2654,16 +2636,7 @@ static int __init caam_qi_algapi_init(void) | |||
2654 | } | 2636 | } |
2655 | 2637 | ||
2656 | if (registered) | 2638 | if (registered) |
2657 | dev_info(priv->qidev, "algorithms registered in /proc/crypto\n"); | 2639 | dev_info(ctrldev, "algorithms registered in /proc/crypto\n"); |
2658 | 2640 | ||
2659 | out_put_dev: | ||
2660 | put_device(ctrldev); | ||
2661 | return err; | 2641 | return err; |
2662 | } | 2642 | } |
2663 | |||
2664 | module_init(caam_qi_algapi_init); | ||
2665 | module_exit(caam_qi_algapi_exit); | ||
2666 | |||
2667 | MODULE_LICENSE("GPL"); | ||
2668 | MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend"); | ||
2669 | MODULE_AUTHOR("Freescale Semiconductor"); | ||
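Note: with module_init()/module_exit() gone, caam_qi_algapi_init()/caam_qi_algapi_exit() become plain exported entry points that the controller driver calls once it owns a fully probed ctrldev — which is why the device-tree walk and the priv/qi_present sanity checks could be dropped here. A hedged sketch of what the new call site might look like (the function name below is hypothetical; only caam_qi_algapi_init(), struct caam_drv_private and qi_present appear in the diff):

    static int caam_ctrl_register_qi_algs(struct device *ctrldev)
    {
            struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
            int err;

            if (!priv->qi_present)
                    return 0;       /* no QI backend on this SoC */

            err = caam_qi_algapi_init(ctrldev);
            if (err)
                    dev_warn(ctrldev,
                             "caam/qi algorithms registration failed\n");
            return err;
    }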
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 2b2980a8a9b9..06bf32c32cbd 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c | |||
@@ -1,7 +1,7 @@ | |||
1 | // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) | 1 | // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) |
2 | /* | 2 | /* |
3 | * Copyright 2015-2016 Freescale Semiconductor Inc. | 3 | * Copyright 2015-2016 Freescale Semiconductor Inc. |
4 | * Copyright 2017-2018 NXP | 4 | * Copyright 2017-2019 NXP |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include "compat.h" | 7 | #include "compat.h" |
@@ -140,7 +140,8 @@ static struct caam_request *to_caam_req(struct crypto_async_request *areq) | |||
140 | static void caam_unmap(struct device *dev, struct scatterlist *src, | 140 | static void caam_unmap(struct device *dev, struct scatterlist *src, |
141 | struct scatterlist *dst, int src_nents, | 141 | struct scatterlist *dst, int src_nents, |
142 | int dst_nents, dma_addr_t iv_dma, int ivsize, | 142 | int dst_nents, dma_addr_t iv_dma, int ivsize, |
143 | dma_addr_t qm_sg_dma, int qm_sg_bytes) | 143 | enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma, |
144 | int qm_sg_bytes) | ||
144 | { | 145 | { |
145 | if (dst != src) { | 146 | if (dst != src) { |
146 | if (src_nents) | 147 | if (src_nents) |
@@ -152,7 +153,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src, | |||
152 | } | 153 | } |
153 | 154 | ||
154 | if (iv_dma) | 155 | if (iv_dma) |
155 | dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); | 156 | dma_unmap_single(dev, iv_dma, ivsize, iv_dir); |
156 | 157 | ||
157 | if (qm_sg_bytes) | 158 | if (qm_sg_bytes) |
158 | dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); | 159 | dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); |
@@ -371,6 +372,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
371 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | 372 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
372 | GFP_KERNEL : GFP_ATOMIC; | 373 | GFP_KERNEL : GFP_ATOMIC; |
373 | int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; | 374 | int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; |
375 | int src_len, dst_len = 0; | ||
374 | struct aead_edesc *edesc; | 376 | struct aead_edesc *edesc; |
375 | dma_addr_t qm_sg_dma, iv_dma = 0; | 377 | dma_addr_t qm_sg_dma, iv_dma = 0; |
376 | int ivsize = 0; | 378 | int ivsize = 0; |
@@ -387,23 +389,21 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
387 | } | 389 | } |
388 | 390 | ||
389 | if (unlikely(req->dst != req->src)) { | 391 | if (unlikely(req->dst != req->src)) { |
390 | src_nents = sg_nents_for_len(req->src, req->assoclen + | 392 | src_len = req->assoclen + req->cryptlen; |
391 | req->cryptlen); | 393 | dst_len = src_len + (encrypt ? authsize : (-authsize)); |
394 | |||
395 | src_nents = sg_nents_for_len(req->src, src_len); | ||
392 | if (unlikely(src_nents < 0)) { | 396 | if (unlikely(src_nents < 0)) { |
393 | dev_err(dev, "Insufficient bytes (%d) in src S/G\n", | 397 | dev_err(dev, "Insufficient bytes (%d) in src S/G\n", |
394 | req->assoclen + req->cryptlen); | 398 | src_len); |
395 | qi_cache_free(edesc); | 399 | qi_cache_free(edesc); |
396 | return ERR_PTR(src_nents); | 400 | return ERR_PTR(src_nents); |
397 | } | 401 | } |
398 | 402 | ||
399 | dst_nents = sg_nents_for_len(req->dst, req->assoclen + | 403 | dst_nents = sg_nents_for_len(req->dst, dst_len); |
400 | req->cryptlen + | ||
401 | (encrypt ? authsize : | ||
402 | (-authsize))); | ||
403 | if (unlikely(dst_nents < 0)) { | 404 | if (unlikely(dst_nents < 0)) { |
404 | dev_err(dev, "Insufficient bytes (%d) in dst S/G\n", | 405 | dev_err(dev, "Insufficient bytes (%d) in dst S/G\n", |
405 | req->assoclen + req->cryptlen + | 406 | dst_len); |
406 | (encrypt ? authsize : (-authsize))); | ||
407 | qi_cache_free(edesc); | 407 | qi_cache_free(edesc); |
408 | return ERR_PTR(dst_nents); | 408 | return ERR_PTR(dst_nents); |
409 | } | 409 | } |
@@ -434,13 +434,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
434 | mapped_dst_nents = 0; | 434 | mapped_dst_nents = 0; |
435 | } | 435 | } |
436 | } else { | 436 | } else { |
437 | src_nents = sg_nents_for_len(req->src, req->assoclen + | 437 | src_len = req->assoclen + req->cryptlen + |
438 | req->cryptlen + | 438 | (encrypt ? authsize : 0); |
439 | (encrypt ? authsize : 0)); | 439 | |
440 | src_nents = sg_nents_for_len(req->src, src_len); | ||
440 | if (unlikely(src_nents < 0)) { | 441 | if (unlikely(src_nents < 0)) { |
441 | dev_err(dev, "Insufficient bytes (%d) in src S/G\n", | 442 | dev_err(dev, "Insufficient bytes (%d) in src S/G\n", |
442 | req->assoclen + req->cryptlen + | 443 | src_len); |
443 | (encrypt ? authsize : 0)); | ||
444 | qi_cache_free(edesc); | 444 | qi_cache_free(edesc); |
445 | return ERR_PTR(src_nents); | 445 | return ERR_PTR(src_nents); |
446 | } | 446 | } |
@@ -460,9 +460,25 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
460 | /* | 460 | /* |
461 | * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. | 461 | * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. |
462 | * Input is not contiguous. | 462 | * Input is not contiguous. |
463 | * HW reads 4 S/G entries at a time; make sure the reads don't go beyond | ||
464 | * the end of the table by allocating more S/G entries. Logic: | ||
465 | * if (src != dst && output S/G) | ||
466 | * pad output S/G, if needed | ||
467 | * else if (src == dst && S/G) | ||
468 | * overlapping S/Gs; pad one of them | ||
469 | * else if (input S/G) ... | ||
470 | * pad input S/G, if needed | ||
463 | */ | 471 | */ |
464 | qm_sg_nents = 1 + !!ivsize + mapped_src_nents + | 472 | qm_sg_nents = 1 + !!ivsize + mapped_src_nents; |
465 | (mapped_dst_nents > 1 ? mapped_dst_nents : 0); | 473 | if (mapped_dst_nents > 1) |
474 | qm_sg_nents += pad_sg_nents(mapped_dst_nents); | ||
475 | else if ((req->src == req->dst) && (mapped_src_nents > 1)) | ||
476 | qm_sg_nents = max(pad_sg_nents(qm_sg_nents), | ||
477 | 1 + !!ivsize + | ||
478 | pad_sg_nents(mapped_src_nents)); | ||
479 | else | ||
480 | qm_sg_nents = pad_sg_nents(qm_sg_nents); | ||
481 | |||
466 | sg_table = &edesc->sgt[0]; | 482 | sg_table = &edesc->sgt[0]; |
467 | qm_sg_bytes = qm_sg_nents * sizeof(*sg_table); | 483 | qm_sg_bytes = qm_sg_nents * sizeof(*sg_table); |
468 | if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > | 484 | if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > |
@@ -470,7 +486,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
470 | dev_err(dev, "No space for %d S/G entries and/or %dB IV\n", | 486 | dev_err(dev, "No space for %d S/G entries and/or %dB IV\n", |
471 | qm_sg_nents, ivsize); | 487 | qm_sg_nents, ivsize); |
472 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, | 488 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, |
473 | 0, 0, 0); | 489 | 0, DMA_NONE, 0, 0); |
474 | qi_cache_free(edesc); | 490 | qi_cache_free(edesc); |
475 | return ERR_PTR(-ENOMEM); | 491 | return ERR_PTR(-ENOMEM); |
476 | } | 492 | } |
@@ -485,7 +501,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
485 | if (dma_mapping_error(dev, iv_dma)) { | 501 | if (dma_mapping_error(dev, iv_dma)) { |
486 | dev_err(dev, "unable to map IV\n"); | 502 | dev_err(dev, "unable to map IV\n"); |
487 | caam_unmap(dev, req->src, req->dst, src_nents, | 503 | caam_unmap(dev, req->src, req->dst, src_nents, |
488 | dst_nents, 0, 0, 0, 0); | 504 | dst_nents, 0, 0, DMA_NONE, 0, 0); |
489 | qi_cache_free(edesc); | 505 | qi_cache_free(edesc); |
490 | return ERR_PTR(-ENOMEM); | 506 | return ERR_PTR(-ENOMEM); |
491 | } | 507 | } |
@@ -509,7 +525,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
509 | if (dma_mapping_error(dev, edesc->assoclen_dma)) { | 525 | if (dma_mapping_error(dev, edesc->assoclen_dma)) { |
510 | dev_err(dev, "unable to map assoclen\n"); | 526 | dev_err(dev, "unable to map assoclen\n"); |
511 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, | 527 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, |
512 | iv_dma, ivsize, 0, 0); | 528 | iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); |
513 | qi_cache_free(edesc); | 529 | qi_cache_free(edesc); |
514 | return ERR_PTR(-ENOMEM); | 530 | return ERR_PTR(-ENOMEM); |
515 | } | 531 | } |
@@ -520,19 +536,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
520 | dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); | 536 | dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); |
521 | qm_sg_index++; | 537 | qm_sg_index++; |
522 | } | 538 | } |
523 | sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0); | 539 | sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0); |
524 | qm_sg_index += mapped_src_nents; | 540 | qm_sg_index += mapped_src_nents; |
525 | 541 | ||
526 | if (mapped_dst_nents > 1) | 542 | if (mapped_dst_nents > 1) |
527 | sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + | 543 | sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0); |
528 | qm_sg_index, 0); | ||
529 | 544 | ||
530 | qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); | 545 | qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); |
531 | if (dma_mapping_error(dev, qm_sg_dma)) { | 546 | if (dma_mapping_error(dev, qm_sg_dma)) { |
532 | dev_err(dev, "unable to map S/G table\n"); | 547 | dev_err(dev, "unable to map S/G table\n"); |
533 | dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); | 548 | dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); |
534 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, | 549 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, |
535 | iv_dma, ivsize, 0, 0); | 550 | iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); |
536 | qi_cache_free(edesc); | 551 | qi_cache_free(edesc); |
537 | return ERR_PTR(-ENOMEM); | 552 | return ERR_PTR(-ENOMEM); |
538 | } | 553 | } |
@@ -559,6 +574,14 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
559 | dpaa2_fl_set_addr(out_fle, qm_sg_dma + | 574 | dpaa2_fl_set_addr(out_fle, qm_sg_dma + |
560 | (1 + !!ivsize) * sizeof(*sg_table)); | 575 | (1 + !!ivsize) * sizeof(*sg_table)); |
561 | } | 576 | } |
577 | } else if (!mapped_dst_nents) { | ||
578 | /* | ||
579 | * crypto engine requires the output entry to be present when | ||
580 | * "frame list" FD is used. | ||
581 | * Since engine does not support FMT=2'b11 (unused entry type), | ||
582 | * leaving out_fle zeroized is the best option. | ||
583 | */ | ||
584 | goto skip_out_fle; | ||
562 | } else if (mapped_dst_nents == 1) { | 585 | } else if (mapped_dst_nents == 1) { |
563 | dpaa2_fl_set_format(out_fle, dpaa2_fl_single); | 586 | dpaa2_fl_set_format(out_fle, dpaa2_fl_single); |
564 | dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst)); | 587 | dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst)); |
@@ -570,6 +593,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
570 | 593 | ||
571 | dpaa2_fl_set_len(out_fle, out_len); | 594 | dpaa2_fl_set_len(out_fle, out_len); |
572 | 595 | ||
596 | skip_out_fle: | ||
573 | return edesc; | 597 | return edesc; |
574 | } | 598 | } |
575 | 599 | ||
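Note: the skip_out_fle path covers requests that end up with no mapped output at all; the engine requires an output entry in a frame-list FD but defines no "unused" entry type (FMT=2'b11), so the least-bad encoding is to leave the entry zeroized. The resulting output dispatch for the src != dst case, consolidated from the hunks above (the src == dst branch reuses the input table instead):

    if (mapped_dst_nents) {
            if (mapped_dst_nents == 1) {
                    dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
                    dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
            } else {
                    dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
                    dpaa2_fl_set_addr(out_fle, qm_sg_dma +
                                      dst_sg_idx * sizeof(*sg_table));
            }
            dpaa2_fl_set_len(out_fle, out_len);
    }
    /* else: out_fle is left zeroized, as the patch comment says */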
@@ -1077,14 +1101,26 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req) | |||
1077 | qm_sg_ents = 1 + mapped_src_nents; | 1101 | qm_sg_ents = 1 + mapped_src_nents; |
1078 | dst_sg_idx = qm_sg_ents; | 1102 | dst_sg_idx = qm_sg_ents; |
1079 | 1103 | ||
1080 | qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; | 1104 | /* |
1105 | * Input, output HW S/G tables: [IV, src][dst, IV] | ||
1106 | * IV entries point to the same buffer | ||
1107 | * If src == dst, S/G entries are reused (S/G tables overlap) | ||
1108 | * | ||
1109 | * HW reads 4 S/G entries at a time; make sure the reads don't go beyond | ||
1110 | * the end of the table by allocating more S/G entries. | ||
1111 | */ | ||
1112 | if (req->src != req->dst) | ||
1113 | qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1); | ||
1114 | else | ||
1115 | qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents); | ||
1116 | |||
1081 | qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry); | 1117 | qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry); |
1082 | if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + | 1118 | if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + |
1083 | ivsize > CAAM_QI_MEMCACHE_SIZE)) { | 1119 | ivsize > CAAM_QI_MEMCACHE_SIZE)) { |
1084 | dev_err(dev, "No space for %d S/G entries and/or %dB IV\n", | 1120 | dev_err(dev, "No space for %d S/G entries and/or %dB IV\n", |
1085 | qm_sg_ents, ivsize); | 1121 | qm_sg_ents, ivsize); |
1086 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, | 1122 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, |
1087 | 0, 0, 0); | 1123 | 0, DMA_NONE, 0, 0); |
1088 | return ERR_PTR(-ENOMEM); | 1124 | return ERR_PTR(-ENOMEM); |
1089 | } | 1125 | } |
1090 | 1126 | ||
@@ -1093,7 +1129,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req) | |||
1093 | if (unlikely(!edesc)) { | 1129 | if (unlikely(!edesc)) { |
1094 | dev_err(dev, "could not allocate extended descriptor\n"); | 1130 | dev_err(dev, "could not allocate extended descriptor\n"); |
1095 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, | 1131 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, |
1096 | 0, 0, 0); | 1132 | 0, DMA_NONE, 0, 0); |
1097 | return ERR_PTR(-ENOMEM); | 1133 | return ERR_PTR(-ENOMEM); |
1098 | } | 1134 | } |
1099 | 1135 | ||
@@ -1102,11 +1138,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req) | |||
1102 | iv = (u8 *)(sg_table + qm_sg_ents); | 1138 | iv = (u8 *)(sg_table + qm_sg_ents); |
1103 | memcpy(iv, req->iv, ivsize); | 1139 | memcpy(iv, req->iv, ivsize); |
1104 | 1140 | ||
1105 | iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); | 1141 | iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL); |
1106 | if (dma_mapping_error(dev, iv_dma)) { | 1142 | if (dma_mapping_error(dev, iv_dma)) { |
1107 | dev_err(dev, "unable to map IV\n"); | 1143 | dev_err(dev, "unable to map IV\n"); |
1108 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, | 1144 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, |
1109 | 0, 0, 0); | 1145 | 0, DMA_NONE, 0, 0); |
1110 | qi_cache_free(edesc); | 1146 | qi_cache_free(edesc); |
1111 | return ERR_PTR(-ENOMEM); | 1147 | return ERR_PTR(-ENOMEM); |
1112 | } | 1148 | } |
@@ -1117,18 +1153,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req) | |||
1117 | edesc->qm_sg_bytes = qm_sg_bytes; | 1153 | edesc->qm_sg_bytes = qm_sg_bytes; |
1118 | 1154 | ||
1119 | dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); | 1155 | dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); |
1120 | sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0); | 1156 | sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0); |
1121 | 1157 | ||
1122 | if (mapped_dst_nents > 1) | 1158 | if (req->src != req->dst) |
1123 | sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + | 1159 | sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0); |
1124 | dst_sg_idx, 0); | 1160 | |
1161 | dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma, | ||
1162 | ivsize, 0); | ||
1125 | 1163 | ||
1126 | edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes, | 1164 | edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes, |
1127 | DMA_TO_DEVICE); | 1165 | DMA_TO_DEVICE); |
1128 | if (dma_mapping_error(dev, edesc->qm_sg_dma)) { | 1166 | if (dma_mapping_error(dev, edesc->qm_sg_dma)) { |
1129 | dev_err(dev, "unable to map S/G table\n"); | 1167 | dev_err(dev, "unable to map S/G table\n"); |
1130 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, | 1168 | caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, |
1131 | iv_dma, ivsize, 0, 0); | 1169 | iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0); |
1132 | qi_cache_free(edesc); | 1170 | qi_cache_free(edesc); |
1133 | return ERR_PTR(-ENOMEM); | 1171 | return ERR_PTR(-ENOMEM); |
1134 | } | 1172 | } |
@@ -1136,23 +1174,19 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req) | |||
1136 | memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); | 1174 | memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); |
1137 | dpaa2_fl_set_final(in_fle, true); | 1175 | dpaa2_fl_set_final(in_fle, true); |
1138 | dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize); | 1176 | dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize); |
1139 | dpaa2_fl_set_len(out_fle, req->cryptlen); | 1177 | dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize); |
1140 | 1178 | ||
1141 | dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); | 1179 | dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); |
1142 | dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); | 1180 | dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); |
1143 | 1181 | ||
1144 | if (req->src == req->dst) { | 1182 | dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); |
1145 | dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); | 1183 | |
1184 | if (req->src == req->dst) | ||
1146 | dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + | 1185 | dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + |
1147 | sizeof(*sg_table)); | 1186 | sizeof(*sg_table)); |
1148 | } else if (mapped_dst_nents > 1) { | 1187 | else |
1149 | dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); | ||
1150 | dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx * | 1188 | dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx * |
1151 | sizeof(*sg_table)); | 1189 | sizeof(*sg_table)); |
1152 | } else { | ||
1153 | dpaa2_fl_set_format(out_fle, dpaa2_fl_single); | ||
1154 | dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst)); | ||
1155 | } | ||
1156 | 1190 | ||
1157 | return edesc; | 1191 | return edesc; |
1158 | } | 1192 | } |
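Note: on the DPAA2 side the output frame now always carries the trailing IV entry, so the single-buffer fast path for dst disappears and both frame-list lengths grow to include the IV:

    dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);   /* [IV, src...] */
    dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);  /* [dst..., IV] */

    /* even a contiguous dst is described by an S/G table now */
    dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);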
@@ -1164,7 +1198,8 @@ static void aead_unmap(struct device *dev, struct aead_edesc *edesc, | |||
1164 | int ivsize = crypto_aead_ivsize(aead); | 1198 | int ivsize = crypto_aead_ivsize(aead); |
1165 | 1199 | ||
1166 | caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, | 1200 | caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, |
1167 | edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes); | 1201 | edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma, |
1202 | edesc->qm_sg_bytes); | ||
1168 | dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); | 1203 | dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); |
1169 | } | 1204 | } |
1170 | 1205 | ||
@@ -1175,7 +1210,8 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc, | |||
1175 | int ivsize = crypto_skcipher_ivsize(skcipher); | 1210 | int ivsize = crypto_skcipher_ivsize(skcipher); |
1176 | 1211 | ||
1177 | caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, | 1212 | caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, |
1178 | edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes); | 1213 | edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma, |
1214 | edesc->qm_sg_bytes); | ||
1179 | } | 1215 | } |
1180 | 1216 | ||
1181 | static void aead_encrypt_done(void *cbk_ctx, u32 status) | 1217 | static void aead_encrypt_done(void *cbk_ctx, u32 status) |
@@ -1324,7 +1360,7 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status) | |||
1324 | print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", | 1360 | print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", |
1325 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | 1361 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, |
1326 | edesc->src_nents > 1 ? 100 : ivsize, 1); | 1362 | edesc->src_nents > 1 ? 100 : ivsize, 1); |
1327 | caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ", | 1363 | caam_dump_sg("dst @" __stringify(__LINE__)": ", |
1328 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, | 1364 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, |
1329 | edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); | 1365 | edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); |
1330 | 1366 | ||
@@ -1332,10 +1368,10 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status) | |||
1332 | 1368 | ||
1333 | /* | 1369 | /* |
1334 | * The crypto API expects us to set the IV (req->iv) to the last | 1370 | * The crypto API expects us to set the IV (req->iv) to the last |
1335 | * ciphertext block. This is used e.g. by the CTS mode. | 1371 | * ciphertext block (CBC mode) or last counter (CTR mode). |
1372 | * This is used e.g. by the CTS mode. | ||
1336 | */ | 1373 | */ |
1337 | scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize, | 1374 | memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); |
1338 | ivsize, 0); | ||
1339 | 1375 | ||
1340 | qi_cache_free(edesc); | 1376 | qi_cache_free(edesc); |
1341 | skcipher_request_complete(req, ecode); | 1377 | skcipher_request_complete(req, ecode); |
@@ -1362,11 +1398,19 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status) | |||
1362 | print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", | 1398 | print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", |
1363 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, | 1399 | DUMP_PREFIX_ADDRESS, 16, 4, req->iv, |
1364 | edesc->src_nents > 1 ? 100 : ivsize, 1); | 1400 | edesc->src_nents > 1 ? 100 : ivsize, 1); |
1365 | caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ", | 1401 | caam_dump_sg("dst @" __stringify(__LINE__)": ", |
1366 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, | 1402 | DUMP_PREFIX_ADDRESS, 16, 4, req->dst, |
1367 | edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); | 1403 | edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); |
1368 | 1404 | ||
1369 | skcipher_unmap(ctx->dev, edesc, req); | 1405 | skcipher_unmap(ctx->dev, edesc, req); |
1406 | |||
1407 | /* | ||
1408 | * The crypto API expects us to set the IV (req->iv) to the last | ||
1409 | * ciphertext block (CBC mode) or last counter (CTR mode). | ||
1410 | * This is used e.g. by the CTS mode. | ||
1411 | */ | ||
1412 | memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); | ||
1413 | |||
1370 | qi_cache_free(edesc); | 1414 | qi_cache_free(edesc); |
1371 | skcipher_request_complete(req, ecode); | 1415 | skcipher_request_complete(req, ecode); |
1372 | } | 1416 | } |
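Both completion callbacks now read the chaining value from the engine-written slot behind the S/G table. Copying the last ciphertext block out of req->dst (encrypt) or pre-saving it from req->src (decrypt) only ever worked for CBC; for CTR the next counter value is not recoverable from the data at all, so letting the hardware emit it is the mode-agnostic option. A caller-side sketch of what this contract enables (request and tfm setup elided; sg1/sg2/len1/len2 are hypothetical, and len1 must be a block-size multiple for CBC):

    DECLARE_CRYPTO_WAIT(wait);
    u8 iv[AES_BLOCK_SIZE];
    int err;

    get_random_bytes(iv, sizeof(iv));
    skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                  crypto_req_done, &wait);
    /* first half of the message */
    skcipher_request_set_crypt(req, sg1, sg1, len1, iv);
    err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
    /* iv[] now holds the last ciphertext block (CBC) / next counter (CTR) */
    skcipher_request_set_crypt(req, sg2, sg2, len2, iv);
    err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

Splitting one message across two requests this way must give the same result as a single request, which is exactly what e.g. CTS relies on.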
@@ -1405,7 +1449,6 @@ static int skcipher_decrypt(struct skcipher_request *req) | |||
1405 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); | 1449 | struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); |
1406 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); | 1450 | struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); |
1407 | struct caam_request *caam_req = skcipher_request_ctx(req); | 1451 | struct caam_request *caam_req = skcipher_request_ctx(req); |
1408 | int ivsize = crypto_skcipher_ivsize(skcipher); | ||
1409 | int ret; | 1452 | int ret; |
1410 | 1453 | ||
1411 | /* allocate extended descriptor */ | 1454 | /* allocate extended descriptor */ |
@@ -1413,13 +1456,6 @@ static int skcipher_decrypt(struct skcipher_request *req) | |||
1413 | if (IS_ERR(edesc)) | 1456 | if (IS_ERR(edesc)) |
1414 | return PTR_ERR(edesc); | 1457 | return PTR_ERR(edesc); |
1415 | 1458 | ||
1416 | /* | ||
1417 | * The crypto API expects us to set the IV (req->iv) to the last | ||
1418 | * ciphertext block. | ||
1419 | */ | ||
1420 | scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize, | ||
1421 | ivsize, 0); | ||
1422 | |||
1423 | caam_req->flc = &ctx->flc[DECRYPT]; | 1459 | caam_req->flc = &ctx->flc[DECRYPT]; |
1424 | caam_req->flc_dma = ctx->flc_dma[DECRYPT]; | 1460 | caam_req->flc_dma = ctx->flc_dma[DECRYPT]; |
1425 | caam_req->cbk = skcipher_decrypt_done; | 1461 | caam_req->cbk = skcipher_decrypt_done; |
@@ -3380,9 +3416,9 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
3380 | 3416 | ||
3381 | if (to_hash) { | 3417 | if (to_hash) { |
3382 | struct dpaa2_sg_entry *sg_table; | 3418 | struct dpaa2_sg_entry *sg_table; |
3419 | int src_len = req->nbytes - *next_buflen; | ||
3383 | 3420 | ||
3384 | src_nents = sg_nents_for_len(req->src, | 3421 | src_nents = sg_nents_for_len(req->src, src_len); |
3385 | req->nbytes - (*next_buflen)); | ||
3386 | if (src_nents < 0) { | 3422 | if (src_nents < 0) { |
3387 | dev_err(ctx->dev, "Invalid number of src SG.\n"); | 3423 | dev_err(ctx->dev, "Invalid number of src SG.\n"); |
3388 | return src_nents; | 3424 | return src_nents; |
@@ -3409,7 +3445,7 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
3409 | 3445 | ||
3410 | edesc->src_nents = src_nents; | 3446 | edesc->src_nents = src_nents; |
3411 | qm_sg_src_index = 1 + (*buflen ? 1 : 0); | 3447 | qm_sg_src_index = 1 + (*buflen ? 1 : 0); |
3412 | qm_sg_bytes = (qm_sg_src_index + mapped_nents) * | 3448 | qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) * |
3413 | sizeof(*sg_table); | 3449 | sizeof(*sg_table); |
3414 | sg_table = &edesc->sgt[0]; | 3450 | sg_table = &edesc->sgt[0]; |
3415 | 3451 | ||
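pad_sg_nents() shows up in every qm_sg_bytes computation from here on. Its definition is not part of this hunk; a plausible shape, under the assumption that the CAAM DMA engine prefetches S/G entries in bursts of four and the table must be padded so a burst never reads past the allocation:

    /* Assumed definition, not quoted from this diff. */
    static inline int pad_sg_nents(int sg_nents)
    {
            return ALIGN(sg_nents, 4);      /* assumed 4-entry burst */
    }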
@@ -3423,7 +3459,7 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
3423 | goto unmap_ctx; | 3459 | goto unmap_ctx; |
3424 | 3460 | ||
3425 | if (mapped_nents) { | 3461 | if (mapped_nents) { |
3426 | sg_to_qm_sg_last(req->src, mapped_nents, | 3462 | sg_to_qm_sg_last(req->src, src_len, |
3427 | sg_table + qm_sg_src_index, 0); | 3463 | sg_table + qm_sg_src_index, 0); |
3428 | if (*next_buflen) | 3464 | if (*next_buflen) |
3429 | scatterwalk_map_and_copy(next_buf, req->src, | 3465 | scatterwalk_map_and_copy(next_buf, req->src, |
@@ -3494,7 +3530,7 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
3494 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | 3530 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
3495 | GFP_KERNEL : GFP_ATOMIC; | 3531 | GFP_KERNEL : GFP_ATOMIC; |
3496 | int buflen = *current_buflen(state); | 3532 | int buflen = *current_buflen(state); |
3497 | int qm_sg_bytes, qm_sg_src_index; | 3533 | int qm_sg_bytes; |
3498 | int digestsize = crypto_ahash_digestsize(ahash); | 3534 | int digestsize = crypto_ahash_digestsize(ahash); |
3499 | struct ahash_edesc *edesc; | 3535 | struct ahash_edesc *edesc; |
3500 | struct dpaa2_sg_entry *sg_table; | 3536 | struct dpaa2_sg_entry *sg_table; |
@@ -3505,8 +3541,7 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
3505 | if (!edesc) | 3541 | if (!edesc) |
3506 | return -ENOMEM; | 3542 | return -ENOMEM; |
3507 | 3543 | ||
3508 | qm_sg_src_index = 1 + (buflen ? 1 : 0); | 3544 | qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table); |
3509 | qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table); | ||
3510 | sg_table = &edesc->sgt[0]; | 3545 | sg_table = &edesc->sgt[0]; |
3511 | 3546 | ||
3512 | ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, | 3547 | ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, |
@@ -3518,7 +3553,7 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
3518 | if (ret) | 3553 | if (ret) |
3519 | goto unmap_ctx; | 3554 | goto unmap_ctx; |
3520 | 3555 | ||
3521 | dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true); | 3556 | dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true); |
3522 | 3557 | ||
3523 | edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, | 3558 | edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, |
3524 | DMA_TO_DEVICE); | 3559 | DMA_TO_DEVICE); |
@@ -3599,7 +3634,8 @@ static int ahash_finup_ctx(struct ahash_request *req) | |||
3599 | 3634 | ||
3600 | edesc->src_nents = src_nents; | 3635 | edesc->src_nents = src_nents; |
3601 | qm_sg_src_index = 1 + (buflen ? 1 : 0); | 3636 | qm_sg_src_index = 1 + (buflen ? 1 : 0); |
3602 | qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table); | 3637 | qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) * |
3638 | sizeof(*sg_table); | ||
3603 | sg_table = &edesc->sgt[0]; | 3639 | sg_table = &edesc->sgt[0]; |
3604 | 3640 | ||
3605 | ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, | 3641 | ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, |
@@ -3611,7 +3647,7 @@ static int ahash_finup_ctx(struct ahash_request *req) | |||
3611 | if (ret) | 3647 | if (ret) |
3612 | goto unmap_ctx; | 3648 | goto unmap_ctx; |
3613 | 3649 | ||
3614 | sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0); | 3650 | sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0); |
3615 | 3651 | ||
3616 | edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, | 3652 | edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, |
3617 | DMA_TO_DEVICE); | 3653 | DMA_TO_DEVICE); |
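The other recurring change in this file: sg_to_qm_sg_last() now takes the byte length to cover (req->nbytes, src_len, ...) rather than a mapped-entry count, so a trailing, partially used scatterlist entry gets sized exactly instead of in whole-entry units. A sketch of what the reworked helpers plausibly look like, built on the dma_to_qm_sg_one() and dpaa2_sg_set_final() calls already visible in this diff:

    /* Sketch, assuming standard scatterlist accessors; not quoted source. */
    static struct dpaa2_sg_entry *
    sg_to_qm_sg(struct scatterlist *sg, int len,
                struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
    {
            int ent_len;

            while (len) {
                    /* clamp the final entry to the bytes actually requested */
                    ent_len = min_t(int, sg_dma_len(sg), len);

                    dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
                                     ent_len, offset);
                    qm_sg_ptr++;
                    sg = sg_next(sg);
                    len -= ent_len;
            }
            return qm_sg_ptr;
    }

    static void sg_to_qm_sg_last(struct scatterlist *sg, int len,
                                 struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
    {
            qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
            dpaa2_sg_set_final(qm_sg_ptr - 1, true);
    }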
@@ -3696,8 +3732,8 @@ static int ahash_digest(struct ahash_request *req) | |||
3696 | int qm_sg_bytes; | 3732 | int qm_sg_bytes; |
3697 | struct dpaa2_sg_entry *sg_table = &edesc->sgt[0]; | 3733 | struct dpaa2_sg_entry *sg_table = &edesc->sgt[0]; |
3698 | 3734 | ||
3699 | qm_sg_bytes = mapped_nents * sizeof(*sg_table); | 3735 | qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table); |
3700 | sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0); | 3736 | sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0); |
3701 | edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, | 3737 | edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, |
3702 | qm_sg_bytes, DMA_TO_DEVICE); | 3738 | qm_sg_bytes, DMA_TO_DEVICE); |
3703 | if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { | 3739 | if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { |
@@ -3840,9 +3876,9 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
3840 | 3876 | ||
3841 | if (to_hash) { | 3877 | if (to_hash) { |
3842 | struct dpaa2_sg_entry *sg_table; | 3878 | struct dpaa2_sg_entry *sg_table; |
3879 | int src_len = req->nbytes - *next_buflen; | ||
3843 | 3880 | ||
3844 | src_nents = sg_nents_for_len(req->src, | 3881 | src_nents = sg_nents_for_len(req->src, src_len); |
3845 | req->nbytes - *next_buflen); | ||
3846 | if (src_nents < 0) { | 3882 | if (src_nents < 0) { |
3847 | dev_err(ctx->dev, "Invalid number of src SG.\n"); | 3883 | dev_err(ctx->dev, "Invalid number of src SG.\n"); |
3848 | return src_nents; | 3884 | return src_nents; |
@@ -3868,14 +3904,15 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
3868 | } | 3904 | } |
3869 | 3905 | ||
3870 | edesc->src_nents = src_nents; | 3906 | edesc->src_nents = src_nents; |
3871 | qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table); | 3907 | qm_sg_bytes = pad_sg_nents(1 + mapped_nents) * |
3908 | sizeof(*sg_table); | ||
3872 | sg_table = &edesc->sgt[0]; | 3909 | sg_table = &edesc->sgt[0]; |
3873 | 3910 | ||
3874 | ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); | 3911 | ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); |
3875 | if (ret) | 3912 | if (ret) |
3876 | goto unmap_ctx; | 3913 | goto unmap_ctx; |
3877 | 3914 | ||
3878 | sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0); | 3915 | sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0); |
3879 | 3916 | ||
3880 | if (*next_buflen) | 3917 | if (*next_buflen) |
3881 | scatterwalk_map_and_copy(next_buf, req->src, | 3918 | scatterwalk_map_and_copy(next_buf, req->src, |
@@ -3987,14 +4024,14 @@ static int ahash_finup_no_ctx(struct ahash_request *req) | |||
3987 | } | 4024 | } |
3988 | 4025 | ||
3989 | edesc->src_nents = src_nents; | 4026 | edesc->src_nents = src_nents; |
3990 | qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table); | 4027 | qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table); |
3991 | sg_table = &edesc->sgt[0]; | 4028 | sg_table = &edesc->sgt[0]; |
3992 | 4029 | ||
3993 | ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); | 4030 | ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); |
3994 | if (ret) | 4031 | if (ret) |
3995 | goto unmap; | 4032 | goto unmap; |
3996 | 4033 | ||
3997 | sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0); | 4034 | sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0); |
3998 | 4035 | ||
3999 | edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, | 4036 | edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, |
4000 | DMA_TO_DEVICE); | 4037 | DMA_TO_DEVICE); |
@@ -4064,9 +4101,9 @@ static int ahash_update_first(struct ahash_request *req) | |||
4064 | 4101 | ||
4065 | if (to_hash) { | 4102 | if (to_hash) { |
4066 | struct dpaa2_sg_entry *sg_table; | 4103 | struct dpaa2_sg_entry *sg_table; |
4104 | int src_len = req->nbytes - *next_buflen; | ||
4067 | 4105 | ||
4068 | src_nents = sg_nents_for_len(req->src, | 4106 | src_nents = sg_nents_for_len(req->src, src_len); |
4069 | req->nbytes - (*next_buflen)); | ||
4070 | if (src_nents < 0) { | 4107 | if (src_nents < 0) { |
4071 | dev_err(ctx->dev, "Invalid number of src SG.\n"); | 4108 | dev_err(ctx->dev, "Invalid number of src SG.\n"); |
4072 | return src_nents; | 4109 | return src_nents; |
@@ -4101,8 +4138,9 @@ static int ahash_update_first(struct ahash_request *req) | |||
4101 | if (mapped_nents > 1) { | 4138 | if (mapped_nents > 1) { |
4102 | int qm_sg_bytes; | 4139 | int qm_sg_bytes; |
4103 | 4140 | ||
4104 | sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0); | 4141 | sg_to_qm_sg_last(req->src, src_len, sg_table, 0); |
4105 | qm_sg_bytes = mapped_nents * sizeof(*sg_table); | 4142 | qm_sg_bytes = pad_sg_nents(mapped_nents) * |
4143 | sizeof(*sg_table); | ||
4106 | edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, | 4144 | edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, |
4107 | qm_sg_bytes, | 4145 | qm_sg_bytes, |
4108 | DMA_TO_DEVICE); | 4146 | DMA_TO_DEVICE); |
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 7205d9f4029e..e4ac5d591ad6 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
@@ -82,14 +82,6 @@ | |||
82 | #define HASH_MSG_LEN 8 | 82 | #define HASH_MSG_LEN 8 |
83 | #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) | 83 | #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) |
84 | 84 | ||
85 | #ifdef DEBUG | ||
86 | /* for print_hex_dumps with line references */ | ||
87 | #define debug(format, arg...) printk(format, arg) | ||
88 | #else | ||
89 | #define debug(format, arg...) | ||
90 | #endif | ||
91 | |||
92 | |||
93 | static struct list_head hash_list; | 85 | static struct list_head hash_list; |
94 | 86 | ||
95 | /* ahash per-session context */ | 87 | /* ahash per-session context */ |
@@ -243,11 +235,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
243 | ctx->ctx_len, true, ctrlpriv->era); | 235 | ctx->ctx_len, true, ctrlpriv->era); |
244 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, | 236 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, |
245 | desc_bytes(desc), ctx->dir); | 237 | desc_bytes(desc), ctx->dir); |
246 | #ifdef DEBUG | 238 | |
247 | print_hex_dump(KERN_ERR, | 239 | print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ", |
248 | "ahash update shdesc@"__stringify(__LINE__)": ", | 240 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
249 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 241 | 1); |
250 | #endif | ||
251 | 242 | ||
252 | /* ahash_update_first shared descriptor */ | 243 | /* ahash_update_first shared descriptor */ |
253 | desc = ctx->sh_desc_update_first; | 244 | desc = ctx->sh_desc_update_first; |
@@ -255,11 +246,9 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
255 | ctx->ctx_len, false, ctrlpriv->era); | 246 | ctx->ctx_len, false, ctrlpriv->era); |
256 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, | 247 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, |
257 | desc_bytes(desc), ctx->dir); | 248 | desc_bytes(desc), ctx->dir); |
258 | #ifdef DEBUG | 249 | print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__) |
259 | print_hex_dump(KERN_ERR, | 250 | ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, |
260 | "ahash update first shdesc@"__stringify(__LINE__)": ", | 251 | desc_bytes(desc), 1); |
261 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | ||
262 | #endif | ||
263 | 252 | ||
264 | /* ahash_final shared descriptor */ | 253 | /* ahash_final shared descriptor */ |
265 | desc = ctx->sh_desc_fin; | 254 | desc = ctx->sh_desc_fin; |
@@ -267,11 +256,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
267 | ctx->ctx_len, true, ctrlpriv->era); | 256 | ctx->ctx_len, true, ctrlpriv->era); |
268 | dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, | 257 | dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, |
269 | desc_bytes(desc), ctx->dir); | 258 | desc_bytes(desc), ctx->dir); |
270 | #ifdef DEBUG | 259 | |
271 | print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", | 260 | print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ", |
272 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 261 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
273 | desc_bytes(desc), 1); | 262 | desc_bytes(desc), 1); |
274 | #endif | ||
275 | 263 | ||
276 | /* ahash_digest shared descriptor */ | 264 | /* ahash_digest shared descriptor */ |
277 | desc = ctx->sh_desc_digest; | 265 | desc = ctx->sh_desc_digest; |
@@ -279,12 +267,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) | |||
279 | ctx->ctx_len, false, ctrlpriv->era); | 267 | ctx->ctx_len, false, ctrlpriv->era); |
280 | dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, | 268 | dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, |
281 | desc_bytes(desc), ctx->dir); | 269 | desc_bytes(desc), ctx->dir); |
282 | #ifdef DEBUG | 270 | |
283 | print_hex_dump(KERN_ERR, | 271 | print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ", |
284 | "ahash digest shdesc@"__stringify(__LINE__)": ", | 272 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
285 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 273 | desc_bytes(desc), 1); |
286 | desc_bytes(desc), 1); | ||
287 | #endif | ||
288 | 274 | ||
289 | return 0; | 275 | return 0; |
290 | } | 276 | } |
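The pattern repeated through the rest of caamhash.c: every "#ifdef DEBUG" + print_hex_dump(KERN_ERR, ...) pair collapses into a single print_hex_dump_debug() call. Roughly how that macro behaves upstream (a sketch; the exact printk.h text may differ):

    #if defined(CONFIG_DYNAMIC_DEBUG)
    /* compiled in, each call site individually switchable at runtime */
    #define print_hex_dump_debug(prefix_str, prefix_type, rowsize,      \
                                 groupsize, buf, len, ascii)            \
            dynamic_hex_dump(prefix_str, prefix_type, rowsize,          \
                             groupsize, buf, len, ascii)
    #elif defined(DEBUG)
    #define print_hex_dump_debug(prefix_str, prefix_type, rowsize,      \
                                 groupsize, buf, len, ascii)            \
            print_hex_dump(KERN_DEBUG, prefix_str, prefix_type,         \
                           rowsize, groupsize, buf, len, ascii)
    #else
    /* neither configured: the dumps compile away entirely */
    #endif

On a CONFIG_DYNAMIC_DEBUG kernel the dumps can then be enabled per file, e.g. by writing 'file caamhash.c +p' to /sys/kernel/debug/dynamic_debug/control, instead of rebuilding with DEBUG and flooding the log at KERN_ERR.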
@@ -328,9 +314,9 @@ static int axcbc_set_sh_desc(struct crypto_ahash *ahash) | |||
328 | ctx->ctx_len, ctx->key_dma); | 314 | ctx->ctx_len, ctx->key_dma); |
329 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, | 315 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, |
330 | desc_bytes(desc), ctx->dir); | 316 | desc_bytes(desc), ctx->dir); |
331 | print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ", | 317 | print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__) |
332 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), | 318 | " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc, |
333 | 1); | 319 | desc_bytes(desc), 1); |
334 | 320 | ||
335 | /* shared descriptor for ahash_digest */ | 321 | /* shared descriptor for ahash_digest */ |
336 | desc = ctx->sh_desc_digest; | 322 | desc = ctx->sh_desc_digest; |
@@ -377,8 +363,8 @@ static int acmac_set_sh_desc(struct crypto_ahash *ahash) | |||
377 | ctx->ctx_len, 0); | 363 | ctx->ctx_len, 0); |
378 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, | 364 | dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, |
379 | desc_bytes(desc), ctx->dir); | 365 | desc_bytes(desc), ctx->dir); |
380 | print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)" : ", | 366 | print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__) |
381 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 367 | " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc, |
382 | desc_bytes(desc), 1); | 368 | desc_bytes(desc), 1); |
383 | 369 | ||
384 | /* shared descriptor for ahash_digest */ | 370 | /* shared descriptor for ahash_digest */ |
@@ -429,12 +415,11 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, | |||
429 | append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | | 415 | append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | |
430 | LDST_SRCDST_BYTE_CONTEXT); | 416 | LDST_SRCDST_BYTE_CONTEXT); |
431 | 417 | ||
432 | #ifdef DEBUG | 418 | print_hex_dump_debug("key_in@"__stringify(__LINE__)": ", |
433 | print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ", | 419 | DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); |
434 | DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); | 420 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
435 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 421 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
436 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 422 | 1); |
437 | #endif | ||
438 | 423 | ||
439 | result.err = 0; | 424 | result.err = 0; |
440 | init_completion(&result.completion); | 425 | init_completion(&result.completion); |
@@ -444,11 +429,10 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, | |||
444 | /* in progress */ | 429 | /* in progress */ |
445 | wait_for_completion(&result.completion); | 430 | wait_for_completion(&result.completion); |
446 | ret = result.err; | 431 | ret = result.err; |
447 | #ifdef DEBUG | 432 | |
448 | print_hex_dump(KERN_ERR, | 433 | print_hex_dump_debug("digested key@"__stringify(__LINE__)": ", |
449 | "digested key@"__stringify(__LINE__)": ", | 434 | DUMP_PREFIX_ADDRESS, 16, 4, key, |
450 | DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1); | 435 | digestsize, 1); |
451 | #endif | ||
452 | } | 436 | } |
453 | dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL); | 437 | dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL); |
454 | 438 | ||
@@ -463,15 +447,14 @@ static int ahash_setkey(struct crypto_ahash *ahash, | |||
463 | const u8 *key, unsigned int keylen) | 447 | const u8 *key, unsigned int keylen) |
464 | { | 448 | { |
465 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 449 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
450 | struct device *jrdev = ctx->jrdev; | ||
466 | int blocksize = crypto_tfm_alg_blocksize(&ahash->base); | 451 | int blocksize = crypto_tfm_alg_blocksize(&ahash->base); |
467 | int digestsize = crypto_ahash_digestsize(ahash); | 452 | int digestsize = crypto_ahash_digestsize(ahash); |
468 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); | 453 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); |
469 | int ret; | 454 | int ret; |
470 | u8 *hashed_key = NULL; | 455 | u8 *hashed_key = NULL; |
471 | 456 | ||
472 | #ifdef DEBUG | 457 | dev_dbg(jrdev, "keylen %d\n", keylen); |
473 | printk(KERN_ERR "keylen %d\n", keylen); | ||
474 | #endif | ||
475 | 458 | ||
476 | if (keylen > blocksize) { | 459 | if (keylen > blocksize) { |
477 | hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); | 460 | hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); |
@@ -600,11 +583,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err, | |||
600 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | 583 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); |
601 | int digestsize = crypto_ahash_digestsize(ahash); | 584 | int digestsize = crypto_ahash_digestsize(ahash); |
602 | struct caam_hash_state *state = ahash_request_ctx(req); | 585 | struct caam_hash_state *state = ahash_request_ctx(req); |
603 | #ifdef DEBUG | ||
604 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 586 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
605 | 587 | ||
606 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 588 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
607 | #endif | ||
608 | 589 | ||
609 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); | 590 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); |
610 | if (err) | 591 | if (err) |
@@ -614,11 +595,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err, | |||
614 | memcpy(req->result, state->caam_ctx, digestsize); | 595 | memcpy(req->result, state->caam_ctx, digestsize); |
615 | kfree(edesc); | 596 | kfree(edesc); |
616 | 597 | ||
617 | #ifdef DEBUG | 598 | print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", |
618 | print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", | 599 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, |
619 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | 600 | ctx->ctx_len, 1); |
620 | ctx->ctx_len, 1); | ||
621 | #endif | ||
622 | 601 | ||
623 | req->base.complete(&req->base, err); | 602 | req->base.complete(&req->base, err); |
624 | } | 603 | } |
@@ -631,11 +610,9 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, | |||
631 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | 610 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); |
632 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 611 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
633 | struct caam_hash_state *state = ahash_request_ctx(req); | 612 | struct caam_hash_state *state = ahash_request_ctx(req); |
634 | #ifdef DEBUG | ||
635 | int digestsize = crypto_ahash_digestsize(ahash); | 613 | int digestsize = crypto_ahash_digestsize(ahash); |
636 | 614 | ||
637 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 615 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
638 | #endif | ||
639 | 616 | ||
640 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); | 617 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); |
641 | if (err) | 618 | if (err) |
@@ -645,15 +622,13 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, | |||
645 | switch_buf(state); | 622 | switch_buf(state); |
646 | kfree(edesc); | 623 | kfree(edesc); |
647 | 624 | ||
648 | #ifdef DEBUG | 625 | print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", |
649 | print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", | 626 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, |
650 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | 627 | ctx->ctx_len, 1); |
651 | ctx->ctx_len, 1); | ||
652 | if (req->result) | 628 | if (req->result) |
653 | print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", | 629 | print_hex_dump_debug("result@"__stringify(__LINE__)": ", |
654 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, | 630 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, |
655 | digestsize, 1); | 631 | digestsize, 1); |
656 | #endif | ||
657 | 632 | ||
658 | req->base.complete(&req->base, err); | 633 | req->base.complete(&req->base, err); |
659 | } | 634 | } |
@@ -666,11 +641,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, | |||
666 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | 641 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); |
667 | int digestsize = crypto_ahash_digestsize(ahash); | 642 | int digestsize = crypto_ahash_digestsize(ahash); |
668 | struct caam_hash_state *state = ahash_request_ctx(req); | 643 | struct caam_hash_state *state = ahash_request_ctx(req); |
669 | #ifdef DEBUG | ||
670 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 644 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
671 | 645 | ||
672 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 646 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
673 | #endif | ||
674 | 647 | ||
675 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); | 648 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); |
676 | if (err) | 649 | if (err) |
@@ -680,11 +653,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, | |||
680 | memcpy(req->result, state->caam_ctx, digestsize); | 653 | memcpy(req->result, state->caam_ctx, digestsize); |
681 | kfree(edesc); | 654 | kfree(edesc); |
682 | 655 | ||
683 | #ifdef DEBUG | 656 | print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", |
684 | print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", | 657 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, |
685 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | 658 | ctx->ctx_len, 1); |
686 | ctx->ctx_len, 1); | ||
687 | #endif | ||
688 | 659 | ||
689 | req->base.complete(&req->base, err); | 660 | req->base.complete(&req->base, err); |
690 | } | 661 | } |
@@ -697,11 +668,9 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, | |||
697 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); | 668 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); |
698 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 669 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
699 | struct caam_hash_state *state = ahash_request_ctx(req); | 670 | struct caam_hash_state *state = ahash_request_ctx(req); |
700 | #ifdef DEBUG | ||
701 | int digestsize = crypto_ahash_digestsize(ahash); | 671 | int digestsize = crypto_ahash_digestsize(ahash); |
702 | 672 | ||
703 | dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | 673 | dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
704 | #endif | ||
705 | 674 | ||
706 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); | 675 | edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); |
707 | if (err) | 676 | if (err) |
@@ -711,15 +680,13 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, | |||
711 | switch_buf(state); | 680 | switch_buf(state); |
712 | kfree(edesc); | 681 | kfree(edesc); |
713 | 682 | ||
714 | #ifdef DEBUG | 683 | print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", |
715 | print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", | 684 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, |
716 | DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, | 685 | ctx->ctx_len, 1); |
717 | ctx->ctx_len, 1); | ||
718 | if (req->result) | 686 | if (req->result) |
719 | print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", | 687 | print_hex_dump_debug("result@"__stringify(__LINE__)": ", |
720 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, | 688 | DUMP_PREFIX_ADDRESS, 16, 4, req->result, |
721 | digestsize, 1); | 689 | digestsize, 1); |
722 | #endif | ||
723 | 690 | ||
724 | req->base.complete(&req->base, err); | 691 | req->base.complete(&req->base, err); |
725 | } | 692 | } |
@@ -759,9 +726,10 @@ static int ahash_edesc_add_src(struct caam_hash_ctx *ctx, | |||
759 | 726 | ||
760 | if (nents > 1 || first_sg) { | 727 | if (nents > 1 || first_sg) { |
761 | struct sec4_sg_entry *sg = edesc->sec4_sg; | 728 | struct sec4_sg_entry *sg = edesc->sec4_sg; |
762 | unsigned int sgsize = sizeof(*sg) * (first_sg + nents); | 729 | unsigned int sgsize = sizeof(*sg) * |
730 | pad_sg_nents(first_sg + nents); | ||
763 | 731 | ||
764 | sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0); | 732 | sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0); |
765 | 733 | ||
766 | src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE); | 734 | src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE); |
767 | if (dma_mapping_error(ctx->jrdev, src_dma)) { | 735 | if (dma_mapping_error(ctx->jrdev, src_dma)) { |
@@ -819,8 +787,10 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
819 | } | 787 | } |
820 | 788 | ||
821 | if (to_hash) { | 789 | if (to_hash) { |
822 | src_nents = sg_nents_for_len(req->src, | 790 | int pad_nents; |
823 | req->nbytes - (*next_buflen)); | 791 | int src_len = req->nbytes - *next_buflen; |
792 | |||
793 | src_nents = sg_nents_for_len(req->src, src_len); | ||
824 | if (src_nents < 0) { | 794 | if (src_nents < 0) { |
825 | dev_err(jrdev, "Invalid number of src SG.\n"); | 795 | dev_err(jrdev, "Invalid number of src SG.\n"); |
826 | return src_nents; | 796 | return src_nents; |
@@ -838,15 +808,14 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
838 | } | 808 | } |
839 | 809 | ||
840 | sec4_sg_src_index = 1 + (*buflen ? 1 : 0); | 810 | sec4_sg_src_index = 1 + (*buflen ? 1 : 0); |
841 | sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * | 811 | pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents); |
842 | sizeof(struct sec4_sg_entry); | 812 | sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry); |
843 | 813 | ||
844 | /* | 814 | /* |
845 | * allocate space for base edesc and hw desc commands, | 815 | * allocate space for base edesc and hw desc commands, |
846 | * link tables | 816 | * link tables |
847 | */ | 817 | */ |
848 | edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, | 818 | edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update, |
849 | ctx->sh_desc_update, | ||
850 | ctx->sh_desc_update_dma, flags); | 819 | ctx->sh_desc_update_dma, flags); |
851 | if (!edesc) { | 820 | if (!edesc) { |
852 | dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); | 821 | dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); |
@@ -866,7 +835,7 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
866 | goto unmap_ctx; | 835 | goto unmap_ctx; |
867 | 836 | ||
868 | if (mapped_nents) | 837 | if (mapped_nents) |
869 | sg_to_sec4_sg_last(req->src, mapped_nents, | 838 | sg_to_sec4_sg_last(req->src, src_len, |
870 | edesc->sec4_sg + sec4_sg_src_index, | 839 | edesc->sec4_sg + sec4_sg_src_index, |
871 | 0); | 840 | 0); |
872 | else | 841 | else |
@@ -893,11 +862,9 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
893 | 862 | ||
894 | append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); | 863 | append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); |
895 | 864 | ||
896 | #ifdef DEBUG | 865 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
897 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 866 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
898 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 867 | desc_bytes(desc), 1); |
899 | desc_bytes(desc), 1); | ||
900 | #endif | ||
901 | 868 | ||
902 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); | 869 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); |
903 | if (ret) | 870 | if (ret) |
@@ -910,13 +877,12 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
910 | *buflen = *next_buflen; | 877 | *buflen = *next_buflen; |
911 | *next_buflen = last_buflen; | 878 | *next_buflen = last_buflen; |
912 | } | 879 | } |
913 | #ifdef DEBUG | 880 | |
914 | print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", | 881 | print_hex_dump_debug("buf@"__stringify(__LINE__)": ", |
915 | DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); | 882 | DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); |
916 | print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", | 883 | print_hex_dump_debug("next buf@"__stringify(__LINE__)": ", |
917 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, | 884 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, |
918 | *next_buflen, 1); | 885 | *next_buflen, 1); |
919 | #endif | ||
920 | 886 | ||
921 | return ret; | 887 | return ret; |
922 | unmap_ctx: | 888 | unmap_ctx: |
@@ -935,18 +901,17 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
935 | GFP_KERNEL : GFP_ATOMIC; | 901 | GFP_KERNEL : GFP_ATOMIC; |
936 | int buflen = *current_buflen(state); | 902 | int buflen = *current_buflen(state); |
937 | u32 *desc; | 903 | u32 *desc; |
938 | int sec4_sg_bytes, sec4_sg_src_index; | 904 | int sec4_sg_bytes; |
939 | int digestsize = crypto_ahash_digestsize(ahash); | 905 | int digestsize = crypto_ahash_digestsize(ahash); |
940 | struct ahash_edesc *edesc; | 906 | struct ahash_edesc *edesc; |
941 | int ret; | 907 | int ret; |
942 | 908 | ||
943 | sec4_sg_src_index = 1 + (buflen ? 1 : 0); | 909 | sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * |
944 | sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); | 910 | sizeof(struct sec4_sg_entry); |
945 | 911 | ||
946 | /* allocate space for base edesc and hw desc commands, link tables */ | 912 | /* allocate space for base edesc and hw desc commands, link tables */ |
947 | edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index, | 913 | edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin, |
948 | ctx->sh_desc_fin, ctx->sh_desc_fin_dma, | 914 | ctx->sh_desc_fin_dma, flags); |
949 | flags); | ||
950 | if (!edesc) | 915 | if (!edesc) |
951 | return -ENOMEM; | 916 | return -ENOMEM; |
952 | 917 | ||
@@ -963,7 +928,7 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
963 | if (ret) | 928 | if (ret) |
964 | goto unmap_ctx; | 929 | goto unmap_ctx; |
965 | 930 | ||
966 | sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1); | 931 | sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0)); |
967 | 932 | ||
968 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, | 933 | edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, |
969 | sec4_sg_bytes, DMA_TO_DEVICE); | 934 | sec4_sg_bytes, DMA_TO_DEVICE); |
@@ -977,10 +942,9 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
977 | LDST_SGF); | 942 | LDST_SGF); |
978 | append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); | 943 | append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); |
979 | 944 | ||
980 | #ifdef DEBUG | 945 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
981 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 946 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
982 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 947 | 1); |
983 | #endif | ||
984 | 948 | ||
985 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); | 949 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); |
986 | if (ret) | 950 | if (ret) |
@@ -1058,10 +1022,9 @@ static int ahash_finup_ctx(struct ahash_request *req) | |||
1058 | 1022 | ||
1059 | append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); | 1023 | append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); |
1060 | 1024 | ||
1061 | #ifdef DEBUG | 1025 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
1062 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 1026 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
1063 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 1027 | 1); |
1064 | #endif | ||
1065 | 1028 | ||
1066 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); | 1029 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); |
1067 | if (ret) | 1030 | if (ret) |
@@ -1135,10 +1098,9 @@ static int ahash_digest(struct ahash_request *req) | |||
1135 | return -ENOMEM; | 1098 | return -ENOMEM; |
1136 | } | 1099 | } |
1137 | 1100 | ||
1138 | #ifdef DEBUG | 1101 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
1139 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 1102 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
1140 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 1103 | 1); |
1141 | #endif | ||
1142 | 1104 | ||
1143 | ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); | 1105 | ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); |
1144 | if (!ret) { | 1106 | if (!ret) { |
@@ -1190,10 +1152,9 @@ static int ahash_final_no_ctx(struct ahash_request *req) | |||
1190 | if (ret) | 1152 | if (ret) |
1191 | goto unmap; | 1153 | goto unmap; |
1192 | 1154 | ||
1193 | #ifdef DEBUG | 1155 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
1194 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 1156 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
1195 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 1157 | 1); |
1196 | #endif | ||
1197 | 1158 | ||
1198 | ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); | 1159 | ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); |
1199 | if (!ret) { | 1160 | if (!ret) { |
@@ -1246,8 +1207,10 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
1246 | } | 1207 | } |
1247 | 1208 | ||
1248 | if (to_hash) { | 1209 | if (to_hash) { |
1249 | src_nents = sg_nents_for_len(req->src, | 1210 | int pad_nents; |
1250 | req->nbytes - *next_buflen); | 1211 | int src_len = req->nbytes - *next_buflen; |
1212 | |||
1213 | src_nents = sg_nents_for_len(req->src, src_len); | ||
1251 | if (src_nents < 0) { | 1214 | if (src_nents < 0) { |
1252 | dev_err(jrdev, "Invalid number of src SG.\n"); | 1215 | dev_err(jrdev, "Invalid number of src SG.\n"); |
1253 | return src_nents; | 1216 | return src_nents; |
@@ -1264,14 +1227,14 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
1264 | mapped_nents = 0; | 1227 | mapped_nents = 0; |
1265 | } | 1228 | } |
1266 | 1229 | ||
1267 | sec4_sg_bytes = (1 + mapped_nents) * | 1230 | pad_nents = pad_sg_nents(1 + mapped_nents); |
1268 | sizeof(struct sec4_sg_entry); | 1231 | sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry); |
1269 | 1232 | ||
1270 | /* | 1233 | /* |
1271 | * allocate space for base edesc and hw desc commands, | 1234 | * allocate space for base edesc and hw desc commands, |
1272 | * link tables | 1235 | * link tables |
1273 | */ | 1236 | */ |
1274 | edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents, | 1237 | edesc = ahash_edesc_alloc(ctx, pad_nents, |
1275 | ctx->sh_desc_update_first, | 1238 | ctx->sh_desc_update_first, |
1276 | ctx->sh_desc_update_first_dma, | 1239 | ctx->sh_desc_update_first_dma, |
1277 | flags); | 1240 | flags); |
@@ -1287,8 +1250,7 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
1287 | if (ret) | 1250 | if (ret) |
1288 | goto unmap_ctx; | 1251 | goto unmap_ctx; |
1289 | 1252 | ||
1290 | sg_to_sec4_sg_last(req->src, mapped_nents, | 1253 | sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0); |
1291 | edesc->sec4_sg + 1, 0); | ||
1292 | 1254 | ||
1293 | if (*next_buflen) { | 1255 | if (*next_buflen) { |
1294 | scatterwalk_map_and_copy(next_buf, req->src, | 1256 | scatterwalk_map_and_copy(next_buf, req->src, |
@@ -1313,11 +1275,9 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
1313 | if (ret) | 1275 | if (ret) |
1314 | goto unmap_ctx; | 1276 | goto unmap_ctx; |
1315 | 1277 | ||
1316 | #ifdef DEBUG | 1278 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
1317 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 1279 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
1318 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 1280 | desc_bytes(desc), 1); |
1319 | desc_bytes(desc), 1); | ||
1320 | #endif | ||
1321 | 1281 | ||
1322 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); | 1282 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); |
1323 | if (ret) | 1283 | if (ret) |
@@ -1333,13 +1293,12 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
1333 | *buflen = *next_buflen; | 1293 | *buflen = *next_buflen; |
1334 | *next_buflen = 0; | 1294 | *next_buflen = 0; |
1335 | } | 1295 | } |
1336 | #ifdef DEBUG | 1296 | |
1337 | print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", | 1297 | print_hex_dump_debug("buf@"__stringify(__LINE__)": ", |
1338 | DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); | 1298 | DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); |
1339 | print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", | 1299 | print_hex_dump_debug("next buf@"__stringify(__LINE__)": ", |
1340 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, | 1300 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, |
1341 | *next_buflen, 1); | 1301 | 1); |
1342 | #endif | ||
1343 | 1302 | ||
1344 | return ret; | 1303 | return ret; |
1345 | unmap_ctx: | 1304 | unmap_ctx: |
@@ -1414,10 +1373,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req) | |||
1414 | if (ret) | 1373 | if (ret) |
1415 | goto unmap; | 1374 | goto unmap; |
1416 | 1375 | ||
1417 | #ifdef DEBUG | 1376 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
1418 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 1377 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
1419 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 1378 | 1); |
1420 | #endif | ||
1421 | 1379 | ||
1422 | ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); | 1380 | ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); |
1423 | if (!ret) { | 1381 | if (!ret) { |
@@ -1517,11 +1475,9 @@ static int ahash_update_first(struct ahash_request *req) | |||
1517 | if (ret) | 1475 | if (ret) |
1518 | goto unmap_ctx; | 1476 | goto unmap_ctx; |
1519 | 1477 | ||
1520 | #ifdef DEBUG | 1478 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
1521 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 1479 | DUMP_PREFIX_ADDRESS, 16, 4, desc, |
1522 | DUMP_PREFIX_ADDRESS, 16, 4, desc, | 1480 | desc_bytes(desc), 1); |
1523 | desc_bytes(desc), 1); | ||
1524 | #endif | ||
1525 | 1481 | ||
1526 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); | 1482 | ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); |
1527 | if (ret) | 1483 | if (ret) |
@@ -1539,11 +1495,10 @@ static int ahash_update_first(struct ahash_request *req) | |||
1539 | req->nbytes, 0); | 1495 | req->nbytes, 0); |
1540 | switch_buf(state); | 1496 | switch_buf(state); |
1541 | } | 1497 | } |
1542 | #ifdef DEBUG | 1498 | |
1543 | print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", | 1499 | print_hex_dump_debug("next buf@"__stringify(__LINE__)": ", |
1544 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, | 1500 | DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, |
1545 | *next_buflen, 1); | 1501 | 1); |
1546 | #endif | ||
1547 | 1502 | ||
1548 | return ret; | 1503 | return ret; |
1549 | unmap_ctx: | 1504 | unmap_ctx: |
@@ -1930,7 +1885,7 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm) | |||
1930 | caam_jr_free(ctx->jrdev); | 1885 | caam_jr_free(ctx->jrdev); |
1931 | } | 1886 | } |
1932 | 1887 | ||
1933 | static void __exit caam_algapi_hash_exit(void) | 1888 | void caam_algapi_hash_exit(void) |
1934 | { | 1889 | { |
1935 | struct caam_hash_alg *t_alg, *n; | 1890 | struct caam_hash_alg *t_alg, *n; |
1936 | 1891 | ||
@@ -1988,40 +1943,13 @@ caam_hash_alloc(struct caam_hash_template *template, | |||
1988 | return t_alg; | 1943 | return t_alg; |
1989 | } | 1944 | } |
1990 | 1945 | ||
1991 | static int __init caam_algapi_hash_init(void) | 1946 | int caam_algapi_hash_init(struct device *ctrldev) |
1992 | { | 1947 | { |
1993 | struct device_node *dev_node; | ||
1994 | struct platform_device *pdev; | ||
1995 | int i = 0, err = 0; | 1948 | int i = 0, err = 0; |
1996 | struct caam_drv_private *priv; | 1949 | struct caam_drv_private *priv = dev_get_drvdata(ctrldev); |
1997 | unsigned int md_limit = SHA512_DIGEST_SIZE; | 1950 | unsigned int md_limit = SHA512_DIGEST_SIZE; |
1998 | u32 md_inst, md_vid; | 1951 | u32 md_inst, md_vid; |
1999 | 1952 | ||
2000 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
2001 | if (!dev_node) { | ||
2002 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
2003 | if (!dev_node) | ||
2004 | return -ENODEV; | ||
2005 | } | ||
2006 | |||
2007 | pdev = of_find_device_by_node(dev_node); | ||
2008 | if (!pdev) { | ||
2009 | of_node_put(dev_node); | ||
2010 | return -ENODEV; | ||
2011 | } | ||
2012 | |||
2013 | priv = dev_get_drvdata(&pdev->dev); | ||
2014 | of_node_put(dev_node); | ||
2015 | |||
2016 | /* | ||
2017 | * If priv is NULL, it's probably because the caam driver wasn't | ||
2018 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. | ||
2019 | */ | ||
2020 | if (!priv) { | ||
2021 | err = -ENODEV; | ||
2022 | goto out_put_dev; | ||
2023 | } | ||
2024 | |||
2025 | /* | 1953 | /* |
2026 | * Register crypto algorithms the device supports. First, identify | 1954 | * Register crypto algorithms the device supports. First, identify |
2027 | * presence and attributes of MD block. | 1955 | * presence and attributes of MD block. |
@@ -2042,10 +1970,8 @@ static int __init caam_algapi_hash_init(void) | |||
2042 | * Skip registration of any hashing algorithms if MD block | 1970 | * Skip registration of any hashing algorithms if MD block |
2043 | * is not present. | 1971 | * is not present. |
2044 | */ | 1972 | */ |
2045 | if (!md_inst) { | 1973 | if (!md_inst) |
2046 | err = -ENODEV; | 1974 | return -ENODEV; |
2047 | goto out_put_dev; | ||
2048 | } | ||
2049 | 1975 | ||
2050 | /* Limit digest size based on LP256 */ | 1976 | /* Limit digest size based on LP256 */ |
2051 | if (md_vid == CHA_VER_VID_MD_LP256) | 1977 | if (md_vid == CHA_VER_VID_MD_LP256) |
@@ -2102,14 +2028,5 @@ static int __init caam_algapi_hash_init(void) | |||
2102 | list_add_tail(&t_alg->entry, &hash_list); | 2028 | list_add_tail(&t_alg->entry, &hash_list); |
2103 | } | 2029 | } |
2104 | 2030 | ||
2105 | out_put_dev: | ||
2106 | put_device(&pdev->dev); | ||
2107 | return err; | 2031 | return err; |
2108 | } | 2032 | } |
2109 | |||
2110 | module_init(caam_algapi_hash_init); | ||
2111 | module_exit(caam_algapi_hash_exit); | ||
2112 | |||
2113 | MODULE_LICENSE("GPL"); | ||
2114 | MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API"); | ||
2115 | MODULE_AUTHOR("Freescale Semiconductor - NMG"); | ||
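With module_init()/module_exit() and the "fsl,sec-v4.0" device-tree lookup gone, caamhash.c becomes a library: caam_algapi_hash_init()/caam_algapi_hash_exit() are plain entry points that the controller (or job-ring) driver is expected to call once the CAAM is known to be functional. A hypothetical caller-side sketch (caam_register_algs() and the -ENODEV policy are assumptions, not part of this diff):

    int caam_register_algs(struct device *ctrldev)
    {
            int err;

            err = caam_algapi_hash_init(ctrldev);
            if (err == -ENODEV)
                    return 0;       /* no MD block: skip hashes (assumed policy) */
            return err;
    }

This also removes the probe-ordering dance in which the hash module had to locate and validate the controller's platform device on its own.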
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index fe24485274e1..80574106af29 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * caam - Freescale FSL CAAM support for Public Key Cryptography | 3 | * caam - Freescale FSL CAAM support for Public Key Cryptography |
4 | * | 4 | * |
5 | * Copyright 2016 Freescale Semiconductor, Inc. | 5 | * Copyright 2016 Freescale Semiconductor, Inc. |
6 | * Copyright 2018 NXP | 6 | * Copyright 2018-2019 NXP |
7 | * | 7 | * |
8 | * There is no Shared Descriptor for PKC so that the Job Descriptor must carry | 8 | * There is no Shared Descriptor for PKC so that the Job Descriptor must carry |
9 | * all the desired key parameters, input and output pointers. | 9 | * all the desired key parameters, input and output pointers. |
@@ -24,12 +24,18 @@ | |||
24 | sizeof(struct rsa_priv_f2_pdb)) | 24 | sizeof(struct rsa_priv_f2_pdb)) |
25 | #define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \ | 25 | #define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \ |
26 | sizeof(struct rsa_priv_f3_pdb)) | 26 | sizeof(struct rsa_priv_f3_pdb)) |
27 | #define CAAM_RSA_MAX_INPUT_SIZE 512 /* for a 4096-bit modulus */ | ||
28 | |||
29 | /* buffer filled with zeros, used for padding */ | ||
30 | static u8 *zero_buffer; | ||
27 | 31 | ||
28 | static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc, | 32 | static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc, |
29 | struct akcipher_request *req) | 33 | struct akcipher_request *req) |
30 | { | 34 | { |
35 | struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); | ||
36 | |||
31 | dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE); | 37 | dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE); |
32 | dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); | 38 | dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE); |
33 | 39 | ||
34 | if (edesc->sec4_sg_bytes) | 40 | if (edesc->sec4_sg_bytes) |
35 | dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes, | 41 | dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes, |
@@ -168,6 +174,13 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err, | |||
168 | akcipher_request_complete(req, err); | 174 | akcipher_request_complete(req, err); |
169 | } | 175 | } |
170 | 176 | ||
177 | /** | ||
178 |  * caam_rsa_count_leading_zeros() - count the leading zero bytes to be stripped from a given scatterlist ||
179 | * | ||
180 | * @sgl : scatterlist to count zeros from | ||
181 |  * @nbytes: maximum number of zero bytes to strip ||
182 | * @flags : operation flags | ||
183 | */ | ||
171 | static int caam_rsa_count_leading_zeros(struct scatterlist *sgl, | 184 | static int caam_rsa_count_leading_zeros(struct scatterlist *sgl, |
172 | unsigned int nbytes, | 185 | unsigned int nbytes, |
173 | unsigned int flags) | 186 | unsigned int flags) |
@@ -187,7 +200,8 @@ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl, | |||
187 | lzeros = 0; | 200 | lzeros = 0; |
188 | len = 0; | 201 | len = 0; |
189 | while (nbytes > 0) { | 202 | while (nbytes > 0) { |
190 | while (len && !*buff) { | 203 | /* do not strip more than the requested number of bytes */ |
204 | while (len && !*buff && lzeros < nbytes) { | ||
191 | lzeros++; | 205 | lzeros++; |
192 | len--; | 206 | len--; |
193 | buff++; | 207 | buff++; |
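The added "lzeros < nbytes" bound matters because the caller (next hunk) now passes only the excess length, not the whole source. A worked example with illustrative numbers:

    /* 2048-bit key  =>  key->n_sz = 256 bytes.
     * req->src_len = 260  =>  nbytes = 260 - 256 = 4.
     * At most 4 leading zero bytes may be stripped, even if the 5th
     * byte is also zero: zeros inside the n_sz-byte payload are
     * significant and must reach the engine unchanged. */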
@@ -218,6 +232,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, | |||
218 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | 232 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); |
219 | struct device *dev = ctx->dev; | 233 | struct device *dev = ctx->dev; |
220 | struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); | 234 | struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); |
235 | struct caam_rsa_key *key = &ctx->key; | ||
221 | struct rsa_edesc *edesc; | 236 | struct rsa_edesc *edesc; |
222 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | 237 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
223 | GFP_KERNEL : GFP_ATOMIC; | 238 | GFP_KERNEL : GFP_ATOMIC; |
@@ -225,22 +240,45 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, | |||
225 | int sgc; | 240 | int sgc; |
226 | int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; | 241 | int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; |
227 | int src_nents, dst_nents; | 242 | int src_nents, dst_nents; |
243 | unsigned int diff_size = 0; | ||
228 | int lzeros; | 244 | int lzeros; |
229 | 245 | ||
230 | lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags); | 246 | if (req->src_len > key->n_sz) { |
231 | if (lzeros < 0) | 247 | /* |
232 | return ERR_PTR(lzeros); | 248 | * strip leading zeros and |
233 | 249 | * return the number of zeros to skip | |
234 | req->src_len -= lzeros; | 250 | */ |
235 | req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros); | 251 | lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len - |
252 | key->n_sz, sg_flags); | ||
253 | if (lzeros < 0) | ||
254 | return ERR_PTR(lzeros); | ||
255 | |||
256 | req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src, | ||
257 | lzeros); | ||
258 | req_ctx->fixup_src_len = req->src_len - lzeros; | ||
259 | } else { | ||
260 | /* | ||
261 | * input src is shorter than the key modulus n, ||
262 | * so there will be zero padding | ||
263 | */ | ||
264 | diff_size = key->n_sz - req->src_len; | ||
265 | req_ctx->fixup_src = req->src; | ||
266 | req_ctx->fixup_src_len = req->src_len; | ||
267 | } | ||
236 | 268 | ||
237 | src_nents = sg_nents_for_len(req->src, req->src_len); | 269 | src_nents = sg_nents_for_len(req_ctx->fixup_src, |
270 | req_ctx->fixup_src_len); | ||
238 | dst_nents = sg_nents_for_len(req->dst, req->dst_len); | 271 | dst_nents = sg_nents_for_len(req->dst, req->dst_len); |
239 | 272 | ||
240 | if (src_nents > 1) | 273 | if (!diff_size && src_nents == 1) |
241 | sec4_sg_len = src_nents; | 274 | sec4_sg_len = 0; /* no need for an input hw s/g table */ |
275 | else | ||
276 | sec4_sg_len = src_nents + !!diff_size; | ||
277 | sec4_sg_index = sec4_sg_len; | ||
242 | if (dst_nents > 1) | 278 | if (dst_nents > 1) |
243 | sec4_sg_len += dst_nents; | 279 | sec4_sg_len += pad_sg_nents(dst_nents); |
280 | else | ||
281 | sec4_sg_len = pad_sg_nents(sec4_sg_len); | ||
244 | 282 | ||
245 | sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); | 283 | sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); |
246 | 284 | ||
@@ -250,7 +288,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, | |||
250 | if (!edesc) | 288 | if (!edesc) |
251 | return ERR_PTR(-ENOMEM); | 289 | return ERR_PTR(-ENOMEM); |
252 | 290 | ||
253 | sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE); | 291 | sgc = dma_map_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE); |
254 | if (unlikely(!sgc)) { | 292 | if (unlikely(!sgc)) { |
255 | dev_err(dev, "unable to map source\n"); | 293 | dev_err(dev, "unable to map source\n"); |
256 | goto src_fail; | 294 | goto src_fail; |
@@ -263,14 +301,16 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, | |||
263 | } | 301 | } |
264 | 302 | ||
265 | edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen; | 303 | edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen; |
304 | if (diff_size) | ||
305 | dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size, | ||
306 | 0); | ||
307 | |||
308 | if (sec4_sg_index) | ||
309 | sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len, | ||
310 | edesc->sec4_sg + !!diff_size, 0); | ||
266 | 311 | ||
267 | sec4_sg_index = 0; | ||
268 | if (src_nents > 1) { | ||
269 | sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); | ||
270 | sec4_sg_index += src_nents; | ||
271 | } | ||
272 | if (dst_nents > 1) | 312 | if (dst_nents > 1) |
273 | sg_to_sec4_sg_last(req->dst, dst_nents, | 313 | sg_to_sec4_sg_last(req->dst, req->dst_len, |
274 | edesc->sec4_sg + sec4_sg_index, 0); | 314 | edesc->sec4_sg + sec4_sg_index, 0); |
275 | 315 | ||
276 | /* Save nents for later use in Job Descriptor */ | 316 | /* Save nents for later use in Job Descriptor */ |
@@ -289,12 +329,16 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, | |||
289 | 329 | ||
290 | edesc->sec4_sg_bytes = sec4_sg_bytes; | 330 | edesc->sec4_sg_bytes = sec4_sg_bytes; |
291 | 331 | ||
332 | print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ", | ||
333 | DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, | ||
334 | edesc->sec4_sg_bytes, 1); | ||
335 | |||
292 | return edesc; | 336 | return edesc; |
293 | 337 | ||
294 | sec4_sg_fail: | 338 | sec4_sg_fail: |
295 | dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE); | 339 | dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE); |
296 | dst_fail: | 340 | dst_fail: |
297 | dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); | 341 | dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE); |
298 | src_fail: | 342 | src_fail: |
299 | kfree(edesc); | 343 | kfree(edesc); |
300 | return ERR_PTR(-ENOMEM); | 344 | return ERR_PTR(-ENOMEM); |
@@ -304,6 +348,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req, | |||
304 | struct rsa_edesc *edesc) | 348 | struct rsa_edesc *edesc) |
305 | { | 349 | { |
306 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | 350 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); |
351 | struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); | ||
307 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | 352 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); |
308 | struct caam_rsa_key *key = &ctx->key; | 353 | struct caam_rsa_key *key = &ctx->key; |
309 | struct device *dev = ctx->dev; | 354 | struct device *dev = ctx->dev; |
@@ -328,7 +373,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req, | |||
328 | pdb->f_dma = edesc->sec4_sg_dma; | 373 | pdb->f_dma = edesc->sec4_sg_dma; |
329 | sec4_sg_index += edesc->src_nents; | 374 | sec4_sg_index += edesc->src_nents; |
330 | } else { | 375 | } else { |
331 | pdb->f_dma = sg_dma_address(req->src); | 376 | pdb->f_dma = sg_dma_address(req_ctx->fixup_src); |
332 | } | 377 | } |
333 | 378 | ||
334 | if (edesc->dst_nents > 1) { | 379 | if (edesc->dst_nents > 1) { |
@@ -340,7 +385,7 @@ static int set_rsa_pub_pdb(struct akcipher_request *req, | |||
340 | } | 385 | } |
341 | 386 | ||
342 | pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz; | 387 | pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz; |
343 | pdb->f_len = req->src_len; | 388 | pdb->f_len = req_ctx->fixup_src_len; |
344 | 389 | ||
345 | return 0; | 390 | return 0; |
346 | } | 391 | } |
@@ -373,7 +418,9 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req, | |||
373 | pdb->g_dma = edesc->sec4_sg_dma; | 418 | pdb->g_dma = edesc->sec4_sg_dma; |
374 | sec4_sg_index += edesc->src_nents; | 419 | sec4_sg_index += edesc->src_nents; |
375 | } else { | 420 | } else { |
376 | pdb->g_dma = sg_dma_address(req->src); | 421 | struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); |
422 | |||
423 | pdb->g_dma = sg_dma_address(req_ctx->fixup_src); | ||
377 | } | 424 | } |
378 | 425 | ||
379 | if (edesc->dst_nents > 1) { | 426 | if (edesc->dst_nents > 1) { |
@@ -436,7 +483,9 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req, | |||
436 | pdb->g_dma = edesc->sec4_sg_dma; | 483 | pdb->g_dma = edesc->sec4_sg_dma; |
437 | sec4_sg_index += edesc->src_nents; | 484 | sec4_sg_index += edesc->src_nents; |
438 | } else { | 485 | } else { |
439 | pdb->g_dma = sg_dma_address(req->src); | 486 | struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); |
487 | |||
488 | pdb->g_dma = sg_dma_address(req_ctx->fixup_src); | ||
440 | } | 489 | } |
441 | 490 | ||
442 | if (edesc->dst_nents > 1) { | 491 | if (edesc->dst_nents > 1) { |
@@ -523,7 +572,9 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req, | |||
523 | pdb->g_dma = edesc->sec4_sg_dma; | 572 | pdb->g_dma = edesc->sec4_sg_dma; |
524 | sec4_sg_index += edesc->src_nents; | 573 | sec4_sg_index += edesc->src_nents; |
525 | } else { | 574 | } else { |
526 | pdb->g_dma = sg_dma_address(req->src); | 575 | struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); |
576 | |||
577 | pdb->g_dma = sg_dma_address(req_ctx->fixup_src); | ||
527 | } | 578 | } |
528 | 579 | ||
529 | if (edesc->dst_nents > 1) { | 580 | if (edesc->dst_nents > 1) { |
@@ -978,6 +1029,15 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm) | |||
978 | return PTR_ERR(ctx->dev); | 1029 | return PTR_ERR(ctx->dev); |
979 | } | 1030 | } |
980 | 1031 | ||
1032 | ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer, | ||
1033 | CAAM_RSA_MAX_INPUT_SIZE - 1, | ||
1034 | DMA_TO_DEVICE); | ||
1035 | if (dma_mapping_error(ctx->dev, ctx->padding_dma)) { | ||
1036 | dev_err(ctx->dev, "unable to map padding\n"); | ||
1037 | caam_jr_free(ctx->dev); | ||
1038 | return -ENOMEM; | ||
1039 | } | ||
1040 | |||
981 | return 0; | 1041 | return 0; |
982 | } | 1042 | } |
983 | 1043 | ||
@@ -987,6 +1047,8 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm) | |||
987 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | 1047 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); |
988 | struct caam_rsa_key *key = &ctx->key; | 1048 | struct caam_rsa_key *key = &ctx->key; |
989 | 1049 | ||
1050 | dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE - | ||
1051 | 1, DMA_TO_DEVICE); | ||
990 | caam_rsa_free_key(key); | 1052 | caam_rsa_free_key(key); |
991 | caam_jr_free(ctx->dev); | 1053 | caam_jr_free(ctx->dev); |
992 | } | 1054 | } |
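The padding buffer mapped in caam_rsa_init_tfm() above lives for the whole lifetime of the transform, so the map and unmap must pair exactly, with matching length and direction. A reduced sketch of that pairing under the same assumptions (dev, buf and len stand in for ctx->dev, zero_buffer and CAAM_RSA_MAX_INPUT_SIZE - 1; helper names are illustrative):

    #include <linux/dma-mapping.h>

    static int map_padding(struct device *dev, void *buf, size_t len,
                           dma_addr_t *handle)
    {
        *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *handle))
            return -ENOMEM; /* never use an unchecked handle */
        return 0;
    }

    static void unmap_padding(struct device *dev, dma_addr_t handle,
                              size_t len)
    {
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
    }

Note the init path also unwinds the job-ring allocation (caam_jr_free()) when the mapping fails, keeping the error path symmetric with exit.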
@@ -1010,41 +1072,12 @@ static struct akcipher_alg caam_rsa = { | |||
1010 | }; | 1072 | }; |
1011 | 1073 | ||
1012 | /* Public Key Cryptography module initialization handler */ | 1074 | /* Public Key Cryptography module initialization handler */ |
1013 | static int __init caam_pkc_init(void) | 1075 | int caam_pkc_init(struct device *ctrldev) |
1014 | { | 1076 | { |
1015 | struct device_node *dev_node; | 1077 | struct caam_drv_private *priv = dev_get_drvdata(ctrldev); |
1016 | struct platform_device *pdev; | ||
1017 | struct device *ctrldev; | ||
1018 | struct caam_drv_private *priv; | ||
1019 | u32 pk_inst; | 1078 | u32 pk_inst; |
1020 | int err; | 1079 | int err; |
1021 | 1080 | ||
1022 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
1023 | if (!dev_node) { | ||
1024 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
1025 | if (!dev_node) | ||
1026 | return -ENODEV; | ||
1027 | } | ||
1028 | |||
1029 | pdev = of_find_device_by_node(dev_node); | ||
1030 | if (!pdev) { | ||
1031 | of_node_put(dev_node); | ||
1032 | return -ENODEV; | ||
1033 | } | ||
1034 | |||
1035 | ctrldev = &pdev->dev; | ||
1036 | priv = dev_get_drvdata(ctrldev); | ||
1037 | of_node_put(dev_node); | ||
1038 | |||
1039 | /* | ||
1040 | * If priv is NULL, it's probably because the caam driver wasn't | ||
1041 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. | ||
1042 | */ | ||
1043 | if (!priv) { | ||
1044 | err = -ENODEV; | ||
1045 | goto out_put_dev; | ||
1046 | } | ||
1047 | |||
1048 | /* Determine public key hardware accelerator presence. */ | 1081 | /* Determine public key hardware accelerator presence. */ |
1049 | if (priv->era < 10) | 1082 | if (priv->era < 10) |
1050 | pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & | 1083 | pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & |
@@ -1053,31 +1086,29 @@ static int __init caam_pkc_init(void) | |||
1053 | pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK; | 1086 | pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK; |
1054 | 1087 | ||
1055 | /* Do not register algorithms if PKHA is not present. */ | 1088 | /* Do not register algorithms if PKHA is not present. */ |
1056 | if (!pk_inst) { | 1089 | if (!pk_inst) |
1057 | err = -ENODEV; | 1090 | return 0; |
1058 | goto out_put_dev; | 1091 | |
1059 | } | 1092 | /* allocate zero buffer, used for padding input */ |
1093 | zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA | | ||
1094 | GFP_KERNEL); | ||
1095 | if (!zero_buffer) | ||
1096 | return -ENOMEM; | ||
1060 | 1097 | ||
1061 | err = crypto_register_akcipher(&caam_rsa); | 1098 | err = crypto_register_akcipher(&caam_rsa); |
1062 | if (err) | 1099 | if (err) { |
1100 | kfree(zero_buffer); | ||
1063 | dev_warn(ctrldev, "%s alg registration failed\n", | 1101 | dev_warn(ctrldev, "%s alg registration failed\n", |
1064 | caam_rsa.base.cra_driver_name); | 1102 | caam_rsa.base.cra_driver_name); |
1065 | else | 1103 | } else { |
1066 | dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n"); | 1104 | dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n"); |
1105 | } | ||
1067 | 1106 | ||
1068 | out_put_dev: | ||
1069 | put_device(ctrldev); | ||
1070 | return err; | 1107 | return err; |
1071 | } | 1108 | } |
1072 | 1109 | ||
1073 | static void __exit caam_pkc_exit(void) | 1110 | void caam_pkc_exit(void) |
1074 | { | 1111 | { |
1112 | kfree(zero_buffer); | ||
1075 | crypto_unregister_akcipher(&caam_rsa); | 1113 | crypto_unregister_akcipher(&caam_rsa); |
1076 | } | 1114 | } |
1077 | |||
1078 | module_init(caam_pkc_init); | ||
1079 | module_exit(caam_pkc_exit); | ||
1080 | |||
1081 | MODULE_LICENSE("Dual BSD/GPL"); | ||
1082 | MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API"); | ||
1083 | MODULE_AUTHOR("Freescale Semiconductor"); | ||
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h index 82645bcf8b27..2c488c9a3812 100644 --- a/drivers/crypto/caam/caampkc.h +++ b/drivers/crypto/caam/caampkc.h | |||
@@ -89,18 +89,25 @@ struct caam_rsa_key { | |||
89 | * caam_rsa_ctx - per session context. | 89 | * caam_rsa_ctx - per session context. |
90 | * @key : RSA key in DMA zone | 90 | * @key : RSA key in DMA zone |
91 | * @dev : device structure | 91 | * @dev : device structure |
92 | * @padding_dma : dma address of padding, for adding it to the input | ||
92 | */ | 93 | */ |
93 | struct caam_rsa_ctx { | 94 | struct caam_rsa_ctx { |
94 | struct caam_rsa_key key; | 95 | struct caam_rsa_key key; |
95 | struct device *dev; | 96 | struct device *dev; |
97 | dma_addr_t padding_dma; | ||
98 | |||
96 | }; | 99 | }; |
97 | 100 | ||
98 | /** | 101 | /** |
99 | * caam_rsa_req_ctx - per request context. | 102 | * caam_rsa_req_ctx - per request context. |
100 | * @src: input scatterlist (stripped of leading zeros) | 103 | * @src : input scatterlist (stripped of leading zeros) |
104 | * @fixup_src : input scatterlist (that might be stripped of leading zeros) | ||
105 | * @fixup_src_len : length of the fixup_src input scatterlist | ||
101 | */ | 106 | */ |
102 | struct caam_rsa_req_ctx { | 107 | struct caam_rsa_req_ctx { |
103 | struct scatterlist src[2]; | 108 | struct scatterlist src[2]; |
109 | struct scatterlist *fixup_src; | ||
110 | unsigned int fixup_src_len; | ||
104 | }; | 111 | }; |
105 | 112 | ||
106 | /** | 113 | /** |
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index 95eb5402c59f..561bcb535184 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * caam - Freescale FSL CAAM support for hw_random | 3 | * caam - Freescale FSL CAAM support for hw_random |
4 | * | 4 | * |
5 | * Copyright 2011 Freescale Semiconductor, Inc. | 5 | * Copyright 2011 Freescale Semiconductor, Inc. |
6 | * Copyright 2018 NXP | 6 | * Copyright 2018-2019 NXP |
7 | * | 7 | * |
8 | * Based on caamalg.c crypto API driver. | 8 | * Based on caamalg.c crypto API driver. |
9 | * | 9 | * |
@@ -113,10 +113,8 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context) | |||
113 | /* Buffer refilled, invalidate cache */ | 113 | /* Buffer refilled, invalidate cache */ |
114 | dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE); | 114 | dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE); |
115 | 115 | ||
116 | #ifdef DEBUG | 116 | print_hex_dump_debug("rng refreshed buf@: ", DUMP_PREFIX_ADDRESS, 16, 4, |
117 | print_hex_dump(KERN_ERR, "rng refreshed buf@: ", | 117 | bd->buf, RN_BUF_SIZE, 1); |
118 | DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1); | ||
119 | #endif | ||
120 | } | 118 | } |
121 | 119 | ||
122 | static inline int submit_job(struct caam_rng_ctx *ctx, int to_current) | 120 | static inline int submit_job(struct caam_rng_ctx *ctx, int to_current) |
@@ -209,10 +207,10 @@ static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx) | |||
209 | dev_err(jrdev, "unable to map shared descriptor\n"); | 207 | dev_err(jrdev, "unable to map shared descriptor\n"); |
210 | return -ENOMEM; | 208 | return -ENOMEM; |
211 | } | 209 | } |
212 | #ifdef DEBUG | 210 | |
213 | print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4, | 211 | print_hex_dump_debug("rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4, |
214 | desc, desc_bytes(desc), 1); | 212 | desc, desc_bytes(desc), 1); |
215 | #endif | 213 | |
216 | return 0; | 214 | return 0; |
217 | } | 215 | } |
218 | 216 | ||
@@ -233,10 +231,10 @@ static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id) | |||
233 | } | 231 | } |
234 | 232 | ||
235 | append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0); | 233 | append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0); |
236 | #ifdef DEBUG | 234 | |
237 | print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4, | 235 | print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4, |
238 | desc, desc_bytes(desc), 1); | 236 | desc, desc_bytes(desc), 1); |
239 | #endif | 237 | |
240 | return 0; | 238 | return 0; |
241 | } | 239 | } |
242 | 240 | ||
@@ -296,47 +294,20 @@ static struct hwrng caam_rng = { | |||
296 | .read = caam_read, | 294 | .read = caam_read, |
297 | }; | 295 | }; |
298 | 296 | ||
299 | static void __exit caam_rng_exit(void) | 297 | void caam_rng_exit(void) |
300 | { | 298 | { |
301 | caam_jr_free(rng_ctx->jrdev); | 299 | caam_jr_free(rng_ctx->jrdev); |
302 | hwrng_unregister(&caam_rng); | 300 | hwrng_unregister(&caam_rng); |
303 | kfree(rng_ctx); | 301 | kfree(rng_ctx); |
304 | } | 302 | } |
305 | 303 | ||
306 | static int __init caam_rng_init(void) | 304 | int caam_rng_init(struct device *ctrldev) |
307 | { | 305 | { |
308 | struct device *dev; | 306 | struct device *dev; |
309 | struct device_node *dev_node; | ||
310 | struct platform_device *pdev; | ||
311 | struct caam_drv_private *priv; | ||
312 | u32 rng_inst; | 307 | u32 rng_inst; |
308 | struct caam_drv_private *priv = dev_get_drvdata(ctrldev); | ||
313 | int err; | 309 | int err; |
314 | 310 | ||
315 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
316 | if (!dev_node) { | ||
317 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
318 | if (!dev_node) | ||
319 | return -ENODEV; | ||
320 | } | ||
321 | |||
322 | pdev = of_find_device_by_node(dev_node); | ||
323 | if (!pdev) { | ||
324 | of_node_put(dev_node); | ||
325 | return -ENODEV; | ||
326 | } | ||
327 | |||
328 | priv = dev_get_drvdata(&pdev->dev); | ||
329 | of_node_put(dev_node); | ||
330 | |||
331 | /* | ||
332 | * If priv is NULL, it's probably because the caam driver wasn't | ||
333 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. | ||
334 | */ | ||
335 | if (!priv) { | ||
336 | err = -ENODEV; | ||
337 | goto out_put_dev; | ||
338 | } | ||
339 | |||
340 | /* Check for an instantiated RNG before registration */ | 311 | /* Check for an instantiated RNG before registration */ |
341 | if (priv->era < 10) | 312 | if (priv->era < 10) |
342 | rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & | 313 | rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & |
@@ -344,16 +315,13 @@ static int __init caam_rng_init(void) | |||
344 | else | 315 | else |
345 | rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK; | 316 | rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK; |
346 | 317 | ||
347 | if (!rng_inst) { | 318 | if (!rng_inst) |
348 | err = -ENODEV; | 319 | return 0; |
349 | goto out_put_dev; | ||
350 | } | ||
351 | 320 | ||
352 | dev = caam_jr_alloc(); | 321 | dev = caam_jr_alloc(); |
353 | if (IS_ERR(dev)) { | 322 | if (IS_ERR(dev)) { |
354 | pr_err("Job Ring Device allocation for transform failed\n"); | 323 | pr_err("Job Ring Device allocation for transform failed\n"); |
355 | err = PTR_ERR(dev); | 324 | return PTR_ERR(dev); |
356 | goto out_put_dev; | ||
357 | } | 325 | } |
358 | rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL); | 326 | rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL); |
359 | if (!rng_ctx) { | 327 | if (!rng_ctx) { |
@@ -364,7 +332,6 @@ static int __init caam_rng_init(void) | |||
364 | if (err) | 332 | if (err) |
365 | goto free_rng_ctx; | 333 | goto free_rng_ctx; |
366 | 334 | ||
367 | put_device(&pdev->dev); | ||
368 | dev_info(dev, "registering rng-caam\n"); | 335 | dev_info(dev, "registering rng-caam\n"); |
369 | return hwrng_register(&caam_rng); | 336 | return hwrng_register(&caam_rng); |
370 | 337 | ||
@@ -372,14 +339,5 @@ free_rng_ctx: | |||
372 | kfree(rng_ctx); | 339 | kfree(rng_ctx); |
373 | free_caam_alloc: | 340 | free_caam_alloc: |
374 | caam_jr_free(dev); | 341 | caam_jr_free(dev); |
375 | out_put_dev: | ||
376 | put_device(&pdev->dev); | ||
377 | return err; | 342 | return err; |
378 | } | 343 | } |
379 | |||
380 | module_init(caam_rng_init); | ||
381 | module_exit(caam_rng_exit); | ||
382 | |||
383 | MODULE_LICENSE("GPL"); | ||
384 | MODULE_DESCRIPTION("FSL CAAM support for hw_random API"); | ||
385 | MODULE_AUTHOR("Freescale Semiconductor - NMG"); | ||
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index fec39c35c877..4e43ca4d3656 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * Controller-level driver, kernel property detection, initialization | 3 | * Controller-level driver, kernel property detection, initialization |
4 | * | 4 | * |
5 | * Copyright 2008-2012 Freescale Semiconductor, Inc. | 5 | * Copyright 2008-2012 Freescale Semiconductor, Inc. |
6 | * Copyright 2018 NXP | 6 | * Copyright 2018-2019 NXP |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/device.h> | 9 | #include <linux/device.h> |
@@ -323,8 +323,8 @@ static int caam_remove(struct platform_device *pdev) | |||
323 | of_platform_depopulate(ctrldev); | 323 | of_platform_depopulate(ctrldev); |
324 | 324 | ||
325 | #ifdef CONFIG_CAAM_QI | 325 | #ifdef CONFIG_CAAM_QI |
326 | if (ctrlpriv->qidev) | 326 | if (ctrlpriv->qi_init) |
327 | caam_qi_shutdown(ctrlpriv->qidev); | 327 | caam_qi_shutdown(ctrldev); |
328 | #endif | 328 | #endif |
329 | 329 | ||
330 | /* | 330 | /* |
@@ -540,7 +540,8 @@ static int caam_probe(struct platform_device *pdev) | |||
540 | ctrlpriv->caam_ipg = clk; | 540 | ctrlpriv->caam_ipg = clk; |
541 | 541 | ||
542 | if (!of_machine_is_compatible("fsl,imx7d") && | 542 | if (!of_machine_is_compatible("fsl,imx7d") && |
543 | !of_machine_is_compatible("fsl,imx7s")) { | 543 | !of_machine_is_compatible("fsl,imx7s") && |
544 | !of_machine_is_compatible("fsl,imx7ulp")) { | ||
544 | clk = caam_drv_identify_clk(&pdev->dev, "mem"); | 545 | clk = caam_drv_identify_clk(&pdev->dev, "mem"); |
545 | if (IS_ERR(clk)) { | 546 | if (IS_ERR(clk)) { |
546 | ret = PTR_ERR(clk); | 547 | ret = PTR_ERR(clk); |
@@ -562,7 +563,8 @@ static int caam_probe(struct platform_device *pdev) | |||
562 | 563 | ||
563 | if (!of_machine_is_compatible("fsl,imx6ul") && | 564 | if (!of_machine_is_compatible("fsl,imx6ul") && |
564 | !of_machine_is_compatible("fsl,imx7d") && | 565 | !of_machine_is_compatible("fsl,imx7d") && |
565 | !of_machine_is_compatible("fsl,imx7s")) { | 566 | !of_machine_is_compatible("fsl,imx7s") && |
567 | !of_machine_is_compatible("fsl,imx7ulp")) { | ||
566 | clk = caam_drv_identify_clk(&pdev->dev, "emi_slow"); | 568 | clk = caam_drv_identify_clk(&pdev->dev, "emi_slow"); |
567 | if (IS_ERR(clk)) { | 569 | if (IS_ERR(clk)) { |
568 | ret = PTR_ERR(clk); | 570 | ret = PTR_ERR(clk); |
@@ -702,12 +704,7 @@ static int caam_probe(struct platform_device *pdev) | |||
702 | } | 704 | } |
703 | 705 | ||
704 | ctrlpriv->era = caam_get_era(ctrl); | 706 | ctrlpriv->era = caam_get_era(ctrl); |
705 | 707 | ctrlpriv->domain = iommu_get_domain_for_dev(dev); | |
706 | ret = of_platform_populate(nprop, caam_match, NULL, dev); | ||
707 | if (ret) { | ||
708 | dev_err(dev, "JR platform devices creation error\n"); | ||
709 | goto iounmap_ctrl; | ||
710 | } | ||
711 | 708 | ||
712 | #ifdef CONFIG_DEBUG_FS | 709 | #ifdef CONFIG_DEBUG_FS |
713 | /* | 710 | /* |
@@ -721,19 +718,6 @@ static int caam_probe(struct platform_device *pdev) | |||
721 | ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root); | 718 | ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root); |
722 | #endif | 719 | #endif |
723 | 720 | ||
724 | ring = 0; | ||
725 | for_each_available_child_of_node(nprop, np) | ||
726 | if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || | ||
727 | of_device_is_compatible(np, "fsl,sec4.0-job-ring")) { | ||
728 | ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *) | ||
729 | ((__force uint8_t *)ctrl + | ||
730 | (ring + JR_BLOCK_NUMBER) * | ||
731 | BLOCK_OFFSET | ||
732 | ); | ||
733 | ctrlpriv->total_jobrs++; | ||
734 | ring++; | ||
735 | } | ||
736 | |||
737 | /* Check to see if (DPAA 1.x) QI present. If so, enable */ | 721 | /* Check to see if (DPAA 1.x) QI present. If so, enable */ |
738 | ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK); | 722 | ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK); |
739 | if (ctrlpriv->qi_present && !caam_dpaa2) { | 723 | if (ctrlpriv->qi_present && !caam_dpaa2) { |
@@ -752,6 +736,25 @@ static int caam_probe(struct platform_device *pdev) | |||
752 | #endif | 736 | #endif |
753 | } | 737 | } |
754 | 738 | ||
739 | ret = of_platform_populate(nprop, caam_match, NULL, dev); | ||
740 | if (ret) { | ||
741 | dev_err(dev, "JR platform devices creation error\n"); | ||
742 | goto shutdown_qi; | ||
743 | } | ||
744 | |||
745 | ring = 0; | ||
746 | for_each_available_child_of_node(nprop, np) | ||
747 | if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || | ||
748 | of_device_is_compatible(np, "fsl,sec4.0-job-ring")) { | ||
749 | ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *) | ||
750 | ((__force uint8_t *)ctrl + | ||
751 | (ring + JR_BLOCK_NUMBER) * | ||
752 | BLOCK_OFFSET | ||
753 | ); | ||
754 | ctrlpriv->total_jobrs++; | ||
755 | ring++; | ||
756 | } | ||
757 | |||
755 | /* If no QI and no rings specified, quit and go home */ | 758 | /* If no QI and no rings specified, quit and go home */ |
756 | if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) { | 759 | if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) { |
757 | dev_err(dev, "no queues configured, terminating\n"); | 760 | dev_err(dev, "no queues configured, terminating\n"); |
@@ -898,6 +901,11 @@ caam_remove: | |||
898 | caam_remove(pdev); | 901 | caam_remove(pdev); |
899 | return ret; | 902 | return ret; |
900 | 903 | ||
904 | shutdown_qi: | ||
905 | #ifdef CONFIG_CAAM_QI | ||
906 | if (ctrlpriv->qi_init) | ||
907 | caam_qi_shutdown(dev); | ||
908 | #endif | ||
901 | iounmap_ctrl: | 909 | iounmap_ctrl: |
902 | iounmap(ctrl); | 910 | iounmap(ctrl); |
903 | disable_caam_emi_slow: | 911 | disable_caam_emi_slow: |
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h index 2980b8ef1fb1..5988a26a2441 100644 --- a/drivers/crypto/caam/desc_constr.h +++ b/drivers/crypto/caam/desc_constr.h | |||
@@ -3,6 +3,7 @@ | |||
3 | * caam descriptor construction helper functions | 3 | * caam descriptor construction helper functions |
4 | * | 4 | * |
5 | * Copyright 2008-2012 Freescale Semiconductor, Inc. | 5 | * Copyright 2008-2012 Freescale Semiconductor, Inc. |
6 | * Copyright 2019 NXP | ||
6 | */ | 7 | */ |
7 | 8 | ||
8 | #ifndef DESC_CONSTR_H | 9 | #ifndef DESC_CONSTR_H |
@@ -37,6 +38,16 @@ | |||
37 | 38 | ||
38 | extern bool caam_little_end; | 39 | extern bool caam_little_end; |
39 | 40 | ||
41 | /* | ||
42 | * HW fetches 4 S/G table entries at a time, irrespective of how many entries | ||
43 | * are in the table. It's SW's responsibility to make sure these accesses | ||
44 | * do not have side effects. | ||
45 | */ | ||
46 | static inline int pad_sg_nents(int sg_nents) | ||
47 | { | ||
48 | return ALIGN(sg_nents, 4); | ||
49 | } | ||
50 | |||
40 | static inline int desc_len(u32 * const desc) | 51 | static inline int desc_len(u32 * const desc) |
41 | { | 52 | { |
42 | return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK; | 53 | return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK; |
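pad_sg_nents() sizes hardware S/G tables so the engine's fixed burst of four entry fetches stays inside the allocation. Since ALIGN() rounds up to the next multiple of a power of two, a few representative values (illustrative arithmetic only):

    pad_sg_nents(0); /* -> 0 */
    pad_sg_nents(1); /* -> 4 */
    pad_sg_nents(4); /* -> 4 */
    pad_sg_nents(5); /* -> 8 */

This is why the rsa_edesc_alloc() hunk earlier pads either the destination entry count or the whole table before computing sec4_sg_bytes.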
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c index 4da844e4b61d..4f0d45865aa2 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c | |||
@@ -13,7 +13,7 @@ | |||
13 | #ifdef DEBUG | 13 | #ifdef DEBUG |
14 | #include <linux/highmem.h> | 14 | #include <linux/highmem.h> |
15 | 15 | ||
16 | void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, | 16 | void caam_dump_sg(const char *prefix_str, int prefix_type, |
17 | int rowsize, int groupsize, struct scatterlist *sg, | 17 | int rowsize, int groupsize, struct scatterlist *sg, |
18 | size_t tlen, bool ascii) | 18 | size_t tlen, bool ascii) |
19 | { | 19 | { |
@@ -35,15 +35,15 @@ void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, | |||
35 | 35 | ||
36 | buf = it_page + it->offset; | 36 | buf = it_page + it->offset; |
37 | len = min_t(size_t, tlen, it->length); | 37 | len = min_t(size_t, tlen, it->length); |
38 | print_hex_dump(level, prefix_str, prefix_type, rowsize, | 38 | print_hex_dump_debug(prefix_str, prefix_type, rowsize, |
39 | groupsize, buf, len, ascii); | 39 | groupsize, buf, len, ascii); |
40 | tlen -= len; | 40 | tlen -= len; |
41 | 41 | ||
42 | kunmap_atomic(it_page); | 42 | kunmap_atomic(it_page); |
43 | } | 43 | } |
44 | } | 44 | } |
45 | #else | 45 | #else |
46 | void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, | 46 | void caam_dump_sg(const char *prefix_str, int prefix_type, |
47 | int rowsize, int groupsize, struct scatterlist *sg, | 47 | int rowsize, int groupsize, struct scatterlist *sg, |
48 | size_t tlen, bool ascii) | 48 | size_t tlen, bool ascii) |
49 | {} | 49 | {} |
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h index 8c6b83e02a70..d9726e66edbf 100644 --- a/drivers/crypto/caam/error.h +++ b/drivers/crypto/caam/error.h | |||
@@ -17,7 +17,7 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2); | |||
17 | #define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false) | 17 | #define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false) |
18 | #define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true) | 18 | #define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true) |
19 | 19 | ||
20 | void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, | 20 | void caam_dump_sg(const char *prefix_str, int prefix_type, |
21 | int rowsize, int groupsize, struct scatterlist *sg, | 21 | int rowsize, int groupsize, struct scatterlist *sg, |
22 | size_t tlen, bool ascii); | 22 | size_t tlen, bool ascii); |
23 | 23 | ||
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index 3392615dc91b..6af84bbc612c 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * Private/internal definitions between modules | 4 | * Private/internal definitions between modules |
5 | * | 5 | * |
6 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | 6 | * Copyright 2008-2011 Freescale Semiconductor, Inc. |
7 | * | 7 | * Copyright 2019 NXP |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #ifndef INTERN_H | 10 | #ifndef INTERN_H |
@@ -63,10 +63,6 @@ struct caam_drv_private_jr { | |||
63 | * Driver-private storage for a single CAAM block instance | 63 | * Driver-private storage for a single CAAM block instance |
64 | */ | 64 | */ |
65 | struct caam_drv_private { | 65 | struct caam_drv_private { |
66 | #ifdef CONFIG_CAAM_QI | ||
67 | struct device *qidev; | ||
68 | #endif | ||
69 | |||
70 | /* Physical-presence section */ | 66 | /* Physical-presence section */ |
71 | struct caam_ctrl __iomem *ctrl; /* controller region */ | 67 | struct caam_ctrl __iomem *ctrl; /* controller region */ |
72 | struct caam_deco __iomem *deco; /* DECO/CCB views */ | 68 | struct caam_deco __iomem *deco; /* DECO/CCB views */ |
@@ -74,12 +70,17 @@ struct caam_drv_private { | |||
74 | struct caam_queue_if __iomem *qi; /* QI control region */ | 70 | struct caam_queue_if __iomem *qi; /* QI control region */ |
75 | struct caam_job_ring __iomem *jr[4]; /* JobR's register space */ | 71 | struct caam_job_ring __iomem *jr[4]; /* JobR's register space */ |
76 | 72 | ||
73 | struct iommu_domain *domain; | ||
74 | |||
77 | /* | 75 | /* |
78 | * Detected geometry block. Filled in from device tree if powerpc, | 76 | * Detected geometry block. Filled in from device tree if powerpc, |
79 | * or from register-based version detection code | 77 | * or from register-based version detection code |
80 | */ | 78 | */ |
81 | u8 total_jobrs; /* Total Job Rings in device */ | 79 | u8 total_jobrs; /* Total Job Rings in device */ |
82 | u8 qi_present; /* Nonzero if QI present in device */ | 80 | u8 qi_present; /* Nonzero if QI present in device */ |
81 | #ifdef CONFIG_CAAM_QI | ||
82 | u8 qi_init; /* Nonzero if QI has been initialized */ | ||
83 | #endif | ||
83 | u8 mc_en; /* Nonzero if MC f/w is active */ | 84 | u8 mc_en; /* Nonzero if MC f/w is active */ |
84 | int secvio_irq; /* Security violation interrupt number */ | 85 | int secvio_irq; /* Security violation interrupt number */ |
85 | int virt_en; /* Virtualization enabled in CAAM */ | 86 | int virt_en; /* Virtualization enabled in CAAM */ |
@@ -107,8 +108,95 @@ struct caam_drv_private { | |||
107 | #endif | 108 | #endif |
108 | }; | 109 | }; |
109 | 110 | ||
110 | void caam_jr_algapi_init(struct device *dev); | 111 | #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API |
111 | void caam_jr_algapi_remove(struct device *dev); | 112 | |
113 | int caam_algapi_init(struct device *dev); | ||
114 | void caam_algapi_exit(void); | ||
115 | |||
116 | #else | ||
117 | |||
118 | static inline int caam_algapi_init(struct device *dev) | ||
119 | { | ||
120 | return 0; | ||
121 | } | ||
122 | |||
123 | static inline void caam_algapi_exit(void) | ||
124 | { | ||
125 | } | ||
126 | |||
127 | #endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API */ | ||
128 | |||
129 | #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API | ||
130 | |||
131 | int caam_algapi_hash_init(struct device *dev); | ||
132 | void caam_algapi_hash_exit(void); | ||
133 | |||
134 | #else | ||
135 | |||
136 | static inline int caam_algapi_hash_init(struct device *dev) | ||
137 | { | ||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | static inline void caam_algapi_hash_exit(void) | ||
142 | { | ||
143 | } | ||
144 | |||
145 | #endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API */ | ||
146 | |||
147 | #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API | ||
148 | |||
149 | int caam_pkc_init(struct device *dev); | ||
150 | void caam_pkc_exit(void); | ||
151 | |||
152 | #else | ||
153 | |||
154 | static inline int caam_pkc_init(struct device *dev) | ||
155 | { | ||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | static inline void caam_pkc_exit(void) | ||
160 | { | ||
161 | } | ||
162 | |||
163 | #endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API */ | ||
164 | |||
165 | #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API | ||
166 | |||
167 | int caam_rng_init(struct device *dev); | ||
168 | void caam_rng_exit(void); | ||
169 | |||
170 | #else | ||
171 | |||
172 | static inline int caam_rng_init(struct device *dev) | ||
173 | { | ||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | static inline void caam_rng_exit(void) | ||
178 | { | ||
179 | } | ||
180 | |||
181 | #endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */ | ||
182 | |||
183 | #ifdef CONFIG_CAAM_QI | ||
184 | |||
185 | int caam_qi_algapi_init(struct device *dev); | ||
186 | void caam_qi_algapi_exit(void); | ||
187 | |||
188 | #else | ||
189 | |||
190 | static inline int caam_qi_algapi_init(struct device *dev) | ||
191 | { | ||
192 | return 0; | ||
193 | } | ||
194 | |||
195 | static inline void caam_qi_algapi_exit(void) | ||
196 | { | ||
197 | } | ||
198 | |||
199 | #endif /* CONFIG_CAAM_QI */ | ||
112 | 200 | ||
113 | #ifdef CONFIG_DEBUG_FS | 201 | #ifdef CONFIG_DEBUG_FS |
114 | static int caam_debugfs_u64_get(void *data, u64 *val) | 202 | static int caam_debugfs_u64_get(void *data, u64 *val) |
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 1de2562d0982..cea811fed320 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
@@ -4,6 +4,7 @@ | |||
4 | * JobR backend functionality | 4 | * JobR backend functionality |
5 | * | 5 | * |
6 | * Copyright 2008-2012 Freescale Semiconductor, Inc. | 6 | * Copyright 2008-2012 Freescale Semiconductor, Inc. |
7 | * Copyright 2019 NXP | ||
7 | */ | 8 | */ |
8 | 9 | ||
9 | #include <linux/of_irq.h> | 10 | #include <linux/of_irq.h> |
@@ -23,6 +24,43 @@ struct jr_driver_data { | |||
23 | } ____cacheline_aligned; | 24 | } ____cacheline_aligned; |
24 | 25 | ||
25 | static struct jr_driver_data driver_data; | 26 | static struct jr_driver_data driver_data; |
27 | static DEFINE_MUTEX(algs_lock); | ||
28 | static unsigned int active_devs; | ||
29 | |||
30 | static void register_algs(struct device *dev) | ||
31 | { | ||
32 | mutex_lock(&algs_lock); | ||
33 | |||
34 | if (++active_devs != 1) | ||
35 | goto algs_unlock; | ||
36 | |||
37 | caam_algapi_init(dev); | ||
38 | caam_algapi_hash_init(dev); | ||
39 | caam_pkc_init(dev); | ||
40 | caam_rng_init(dev); | ||
41 | caam_qi_algapi_init(dev); | ||
42 | |||
43 | algs_unlock: | ||
44 | mutex_unlock(&algs_lock); | ||
45 | } | ||
46 | |||
47 | static void unregister_algs(void) | ||
48 | { | ||
49 | mutex_lock(&algs_lock); | ||
50 | |||
51 | if (--active_devs != 0) | ||
52 | goto algs_unlock; | ||
53 | |||
54 | caam_qi_algapi_exit(); | ||
55 | |||
56 | caam_rng_exit(); | ||
57 | caam_pkc_exit(); | ||
58 | caam_algapi_hash_exit(); | ||
59 | caam_algapi_exit(); | ||
60 | |||
61 | algs_unlock: | ||
62 | mutex_unlock(&algs_lock); | ||
63 | } | ||
26 | 64 | ||
27 | static int caam_reset_hw_jr(struct device *dev) | 65 | static int caam_reset_hw_jr(struct device *dev) |
28 | { | 66 | { |
@@ -109,6 +147,9 @@ static int caam_jr_remove(struct platform_device *pdev) | |||
109 | return -EBUSY; | 147 | return -EBUSY; |
110 | } | 148 | } |
111 | 149 | ||
150 | /* Unregister JR-based RNG & crypto algorithms */ | ||
151 | unregister_algs(); | ||
152 | |||
112 | /* Remove the node from Physical JobR list maintained by driver */ | 153 | /* Remove the node from Physical JobR list maintained by driver */ |
113 | spin_lock(&driver_data.jr_alloc_lock); | 154 | spin_lock(&driver_data.jr_alloc_lock); |
114 | list_del(&jrpriv->list_node); | 155 | list_del(&jrpriv->list_node); |
@@ -541,6 +582,8 @@ static int caam_jr_probe(struct platform_device *pdev) | |||
541 | 582 | ||
542 | atomic_set(&jrpriv->tfm_count, 0); | 583 | atomic_set(&jrpriv->tfm_count, 0); |
543 | 584 | ||
585 | register_algs(jrdev->parent); | ||
586 | |||
544 | return 0; | 587 | return 0; |
545 | } | 588 | } |
546 | 589 | ||
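With module_init()/module_exit() gone from the individual algorithm files, the job-ring driver above owns registration: the first ring probed brings all JR-dependent algorithms up, and the last ring removed tears them down. The generic shape of that first-up/last-down pattern (names illustrative, not driver API):

    #include <linux/mutex.h>

    static DEFINE_MUTEX(users_lock);
    static unsigned int users;

    static void get_shared(void (*up)(void))
    {
        mutex_lock(&users_lock);
        if (++users == 1)
            up();   /* only the first user initializes */
        mutex_unlock(&users_lock);
    }

    static void put_shared(void (*down)(void))
    {
        mutex_lock(&users_lock);
        if (--users == 0)
            down(); /* only the last user tears down */
        mutex_unlock(&users_lock);
    }

The per-API init stubs added to intern.h above make the register_algs() calls safe even when a given sub-driver is compiled out.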
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c index 8d0713fae6ac..48dd3536060d 100644 --- a/drivers/crypto/caam/key_gen.c +++ b/drivers/crypto/caam/key_gen.c | |||
@@ -16,9 +16,7 @@ void split_key_done(struct device *dev, u32 *desc, u32 err, | |||
16 | { | 16 | { |
17 | struct split_key_result *res = context; | 17 | struct split_key_result *res = context; |
18 | 18 | ||
19 | #ifdef DEBUG | 19 | dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); |
20 | dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); | ||
21 | #endif | ||
22 | 20 | ||
23 | if (err) | 21 | if (err) |
24 | caam_jr_strstatus(dev, err); | 22 | caam_jr_strstatus(dev, err); |
@@ -55,12 +53,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out, | |||
55 | adata->keylen_pad = split_key_pad_len(adata->algtype & | 53 | adata->keylen_pad = split_key_pad_len(adata->algtype & |
56 | OP_ALG_ALGSEL_MASK); | 54 | OP_ALG_ALGSEL_MASK); |
57 | 55 | ||
58 | #ifdef DEBUG | 56 | dev_dbg(jrdev, "split keylen %d split keylen padded %d\n", |
59 | dev_err(jrdev, "split keylen %d split keylen padded %d\n", | ||
60 | adata->keylen, adata->keylen_pad); | 57 | adata->keylen, adata->keylen_pad); |
61 | print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", | 58 | print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ", |
62 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); | 59 | DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); |
63 | #endif | ||
64 | 60 | ||
65 | if (adata->keylen_pad > max_keylen) | 61 | if (adata->keylen_pad > max_keylen) |
66 | return -EINVAL; | 62 | return -EINVAL; |
@@ -102,10 +98,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out, | |||
102 | append_fifo_store(desc, dma_addr, adata->keylen, | 98 | append_fifo_store(desc, dma_addr, adata->keylen, |
103 | LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); | 99 | LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); |
104 | 100 | ||
105 | #ifdef DEBUG | 101 | print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", |
106 | print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", | 102 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), |
107 | DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); | 103 | 1); |
108 | #endif | ||
109 | 104 | ||
110 | result.err = 0; | 105 | result.err = 0; |
111 | init_completion(&result.completion); | 106 | init_completion(&result.completion); |
@@ -115,11 +110,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out, | |||
115 | /* in progress */ | 110 | /* in progress */ |
116 | wait_for_completion(&result.completion); | 111 | wait_for_completion(&result.completion); |
117 | ret = result.err; | 112 | ret = result.err; |
118 | #ifdef DEBUG | 113 | |
119 | print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", | 114 | print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ", |
120 | DUMP_PREFIX_ADDRESS, 16, 4, key_out, | 115 | DUMP_PREFIX_ADDRESS, 16, 4, key_out, |
121 | adata->keylen_pad, 1); | 116 | adata->keylen_pad, 1); |
122 | #endif | ||
123 | } | 117 | } |
124 | 118 | ||
125 | dma_unmap_single(jrdev, dma_addr, adata->keylen_pad, DMA_BIDIRECTIONAL); | 119 | dma_unmap_single(jrdev, dma_addr, adata->keylen_pad, DMA_BIDIRECTIONAL); |
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index 9f08f84cca59..0fe618e3804a 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * Queue Interface backend functionality | 4 | * Queue Interface backend functionality |
5 | * | 5 | * |
6 | * Copyright 2013-2016 Freescale Semiconductor, Inc. | 6 | * Copyright 2013-2016 Freescale Semiconductor, Inc. |
7 | * Copyright 2016-2017 NXP | 7 | * Copyright 2016-2017, 2019 NXP |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/cpumask.h> | 10 | #include <linux/cpumask.h> |
@@ -18,6 +18,7 @@ | |||
18 | #include "desc_constr.h" | 18 | #include "desc_constr.h" |
19 | 19 | ||
20 | #define PREHDR_RSLS_SHIFT 31 | 20 | #define PREHDR_RSLS_SHIFT 31 |
21 | #define PREHDR_ABS BIT(25) | ||
21 | 22 | ||
22 | /* | 23 | /* |
23 | * Use a reasonable backlog of frames (per CPU) as congestion threshold, | 24 | * Use a reasonable backlog of frames (per CPU) as congestion threshold, |
@@ -58,11 +59,9 @@ static DEFINE_PER_CPU(int, last_cpu); | |||
58 | /* | 59 | /* |
59 | * caam_qi_priv - CAAM QI backend private params | 60 | * caam_qi_priv - CAAM QI backend private params |
60 | * @cgr: QMan congestion group | 61 | * @cgr: QMan congestion group |
61 | * @qi_pdev: platform device for QI backend | ||
62 | */ | 62 | */ |
63 | struct caam_qi_priv { | 63 | struct caam_qi_priv { |
64 | struct qman_cgr cgr; | 64 | struct qman_cgr cgr; |
65 | struct platform_device *qi_pdev; | ||
66 | }; | 65 | }; |
67 | 66 | ||
68 | static struct caam_qi_priv qipriv ____cacheline_aligned; | 67 | static struct caam_qi_priv qipriv ____cacheline_aligned; |
@@ -95,6 +94,16 @@ static u64 times_congested; | |||
95 | */ | 94 | */ |
96 | static struct kmem_cache *qi_cache; | 95 | static struct kmem_cache *qi_cache; |
97 | 96 | ||
97 | static void *caam_iova_to_virt(struct iommu_domain *domain, | ||
98 | dma_addr_t iova_addr) | ||
99 | { | ||
100 | phys_addr_t phys_addr; | ||
101 | |||
102 | phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr; | ||
103 | |||
104 | return phys_to_virt(phys_addr); | ||
105 | } | ||
106 | |||
98 | int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req) | 107 | int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req) |
99 | { | 108 | { |
100 | struct qm_fd fd; | 109 | struct qm_fd fd; |
@@ -135,6 +144,7 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq, | |||
135 | const struct qm_fd *fd; | 144 | const struct qm_fd *fd; |
136 | struct caam_drv_req *drv_req; | 145 | struct caam_drv_req *drv_req; |
137 | struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); | 146 | struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); |
147 | struct caam_drv_private *priv = dev_get_drvdata(qidev); | ||
138 | 148 | ||
139 | fd = &msg->ern.fd; | 149 | fd = &msg->ern.fd; |
140 | 150 | ||
@@ -143,7 +153,7 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq, | |||
143 | return; | 153 | return; |
144 | } | 154 | } |
145 | 155 | ||
146 | drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd)); | 156 | drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd)); |
147 | if (!drv_req) { | 157 | if (!drv_req) { |
148 | dev_err(qidev, | 158 | dev_err(qidev, |
149 | "Can't find original request for CAAM response\n"); | 159 | "Can't find original request for CAAM response\n"); |
@@ -346,6 +356,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc) | |||
346 | */ | 356 | */ |
347 | drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | | 357 | drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | |
348 | num_words); | 358 | num_words); |
359 | drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS); | ||
349 | memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc)); | 360 | memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc)); |
350 | dma_sync_single_for_device(qidev, drv_ctx->context_a, | 361 | dma_sync_single_for_device(qidev, drv_ctx->context_a, |
351 | sizeof(drv_ctx->sh_desc) + | 362 | sizeof(drv_ctx->sh_desc) + |
@@ -401,6 +412,7 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, | |||
401 | */ | 412 | */ |
402 | drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | | 413 | drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | |
403 | num_words); | 414 | num_words); |
415 | drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS); | ||
404 | memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc)); | 416 | memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc)); |
405 | size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc); | 417 | size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc); |
406 | hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size, | 418 | hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size, |
@@ -488,7 +500,7 @@ EXPORT_SYMBOL(caam_drv_ctx_rel); | |||
488 | void caam_qi_shutdown(struct device *qidev) | 500 | void caam_qi_shutdown(struct device *qidev) |
489 | { | 501 | { |
490 | int i; | 502 | int i; |
491 | struct caam_qi_priv *priv = dev_get_drvdata(qidev); | 503 | struct caam_qi_priv *priv = &qipriv; |
492 | const cpumask_t *cpus = qman_affine_cpus(); | 504 | const cpumask_t *cpus = qman_affine_cpus(); |
493 | 505 | ||
494 | for_each_cpu(i, cpus) { | 506 | for_each_cpu(i, cpus) { |
@@ -506,8 +518,6 @@ void caam_qi_shutdown(struct device *qidev) | |||
506 | qman_release_cgrid(priv->cgr.cgrid); | 518 | qman_release_cgrid(priv->cgr.cgrid); |
507 | 519 | ||
508 | kmem_cache_destroy(qi_cache); | 520 | kmem_cache_destroy(qi_cache); |
509 | |||
510 | platform_device_unregister(priv->qi_pdev); | ||
511 | } | 521 | } |
512 | 522 | ||
513 | static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested) | 523 | static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested) |
@@ -550,6 +560,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p, | |||
550 | struct caam_drv_req *drv_req; | 560 | struct caam_drv_req *drv_req; |
551 | const struct qm_fd *fd; | 561 | const struct qm_fd *fd; |
552 | struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); | 562 | struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); |
563 | struct caam_drv_private *priv = dev_get_drvdata(qidev); | ||
553 | u32 status; | 564 | u32 status; |
554 | 565 | ||
555 | if (caam_qi_napi_schedule(p, caam_napi)) | 566 | if (caam_qi_napi_schedule(p, caam_napi)) |
@@ -572,7 +583,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p, | |||
572 | return qman_cb_dqrr_consume; | 583 | return qman_cb_dqrr_consume; |
573 | } | 584 | } |
574 | 585 | ||
575 | drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd)); | 586 | drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd)); |
576 | if (unlikely(!drv_req)) { | 587 | if (unlikely(!drv_req)) { |
577 | dev_err(qidev, | 588 | dev_err(qidev, |
578 | "Can't find original request for caam response\n"); | 589 | "Can't find original request for caam response\n"); |
@@ -692,33 +703,17 @@ static void free_rsp_fqs(void) | |||
692 | int caam_qi_init(struct platform_device *caam_pdev) | 703 | int caam_qi_init(struct platform_device *caam_pdev) |
693 | { | 704 | { |
694 | int err, i; | 705 | int err, i; |
695 | struct platform_device *qi_pdev; | ||
696 | struct device *ctrldev = &caam_pdev->dev, *qidev; | 706 | struct device *ctrldev = &caam_pdev->dev, *qidev; |
697 | struct caam_drv_private *ctrlpriv; | 707 | struct caam_drv_private *ctrlpriv; |
698 | const cpumask_t *cpus = qman_affine_cpus(); | 708 | const cpumask_t *cpus = qman_affine_cpus(); |
699 | static struct platform_device_info qi_pdev_info = { | ||
700 | .name = "caam_qi", | ||
701 | .id = PLATFORM_DEVID_NONE | ||
702 | }; | ||
703 | |||
704 | qi_pdev_info.parent = ctrldev; | ||
705 | qi_pdev_info.dma_mask = dma_get_mask(ctrldev); | ||
706 | qi_pdev = platform_device_register_full(&qi_pdev_info); | ||
707 | if (IS_ERR(qi_pdev)) | ||
708 | return PTR_ERR(qi_pdev); | ||
709 | set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev)); | ||
710 | 709 | ||
711 | ctrlpriv = dev_get_drvdata(ctrldev); | 710 | ctrlpriv = dev_get_drvdata(ctrldev); |
712 | qidev = &qi_pdev->dev; | 711 | qidev = ctrldev; |
713 | |||
714 | qipriv.qi_pdev = qi_pdev; | ||
715 | dev_set_drvdata(qidev, &qipriv); | ||
716 | 712 | ||
717 | /* Initialize the congestion detection */ | 713 | /* Initialize the congestion detection */ |
718 | err = init_cgr(qidev); | 714 | err = init_cgr(qidev); |
719 | if (err) { | 715 | if (err) { |
720 | dev_err(qidev, "CGR initialization failed: %d\n", err); | 716 | dev_err(qidev, "CGR initialization failed: %d\n", err); |
721 | platform_device_unregister(qi_pdev); | ||
722 | return err; | 717 | return err; |
723 | } | 718 | } |
724 | 719 | ||
@@ -727,7 +722,6 @@ int caam_qi_init(struct platform_device *caam_pdev) | |||
727 | if (err) { | 722 | if (err) { |
728 | dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err); | 723 | dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err); |
729 | free_rsp_fqs(); | 724 | free_rsp_fqs(); |
730 | platform_device_unregister(qi_pdev); | ||
731 | return err; | 725 | return err; |
732 | } | 726 | } |
733 | 727 | ||
@@ -750,15 +744,11 @@ int caam_qi_init(struct platform_device *caam_pdev) | |||
750 | napi_enable(irqtask); | 744 | napi_enable(irqtask); |
751 | } | 745 | } |
752 | 746 | ||
753 | /* Hook up QI device to parent controlling caam device */ | ||
754 | ctrlpriv->qidev = qidev; | ||
755 | |||
756 | qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0, | 747 | qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0, |
757 | SLAB_CACHE_DMA, NULL); | 748 | SLAB_CACHE_DMA, NULL); |
758 | if (!qi_cache) { | 749 | if (!qi_cache) { |
759 | dev_err(qidev, "Can't allocate CAAM cache\n"); | 750 | dev_err(qidev, "Can't allocate CAAM cache\n"); |
760 | free_rsp_fqs(); | 751 | free_rsp_fqs(); |
761 | platform_device_unregister(qi_pdev); | ||
762 | return -ENOMEM; | 752 | return -ENOMEM; |
763 | } | 753 | } |
764 | 754 | ||
@@ -766,6 +756,8 @@ int caam_qi_init(struct platform_device *caam_pdev) | |||
766 | debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl, | 756 | debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl, |
767 | ×_congested, &caam_fops_u64_ro); | 757 | ×_congested, &caam_fops_u64_ro); |
768 | #endif | 758 | #endif |
759 | |||
760 | ctrlpriv->qi_init = 1; | ||
769 | dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n"); | 761 | dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n"); |
770 | return 0; | 762 | return 0; |
771 | } | 763 | } |
diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h index b3e1aaaeffea..d56cc7efbc13 100644 --- a/drivers/crypto/caam/sg_sw_qm.h +++ b/drivers/crypto/caam/sg_sw_qm.h | |||
@@ -54,15 +54,19 @@ static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr, | |||
54 | * but does not have final bit; instead, returns last entry | 54 | * but does not have final bit; instead, returns last entry |
55 | */ | 55 | */ |
56 | static inline struct qm_sg_entry * | 56 | static inline struct qm_sg_entry * |
57 | sg_to_qm_sg(struct scatterlist *sg, int sg_count, | 57 | sg_to_qm_sg(struct scatterlist *sg, int len, |
58 | struct qm_sg_entry *qm_sg_ptr, u16 offset) | 58 | struct qm_sg_entry *qm_sg_ptr, u16 offset) |
59 | { | 59 | { |
60 | while (sg_count && sg) { | 60 | int ent_len; |
61 | dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), | 61 | |
62 | sg_dma_len(sg), offset); | 62 | while (len) { |
63 | ent_len = min_t(int, sg_dma_len(sg), len); | ||
64 | |||
65 | dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len, | ||
66 | offset); | ||
63 | qm_sg_ptr++; | 67 | qm_sg_ptr++; |
64 | sg = sg_next(sg); | 68 | sg = sg_next(sg); |
65 | sg_count--; | 69 | len -= ent_len; |
66 | } | 70 | } |
67 | return qm_sg_ptr - 1; | 71 | return qm_sg_ptr - 1; |
68 | } | 72 | } |
@@ -71,10 +75,10 @@ sg_to_qm_sg(struct scatterlist *sg, int sg_count, | |||
71 | * convert scatterlist to h/w link table format | 75 | * convert scatterlist to h/w link table format |
72 | * scatterlist must have been previously dma mapped | 76 | * scatterlist must have been previously dma mapped |
73 | */ | 77 | */ |
74 | static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count, | 78 | static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len, |
75 | struct qm_sg_entry *qm_sg_ptr, u16 offset) | 79 | struct qm_sg_entry *qm_sg_ptr, u16 offset) |
76 | { | 80 | { |
77 | qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset); | 81 | qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset); |
78 | qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr)); | 82 | qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr)); |
79 | } | 83 | } |
80 | 84 | ||
diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h index c9378402a5f8..b8b737d2b0ea 100644 --- a/drivers/crypto/caam/sg_sw_qm2.h +++ b/drivers/crypto/caam/sg_sw_qm2.h | |||
@@ -25,15 +25,19 @@ static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr, | |||
25 | * but does not have final bit; instead, returns last entry | 25 | * but does not have final bit; instead, returns last entry |
26 | */ | 26 | */ |
27 | static inline struct dpaa2_sg_entry * | 27 | static inline struct dpaa2_sg_entry * |
28 | sg_to_qm_sg(struct scatterlist *sg, int sg_count, | 28 | sg_to_qm_sg(struct scatterlist *sg, int len, |
29 | struct dpaa2_sg_entry *qm_sg_ptr, u16 offset) | 29 | struct dpaa2_sg_entry *qm_sg_ptr, u16 offset) |
30 | { | 30 | { |
31 | while (sg_count && sg) { | 31 | int ent_len; |
32 | dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), | 32 | |
33 | sg_dma_len(sg), offset); | 33 | while (len) { |
34 | ent_len = min_t(int, sg_dma_len(sg), len); | ||
35 | |||
36 | dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len, | ||
37 | offset); | ||
34 | qm_sg_ptr++; | 38 | qm_sg_ptr++; |
35 | sg = sg_next(sg); | 39 | sg = sg_next(sg); |
36 | sg_count--; | 40 | len -= ent_len; |
37 | } | 41 | } |
38 | return qm_sg_ptr - 1; | 42 | return qm_sg_ptr - 1; |
39 | } | 43 | } |
@@ -42,11 +46,11 @@ sg_to_qm_sg(struct scatterlist *sg, int sg_count, | |||
42 | * convert scatterlist to h/w link table format | 46 | * convert scatterlist to h/w link table format |
43 | * scatterlist must have been previously dma mapped | 47 | * scatterlist must have been previously dma mapped |
44 | */ | 48 | */ |
45 | static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count, | 49 | static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len, |
46 | struct dpaa2_sg_entry *qm_sg_ptr, | 50 | struct dpaa2_sg_entry *qm_sg_ptr, |
47 | u16 offset) | 51 | u16 offset) |
48 | { | 52 | { |
49 | qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset); | 53 | qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset); |
50 | dpaa2_sg_set_final(qm_sg_ptr, true); | 54 | dpaa2_sg_set_final(qm_sg_ptr, true); |
51 | } | 55 | } |
52 | 56 | ||
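Both sg_sw_qm.h and sg_sw_qm2.h (and sg_sw_sec4.h below) switch the table builders from counting entries to counting bytes. That matters once a mapped scatterlist entry can be longer than the bytes actually wanted (e.g. after the leading-zero fixup in caampkc.c): each hardware entry is clamped to the remaining length and the walk stops at exactly len bytes. The core loop, with emit_entry() standing in for dma_to_qm_sg_one()/dma_to_sec4_sg_one():

    while (len) {
        int ent_len = min_t(int, sg_dma_len(sg), len);

        emit_entry(tbl++, sg_dma_address(sg), ent_len);
        sg = sg_next(sg);
        len -= ent_len;
    }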
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h index dbfa9fce33e0..07e1ee99273b 100644 --- a/drivers/crypto/caam/sg_sw_sec4.h +++ b/drivers/crypto/caam/sg_sw_sec4.h | |||
@@ -35,11 +35,9 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr, | |||
35 | sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & | 35 | sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & |
36 | SEC4_SG_OFFSET_MASK); | 36 | SEC4_SG_OFFSET_MASK); |
37 | } | 37 | } |
38 | #ifdef DEBUG | 38 | |
39 | print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ", | 39 | print_hex_dump_debug("sec4_sg_ptr@: ", DUMP_PREFIX_ADDRESS, 16, 4, |
40 | DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr, | 40 | sec4_sg_ptr, sizeof(struct sec4_sg_entry), 1); |
41 | sizeof(struct sec4_sg_entry), 1); | ||
42 | #endif | ||
43 | } | 41 | } |
44 | 42 | ||
45 | /* | 43 | /* |
@@ -47,15 +45,19 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr, | |||
47 | * but does not have final bit; instead, returns last entry | 45 | * but does not have final bit; instead, returns last entry |
48 | */ | 46 | */ |
49 | static inline struct sec4_sg_entry * | 47 | static inline struct sec4_sg_entry * |
50 | sg_to_sec4_sg(struct scatterlist *sg, int sg_count, | 48 | sg_to_sec4_sg(struct scatterlist *sg, int len, |
51 | struct sec4_sg_entry *sec4_sg_ptr, u16 offset) | 49 | struct sec4_sg_entry *sec4_sg_ptr, u16 offset) |
52 | { | 50 | { |
53 | while (sg_count) { | 51 | int ent_len; |
54 | dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), | 52 | |
55 | sg_dma_len(sg), offset); | 53 | while (len) { |
54 | ent_len = min_t(int, sg_dma_len(sg), len); | ||
55 | |||
56 | dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), ent_len, | ||
57 | offset); | ||
56 | sec4_sg_ptr++; | 58 | sec4_sg_ptr++; |
57 | sg = sg_next(sg); | 59 | sg = sg_next(sg); |
58 | sg_count--; | 60 | len -= ent_len; |
59 | } | 61 | } |
60 | return sec4_sg_ptr - 1; | 62 | return sec4_sg_ptr - 1; |
61 | } | 63 | } |
@@ -72,11 +74,11 @@ static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr) | |||
72 | * convert scatterlist to h/w link table format | 74 | * convert scatterlist to h/w link table format |
73 | * scatterlist must have been previously dma mapped | 75 | * scatterlist must have been previously dma mapped |
74 | */ | 76 | */ |
75 | static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count, | 77 | static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int len, |
76 | struct sec4_sg_entry *sec4_sg_ptr, | 78 | struct sec4_sg_entry *sec4_sg_ptr, |
77 | u16 offset) | 79 | u16 offset) |
78 | { | 80 | { |
79 | sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset); | 81 | sec4_sg_ptr = sg_to_sec4_sg(sg, len, sec4_sg_ptr, offset); |
80 | sg_to_sec4_set_last(sec4_sg_ptr); | 82 | sg_to_sec4_set_last(sec4_sg_ptr); |
81 | } | 83 | } |
82 | 84 | ||
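The sec4 variant above received the same byte-counting rework, and its debug dump moved from an #ifdef DEBUG block to print_hex_dump_debug(), which is always compiled in and gated through dynamic debug at runtime. The pattern, sketched with placeholder buf/len:

    /* before: silent unless the file was built with -DDEBUG */
    #ifdef DEBUG
        print_hex_dump(KERN_ERR, "buf@: ", DUMP_PREFIX_ADDRESS, 16, 4,
                       buf, len, 1);
    #endif

    /* after: emitted only when dynamic debug enables this call site, e.g.
     *   echo 'file sg_sw_sec4.h +p' > /sys/kernel/debug/dynamic_debug/control
     */
        print_hex_dump_debug("buf@: ", DUMP_PREFIX_ADDRESS, 16, 4,
                             buf, len, 1);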
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c index e9f4704494fb..ff3cb1f8f2b6 100644 --- a/drivers/crypto/cavium/cpt/cptvf_algs.c +++ b/drivers/crypto/cavium/cpt/cptvf_algs.c | |||
@@ -7,7 +7,6 @@ | |||
7 | #include <crypto/aes.h> | 7 | #include <crypto/aes.h> |
8 | #include <crypto/algapi.h> | 8 | #include <crypto/algapi.h> |
9 | #include <crypto/authenc.h> | 9 | #include <crypto/authenc.h> |
10 | #include <crypto/crypto_wq.h> | ||
11 | #include <crypto/des.h> | 10 | #include <crypto/des.h> |
12 | #include <crypto/xts.h> | 11 | #include <crypto/xts.h> |
13 | #include <linux/crypto.h> | 12 | #include <linux/crypto.h> |
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.h b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h index f177b79bbab0..09c4cf2513fb 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_debugfs.h +++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h | |||
@@ -1,4 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef __NITROX_DEBUGFS_H | 2 | #ifndef __NITROX_DEBUGFS_H |
3 | #define __NITROX_DEBUGFS_H | 3 | #define __NITROX_DEBUGFS_H |
4 | 4 | ||
diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.h b/drivers/crypto/cavium/nitrox/nitrox_mbx.h index 5008399775a9..7c93d0282174 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_mbx.h +++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.h | |||
@@ -1,4 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef __NITROX_MBX_H | 2 | #ifndef __NITROX_MBX_H |
3 | #define __NITROX_MBX_H | 3 | #define __NITROX_MBX_H |
4 | 4 | ||
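Both nitrox headers (and sec_drv.h further down) change their SPDX tag from a C99 comment to a block comment, matching the kernel's license-rules convention that .c files carry the tag as // while headers use /* */:

    // SPDX-License-Identifier: GPL-2.0        /* first line of a .c file */
    /* SPDX-License-Identifier: GPL-2.0 */     /* first line of a .h file */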
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c index ea3d6de55ff6..58c6dddfc5e1 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes.c +++ b/drivers/crypto/ccp/ccp-crypto-aes.c | |||
@@ -2,7 +2,7 @@ | |||
2 | /* | 2 | /* |
3 | * AMD Cryptographic Coprocessor (CCP) AES crypto API support | 3 | * AMD Cryptographic Coprocessor (CCP) AES crypto API support |
4 | * | 4 | * |
5 | * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. | 5 | * Copyright (C) 2013-2019 Advanced Micro Devices, Inc. |
6 | * | 6 | * |
7 | * Author: Tom Lendacky <thomas.lendacky@amd.com> | 7 | * Author: Tom Lendacky <thomas.lendacky@amd.com> |
8 | */ | 8 | */ |
@@ -76,8 +76,7 @@ static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt) | |||
76 | return -EINVAL; | 76 | return -EINVAL; |
77 | 77 | ||
78 | if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) || | 78 | if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) || |
79 | (ctx->u.aes.mode == CCP_AES_MODE_CBC) || | 79 | (ctx->u.aes.mode == CCP_AES_MODE_CBC)) && |
80 | (ctx->u.aes.mode == CCP_AES_MODE_CFB)) && | ||
81 | (req->nbytes & (AES_BLOCK_SIZE - 1))) | 80 | (req->nbytes & (AES_BLOCK_SIZE - 1))) |
82 | return -EINVAL; | 81 | return -EINVAL; |
83 | 82 | ||
@@ -288,7 +287,7 @@ static struct ccp_aes_def aes_algs[] = { | |||
288 | .version = CCP_VERSION(3, 0), | 287 | .version = CCP_VERSION(3, 0), |
289 | .name = "cfb(aes)", | 288 | .name = "cfb(aes)", |
290 | .driver_name = "cfb-aes-ccp", | 289 | .driver_name = "cfb-aes-ccp", |
291 | .blocksize = AES_BLOCK_SIZE, | 290 | .blocksize = 1, |
292 | .ivsize = AES_BLOCK_SIZE, | 291 | .ivsize = AES_BLOCK_SIZE, |
293 | .alg_defaults = &ccp_aes_defaults, | 292 | .alg_defaults = &ccp_aes_defaults, |
294 | }, | 293 | }, |
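Both CCP changes follow from CFB being a feedback mode with stream-cipher semantics: requests need not be block-aligned, so the alignment check drops CFB and cfb(aes) now advertises a blocksize of 1 (the IV stays AES_BLOCK_SIZE). A self-contained sketch of the corrected check; the mode names are illustrative, not the driver's enums:

    #include <stdbool.h>

    #define AES_BLOCK_SIZE 16

    enum aes_mode { MODE_ECB, MODE_CBC, MODE_CFB, MODE_CTR };

    /* Only true block modes need block-aligned input; CFB and CTR can
     * process any number of bytes. */
    static bool aes_len_ok(enum aes_mode mode, unsigned int nbytes)
    {
        if (mode == MODE_ECB || mode == MODE_CBC)
            return (nbytes & (AES_BLOCK_SIZE - 1)) == 0;
        return true;
    }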
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index cc3e96c4f5fb..f79eede71c62 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c | |||
@@ -32,56 +32,62 @@ struct ccp_tasklet_data { | |||
32 | }; | 32 | }; |
33 | 33 | ||
34 | /* Human-readable error strings */ | 34 | /* Human-readable error strings */ |
35 | #define CCP_MAX_ERROR_CODE 64 | ||
35 | static char *ccp_error_codes[] = { | 36 | static char *ccp_error_codes[] = { |
36 | "", | 37 | "", |
37 | "ERR 01: ILLEGAL_ENGINE", | 38 | "ILLEGAL_ENGINE", |
38 | "ERR 02: ILLEGAL_KEY_ID", | 39 | "ILLEGAL_KEY_ID", |
39 | "ERR 03: ILLEGAL_FUNCTION_TYPE", | 40 | "ILLEGAL_FUNCTION_TYPE", |
40 | "ERR 04: ILLEGAL_FUNCTION_MODE", | 41 | "ILLEGAL_FUNCTION_MODE", |
41 | "ERR 05: ILLEGAL_FUNCTION_ENCRYPT", | 42 | "ILLEGAL_FUNCTION_ENCRYPT", |
42 | "ERR 06: ILLEGAL_FUNCTION_SIZE", | 43 | "ILLEGAL_FUNCTION_SIZE", |
43 | "ERR 07: Zlib_MISSING_INIT_EOM", | 44 | "Zlib_MISSING_INIT_EOM", |
44 | "ERR 08: ILLEGAL_FUNCTION_RSVD", | 45 | "ILLEGAL_FUNCTION_RSVD", |
45 | "ERR 09: ILLEGAL_BUFFER_LENGTH", | 46 | "ILLEGAL_BUFFER_LENGTH", |
46 | "ERR 10: VLSB_FAULT", | 47 | "VLSB_FAULT", |
47 | "ERR 11: ILLEGAL_MEM_ADDR", | 48 | "ILLEGAL_MEM_ADDR", |
48 | "ERR 12: ILLEGAL_MEM_SEL", | 49 | "ILLEGAL_MEM_SEL", |
49 | "ERR 13: ILLEGAL_CONTEXT_ID", | 50 | "ILLEGAL_CONTEXT_ID", |
50 | "ERR 14: ILLEGAL_KEY_ADDR", | 51 | "ILLEGAL_KEY_ADDR", |
51 | "ERR 15: 0xF Reserved", | 52 | "0xF Reserved", |
52 | "ERR 16: Zlib_ILLEGAL_MULTI_QUEUE", | 53 | "Zlib_ILLEGAL_MULTI_QUEUE", |
53 | "ERR 17: Zlib_ILLEGAL_JOBID_CHANGE", | 54 | "Zlib_ILLEGAL_JOBID_CHANGE", |
54 | "ERR 18: CMD_TIMEOUT", | 55 | "CMD_TIMEOUT", |
55 | "ERR 19: IDMA0_AXI_SLVERR", | 56 | "IDMA0_AXI_SLVERR", |
56 | "ERR 20: IDMA0_AXI_DECERR", | 57 | "IDMA0_AXI_DECERR", |
57 | "ERR 21: 0x15 Reserved", | 58 | "0x15 Reserved", |
58 | "ERR 22: IDMA1_AXI_SLAVE_FAULT", | 59 | "IDMA1_AXI_SLAVE_FAULT", |
59 | "ERR 23: IDMA1_AIXI_DECERR", | 60 | "IDMA1_AIXI_DECERR", |
60 | "ERR 24: 0x18 Reserved", | 61 | "0x18 Reserved", |
61 | "ERR 25: ZLIBVHB_AXI_SLVERR", | 62 | "ZLIBVHB_AXI_SLVERR", |
62 | "ERR 26: ZLIBVHB_AXI_DECERR", | 63 | "ZLIBVHB_AXI_DECERR", |
63 | "ERR 27: 0x1B Reserved", | 64 | "0x1B Reserved", |
64 | "ERR 27: ZLIB_UNEXPECTED_EOM", | 65 | "ZLIB_UNEXPECTED_EOM", |
65 | "ERR 27: ZLIB_EXTRA_DATA", | 66 | "ZLIB_EXTRA_DATA", |
66 | "ERR 30: ZLIB_BTYPE", | 67 | "ZLIB_BTYPE", |
67 | "ERR 31: ZLIB_UNDEFINED_SYMBOL", | 68 | "ZLIB_UNDEFINED_SYMBOL", |
68 | "ERR 32: ZLIB_UNDEFINED_DISTANCE_S", | 69 | "ZLIB_UNDEFINED_DISTANCE_S", |
69 | "ERR 33: ZLIB_CODE_LENGTH_SYMBOL", | 70 | "ZLIB_CODE_LENGTH_SYMBOL", |
70 | "ERR 34: ZLIB _VHB_ILLEGAL_FETCH", | 71 | "ZLIB _VHB_ILLEGAL_FETCH", |
71 | "ERR 35: ZLIB_UNCOMPRESSED_LEN", | 72 | "ZLIB_UNCOMPRESSED_LEN", |
72 | "ERR 36: ZLIB_LIMIT_REACHED", | 73 | "ZLIB_LIMIT_REACHED", |
73 | "ERR 37: ZLIB_CHECKSUM_MISMATCH0", | 74 | "ZLIB_CHECKSUM_MISMATCH0", |
74 | "ERR 38: ODMA0_AXI_SLVERR", | 75 | "ODMA0_AXI_SLVERR", |
75 | "ERR 39: ODMA0_AXI_DECERR", | 76 | "ODMA0_AXI_DECERR", |
76 | "ERR 40: 0x28 Reserved", | 77 | "0x28 Reserved", |
77 | "ERR 41: ODMA1_AXI_SLVERR", | 78 | "ODMA1_AXI_SLVERR", |
78 | "ERR 42: ODMA1_AXI_DECERR", | 79 | "ODMA1_AXI_DECERR", |
79 | "ERR 43: LSB_PARITY_ERR", | ||
80 | }; | 80 | }; |
81 | 81 | ||
82 | void ccp_log_error(struct ccp_device *d, int e) | 82 | void ccp_log_error(struct ccp_device *d, unsigned int e) |
83 | { | 83 | { |
84 | dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e); | 84 | if (WARN_ON(e >= CCP_MAX_ERROR_CODE)) |
85 | return; | ||
86 | |||
87 | if (e < ARRAY_SIZE(ccp_error_codes)) | ||
88 | dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]); | ||
89 | else | ||
90 | dev_err(d->dev, "CCP error %d: Unknown Error\n", e); | ||
85 | } | 91 | } |
86 | 92 | ||
87 | /* List of CCPs, CCP count, read-write access lock, and access functions | 93 | /* List of CCPs, CCP count, read-write access lock, and access functions |
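ccp_log_error() now takes an unsigned index and validates it twice: WARN_ON() rejects anything at or beyond the architectural maximum of 64, and an ARRAY_SIZE() check catches codes the shortened table does not yet name. The same defensive lookup as a standalone userspace model:

    #include <stdio.h>

    #define MAX_ERROR_CODE 64
    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *error_codes[] = {
        "", "ILLEGAL_ENGINE", "ILLEGAL_KEY_ID", /* ... truncated ... */
    };

    static void log_error(unsigned int e)
    {
        if (e >= MAX_ERROR_CODE)       /* architecturally impossible index */
            return;                    /* the driver WARN_ON()s here */

        if (e < ARRAY_SIZE(error_codes))
            fprintf(stderr, "error %u: %s\n", e, error_codes[e]);
        else                           /* valid code the table doesn't name */
            fprintf(stderr, "error %u: Unknown Error\n", e);
    }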
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 90523a069bff..5e624920fd99 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h | |||
@@ -629,7 +629,7 @@ struct ccp5_desc { | |||
629 | void ccp_add_device(struct ccp_device *ccp); | 629 | void ccp_add_device(struct ccp_device *ccp); |
630 | void ccp_del_device(struct ccp_device *ccp); | 630 | void ccp_del_device(struct ccp_device *ccp); |
631 | 631 | ||
632 | extern void ccp_log_error(struct ccp_device *, int); | 632 | extern void ccp_log_error(struct ccp_device *, unsigned int); |
633 | 633 | ||
634 | struct ccp_device *ccp_alloc_struct(struct sp_device *sp); | 634 | struct ccp_device *ccp_alloc_struct(struct sp_device *sp); |
635 | bool ccp_queues_suspended(struct ccp_device *ccp); | 635 | bool ccp_queues_suspended(struct ccp_device *ccp); |
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index db8de89d990f..866b2e05ca77 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c | |||
@@ -2,7 +2,7 @@ | |||
2 | /* | 2 | /* |
3 | * AMD Cryptographic Coprocessor (CCP) driver | 3 | * AMD Cryptographic Coprocessor (CCP) driver |
4 | * | 4 | * |
5 | * Copyright (C) 2013,2018 Advanced Micro Devices, Inc. | 5 | * Copyright (C) 2013-2019 Advanced Micro Devices, Inc. |
6 | * | 6 | * |
7 | * Author: Tom Lendacky <thomas.lendacky@amd.com> | 7 | * Author: Tom Lendacky <thomas.lendacky@amd.com> |
8 | * Author: Gary R Hook <gary.hook@amd.com> | 8 | * Author: Gary R Hook <gary.hook@amd.com> |
@@ -890,8 +890,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
890 | return -EINVAL; | 890 | return -EINVAL; |
891 | 891 | ||
892 | if (((aes->mode == CCP_AES_MODE_ECB) || | 892 | if (((aes->mode == CCP_AES_MODE_ECB) || |
893 | (aes->mode == CCP_AES_MODE_CBC) || | 893 | (aes->mode == CCP_AES_MODE_CBC)) && |
894 | (aes->mode == CCP_AES_MODE_CFB)) && | ||
895 | (aes->src_len & (AES_BLOCK_SIZE - 1))) | 894 | (aes->src_len & (AES_BLOCK_SIZE - 1))) |
896 | return -EINVAL; | 895 | return -EINVAL; |
897 | 896 | ||
@@ -1264,6 +1263,9 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1264 | int ret; | 1263 | int ret; |
1265 | 1264 | ||
1266 | /* Error checks */ | 1265 | /* Error checks */ |
1266 | if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) | ||
1267 | return -EINVAL; | ||
1268 | |||
1267 | if (!cmd_q->ccp->vdata->perform->des3) | 1269 | if (!cmd_q->ccp->vdata->perform->des3) |
1268 | return -EINVAL; | 1270 | return -EINVAL; |
1269 | 1271 | ||
@@ -1346,8 +1348,6 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1346 | * passthru option to convert from big endian to little endian. | 1348 | * passthru option to convert from big endian to little endian. |
1347 | */ | 1349 | */ |
1348 | if (des3->mode != CCP_DES3_MODE_ECB) { | 1350 | if (des3->mode != CCP_DES3_MODE_ECB) { |
1349 | u32 load_mode; | ||
1350 | |||
1351 | op.sb_ctx = cmd_q->sb_ctx; | 1351 | op.sb_ctx = cmd_q->sb_ctx; |
1352 | 1352 | ||
1353 | ret = ccp_init_dm_workarea(&ctx, cmd_q, | 1353 | ret = ccp_init_dm_workarea(&ctx, cmd_q, |
@@ -1363,12 +1363,8 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1363 | if (ret) | 1363 | if (ret) |
1364 | goto e_ctx; | 1364 | goto e_ctx; |
1365 | 1365 | ||
1366 | if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) | ||
1367 | load_mode = CCP_PASSTHRU_BYTESWAP_NOOP; | ||
1368 | else | ||
1369 | load_mode = CCP_PASSTHRU_BYTESWAP_256BIT; | ||
1370 | ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, | 1366 | ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, |
1371 | load_mode); | 1367 | CCP_PASSTHRU_BYTESWAP_256BIT); |
1372 | if (ret) { | 1368 | if (ret) { |
1373 | cmd->engine_error = cmd_q->cmd_error; | 1369 | cmd->engine_error = cmd_q->cmd_error; |
1374 | goto e_ctx; | 1370 | goto e_ctx; |
@@ -1430,10 +1426,6 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) | |||
1430 | } | 1426 | } |
1431 | 1427 | ||
1432 | /* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */ | 1428 | /* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */ |
1433 | if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) | ||
1434 | dm_offset = CCP_SB_BYTES - des3->iv_len; | ||
1435 | else | ||
1436 | dm_offset = 0; | ||
1437 | ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0, | 1429 | ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0, |
1438 | DES3_EDE_BLOCK_SIZE); | 1430 | DES3_EDE_BLOCK_SIZE); |
1439 | } | 1431 | } |
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 86ac7b443355..980aa04b655b 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c | |||
@@ -48,6 +48,7 @@ struct cc_hw_data { | |||
48 | }; | 48 | }; |
49 | 49 | ||
50 | #define CC_NUM_IDRS 4 | 50 | #define CC_NUM_IDRS 4 |
51 | #define CC_HW_RESET_LOOP_COUNT 10 | ||
51 | 52 | ||
52 | /* Note: PIDR3 holds CMOD/Rev so ignored for HW identification purposes */ | 53 | /* Note: PIDR3 holds CMOD/Rev so ignored for HW identification purposes */ |
53 | static const u32 pidr_0124_offsets[CC_NUM_IDRS] = { | 54 | static const u32 pidr_0124_offsets[CC_NUM_IDRS] = { |
@@ -133,6 +134,9 @@ static irqreturn_t cc_isr(int irq, void *dev_id) | |||
133 | u32 imr; | 134 | u32 imr; |
134 | 135 | ||
135 | /* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */ | 136 | /* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */ |
137 | /* if the driver is suspended, return; probably a shared interrupt */ | ||
138 | if (cc_pm_is_dev_suspended(dev)) | ||
139 | return IRQ_NONE; | ||
136 | 140 | ||
137 | /* read the interrupt status */ | 141 | /* read the interrupt status */ |
138 | irr = cc_ioread(drvdata, CC_REG(HOST_IRR)); | 142 | irr = cc_ioread(drvdata, CC_REG(HOST_IRR)); |
@@ -188,6 +192,31 @@ static irqreturn_t cc_isr(int irq, void *dev_id) | |||
188 | return IRQ_HANDLED; | 192 | return IRQ_HANDLED; |
189 | } | 193 | } |
190 | 194 | ||
195 | bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata) | ||
196 | { | ||
197 | unsigned int val; | ||
198 | unsigned int i; | ||
199 | |||
200 | /* 712/710/63 have no reset completion indication; always return true */ | ||
201 | if (drvdata->hw_rev <= CC_HW_REV_712) | ||
202 | return true; | ||
203 | |||
204 | for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) { | ||
205 | /* in cc7x3, NVM_IS_IDLE indicates that the CC reset is | ||
206 | * complete and the device is fully functional | ||
207 | */ | ||
208 | val = cc_ioread(drvdata, CC_REG(NVM_IS_IDLE)); | ||
209 | if (val & CC_NVM_IS_IDLE_MASK) { | ||
210 | /* hw indicates reset completed */ | ||
211 | return true; | ||
212 | } | ||
213 | /* allow other processes to be scheduled on the processor */ | ||
214 | schedule(); | ||
215 | } | ||
216 | /* reset not completed */ | ||
217 | return false; | ||
218 | } | ||
219 | |||
191 | int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe) | 220 | int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe) |
192 | { | 221 | { |
193 | unsigned int val, cache_params; | 222 | unsigned int val, cache_params; |
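cc_wait_for_reset_completion() is a bounded poll rather than an open-ended wait: at most CC_HW_RESET_LOOP_COUNT reads of NVM_IS_IDLE, yielding the CPU between attempts, with the caller choosing what a timeout means (probe only logs it; cc_pm_resume() below returns -EBUSY). The shape of the pattern as a standalone sketch:

    #include <stdbool.h>

    #define RESET_LOOP_COUNT 10

    /* Poll an "idle" predicate a fixed number of times, yielding between
     * reads (schedule() in the driver), so the probe path cannot hang. */
    static bool wait_for_reset(bool (*hw_idle)(void), void (*yield)(void))
    {
        int i;

        for (i = 0; i < RESET_LOOP_COUNT; i++) {
            if (hw_idle())
                return true;
            yield();
        }
        return false;   /* timed out; the caller decides how bad that is */
    }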
@@ -315,15 +344,6 @@ static int init_cc_resources(struct platform_device *plat_dev) | |||
315 | return new_drvdata->irq; | 344 | return new_drvdata->irq; |
316 | } | 345 | } |
317 | 346 | ||
318 | rc = devm_request_irq(dev, new_drvdata->irq, cc_isr, | ||
319 | IRQF_SHARED, "ccree", new_drvdata); | ||
320 | if (rc) { | ||
321 | dev_err(dev, "Could not register to interrupt %d\n", | ||
322 | new_drvdata->irq); | ||
323 | return rc; | ||
324 | } | ||
325 | dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq); | ||
326 | |||
327 | init_completion(&new_drvdata->hw_queue_avail); | 347 | init_completion(&new_drvdata->hw_queue_avail); |
328 | 348 | ||
329 | if (!plat_dev->dev.dma_mask) | 349 | if (!plat_dev->dev.dma_mask) |
@@ -352,6 +372,11 @@ static int init_cc_resources(struct platform_device *plat_dev) | |||
352 | 372 | ||
353 | new_drvdata->sec_disabled = cc_sec_disable; | 373 | new_drvdata->sec_disabled = cc_sec_disable; |
354 | 374 | ||
375 | /* wait for Cryptocell reset completion */ | ||
376 | if (!cc_wait_for_reset_completion(new_drvdata)) { | ||
377 | dev_err(dev, "Cryptocell reset not completed"); | ||
378 | } | ||
379 | |||
355 | if (hw_rev->rev <= CC_HW_REV_712) { | 380 | if (hw_rev->rev <= CC_HW_REV_712) { |
356 | /* Verify correct mapping */ | 381 | /* Verify correct mapping */ |
357 | val = cc_ioread(new_drvdata, new_drvdata->sig_offset); | 382 | val = cc_ioread(new_drvdata, new_drvdata->sig_offset); |
@@ -383,6 +408,24 @@ static int init_cc_resources(struct platform_device *plat_dev) | |||
383 | } | 408 | } |
384 | sig_cidr = val; | 409 | sig_cidr = val; |
385 | 410 | ||
411 | /* Check HW engine configuration */ | ||
412 | val = cc_ioread(new_drvdata, CC_REG(HOST_REMOVE_INPUT_PINS)); | ||
413 | switch (val) { | ||
414 | case CC_PINS_FULL: | ||
415 | /* This is fine */ | ||
416 | break; | ||
417 | case CC_PINS_SLIM: | ||
418 | if (new_drvdata->std_bodies & CC_STD_NIST) { | ||
419 | dev_warn(dev, "703 mode forced due to HW configuration.\n"); | ||
420 | new_drvdata->std_bodies = CC_STD_OSCCA; | ||
421 | } | ||
422 | break; | ||
423 | default: | ||
424 | dev_err(dev, "Unsupported engines configration.\n"); | ||
425 | rc = -EINVAL; | ||
426 | goto post_clk_err; | ||
427 | } | ||
428 | |||
386 | /* Check security disable state */ | 429 | /* Check security disable state */ |
387 | val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED)); | 430 | val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED)); |
388 | val &= CC_SECURITY_DISABLED_MASK; | 431 | val &= CC_SECURITY_DISABLED_MASK; |
@@ -401,6 +444,15 @@ static int init_cc_resources(struct platform_device *plat_dev) | |||
401 | /* Display HW versions */ | 444 | /* Display HW versions */ |
402 | dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n", | 445 | dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n", |
403 | hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION); | 446 | hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION); |
447 | /* register the driver ISR function */ | ||
448 | rc = devm_request_irq(dev, new_drvdata->irq, cc_isr, | ||
449 | IRQF_SHARED, "ccree", new_drvdata); | ||
450 | if (rc) { | ||
451 | dev_err(dev, "Could not register to interrupt %d\n", | ||
452 | new_drvdata->irq); | ||
453 | goto post_clk_err; | ||
454 | } | ||
455 | dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq); | ||
404 | 456 | ||
405 | rc = init_cc_regs(new_drvdata, true); | 457 | rc = init_cc_regs(new_drvdata, true); |
406 | if (rc) { | 458 | if (rc) { |
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h index b76181335c08..7cd99380bf1f 100644 --- a/drivers/crypto/ccree/cc_driver.h +++ b/drivers/crypto/ccree/cc_driver.h | |||
@@ -53,6 +53,9 @@ enum cc_std_body { | |||
53 | 53 | ||
54 | #define CC_COHERENT_CACHE_PARAMS 0xEEE | 54 | #define CC_COHERENT_CACHE_PARAMS 0xEEE |
55 | 55 | ||
56 | #define CC_PINS_FULL 0x0 | ||
57 | #define CC_PINS_SLIM 0x9F | ||
58 | |||
56 | /* Maximum DMA mask supported by IP */ | 59 | /* Maximum DMA mask supported by IP */ |
57 | #define DMA_BIT_MASK_LEN 48 | 60 | #define DMA_BIT_MASK_LEN 48 |
58 | 61 | ||
@@ -67,6 +70,8 @@ enum cc_std_body { | |||
67 | 70 | ||
68 | #define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT) | 71 | #define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT) |
69 | 72 | ||
73 | #define CC_NVM_IS_IDLE_MASK BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT) | ||
74 | |||
70 | #define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \ | 75 | #define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \ |
71 | CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \ | 76 | CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \ |
72 | CC_AXIM_MON_COMP_VALUE_BIT_SHIFT) | 77 | CC_AXIM_MON_COMP_VALUE_BIT_SHIFT) |
@@ -216,6 +221,7 @@ static inline void dump_byte_array(const char *name, const u8 *the_array, | |||
216 | __dump_byte_array(name, the_array, size); | 221 | __dump_byte_array(name, the_array, size); |
217 | } | 222 | } |
218 | 223 | ||
224 | bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata); | ||
219 | int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe); | 225 | int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe); |
220 | void fini_cc_regs(struct cc_drvdata *drvdata); | 226 | void fini_cc_regs(struct cc_drvdata *drvdata); |
221 | int cc_clk_on(struct cc_drvdata *drvdata); | 227 | int cc_clk_on(struct cc_drvdata *drvdata); |
diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h index d0764147573f..efe3e1d8b87b 100644 --- a/drivers/crypto/ccree/cc_host_regs.h +++ b/drivers/crypto/ccree/cc_host_regs.h | |||
@@ -114,6 +114,9 @@ | |||
114 | #define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL | 114 | #define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL |
115 | #define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL | 115 | #define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL |
116 | #define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL | 116 | #define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL |
117 | #define CC_NVM_IS_IDLE_REG_OFFSET 0x0A10UL | ||
118 | #define CC_NVM_IS_IDLE_VALUE_BIT_SHIFT 0x0UL | ||
119 | #define CC_NVM_IS_IDLE_VALUE_BIT_SIZE 0x1UL | ||
117 | #define CC_SECURITY_DISABLED_REG_OFFSET 0x0A1CUL | 120 | #define CC_SECURITY_DISABLED_REG_OFFSET 0x0A1CUL |
118 | #define CC_SECURITY_DISABLED_VALUE_BIT_SHIFT 0x0UL | 121 | #define CC_SECURITY_DISABLED_VALUE_BIT_SHIFT 0x0UL |
119 | #define CC_SECURITY_DISABLED_VALUE_BIT_SIZE 0x1UL | 122 | #define CC_SECURITY_DISABLED_VALUE_BIT_SIZE 0x1UL |
@@ -203,6 +206,23 @@ | |||
203 | #define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL | 206 | #define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL |
204 | #define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL | 207 | #define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL |
205 | #define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL | 208 | #define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL |
209 | #define CC_HOST_REMOVE_INPUT_PINS_REG_OFFSET 0x0A7CUL | ||
210 | #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_ENGINE_BIT_SHIFT 0x0UL | ||
211 | #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_ENGINE_BIT_SIZE 0x1UL | ||
212 | #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_MAC_ENGINE_BIT_SHIFT 0x1UL | ||
213 | #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_AES_MAC_ENGINE_BIT_SIZE 0x1UL | ||
214 | #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_GHASH_ENGINE_BIT_SHIFT 0x2UL | ||
215 | #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_GHASH_ENGINE_BIT_SIZE 0x1UL | ||
216 | #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_DES_ENGINE_BIT_SHIFT 0x3UL | ||
217 | #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_DES_ENGINE_BIT_SIZE 0x1UL | ||
218 | #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_HASH_ENGINE_BIT_SHIFT 0x4UL | ||
219 | #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_HASH_ENGINE_BIT_SIZE 0x1UL | ||
220 | #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM3_ENGINE_BIT_SHIFT 0x5UL | ||
221 | #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM3_ENGINE_BIT_SIZE 0x1UL | ||
222 | #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM4_ENGINE_BIT_SHIFT 0x6UL | ||
223 | #define CC_HOST_REMOVE_INPUT_PINS_REMOVE_SM4_ENGINE_BIT_SIZE 0x1UL | ||
224 | #define CC_HOST_REMOVE_INPUT_PINS_OTP_DISCONNECTED_BIT_SHIFT 0x7UL | ||
225 | #define CC_HOST_REMOVE_INPUT_PINS_OTP_DISCONNECTED_BIT_SIZE 0x1UL | ||
206 | // -------------------------------------- | 226 | // -------------------------------------- |
207 | // BLOCK: ID_REGISTERS | 227 | // BLOCK: ID_REGISTERS |
208 | // -------------------------------------- | 228 | // -------------------------------------- |
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c index 2dad9c9543c6..899a52f05b7a 100644 --- a/drivers/crypto/ccree/cc_pm.c +++ b/drivers/crypto/ccree/cc_pm.c | |||
@@ -49,6 +49,11 @@ int cc_pm_resume(struct device *dev) | |||
49 | dev_err(dev, "failed getting clock back on. We're toast.\n"); | 49 | dev_err(dev, "failed getting clock back on. We're toast.\n"); |
50 | return rc; | 50 | return rc; |
51 | } | 51 | } |
52 | /* wait for Cryptocell reset completion */ | ||
53 | if (!cc_wait_for_reset_completion(drvdata)) { | ||
54 | dev_err(dev, "Cryptocell reset not completed"); | ||
55 | return -EBUSY; | ||
56 | } | ||
52 | 57 | ||
53 | cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE); | 58 | cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE); |
54 | rc = init_cc_regs(drvdata, false); | 59 | rc = init_cc_regs(drvdata, false); |
@@ -101,6 +106,12 @@ int cc_pm_put_suspend(struct device *dev) | |||
101 | return rc; | 106 | return rc; |
102 | } | 107 | } |
103 | 108 | ||
109 | bool cc_pm_is_dev_suspended(struct device *dev) | ||
110 | { | ||
111 | /* check device state using the runtime PM API */ | ||
112 | return pm_runtime_suspended(dev); | ||
113 | } | ||
114 | |||
104 | int cc_pm_init(struct cc_drvdata *drvdata) | 115 | int cc_pm_init(struct cc_drvdata *drvdata) |
105 | { | 116 | { |
106 | struct device *dev = drvdata_to_dev(drvdata); | 117 | struct device *dev = drvdata_to_dev(drvdata); |
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h index 6190cdba5dad..a7d98a5da2e1 100644 --- a/drivers/crypto/ccree/cc_pm.h +++ b/drivers/crypto/ccree/cc_pm.h | |||
@@ -22,6 +22,7 @@ int cc_pm_suspend(struct device *dev); | |||
22 | int cc_pm_resume(struct device *dev); | 22 | int cc_pm_resume(struct device *dev); |
23 | int cc_pm_get(struct device *dev); | 23 | int cc_pm_get(struct device *dev); |
24 | int cc_pm_put_suspend(struct device *dev); | 24 | int cc_pm_put_suspend(struct device *dev); |
25 | bool cc_pm_is_dev_suspended(struct device *dev); | ||
25 | 26 | ||
26 | #else | 27 | #else |
27 | 28 | ||
@@ -54,6 +55,12 @@ static inline int cc_pm_put_suspend(struct device *dev) | |||
54 | return 0; | 55 | return 0; |
55 | } | 56 | } |
56 | 57 | ||
58 | static inline bool cc_pm_is_dev_suspended(struct device *dev) | ||
59 | { | ||
60 | /* if PM is not supported, the device is never suspended */ | ||
61 | return false; | ||
62 | } | ||
63 | |||
57 | #endif | 64 | #endif |
58 | 65 | ||
59 | #endif /*__POWER_MGR_H__*/ | 66 | #endif /*__POWER_MGR_H__*/ |
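Taken together, the ccree changes close a shared-interrupt hole: the ISR bails out with IRQ_NONE while the device is runtime-suspended (another device sharing the line may have fired), and the IRQ is only requested once the hardware has been verified out of reset. A hedged sketch of the resulting ISR shape, with my_dev as a placeholder type:

    static irqreturn_t my_isr(int irq, void *dev_id)
    {
        struct my_dev *dd = dev_id;

        /* Shared line: while suspended the interrupt cannot be ours, and
         * touching our registers would be unsafe anyway. */
        if (pm_runtime_suspended(dd->dev))
            return IRQ_NONE;

        /* ... read and acknowledge our interrupt status ... */
        return IRQ_HANDLED;
    }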
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h index 2d2f186674ba..4d9063a8b10b 100644 --- a/drivers/crypto/hisilicon/sec/sec_drv.h +++ b/drivers/crypto/hisilicon/sec/sec_drv.h | |||
@@ -1,4 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* Copyright (c) 2016-2017 Hisilicon Limited. */ | 2 | /* Copyright (c) 2016-2017 Hisilicon Limited. */ |
3 | 3 | ||
4 | #ifndef _SEC_DRV_H_ | 4 | #ifndef _SEC_DRV_H_ |
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 86c699c14f84..df43a2c6933b 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c | |||
@@ -398,6 +398,12 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) | |||
398 | 398 | ||
399 | /* Processing Engine configuration */ | 399 | /* Processing Engine configuration */ |
400 | 400 | ||
401 | /* Token & context configuration */ | ||
402 | val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES | | ||
403 | EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX | | ||
404 | EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX; | ||
405 | writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe)); | ||
406 | |||
401 | /* H/W capabilities selection */ | 407 | /* H/W capabilities selection */ |
402 | val = EIP197_FUNCTION_RSVD; | 408 | val = EIP197_FUNCTION_RSVD; |
403 | val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY; | 409 | val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY; |
@@ -589,9 +595,9 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv, | |||
589 | if (rdesc->result_data.error_code & 0x407f) { | 595 | if (rdesc->result_data.error_code & 0x407f) { |
590 | /* Fatal error (bits 0-7, 14) */ | 596 | /* Fatal error (bits 0-7, 14) */ |
591 | dev_err(priv->dev, | 597 | dev_err(priv->dev, |
592 | "cipher: result: result descriptor error (%d)\n", | 598 | "cipher: result: result descriptor error (0x%x)\n", |
593 | rdesc->result_data.error_code); | 599 | rdesc->result_data.error_code); |
594 | return -EIO; | 600 | return -EINVAL; |
595 | } else if (rdesc->result_data.error_code == BIT(9)) { | 601 | } else if (rdesc->result_data.error_code == BIT(9)) { |
596 | /* Authentication failed */ | 602 | /* Authentication failed */ |
597 | return -EBADMSG; | 603 | return -EBADMSG; |
@@ -720,11 +726,10 @@ handle_results: | |||
720 | } | 726 | } |
721 | 727 | ||
722 | acknowledge: | 728 | acknowledge: |
723 | if (i) { | 729 | if (i) |
724 | writel(EIP197_xDR_PROC_xD_PKT(i) | | 730 | writel(EIP197_xDR_PROC_xD_PKT(i) | |
725 | EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset), | 731 | EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset), |
726 | EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT); | 732 | EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT); |
727 | } | ||
728 | 733 | ||
729 | /* If the number of requests overflowed the counter, try to proceed more | 734 | /* If the number of requests overflowed the counter, try to proceed more |
730 | * requests. | 735 | * requests. |
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h index 65624a81f0fd..e0c202f33674 100644 --- a/drivers/crypto/inside-secure/safexcel.h +++ b/drivers/crypto/inside-secure/safexcel.h | |||
@@ -118,6 +118,7 @@ | |||
118 | #define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n))) | 118 | #define EIP197_PE_ICE_SCRATCH_CTRL(n) (0x0d04 + (0x2000 * (n))) |
119 | #define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n))) | 119 | #define EIP197_PE_ICE_FPP_CTRL(n) (0x0d80 + (0x2000 * (n))) |
120 | #define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n))) | 120 | #define EIP197_PE_ICE_RAM_CTRL(n) (0x0ff0 + (0x2000 * (n))) |
121 | #define EIP197_PE_EIP96_TOKEN_CTRL(n) (0x1000 + (0x2000 * (n))) | ||
121 | #define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n))) | 122 | #define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n))) |
122 | #define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n))) | 123 | #define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n))) |
123 | #define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n))) | 124 | #define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n))) |
@@ -249,6 +250,11 @@ | |||
249 | #define EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN BIT(0) | 250 | #define EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN BIT(0) |
250 | #define EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN BIT(1) | 251 | #define EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN BIT(1) |
251 | 252 | ||
253 | /* EIP197_PE_EIP96_TOKEN_CTRL */ | ||
254 | #define EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES BIT(16) | ||
255 | #define EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX BIT(19) | ||
256 | #define EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX BIT(20) | ||
257 | |||
252 | /* EIP197_PE_EIP96_FUNCTION_EN */ | 258 | /* EIP197_PE_EIP96_FUNCTION_EN */ |
253 | #define EIP197_FUNCTION_RSVD (BIT(6) | BIT(15) | BIT(20) | BIT(23)) | 259 | #define EIP197_FUNCTION_RSVD (BIT(6) | BIT(15) | BIT(20) | BIT(23)) |
254 | #define EIP197_PROTOCOL_HASH_ONLY BIT(0) | 260 | #define EIP197_PROTOCOL_HASH_ONLY BIT(0) |
@@ -333,6 +339,7 @@ struct safexcel_context_record { | |||
333 | #define CONTEXT_CONTROL_IV3 BIT(8) | 339 | #define CONTEXT_CONTROL_IV3 BIT(8) |
334 | #define CONTEXT_CONTROL_DIGEST_CNT BIT(9) | 340 | #define CONTEXT_CONTROL_DIGEST_CNT BIT(9) |
335 | #define CONTEXT_CONTROL_COUNTER_MODE BIT(10) | 341 | #define CONTEXT_CONTROL_COUNTER_MODE BIT(10) |
342 | #define CONTEXT_CONTROL_CRYPTO_STORE BIT(12) | ||
336 | #define CONTEXT_CONTROL_HASH_STORE BIT(19) | 343 | #define CONTEXT_CONTROL_HASH_STORE BIT(19) |
337 | 344 | ||
338 | /* The hash counter given to the engine in the context has a granularity of | 345 | /* The hash counter given to the engine in the context has a granularity of |
@@ -425,6 +432,10 @@ struct safexcel_token { | |||
425 | 432 | ||
426 | #define EIP197_TOKEN_HASH_RESULT_VERIFY BIT(16) | 433 | #define EIP197_TOKEN_HASH_RESULT_VERIFY BIT(16) |
427 | 434 | ||
435 | #define EIP197_TOKEN_CTX_OFFSET(x) (x) | ||
436 | #define EIP197_TOKEN_DIRECTION_EXTERNAL BIT(11) | ||
437 | #define EIP197_TOKEN_EXEC_IF_SUCCESSFUL (0x1 << 12) | ||
438 | |||
428 | #define EIP197_TOKEN_STAT_LAST_HASH BIT(0) | 439 | #define EIP197_TOKEN_STAT_LAST_HASH BIT(0) |
429 | #define EIP197_TOKEN_STAT_LAST_PACKET BIT(1) | 440 | #define EIP197_TOKEN_STAT_LAST_PACKET BIT(1) |
430 | #define EIP197_TOKEN_OPCODE_DIRECTION 0x0 | 441 | #define EIP197_TOKEN_OPCODE_DIRECTION 0x0 |
@@ -432,6 +443,7 @@ struct safexcel_token { | |||
432 | #define EIP197_TOKEN_OPCODE_NOOP EIP197_TOKEN_OPCODE_INSERT | 443 | #define EIP197_TOKEN_OPCODE_NOOP EIP197_TOKEN_OPCODE_INSERT |
433 | #define EIP197_TOKEN_OPCODE_RETRIEVE 0x4 | 444 | #define EIP197_TOKEN_OPCODE_RETRIEVE 0x4 |
434 | #define EIP197_TOKEN_OPCODE_VERIFY 0xd | 445 | #define EIP197_TOKEN_OPCODE_VERIFY 0xd |
446 | #define EIP197_TOKEN_OPCODE_CTX_ACCESS 0xe | ||
435 | #define EIP197_TOKEN_OPCODE_BYPASS GENMASK(3, 0) | 447 | #define EIP197_TOKEN_OPCODE_BYPASS GENMASK(3, 0) |
436 | 448 | ||
437 | static inline void eip197_noop_token(struct safexcel_token *token) | 449 | static inline void eip197_noop_token(struct safexcel_token *token) |
@@ -442,6 +454,8 @@ static inline void eip197_noop_token(struct safexcel_token *token) | |||
442 | 454 | ||
443 | /* Instructions */ | 455 | /* Instructions */ |
444 | #define EIP197_TOKEN_INS_INSERT_HASH_DIGEST 0x1c | 456 | #define EIP197_TOKEN_INS_INSERT_HASH_DIGEST 0x1c |
457 | #define EIP197_TOKEN_INS_ORIGIN_IV0 0x14 | ||
458 | #define EIP197_TOKEN_INS_ORIGIN_LEN(x) ((x) << 5) | ||
445 | #define EIP197_TOKEN_INS_TYPE_OUTPUT BIT(5) | 459 | #define EIP197_TOKEN_INS_TYPE_OUTPUT BIT(5) |
446 | #define EIP197_TOKEN_INS_TYPE_HASH BIT(6) | 460 | #define EIP197_TOKEN_INS_TYPE_HASH BIT(6) |
447 | #define EIP197_TOKEN_INS_TYPE_CRYTO BIT(7) | 461 | #define EIP197_TOKEN_INS_TYPE_CRYTO BIT(7) |
@@ -468,6 +482,7 @@ struct safexcel_control_data_desc { | |||
468 | 482 | ||
469 | #define EIP197_OPTION_MAGIC_VALUE BIT(0) | 483 | #define EIP197_OPTION_MAGIC_VALUE BIT(0) |
470 | #define EIP197_OPTION_64BIT_CTX BIT(1) | 484 | #define EIP197_OPTION_64BIT_CTX BIT(1) |
485 | #define EIP197_OPTION_RC_AUTO (0x2 << 3) | ||
471 | #define EIP197_OPTION_CTX_CTRL_IN_CMD BIT(8) | 486 | #define EIP197_OPTION_CTX_CTRL_IN_CMD BIT(8) |
472 | #define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10) | 487 | #define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10) |
473 | #define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9) | 488 | #define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9) |
@@ -629,7 +644,7 @@ struct safexcel_ahash_export_state { | |||
629 | u32 digest; | 644 | u32 digest; |
630 | 645 | ||
631 | u32 state[SHA512_DIGEST_SIZE / sizeof(u32)]; | 646 | u32 state[SHA512_DIGEST_SIZE / sizeof(u32)]; |
632 | u8 cache[SHA512_BLOCK_SIZE]; | 647 | u8 cache[SHA512_BLOCK_SIZE << 1]; |
633 | }; | 648 | }; |
634 | 649 | ||
635 | /* | 650 | /* |
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index de4be10b172f..8cdbdbe35681 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c | |||
@@ -51,6 +51,8 @@ struct safexcel_cipher_ctx { | |||
51 | 51 | ||
52 | struct safexcel_cipher_req { | 52 | struct safexcel_cipher_req { |
53 | enum safexcel_cipher_direction direction; | 53 | enum safexcel_cipher_direction direction; |
54 | /* Number of result descriptors associated with the request */ | ||
55 | unsigned int rdescs; | ||
54 | bool needs_inv; | 56 | bool needs_inv; |
55 | }; | 57 | }; |
56 | 58 | ||
@@ -59,27 +61,26 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, | |||
59 | u32 length) | 61 | u32 length) |
60 | { | 62 | { |
61 | struct safexcel_token *token; | 63 | struct safexcel_token *token; |
62 | unsigned offset = 0; | 64 | u32 offset = 0, block_sz = 0; |
63 | 65 | ||
64 | if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { | 66 | if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { |
65 | switch (ctx->alg) { | 67 | switch (ctx->alg) { |
66 | case SAFEXCEL_DES: | 68 | case SAFEXCEL_DES: |
67 | offset = DES_BLOCK_SIZE / sizeof(u32); | 69 | block_sz = DES_BLOCK_SIZE; |
68 | memcpy(cdesc->control_data.token, iv, DES_BLOCK_SIZE); | ||
69 | cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD; | 70 | cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD; |
70 | break; | 71 | break; |
71 | case SAFEXCEL_3DES: | 72 | case SAFEXCEL_3DES: |
72 | offset = DES3_EDE_BLOCK_SIZE / sizeof(u32); | 73 | block_sz = DES3_EDE_BLOCK_SIZE; |
73 | memcpy(cdesc->control_data.token, iv, DES3_EDE_BLOCK_SIZE); | ||
74 | cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD; | 74 | cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD; |
75 | break; | 75 | break; |
76 | |||
77 | case SAFEXCEL_AES: | 76 | case SAFEXCEL_AES: |
78 | offset = AES_BLOCK_SIZE / sizeof(u32); | 77 | block_sz = AES_BLOCK_SIZE; |
79 | memcpy(cdesc->control_data.token, iv, AES_BLOCK_SIZE); | ||
80 | cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; | 78 | cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; |
81 | break; | 79 | break; |
82 | } | 80 | } |
81 | |||
82 | offset = block_sz / sizeof(u32); | ||
83 | memcpy(cdesc->control_data.token, iv, block_sz); | ||
83 | } | 84 | } |
84 | 85 | ||
85 | token = (struct safexcel_token *)(cdesc->control_data.token + offset); | 86 | token = (struct safexcel_token *)(cdesc->control_data.token + offset); |
@@ -91,6 +92,25 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv, | |||
91 | token[0].instructions = EIP197_TOKEN_INS_LAST | | 92 | token[0].instructions = EIP197_TOKEN_INS_LAST | |
92 | EIP197_TOKEN_INS_TYPE_CRYTO | | 93 | EIP197_TOKEN_INS_TYPE_CRYTO | |
93 | EIP197_TOKEN_INS_TYPE_OUTPUT; | 94 | EIP197_TOKEN_INS_TYPE_OUTPUT; |
95 | |||
96 | if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { | ||
97 | u32 last = (EIP197_MAX_TOKENS - 1) - offset; | ||
98 | |||
99 | token[last].opcode = EIP197_TOKEN_OPCODE_CTX_ACCESS; | ||
100 | token[last].packet_length = EIP197_TOKEN_DIRECTION_EXTERNAL | | ||
101 | EIP197_TOKEN_EXEC_IF_SUCCESSFUL | | ||
102 | EIP197_TOKEN_CTX_OFFSET(0x2); | ||
103 | token[last].stat = EIP197_TOKEN_STAT_LAST_HASH | | ||
104 | EIP197_TOKEN_STAT_LAST_PACKET; | ||
105 | token[last].instructions = | ||
106 | EIP197_TOKEN_INS_ORIGIN_LEN(block_sz / sizeof(u32)) | | ||
107 | EIP197_TOKEN_INS_ORIGIN_IV0; | ||
108 | |||
109 | /* Store the updated IV values back in the internal context | ||
110 | * registers. | ||
111 | */ | ||
112 | cdesc->control_data.control1 |= CONTEXT_CONTROL_CRYPTO_STORE; | ||
113 | } | ||
94 | } | 114 | } |
95 | 115 | ||
96 | static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, | 116 | static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv, |
@@ -333,7 +353,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin | |||
333 | 353 | ||
334 | *ret = 0; | 354 | *ret = 0; |
335 | 355 | ||
336 | do { | 356 | if (unlikely(!sreq->rdescs)) |
357 | return 0; | ||
358 | |||
359 | while (sreq->rdescs--) { | ||
337 | rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); | 360 | rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); |
338 | if (IS_ERR(rdesc)) { | 361 | if (IS_ERR(rdesc)) { |
339 | dev_err(priv->dev, | 362 | dev_err(priv->dev, |
@@ -346,21 +369,15 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin | |||
346 | *ret = safexcel_rdesc_check_errors(priv, rdesc); | 369 | *ret = safexcel_rdesc_check_errors(priv, rdesc); |
347 | 370 | ||
348 | ndesc++; | 371 | ndesc++; |
349 | } while (!rdesc->last_seg); | 372 | } |
350 | 373 | ||
351 | safexcel_complete(priv, ring); | 374 | safexcel_complete(priv, ring); |
352 | 375 | ||
353 | if (src == dst) { | 376 | if (src == dst) { |
354 | dma_unmap_sg(priv->dev, src, | 377 | dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_BIDIRECTIONAL); |
355 | sg_nents_for_len(src, cryptlen), | ||
356 | DMA_BIDIRECTIONAL); | ||
357 | } else { | 378 | } else { |
358 | dma_unmap_sg(priv->dev, src, | 379 | dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_TO_DEVICE); |
359 | sg_nents_for_len(src, cryptlen), | 380 | dma_unmap_sg(priv->dev, dst, sg_nents(dst), DMA_FROM_DEVICE); |
360 | DMA_TO_DEVICE); | ||
361 | dma_unmap_sg(priv->dev, dst, | ||
362 | sg_nents_for_len(dst, cryptlen), | ||
363 | DMA_FROM_DEVICE); | ||
364 | } | 381 | } |
365 | 382 | ||
366 | *should_complete = true; | 383 | *should_complete = true; |
@@ -385,26 +402,21 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring, | |||
385 | int i, ret = 0; | 402 | int i, ret = 0; |
386 | 403 | ||
387 | if (src == dst) { | 404 | if (src == dst) { |
388 | nr_src = dma_map_sg(priv->dev, src, | 405 | nr_src = dma_map_sg(priv->dev, src, sg_nents(src), |
389 | sg_nents_for_len(src, totlen), | ||
390 | DMA_BIDIRECTIONAL); | 406 | DMA_BIDIRECTIONAL); |
391 | nr_dst = nr_src; | 407 | nr_dst = nr_src; |
392 | if (!nr_src) | 408 | if (!nr_src) |
393 | return -EINVAL; | 409 | return -EINVAL; |
394 | } else { | 410 | } else { |
395 | nr_src = dma_map_sg(priv->dev, src, | 411 | nr_src = dma_map_sg(priv->dev, src, sg_nents(src), |
396 | sg_nents_for_len(src, totlen), | ||
397 | DMA_TO_DEVICE); | 412 | DMA_TO_DEVICE); |
398 | if (!nr_src) | 413 | if (!nr_src) |
399 | return -EINVAL; | 414 | return -EINVAL; |
400 | 415 | ||
401 | nr_dst = dma_map_sg(priv->dev, dst, | 416 | nr_dst = dma_map_sg(priv->dev, dst, sg_nents(dst), |
402 | sg_nents_for_len(dst, totlen), | ||
403 | DMA_FROM_DEVICE); | 417 | DMA_FROM_DEVICE); |
404 | if (!nr_dst) { | 418 | if (!nr_dst) { |
405 | dma_unmap_sg(priv->dev, src, | 419 | dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE); |
406 | sg_nents_for_len(src, totlen), | ||
407 | DMA_TO_DEVICE); | ||
408 | return -EINVAL; | 420 | return -EINVAL; |
409 | } | 421 | } |
410 | } | 422 | } |
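The switch from sg_nents_for_len() to sg_nents() keeps the map and unmap sides symmetric: the DMA API expects dma_unmap_sg() to receive the same scatterlist and entry count that were passed to dma_map_sg(), not a length-bounded count that may be computed differently on each path. The basic contract, sketched with error paths trimmed:

    int nents = sg_nents(sgl);      /* whole list, not bounded by cryptlen */
    int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

    if (!mapped)
        return -EINVAL;
    /* ... program the 'mapped' entries into the engine ... */
    dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);  /* same nents as map */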
@@ -454,7 +466,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring, | |||
454 | 466 | ||
455 | /* result descriptors */ | 467 | /* result descriptors */ |
456 | for_each_sg(dst, sg, nr_dst, i) { | 468 | for_each_sg(dst, sg, nr_dst, i) { |
457 | bool first = !i, last = (i == nr_dst - 1); | 469 | bool first = !i, last = sg_is_last(sg); |
458 | u32 len = sg_dma_len(sg); | 470 | u32 len = sg_dma_len(sg); |
459 | 471 | ||
460 | rdesc = safexcel_add_rdesc(priv, ring, first, last, | 472 | rdesc = safexcel_add_rdesc(priv, ring, first, last, |
@@ -483,16 +495,10 @@ cdesc_rollback: | |||
483 | safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr); | 495 | safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr); |
484 | 496 | ||
485 | if (src == dst) { | 497 | if (src == dst) { |
486 | dma_unmap_sg(priv->dev, src, | 498 | dma_unmap_sg(priv->dev, src, nr_src, DMA_BIDIRECTIONAL); |
487 | sg_nents_for_len(src, totlen), | ||
488 | DMA_BIDIRECTIONAL); | ||
489 | } else { | 499 | } else { |
490 | dma_unmap_sg(priv->dev, src, | 500 | dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE); |
491 | sg_nents_for_len(src, totlen), | 501 | dma_unmap_sg(priv->dev, dst, nr_dst, DMA_FROM_DEVICE); |
492 | DMA_TO_DEVICE); | ||
493 | dma_unmap_sg(priv->dev, dst, | ||
494 | sg_nents_for_len(dst, totlen), | ||
495 | DMA_FROM_DEVICE); | ||
496 | } | 502 | } |
497 | 503 | ||
498 | return ret; | 504 | return ret; |
@@ -501,6 +507,7 @@ cdesc_rollback: | |||
501 | static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, | 507 | static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, |
502 | int ring, | 508 | int ring, |
503 | struct crypto_async_request *base, | 509 | struct crypto_async_request *base, |
510 | struct safexcel_cipher_req *sreq, | ||
504 | bool *should_complete, int *ret) | 511 | bool *should_complete, int *ret) |
505 | { | 512 | { |
506 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); | 513 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm); |
@@ -509,7 +516,10 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, | |||
509 | 516 | ||
510 | *ret = 0; | 517 | *ret = 0; |
511 | 518 | ||
512 | do { | 519 | if (unlikely(!sreq->rdescs)) |
520 | return 0; | ||
521 | |||
522 | while (sreq->rdescs--) { | ||
513 | rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); | 523 | rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); |
514 | if (IS_ERR(rdesc)) { | 524 | if (IS_ERR(rdesc)) { |
515 | dev_err(priv->dev, | 525 | dev_err(priv->dev, |
@@ -522,7 +532,7 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, | |||
522 | *ret = safexcel_rdesc_check_errors(priv, rdesc); | 532 | *ret = safexcel_rdesc_check_errors(priv, rdesc); |
523 | 533 | ||
524 | ndesc++; | 534 | ndesc++; |
525 | } while (!rdesc->last_seg); | 535 | } |
526 | 536 | ||
527 | safexcel_complete(priv, ring); | 537 | safexcel_complete(priv, ring); |
528 | 538 | ||
@@ -560,16 +570,35 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv, | |||
560 | { | 570 | { |
561 | struct skcipher_request *req = skcipher_request_cast(async); | 571 | struct skcipher_request *req = skcipher_request_cast(async); |
562 | struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); | 572 | struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); |
573 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(async->tfm); | ||
563 | int err; | 574 | int err; |
564 | 575 | ||
565 | if (sreq->needs_inv) { | 576 | if (sreq->needs_inv) { |
566 | sreq->needs_inv = false; | 577 | sreq->needs_inv = false; |
567 | err = safexcel_handle_inv_result(priv, ring, async, | 578 | err = safexcel_handle_inv_result(priv, ring, async, sreq, |
568 | should_complete, ret); | 579 | should_complete, ret); |
569 | } else { | 580 | } else { |
570 | err = safexcel_handle_req_result(priv, ring, async, req->src, | 581 | err = safexcel_handle_req_result(priv, ring, async, req->src, |
571 | req->dst, req->cryptlen, sreq, | 582 | req->dst, req->cryptlen, sreq, |
572 | should_complete, ret); | 583 | should_complete, ret); |
584 | |||
585 | if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { | ||
586 | u32 block_sz = 0; | ||
587 | |||
588 | switch (ctx->alg) { | ||
589 | case SAFEXCEL_DES: | ||
590 | block_sz = DES_BLOCK_SIZE; | ||
591 | break; | ||
592 | case SAFEXCEL_3DES: | ||
593 | block_sz = DES3_EDE_BLOCK_SIZE; | ||
594 | break; | ||
595 | case SAFEXCEL_AES: | ||
596 | block_sz = AES_BLOCK_SIZE; | ||
597 | break; | ||
598 | } | ||
599 | |||
600 | memcpy(req->iv, ctx->base.ctxr->data, block_sz); | ||
601 | } | ||
573 | } | 602 | } |
574 | 603 | ||
575 | return err; | 604 | return err; |
@@ -587,7 +616,7 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv, | |||
587 | 616 | ||
588 | if (sreq->needs_inv) { | 617 | if (sreq->needs_inv) { |
589 | sreq->needs_inv = false; | 618 | sreq->needs_inv = false; |
590 | err = safexcel_handle_inv_result(priv, ring, async, | 619 | err = safexcel_handle_inv_result(priv, ring, async, sreq, |
591 | should_complete, ret); | 620 | should_complete, ret); |
592 | } else { | 621 | } else { |
593 | err = safexcel_handle_req_result(priv, ring, async, req->src, | 622 | err = safexcel_handle_req_result(priv, ring, async, req->src, |
@@ -633,6 +662,8 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring, | |||
633 | ret = safexcel_send_req(async, ring, sreq, req->src, | 662 | ret = safexcel_send_req(async, ring, sreq, req->src, |
634 | req->dst, req->cryptlen, 0, 0, req->iv, | 663 | req->dst, req->cryptlen, 0, 0, req->iv, |
635 | commands, results); | 664 | commands, results); |
665 | |||
666 | sreq->rdescs = *results; | ||
636 | return ret; | 667 | return ret; |
637 | } | 668 | } |
638 | 669 | ||
@@ -655,6 +686,7 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring, | |||
655 | req->cryptlen, req->assoclen, | 686 | req->cryptlen, req->assoclen, |
656 | crypto_aead_authsize(tfm), req->iv, | 687 | crypto_aead_authsize(tfm), req->iv, |
657 | commands, results); | 688 | commands, results); |
689 | sreq->rdescs = *results; | ||
658 | return ret; | 690 | return ret; |
659 | } | 691 | } |
660 | 692 | ||
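The CTX_ACCESS token plus CONTEXT_CONTROL_CRYPTO_STORE make the engine write the updated IV back into the context record, and the result handler copies it out to req->iv for CBC. That honors the skcipher convention callers rely on for chaining; roughly, with scatterlist setup, waiting and error handling omitted:

    /* Two chained CBC calls must equal one call over the whole buffer;
     * the driver leaves the next IV in 'iv' after each request. */
    skcipher_request_set_crypt(req, src1, dst1, len1, iv);
    crypto_skcipher_encrypt(req);  /* iv := last ciphertext block of dst1 */

    skcipher_request_set_crypt(req, src2, dst2, len2, iv);
    crypto_skcipher_encrypt(req);  /* continues the same CBC stream */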
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index ac9282c1a5ec..a80a5e757b1f 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c | |||
@@ -41,19 +41,21 @@ struct safexcel_ahash_req { | |||
41 | u64 len[2]; | 41 | u64 len[2]; |
42 | u64 processed[2]; | 42 | u64 processed[2]; |
43 | 43 | ||
44 | u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); | 44 | u8 cache[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32)); |
45 | dma_addr_t cache_dma; | 45 | dma_addr_t cache_dma; |
46 | unsigned int cache_sz; | 46 | unsigned int cache_sz; |
47 | 47 | ||
48 | u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); | 48 | u8 cache_next[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32)); |
49 | }; | 49 | }; |
50 | 50 | ||
51 | static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req) | 51 | static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req) |
52 | { | 52 | { |
53 | if (req->len[1] > req->processed[1]) | 53 | u64 len, processed; |
54 | return 0xffffffff - (req->len[0] - req->processed[0]); | ||
55 | 54 | ||
56 | return req->len[0] - req->processed[0]; | 55 | len = (0xffffffff * req->len[1]) + req->len[0]; |
56 | processed = (0xffffffff * req->processed[1]) + req->processed[0]; | ||
57 | |||
58 | return len - processed; | ||
57 | } | 59 | } |
58 | 60 | ||
59 | static void safexcel_hash_token(struct safexcel_command_desc *cdesc, | 61 | static void safexcel_hash_token(struct safexcel_command_desc *cdesc, |
@@ -87,6 +89,9 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, | |||
87 | cdesc->control_data.control0 |= ctx->alg; | 89 | cdesc->control_data.control0 |= ctx->alg; |
88 | cdesc->control_data.control0 |= req->digest; | 90 | cdesc->control_data.control0 |= req->digest; |
89 | 91 | ||
92 | if (!req->finish) | ||
93 | cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH; | ||
94 | |||
90 | if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) { | 95 | if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) { |
91 | if (req->processed[0] || req->processed[1]) { | 96 | if (req->processed[0] || req->processed[1]) { |
92 | if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5) | 97 | if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5) |
@@ -105,9 +110,6 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, | |||
105 | cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH; | 110 | cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH; |
106 | } | 111 | } |
107 | 112 | ||
108 | if (!req->finish) | ||
109 | cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH; | ||
110 | |||
111 | /* | 113 | /* |
112 | * Copy the input digest if needed, and setup the context | 114 | * Copy the input digest if needed, and setup the context |
113 | * fields. Do this now as we need it to setup the first command | 115 | * fields. Do this now as we need it to setup the first command |
@@ -183,6 +185,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin | |||
183 | dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz, | 185 | dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz, |
184 | DMA_TO_DEVICE); | 186 | DMA_TO_DEVICE); |
185 | sreq->cache_dma = 0; | 187 | sreq->cache_dma = 0; |
188 | sreq->cache_sz = 0; | ||
186 | } | 189 | } |
187 | 190 | ||
188 | if (sreq->finish) | 191 | if (sreq->finish) |
@@ -209,11 +212,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, | |||
209 | struct safexcel_command_desc *cdesc, *first_cdesc = NULL; | 212 | struct safexcel_command_desc *cdesc, *first_cdesc = NULL; |
210 | struct safexcel_result_desc *rdesc; | 213 | struct safexcel_result_desc *rdesc; |
211 | struct scatterlist *sg; | 214 | struct scatterlist *sg; |
212 | int i, extra, n_cdesc = 0, ret = 0; | 215 | int i, extra = 0, n_cdesc = 0, ret = 0; |
213 | u64 queued, len, cache_len; | 216 | u64 queued, len, cache_len, cache_max; |
217 | |||
218 | cache_max = crypto_ahash_blocksize(ahash); | ||
219 | if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) | ||
220 | cache_max <<= 1; | ||
214 | 221 | ||
215 | queued = len = safexcel_queued_len(req); | 222 | queued = len = safexcel_queued_len(req); |
216 | if (queued <= crypto_ahash_blocksize(ahash)) | 223 | if (queued <= cache_max) |
217 | cache_len = queued; | 224 | cache_len = queued; |
218 | else | 225 | else |
219 | cache_len = queued - areq->nbytes; | 226 | cache_len = queued - areq->nbytes; |
@@ -223,26 +230,23 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, | |||
223 | * fit into full blocks, cache it for the next send() call. | 230 | * fit into full blocks, cache it for the next send() call. |
224 | */ | 231 | */ |
225 | extra = queued & (crypto_ahash_blocksize(ahash) - 1); | 232 | extra = queued & (crypto_ahash_blocksize(ahash) - 1); |
233 | |||
234 | if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC && | ||
235 | extra < crypto_ahash_blocksize(ahash)) | ||
236 | extra += crypto_ahash_blocksize(ahash); | ||
237 | |||
238 | /* If this is not the last request and the queued data | ||
239 | * is a multiple of a block, cache the last one for now. | ||
240 | */ | ||
226 | if (!extra) | 241 | if (!extra) |
227 | /* If this is not the last request and the queued data | ||
228 | * is a multiple of a block, cache the last one for now. | ||
229 | */ | ||
230 | extra = crypto_ahash_blocksize(ahash); | 242 | extra = crypto_ahash_blocksize(ahash); |
231 | 243 | ||
232 | if (extra) { | 244 | sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), |
233 | sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), | 245 | req->cache_next, extra, |
234 | req->cache_next, extra, | 246 | areq->nbytes - extra); |
235 | areq->nbytes - extra); | ||
236 | |||
237 | queued -= extra; | ||
238 | len -= extra; | ||
239 | 247 | ||
240 | if (!queued) { | 248 | queued -= extra; |
241 | *commands = 0; | 249 | len -= extra; |
242 | *results = 0; | ||
243 | return 0; | ||
244 | } | ||
245 | } | ||
246 | } | 250 | } |
247 | 251 | ||
248 | /* Add a command descriptor for the cached data, if any */ | 252 | /* Add a command descriptor for the cached data, if any */ |
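The doubled caches (SHA512_BLOCK_SIZE << 1 in both the request and the export state) exist because HMAC updates must always hold data back for the final operation, so up to two blocks can sit in the cache between update() calls. The keep-back rule, modeled as plain C:

    /* Trailing bytes kept cached after a non-final update. For HMAC, keep
     * at least one extra block so finish() always has data to process. */
    static unsigned int bytes_to_cache(unsigned int queued,
                                       unsigned int blocksize, int is_hmac)
    {
        unsigned int extra = queued & (blocksize - 1);

        if (is_hmac && extra < blocksize)
            extra += blocksize;
        if (!extra)             /* exact multiple: keep one block back */
            extra = blocksize;
        return extra;
    }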
@@ -269,8 +273,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, | |||
269 | } | 273 | } |
270 | 274 | ||
271 | /* Now handle the current ahash request buffer(s) */ | 275 | /* Now handle the current ahash request buffer(s) */ |
272 | req->nents = dma_map_sg(priv->dev, areq->src, | 276 | req->nents = dma_map_sg(priv->dev, areq->src, sg_nents(areq->src), |
273 | sg_nents_for_len(areq->src, areq->nbytes), | ||
274 | DMA_TO_DEVICE); | 277 | DMA_TO_DEVICE); |
275 | if (!req->nents) { | 278 | if (!req->nents) { |
276 | ret = -ENOMEM; | 279 | ret = -ENOMEM; |
@@ -345,6 +348,7 @@ unmap_cache: | |||
345 | if (req->cache_dma) { | 348 | if (req->cache_dma) { |
346 | dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz, | 349 | dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz, |
347 | DMA_TO_DEVICE); | 350 | DMA_TO_DEVICE); |
351 | req->cache_dma = 0; | ||
348 | req->cache_sz = 0; | 352 | req->cache_sz = 0; |
349 | } | 353 | } |
350 | 354 | ||
@@ -486,7 +490,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) | |||
486 | struct safexcel_inv_result result = {}; | 490 | struct safexcel_inv_result result = {}; |
487 | int ring = ctx->base.ring; | 491 | int ring = ctx->base.ring; |
488 | 492 | ||
489 | memset(req, 0, sizeof(struct ahash_request)); | 493 | memset(req, 0, EIP197_AHASH_REQ_SIZE); |
490 | 494 | ||
491 | /* create invalidation request */ | 495 | /* create invalidation request */ |
492 | init_completion(&result.completion); | 496 | init_completion(&result.completion); |
@@ -519,10 +523,9 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) | |||
519 | /* safexcel_ahash_cache: cache data until at least one request can be sent to | 523 | /* safexcel_ahash_cache: cache data until at least one request can be sent to |
520 | * the engine, i.e. when there is at least one block size in the pipe. | 524 |
521 | */ | 525 | */ |
522 | static int safexcel_ahash_cache(struct ahash_request *areq) | 526 | static int safexcel_ahash_cache(struct ahash_request *areq, u32 cache_max) |
523 | { | 527 | { |
524 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | 528 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
525 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | ||
526 | u64 queued, cache_len; | 529 | u64 queued, cache_len; |
527 | 530 | ||
528 | /* queued: everything accepted by the driver which will be handled by | 531 | /* queued: everything accepted by the driver which will be handled by |
@@ -539,7 +542,7 @@ static int safexcel_ahash_cache(struct ahash_request *areq) | |||
539 | * In case there aren't enough bytes to proceed (less than a | 542 |
540 | * block size), cache the data until we have enough. | 543 | * block size), cache the data until we have enough. |
541 | */ | 544 | */ |
542 | if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) { | 545 | if (cache_len + areq->nbytes <= cache_max) { |
543 | sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), | 546 | sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), |
544 | req->cache + cache_len, | 547 | req->cache + cache_len, |
545 | areq->nbytes, 0); | 548 | areq->nbytes, 0); |
@@ -599,6 +602,7 @@ static int safexcel_ahash_update(struct ahash_request *areq) | |||
599 | { | 602 | { |
600 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | 603 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
601 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | 604 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); |
605 | u32 cache_max; | ||
602 | 606 | ||
603 | /* If the request is 0 length, do nothing */ | 607 | /* If the request is 0 length, do nothing */ |
604 | if (!areq->nbytes) | 608 | if (!areq->nbytes) |
@@ -608,7 +612,11 @@ static int safexcel_ahash_update(struct ahash_request *areq) | |||
608 | if (req->len[0] < areq->nbytes) | 612 | if (req->len[0] < areq->nbytes) |
609 | req->len[1]++; | 613 | req->len[1]++; |
610 | 614 | ||
611 | safexcel_ahash_cache(areq); | 615 | cache_max = crypto_ahash_blocksize(ahash); |
616 | if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) | ||
617 | cache_max <<= 1; | ||
618 | |||
619 | safexcel_ahash_cache(areq, cache_max); | ||
612 | 620 | ||
613 | /* | 621 | /* |
614 | * We're not doing partial updates when performing an hmac request. | 622 | * We're not doing partial updates when performing an hmac request. |
@@ -621,7 +629,7 @@ static int safexcel_ahash_update(struct ahash_request *areq) | |||
621 | return safexcel_ahash_enqueue(areq); | 629 | return safexcel_ahash_enqueue(areq); |
622 | 630 | ||
623 | if (!req->last_req && | 631 | if (!req->last_req && |
624 | safexcel_queued_len(req) > crypto_ahash_blocksize(ahash)) | 632 | safexcel_queued_len(req) > cache_max) |
625 | return safexcel_ahash_enqueue(areq); | 633 | return safexcel_ahash_enqueue(areq); |
626 | 634 | ||
627 | return 0; | 635 | return 0; |
@@ -678,6 +686,11 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out) | |||
678 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | 686 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); |
679 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | 687 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
680 | struct safexcel_ahash_export_state *export = out; | 688 | struct safexcel_ahash_export_state *export = out; |
689 | u32 cache_sz; | ||
690 | |||
691 | cache_sz = crypto_ahash_blocksize(ahash); | ||
692 | if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) | ||
693 | cache_sz <<= 1; | ||
681 | 694 | ||
682 | export->len[0] = req->len[0]; | 695 | export->len[0] = req->len[0]; |
683 | export->len[1] = req->len[1]; | 696 | export->len[1] = req->len[1]; |
@@ -687,7 +700,7 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out) | |||
687 | export->digest = req->digest; | 700 | export->digest = req->digest; |
688 | 701 | ||
689 | memcpy(export->state, req->state, req->state_sz); | 702 | memcpy(export->state, req->state, req->state_sz); |
690 | memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash)); | 703 | memcpy(export->cache, req->cache, cache_sz); |
691 | 704 | ||
692 | return 0; | 705 | return 0; |
693 | } | 706 | } |
@@ -697,12 +710,17 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in) | |||
697 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | 710 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); |
698 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | 711 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); |
699 | const struct safexcel_ahash_export_state *export = in; | 712 | const struct safexcel_ahash_export_state *export = in; |
713 | u32 cache_sz; | ||
700 | int ret; | 714 | int ret; |
701 | 715 | ||
702 | ret = crypto_ahash_init(areq); | 716 | ret = crypto_ahash_init(areq); |
703 | if (ret) | 717 | if (ret) |
704 | return ret; | 718 | return ret; |
705 | 719 | ||
720 | cache_sz = crypto_ahash_blocksize(ahash); | ||
721 | if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) | ||
722 | cache_sz <<= 1; | ||
723 | |||
706 | req->len[0] = export->len[0]; | 724 | req->len[0] = export->len[0]; |
707 | req->len[1] = export->len[1]; | 725 | req->len[1] = export->len[1]; |
708 | req->processed[0] = export->processed[0]; | 726 | req->processed[0] = export->processed[0]; |
@@ -710,7 +728,7 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in) | |||
710 | 728 | ||
711 | req->digest = export->digest; | 729 | req->digest = export->digest; |
712 | 730 | ||
713 | memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash)); | 731 | memcpy(req->cache, export->cache, cache_sz); |
714 | memcpy(req->state, export->state, req->state_sz); | 732 | memcpy(req->state, export->state, req->state_sz); |
715 | 733 | ||
716 | return 0; | 734 | return 0; |
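The sizing rule these safexcel_hash.c hunks thread through the driver, one block of cache, doubled when the request is an HMAC so a full extra block can apparently be held back for the final HMAC processing, is now open-coded in update(), export() and import(). A hypothetical helper (not part of the patch) that states the rule once::

    /* Sketch: the cache sizing rule used by update/export/import above. */
    static u32 safexcel_cache_max(struct crypto_ahash *ahash, bool hmac)
    {
            u32 cache_max = crypto_ahash_blocksize(ahash);

            if (hmac)       /* HMAC keeps up to one extra block queued */
                    cache_max <<= 1;
            return cache_max;
    }

Each call site would then reduce to cache_max = safexcel_cache_max(ahash, req->digest == CONTEXT_CONTROL_DIGEST_HMAC);.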
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c index eb75fa684876..142bc3f5c45c 100644 --- a/drivers/crypto/inside-secure/safexcel_ring.c +++ b/drivers/crypto/inside-secure/safexcel_ring.c | |||
@@ -145,6 +145,9 @@ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *pr | |||
145 | (lower_32_bits(context) & GENMASK(31, 2)) >> 2; | 145 | (lower_32_bits(context) & GENMASK(31, 2)) >> 2; |
146 | cdesc->control_data.context_hi = upper_32_bits(context); | 146 | cdesc->control_data.context_hi = upper_32_bits(context); |
147 | 147 | ||
148 | if (priv->version == EIP197B || priv->version == EIP197D) | ||
149 | cdesc->control_data.options |= EIP197_OPTION_RC_AUTO; | ||
150 | |||
148 | /* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */ | 151 | /* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */ |
149 | cdesc->control_data.refresh = 2; | 152 | cdesc->control_data.refresh = 2; |
150 | 153 | ||
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index e5cf3a59c420..acedafe3fa98 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c | |||
@@ -100,7 +100,7 @@ struct buffer_desc { | |||
100 | u16 pkt_len; | 100 | u16 pkt_len; |
101 | u16 buf_len; | 101 | u16 buf_len; |
102 | #endif | 102 | #endif |
103 | u32 phys_addr; | 103 | dma_addr_t phys_addr; |
104 | u32 __reserved[4]; | 104 | u32 __reserved[4]; |
105 | struct buffer_desc *next; | 105 | struct buffer_desc *next; |
106 | enum dma_data_direction dir; | 106 | enum dma_data_direction dir; |
@@ -117,9 +117,9 @@ struct crypt_ctl { | |||
117 | u8 mode; /* NPE_OP_* operation mode */ | 117 | u8 mode; /* NPE_OP_* operation mode */ |
118 | #endif | 118 | #endif |
119 | u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */ | 119 | u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */ |
120 | u32 icv_rev_aes; /* icv or rev aes */ | 120 | dma_addr_t icv_rev_aes; /* icv or rev aes */ |
121 | u32 src_buf; | 121 | dma_addr_t src_buf; |
122 | u32 dst_buf; | 122 | dma_addr_t dst_buf; |
123 | #ifdef __ARMEB__ | 123 | #ifdef __ARMEB__ |
124 | u16 auth_offs; /* Authentication start offset */ | 124 | u16 auth_offs; /* Authentication start offset */ |
125 | u16 auth_len; /* Authentication data length */ | 125 | u16 auth_len; /* Authentication data length */ |
@@ -320,7 +320,8 @@ static struct crypt_ctl *get_crypt_desc_emerg(void) | |||
320 | } | 320 | } |
321 | } | 321 | } |
322 | 322 | ||
323 | static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys) | 323 | static void free_buf_chain(struct device *dev, struct buffer_desc *buf, |
324 | dma_addr_t phys) | ||
324 | { | 325 | { |
325 | while (buf) { | 326 | while (buf) { |
326 | struct buffer_desc *buf1; | 327 | struct buffer_desc *buf1; |
@@ -602,7 +603,7 @@ static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target, | |||
602 | struct buffer_desc *buf; | 603 | struct buffer_desc *buf; |
603 | int i; | 604 | int i; |
604 | u8 *pad; | 605 | u8 *pad; |
605 | u32 pad_phys, buf_phys; | 606 | dma_addr_t pad_phys, buf_phys; |
606 | 607 | ||
607 | BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN); | 608 | BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN); |
608 | pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys); | 609 | pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys); |
@@ -787,7 +788,7 @@ static struct buffer_desc *chainup_buffers(struct device *dev, | |||
787 | for (; nbytes > 0; sg = sg_next(sg)) { | 788 | for (; nbytes > 0; sg = sg_next(sg)) { |
788 | unsigned len = min(nbytes, sg->length); | 789 | unsigned len = min(nbytes, sg->length); |
789 | struct buffer_desc *next_buf; | 790 | struct buffer_desc *next_buf; |
790 | u32 next_buf_phys; | 791 | dma_addr_t next_buf_phys; |
791 | void *ptr; | 792 | void *ptr; |
792 | 793 | ||
793 | nbytes -= len; | 794 | nbytes -= len; |
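The ixp4xx hunks are a pure type correction: DMA handles returned by the mapping API were stored in u32 fields and parameters, which truncates them wherever dma_addr_t is 64 bits wide. A minimal sketch of the safe pattern (illustrative names, not from the driver)::

    #include <linux/dma-mapping.h>

    /* Keep the handle in dma_addr_t end to end; a u32 silently drops
     * the upper half on 64-bit-DMA configurations.
     */
    static int example_map_src(struct device *dev, void *cpu_buf,
                               size_t len, dma_addr_t *out)
    {
            dma_addr_t handle = dma_map_single(dev, cpu_buf, len,
                                               DMA_TO_DEVICE);

            if (dma_mapping_error(dev, handle))
                    return -ENOMEM;
            *out = handle;
            return 0;
    }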
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index bdc4c42d3ac8..f1fa637cb029 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c | |||
@@ -986,8 +986,6 @@ static int mxs_dcp_probe(struct platform_device *pdev) | |||
986 | struct device *dev = &pdev->dev; | 986 | struct device *dev = &pdev->dev; |
987 | struct dcp *sdcp = NULL; | 987 | struct dcp *sdcp = NULL; |
988 | int i, ret; | 988 | int i, ret; |
989 | |||
990 | struct resource *iores; | ||
991 | int dcp_vmi_irq, dcp_irq; | 989 | int dcp_vmi_irq, dcp_irq; |
992 | 990 | ||
993 | if (global_sdcp) { | 991 | if (global_sdcp) { |
@@ -995,7 +993,6 @@ static int mxs_dcp_probe(struct platform_device *pdev) | |||
995 | return -ENODEV; | 993 | return -ENODEV; |
996 | } | 994 | } |
997 | 995 | ||
998 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
999 | dcp_vmi_irq = platform_get_irq(pdev, 0); | 996 | dcp_vmi_irq = platform_get_irq(pdev, 0); |
1000 | if (dcp_vmi_irq < 0) { | 997 | if (dcp_vmi_irq < 0) { |
1001 | dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq); | 998 | dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq); |
@@ -1013,7 +1010,7 @@ static int mxs_dcp_probe(struct platform_device *pdev) | |||
1013 | return -ENOMEM; | 1010 | return -ENOMEM; |
1014 | 1011 | ||
1015 | sdcp->dev = dev; | 1012 | sdcp->dev = dev; |
1016 | sdcp->base = devm_ioremap_resource(dev, iores); | 1013 | sdcp->base = devm_platform_ioremap_resource(pdev, 0); |
1017 | if (IS_ERR(sdcp->base)) | 1014 | if (IS_ERR(sdcp->base)) |
1018 | return PTR_ERR(sdcp->base); | 1015 | return PTR_ERR(sdcp->base); |
1019 | 1016 | ||
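This probe() cleanup (repeated in sahara.c further down) folds the platform_get_resource() plus devm_ioremap_resource() pair into devm_platform_ioremap_resource(), which is also what lets the unused struct resource local go. The resulting skeleton, under assumed names::

    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            void __iomem *base;

            /* looks up IORESOURCE_MEM index 0 and maps it in one call */
            base = devm_platform_ioremap_resource(pdev, 0);
            if (IS_ERR(base))
                    return PTR_ERR(base);
            /* ... MMIO via readl(base + ...) / writel(..., base + ...) */
            return 0;
    }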
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c index 4acbc47973e9..e78ff5c65ed6 100644 --- a/drivers/crypto/nx/nx-842-powernv.c +++ b/drivers/crypto/nx/nx-842-powernv.c | |||
@@ -27,8 +27,6 @@ MODULE_ALIAS_CRYPTO("842-nx"); | |||
27 | #define WORKMEM_ALIGN (CRB_ALIGN) | 27 | #define WORKMEM_ALIGN (CRB_ALIGN) |
28 | #define CSB_WAIT_MAX (5000) /* ms */ | 28 | #define CSB_WAIT_MAX (5000) /* ms */ |
29 | #define VAS_RETRIES (10) | 29 | #define VAS_RETRIES (10) |
30 | /* # of requests allowed per RxFIFO at a time. 0 for unlimited */ | ||
31 | #define MAX_CREDITS_PER_RXFIFO (1024) | ||
32 | 30 | ||
33 | struct nx842_workmem { | 31 | struct nx842_workmem { |
34 | /* Below fields must be properly aligned */ | 32 | /* Below fields must be properly aligned */ |
@@ -812,7 +810,11 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id, | |||
812 | rxattr.lnotify_lpid = lpid; | 810 | rxattr.lnotify_lpid = lpid; |
813 | rxattr.lnotify_pid = pid; | 811 | rxattr.lnotify_pid = pid; |
814 | rxattr.lnotify_tid = tid; | 812 | rxattr.lnotify_tid = tid; |
815 | rxattr.wcreds_max = MAX_CREDITS_PER_RXFIFO; | 813 | /* |
814 | * Maximum RX window credits cannot exceed the number of CRBs in | ||
815 | * the RxFIFO; otherwise the machine can checkstop if it overruns. | ||
816 | */ | ||
817 | rxattr.wcreds_max = fifo_size / CRB_SIZE; | ||
816 | 818 | ||
817 | /* | 819 | /* |
818 | * Open a VAS receive window which is used to configure RxFIFO | 820 |
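With the fixed 1024-credit cap gone, wcreds_max now follows directly from the FIFO geometry, so the engine can never be handed more CRBs than the RxFIFO holds. Under assumed values of fifo_size = 32768 and CRB_SIZE = 128, for instance, the cap works out to 32768 / 128 = 256 credits::

    /* Sketch: one receive-window credit per CRB slot in the FIFO. */
    static unsigned int rxfifo_max_credits(size_t fifo_size, size_t crb_size)
    {
            return fifo_size / crb_size;    /* e.g. 32768 / 128 == 256 */
    }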
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index 428c273a1ab6..28817880c76d 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c | |||
@@ -569,9 +569,7 @@ static int nx_register_algs(void) | |||
569 | 569 | ||
570 | memset(&nx_driver.stats, 0, sizeof(struct nx_stats)); | 570 | memset(&nx_driver.stats, 0, sizeof(struct nx_stats)); |
571 | 571 | ||
572 | rc = NX_DEBUGFS_INIT(&nx_driver); | 572 | NX_DEBUGFS_INIT(&nx_driver); |
573 | if (rc) | ||
574 | goto out; | ||
575 | 573 | ||
576 | nx_driver.of.status = NX_OKAY; | 574 | nx_driver.of.status = NX_OKAY; |
577 | 575 | ||
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h index c3e54af18645..c6b5a3be02be 100644 --- a/drivers/crypto/nx/nx.h +++ b/drivers/crypto/nx/nx.h | |||
@@ -76,20 +76,12 @@ struct nx_stats { | |||
76 | atomic_t last_error_pid; | 76 | atomic_t last_error_pid; |
77 | }; | 77 | }; |
78 | 78 | ||
79 | struct nx_debugfs { | ||
80 | struct dentry *dfs_root; | ||
81 | struct dentry *dfs_aes_ops, *dfs_aes_bytes; | ||
82 | struct dentry *dfs_sha256_ops, *dfs_sha256_bytes; | ||
83 | struct dentry *dfs_sha512_ops, *dfs_sha512_bytes; | ||
84 | struct dentry *dfs_errors, *dfs_last_error, *dfs_last_error_pid; | ||
85 | }; | ||
86 | |||
87 | struct nx_crypto_driver { | 79 | struct nx_crypto_driver { |
88 | struct nx_stats stats; | 80 | struct nx_stats stats; |
89 | struct nx_of of; | 81 | struct nx_of of; |
90 | struct vio_dev *viodev; | 82 | struct vio_dev *viodev; |
91 | struct vio_driver viodriver; | 83 | struct vio_driver viodriver; |
92 | struct nx_debugfs dfs; | 84 | struct dentry *dfs_root; |
93 | }; | 85 | }; |
94 | 86 | ||
95 | #define NX_GCM4106_NONCE_LEN (4) | 87 | #define NX_GCM4106_NONCE_LEN (4) |
@@ -177,7 +169,7 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int, | |||
177 | #define NX_DEBUGFS_INIT(drv) nx_debugfs_init(drv) | 169 | #define NX_DEBUGFS_INIT(drv) nx_debugfs_init(drv) |
178 | #define NX_DEBUGFS_FINI(drv) nx_debugfs_fini(drv) | 170 | #define NX_DEBUGFS_FINI(drv) nx_debugfs_fini(drv) |
179 | 171 | ||
180 | int nx_debugfs_init(struct nx_crypto_driver *); | 172 | void nx_debugfs_init(struct nx_crypto_driver *); |
181 | void nx_debugfs_fini(struct nx_crypto_driver *); | 173 | void nx_debugfs_fini(struct nx_crypto_driver *); |
182 | #else | 174 | #else |
183 | #define NX_DEBUGFS_INIT(drv) (0) | 175 | #define NX_DEBUGFS_INIT(drv) (0) |
diff --git a/drivers/crypto/nx/nx_debugfs.c b/drivers/crypto/nx/nx_debugfs.c index 03e4f0363c6a..e0d44a5512ab 100644 --- a/drivers/crypto/nx/nx_debugfs.c +++ b/drivers/crypto/nx/nx_debugfs.c | |||
@@ -30,62 +30,37 @@ | |||
30 | * Documentation/ABI/testing/debugfs-pfo-nx-crypto | 30 | * Documentation/ABI/testing/debugfs-pfo-nx-crypto |
31 | */ | 31 | */ |
32 | 32 | ||
33 | int nx_debugfs_init(struct nx_crypto_driver *drv) | 33 | void nx_debugfs_init(struct nx_crypto_driver *drv) |
34 | { | 34 | { |
35 | struct nx_debugfs *dfs = &drv->dfs; | 35 | struct dentry *root; |
36 | 36 | ||
37 | dfs->dfs_root = debugfs_create_dir(NX_NAME, NULL); | 37 | root = debugfs_create_dir(NX_NAME, NULL); |
38 | drv->dfs_root = root; | ||
38 | 39 | ||
39 | dfs->dfs_aes_ops = | 40 | debugfs_create_u32("aes_ops", S_IRUSR | S_IRGRP | S_IROTH, |
40 | debugfs_create_u32("aes_ops", | 41 | root, (u32 *)&drv->stats.aes_ops); |
41 | S_IRUSR | S_IRGRP | S_IROTH, | 42 | debugfs_create_u32("sha256_ops", S_IRUSR | S_IRGRP | S_IROTH, |
42 | dfs->dfs_root, (u32 *)&drv->stats.aes_ops); | 43 | root, (u32 *)&drv->stats.sha256_ops); |
43 | dfs->dfs_sha256_ops = | 44 | debugfs_create_u32("sha512_ops", S_IRUSR | S_IRGRP | S_IROTH, |
44 | debugfs_create_u32("sha256_ops", | 45 | root, (u32 *)&drv->stats.sha512_ops); |
45 | S_IRUSR | S_IRGRP | S_IROTH, | 46 | debugfs_create_u64("aes_bytes", S_IRUSR | S_IRGRP | S_IROTH, |
46 | dfs->dfs_root, | 47 | root, (u64 *)&drv->stats.aes_bytes); |
47 | (u32 *)&drv->stats.sha256_ops); | 48 | debugfs_create_u64("sha256_bytes", S_IRUSR | S_IRGRP | S_IROTH, |
48 | dfs->dfs_sha512_ops = | 49 | root, (u64 *)&drv->stats.sha256_bytes); |
49 | debugfs_create_u32("sha512_ops", | 50 | debugfs_create_u64("sha512_bytes", S_IRUSR | S_IRGRP | S_IROTH, |
50 | S_IRUSR | S_IRGRP | S_IROTH, | 51 | root, (u64 *)&drv->stats.sha512_bytes); |
51 | dfs->dfs_root, | 52 | debugfs_create_u32("errors", S_IRUSR | S_IRGRP | S_IROTH, |
52 | (u32 *)&drv->stats.sha512_ops); | 53 | root, (u32 *)&drv->stats.errors); |
53 | dfs->dfs_aes_bytes = | 54 | debugfs_create_u32("last_error", S_IRUSR | S_IRGRP | S_IROTH, |
54 | debugfs_create_u64("aes_bytes", | 55 | root, (u32 *)&drv->stats.last_error); |
55 | S_IRUSR | S_IRGRP | S_IROTH, | 56 | debugfs_create_u32("last_error_pid", S_IRUSR | S_IRGRP | S_IROTH, |
56 | dfs->dfs_root, | 57 | root, (u32 *)&drv->stats.last_error_pid); |
57 | (u64 *)&drv->stats.aes_bytes); | ||
58 | dfs->dfs_sha256_bytes = | ||
59 | debugfs_create_u64("sha256_bytes", | ||
60 | S_IRUSR | S_IRGRP | S_IROTH, | ||
61 | dfs->dfs_root, | ||
62 | (u64 *)&drv->stats.sha256_bytes); | ||
63 | dfs->dfs_sha512_bytes = | ||
64 | debugfs_create_u64("sha512_bytes", | ||
65 | S_IRUSR | S_IRGRP | S_IROTH, | ||
66 | dfs->dfs_root, | ||
67 | (u64 *)&drv->stats.sha512_bytes); | ||
68 | dfs->dfs_errors = | ||
69 | debugfs_create_u32("errors", | ||
70 | S_IRUSR | S_IRGRP | S_IROTH, | ||
71 | dfs->dfs_root, (u32 *)&drv->stats.errors); | ||
72 | dfs->dfs_last_error = | ||
73 | debugfs_create_u32("last_error", | ||
74 | S_IRUSR | S_IRGRP | S_IROTH, | ||
75 | dfs->dfs_root, | ||
76 | (u32 *)&drv->stats.last_error); | ||
77 | dfs->dfs_last_error_pid = | ||
78 | debugfs_create_u32("last_error_pid", | ||
79 | S_IRUSR | S_IRGRP | S_IROTH, | ||
80 | dfs->dfs_root, | ||
81 | (u32 *)&drv->stats.last_error_pid); | ||
82 | return 0; | ||
83 | } | 58 | } |
84 | 59 | ||
85 | void | 60 | void |
86 | nx_debugfs_fini(struct nx_crypto_driver *drv) | 61 | nx_debugfs_fini(struct nx_crypto_driver *drv) |
87 | { | 62 | { |
88 | debugfs_remove_recursive(drv->dfs.dfs_root); | 63 | debugfs_remove_recursive(drv->dfs_root); |
89 | } | 64 | } |
90 | 65 | ||
91 | #endif | 66 | #endif |
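The nine cached dentry pointers could be deleted because nothing ever read them: debugfs creation calls need no error checking, and debugfs_remove_recursive() on the root tears the whole tree down. A condensed sketch of the resulting idiom, with illustrative names::

    #include <linux/debugfs.h>

    struct example_stats {
            u32 ops;
            u64 bytes;
    };

    static struct dentry *example_debugfs_init(struct example_stats *st)
    {
            struct dentry *root = debugfs_create_dir("example", NULL);

            debugfs_create_u32("ops", 0444, root, &st->ops);
            debugfs_create_u64("bytes", 0444, root, &st->bytes);
            return root;    /* later: debugfs_remove_recursive(root) */
    }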
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index c8d401646902..b50eb55f8f57 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c | |||
@@ -131,7 +131,6 @@ struct qat_alg_ablkcipher_ctx { | |||
131 | struct icp_qat_fw_la_bulk_req dec_fw_req; | 131 | struct icp_qat_fw_la_bulk_req dec_fw_req; |
132 | struct qat_crypto_instance *inst; | 132 | struct qat_crypto_instance *inst; |
133 | struct crypto_tfm *tfm; | 133 | struct crypto_tfm *tfm; |
134 | spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */ | ||
135 | }; | 134 | }; |
136 | 135 | ||
137 | static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) | 136 | static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg) |
@@ -223,6 +222,9 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash, | |||
223 | return -EFAULT; | 222 | return -EFAULT; |
224 | 223 | ||
225 | offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8); | 224 | offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8); |
225 | if (offset < 0) | ||
226 | return -EFAULT; | ||
227 | |||
226 | hash_state_out = (__be32 *)(hash->sha.state1 + offset); | 228 | hash_state_out = (__be32 *)(hash->sha.state1 + offset); |
227 | hash512_state_out = (__be64 *)hash_state_out; | 229 | hash512_state_out = (__be64 *)hash_state_out; |
228 | 230 | ||
@@ -253,7 +255,24 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash, | |||
253 | return 0; | 255 | return 0; |
254 | } | 256 | } |
255 | 257 | ||
256 | static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header) | 258 | static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header) |
259 | { | ||
260 | ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags, | ||
261 | ICP_QAT_FW_CIPH_IV_64BIT_PTR); | ||
262 | ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags, | ||
263 | ICP_QAT_FW_LA_UPDATE_STATE); | ||
264 | } | ||
265 | |||
266 | static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header) | ||
267 | { | ||
268 | ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags, | ||
269 | ICP_QAT_FW_CIPH_IV_16BYTE_DATA); | ||
270 | ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags, | ||
271 | ICP_QAT_FW_LA_NO_UPDATE_STATE); | ||
272 | } | ||
273 | |||
274 | static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header, | ||
275 | int aead) | ||
257 | { | 276 | { |
258 | header->hdr_flags = | 277 | header->hdr_flags = |
259 | ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET); | 278 | ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET); |
@@ -263,12 +282,12 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header) | |||
263 | QAT_COMN_PTR_TYPE_SGL); | 282 | QAT_COMN_PTR_TYPE_SGL); |
264 | ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags, | 283 | ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags, |
265 | ICP_QAT_FW_LA_PARTIAL_NONE); | 284 | ICP_QAT_FW_LA_PARTIAL_NONE); |
266 | ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags, | 285 | if (aead) |
267 | ICP_QAT_FW_CIPH_IV_16BYTE_DATA); | 286 | qat_alg_init_hdr_no_iv_updt(header); |
287 | else | ||
288 | qat_alg_init_hdr_iv_updt(header); | ||
268 | ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, | 289 | ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, |
269 | ICP_QAT_FW_LA_NO_PROTO); | 290 | ICP_QAT_FW_LA_NO_PROTO); |
270 | ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags, | ||
271 | ICP_QAT_FW_LA_NO_UPDATE_STATE); | ||
272 | } | 291 | } |
273 | 292 | ||
274 | static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm, | 293 | static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm, |
@@ -303,7 +322,7 @@ static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm, | |||
303 | return -EFAULT; | 322 | return -EFAULT; |
304 | 323 | ||
305 | /* Request setup */ | 324 | /* Request setup */ |
306 | qat_alg_init_common_hdr(header); | 325 | qat_alg_init_common_hdr(header, 1); |
307 | header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH; | 326 | header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH; |
308 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, | 327 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, |
309 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER); | 328 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER); |
@@ -390,7 +409,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm, | |||
390 | return -EFAULT; | 409 | return -EFAULT; |
391 | 410 | ||
392 | /* Request setup */ | 411 | /* Request setup */ |
393 | qat_alg_init_common_hdr(header); | 412 | qat_alg_init_common_hdr(header, 1); |
394 | header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER; | 413 | header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER; |
395 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, | 414 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, |
396 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER); | 415 | ICP_QAT_FW_LA_DIGEST_IN_BUFFER); |
@@ -454,7 +473,7 @@ static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx, | |||
454 | struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl; | 473 | struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl; |
455 | 474 | ||
456 | memcpy(cd->aes.key, key, keylen); | 475 | memcpy(cd->aes.key, key, keylen); |
457 | qat_alg_init_common_hdr(header); | 476 | qat_alg_init_common_hdr(header, 0); |
458 | header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER; | 477 | header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER; |
459 | cd_pars->u.s.content_desc_params_sz = | 478 | cd_pars->u.s.content_desc_params_sz = |
460 | sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3; | 479 | sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3; |
@@ -576,45 +595,52 @@ bad_key: | |||
576 | return -EINVAL; | 595 | return -EINVAL; |
577 | } | 596 | } |
578 | 597 | ||
579 | static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, | 598 | static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key, |
599 | unsigned int keylen) | ||
600 | { | ||
601 | struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); | ||
602 | |||
603 | memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); | ||
604 | memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); | ||
605 | memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); | ||
606 | memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); | ||
607 | |||
608 | return qat_alg_aead_init_sessions(tfm, key, keylen, | ||
609 | ICP_QAT_HW_CIPHER_CBC_MODE); | ||
610 | } | ||
611 | |||
612 | static int qat_alg_aead_newkey(struct crypto_aead *tfm, const uint8_t *key, | ||
580 | unsigned int keylen) | 613 | unsigned int keylen) |
581 | { | 614 | { |
582 | struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); | 615 | struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); |
616 | struct qat_crypto_instance *inst = NULL; | ||
617 | int node = get_current_node(); | ||
583 | struct device *dev; | 618 | struct device *dev; |
619 | int ret; | ||
584 | 620 | ||
585 | if (ctx->enc_cd) { | 621 | inst = qat_crypto_get_instance_node(node); |
586 | /* rekeying */ | 622 | if (!inst) |
587 | dev = &GET_DEV(ctx->inst->accel_dev); | 623 | return -EINVAL; |
588 | memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); | 624 | dev = &GET_DEV(inst->accel_dev); |
589 | memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); | 625 | ctx->inst = inst; |
590 | memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); | 626 | ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd), |
591 | memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); | 627 | &ctx->enc_cd_paddr, |
592 | } else { | 628 | GFP_ATOMIC); |
593 | /* new key */ | 629 | if (!ctx->enc_cd) { |
594 | int node = get_current_node(); | 630 | ret = -ENOMEM; |
595 | struct qat_crypto_instance *inst = | 631 | goto out_free_inst; |
596 | qat_crypto_get_instance_node(node); | 632 | } |
597 | if (!inst) { | 633 | ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd), |
598 | return -EINVAL; | 634 | &ctx->dec_cd_paddr, |
599 | } | 635 | GFP_ATOMIC); |
600 | 636 | if (!ctx->dec_cd) { | |
601 | dev = &GET_DEV(inst->accel_dev); | 637 | ret = -ENOMEM; |
602 | ctx->inst = inst; | 638 | goto out_free_enc; |
603 | ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd), | ||
604 | &ctx->enc_cd_paddr, | ||
605 | GFP_ATOMIC); | ||
606 | if (!ctx->enc_cd) { | ||
607 | return -ENOMEM; | ||
608 | } | ||
609 | ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd), | ||
610 | &ctx->dec_cd_paddr, | ||
611 | GFP_ATOMIC); | ||
612 | if (!ctx->dec_cd) { | ||
613 | goto out_free_enc; | ||
614 | } | ||
615 | } | 639 | } |
616 | if (qat_alg_aead_init_sessions(tfm, key, keylen, | 640 | |
617 | ICP_QAT_HW_CIPHER_CBC_MODE)) | 641 | ret = qat_alg_aead_init_sessions(tfm, key, keylen, |
642 | ICP_QAT_HW_CIPHER_CBC_MODE); | ||
643 | if (ret) | ||
618 | goto out_free_all; | 644 | goto out_free_all; |
619 | 645 | ||
620 | return 0; | 646 | return 0; |
@@ -629,7 +655,21 @@ out_free_enc: | |||
629 | dma_free_coherent(dev, sizeof(struct qat_alg_cd), | 655 | dma_free_coherent(dev, sizeof(struct qat_alg_cd), |
630 | ctx->enc_cd, ctx->enc_cd_paddr); | 656 | ctx->enc_cd, ctx->enc_cd_paddr); |
631 | ctx->enc_cd = NULL; | 657 | ctx->enc_cd = NULL; |
632 | return -ENOMEM; | 658 | out_free_inst: |
659 | ctx->inst = NULL; | ||
660 | qat_crypto_put_instance(inst); | ||
661 | return ret; | ||
662 | } | ||
663 | |||
664 | static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, | ||
665 | unsigned int keylen) | ||
666 | { | ||
667 | struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); | ||
668 | |||
669 | if (ctx->enc_cd) | ||
670 | return qat_alg_aead_rekey(tfm, key, keylen); | ||
671 | else | ||
672 | return qat_alg_aead_newkey(tfm, key, keylen); | ||
633 | } | 673 | } |
634 | 674 | ||
635 | static void qat_alg_free_bufl(struct qat_crypto_instance *inst, | 675 | static void qat_alg_free_bufl(struct qat_crypto_instance *inst, |
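After the split, setkey() itself is only a dispatcher: the slow path (instance lookup plus two coherent allocations) runs on the first key, and rekeying merely wipes and rebuilds the session material. Reduced to its shape (sketch, hypothetical names)::

    /* Sketch: ctx->enc_cd doubles as the "already keyed" marker. */
    static int example_setkey(struct example_ctx *ctx, const u8 *key,
                              unsigned int keylen)
    {
            if (ctx->enc_cd)
                    return example_rekey(ctx, key, keylen); /* fast */
            return example_newkey(ctx, key, keylen);        /* allocates */
    }

Note that the reworked error path also releases the instance reference (out_free_inst / qat_crypto_put_instance) that the old code leaked when an allocation failed.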
@@ -677,8 +717,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | |||
677 | dma_addr_t blp; | 717 | dma_addr_t blp; |
678 | dma_addr_t bloutp = 0; | 718 | dma_addr_t bloutp = 0; |
679 | struct scatterlist *sg; | 719 | struct scatterlist *sg; |
680 | size_t sz_out, sz = sizeof(struct qat_alg_buf_list) + | 720 | size_t sz_out, sz = struct_size(bufl, bufers, n + 1); |
681 | ((1 + n) * sizeof(struct qat_alg_buf)); | ||
682 | 721 | ||
683 | if (unlikely(!n)) | 722 | if (unlikely(!n)) |
684 | return -EINVAL; | 723 | return -EINVAL; |
@@ -715,8 +754,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, | |||
715 | struct qat_alg_buf *bufers; | 754 | struct qat_alg_buf *bufers; |
716 | 755 | ||
717 | n = sg_nents(sglout); | 756 | n = sg_nents(sglout); |
718 | sz_out = sizeof(struct qat_alg_buf_list) + | 757 | sz_out = struct_size(buflout, bufers, n + 1); |
719 | ((1 + n) * sizeof(struct qat_alg_buf)); | ||
720 | sg_nctr = 0; | 758 | sg_nctr = 0; |
721 | buflout = kzalloc_node(sz_out, GFP_ATOMIC, | 759 | buflout = kzalloc_node(sz_out, GFP_ATOMIC, |
722 | dev_to_node(&GET_DEV(inst->accel_dev))); | 760 | dev_to_node(&GET_DEV(inst->accel_dev))); |
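struct_size() replaces the open-coded sizeof(list) + (1 + n) * sizeof(element) arithmetic; it computes the same value but saturates to SIZE_MAX instead of wrapping if the multiplication overflows, turning a potential undersized allocation into a failed one. The idiom in isolation, with illustrative types::

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct demo_buf_list {
            u64 num_bufs;
            struct { u64 addr; u32 len; } bufs[];   /* flexible array */
    };

    static struct demo_buf_list *demo_alloc(unsigned int n, gfp_t gfp)
    {
            struct demo_buf_list *p;

            /* struct_size(p, bufs, n) ==
             *   sizeof(*p) + n * sizeof(p->bufs[0]), overflow-checked */
            p = kzalloc(struct_size(p, bufs, n), gfp);
            if (p)
                    p->num_bufs = n;
            return p;
    }

The n + 1 in the patch simply preserves the extra slot the old arithmetic reserved.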
@@ -801,11 +839,17 @@ static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp, | |||
801 | struct qat_crypto_instance *inst = ctx->inst; | 839 | struct qat_crypto_instance *inst = ctx->inst; |
802 | struct ablkcipher_request *areq = qat_req->ablkcipher_req; | 840 | struct ablkcipher_request *areq = qat_req->ablkcipher_req; |
803 | uint8_t stat_filed = qat_resp->comn_resp.comn_status; | 841 | uint8_t stat_filed = qat_resp->comn_resp.comn_status; |
842 | struct device *dev = &GET_DEV(ctx->inst->accel_dev); | ||
804 | int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); | 843 | int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed); |
805 | 844 | ||
806 | qat_alg_free_bufl(inst, qat_req); | 845 | qat_alg_free_bufl(inst, qat_req); |
807 | if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) | 846 | if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) |
808 | res = -EINVAL; | 847 | res = -EINVAL; |
848 | |||
849 | memcpy(areq->info, qat_req->iv, AES_BLOCK_SIZE); | ||
850 | dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv, | ||
851 | qat_req->iv_paddr); | ||
852 | |||
809 | areq->base.complete(&areq->base, res); | 853 | areq->base.complete(&areq->base, res); |
810 | } | 854 | } |
811 | 855 | ||
@@ -905,50 +949,49 @@ static int qat_alg_aead_enc(struct aead_request *areq) | |||
905 | return -EINPROGRESS; | 949 | return -EINPROGRESS; |
906 | } | 950 | } |
907 | 951 | ||
908 | static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm, | 952 | static int qat_alg_ablkcipher_rekey(struct qat_alg_ablkcipher_ctx *ctx, |
953 | const u8 *key, unsigned int keylen, | ||
954 | int mode) | ||
955 | { | ||
956 | memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); | ||
957 | memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); | ||
958 | memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); | ||
959 | memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); | ||
960 | |||
961 | return qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode); | ||
962 | } | ||
963 | |||
964 | static int qat_alg_ablkcipher_newkey(struct qat_alg_ablkcipher_ctx *ctx, | ||
909 | const u8 *key, unsigned int keylen, | 965 | const u8 *key, unsigned int keylen, |
910 | int mode) | 966 | int mode) |
911 | { | 967 | { |
912 | struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); | 968 | struct qat_crypto_instance *inst = NULL; |
913 | struct device *dev; | 969 | struct device *dev; |
970 | int node = get_current_node(); | ||
971 | int ret; | ||
914 | 972 | ||
915 | spin_lock(&ctx->lock); | 973 | inst = qat_crypto_get_instance_node(node); |
916 | if (ctx->enc_cd) { | 974 | if (!inst) |
917 | /* rekeying */ | 975 | return -EINVAL; |
918 | dev = &GET_DEV(ctx->inst->accel_dev); | 976 | dev = &GET_DEV(inst->accel_dev); |
919 | memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); | 977 | ctx->inst = inst; |
920 | memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); | 978 | ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd), |
921 | memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); | 979 | &ctx->enc_cd_paddr, |
922 | memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); | 980 | GFP_ATOMIC); |
923 | } else { | 981 | if (!ctx->enc_cd) { |
924 | /* new key */ | 982 | ret = -ENOMEM; |
925 | int node = get_current_node(); | 983 | goto out_free_instance; |
926 | struct qat_crypto_instance *inst = | 984 | } |
927 | qat_crypto_get_instance_node(node); | 985 | ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd), |
928 | if (!inst) { | 986 | &ctx->dec_cd_paddr, |
929 | spin_unlock(&ctx->lock); | 987 | GFP_ATOMIC); |
930 | return -EINVAL; | 988 | if (!ctx->dec_cd) { |
931 | } | 989 | ret = -ENOMEM; |
932 | 990 | goto out_free_enc; | |
933 | dev = &GET_DEV(inst->accel_dev); | ||
934 | ctx->inst = inst; | ||
935 | ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd), | ||
936 | &ctx->enc_cd_paddr, | ||
937 | GFP_ATOMIC); | ||
938 | if (!ctx->enc_cd) { | ||
939 | spin_unlock(&ctx->lock); | ||
940 | return -ENOMEM; | ||
941 | } | ||
942 | ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd), | ||
943 | &ctx->dec_cd_paddr, | ||
944 | GFP_ATOMIC); | ||
945 | if (!ctx->dec_cd) { | ||
946 | spin_unlock(&ctx->lock); | ||
947 | goto out_free_enc; | ||
948 | } | ||
949 | } | 991 | } |
950 | spin_unlock(&ctx->lock); | 992 | |
951 | if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode)) | 993 | ret = qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode); |
994 | if (ret) | ||
952 | goto out_free_all; | 995 | goto out_free_all; |
953 | 996 | ||
954 | return 0; | 997 | return 0; |
@@ -963,7 +1006,22 @@ out_free_enc: | |||
963 | dma_free_coherent(dev, sizeof(*ctx->enc_cd), | 1006 | dma_free_coherent(dev, sizeof(*ctx->enc_cd), |
964 | ctx->enc_cd, ctx->enc_cd_paddr); | 1007 | ctx->enc_cd, ctx->enc_cd_paddr); |
965 | ctx->enc_cd = NULL; | 1008 | ctx->enc_cd = NULL; |
966 | return -ENOMEM; | 1009 | out_free_instance: |
1010 | ctx->inst = NULL; | ||
1011 | qat_crypto_put_instance(inst); | ||
1012 | return ret; | ||
1013 | } | ||
1014 | |||
1015 | static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm, | ||
1016 | const u8 *key, unsigned int keylen, | ||
1017 | int mode) | ||
1018 | { | ||
1019 | struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
1020 | |||
1021 | if (ctx->enc_cd) | ||
1022 | return qat_alg_ablkcipher_rekey(ctx, key, keylen, mode); | ||
1023 | else | ||
1024 | return qat_alg_ablkcipher_newkey(ctx, key, keylen, mode); | ||
967 | } | 1025 | } |
968 | 1026 | ||
969 | static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm, | 1027 | static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm, |
@@ -995,11 +1053,23 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req) | |||
995 | struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req); | 1053 | struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req); |
996 | struct icp_qat_fw_la_cipher_req_params *cipher_param; | 1054 | struct icp_qat_fw_la_cipher_req_params *cipher_param; |
997 | struct icp_qat_fw_la_bulk_req *msg; | 1055 | struct icp_qat_fw_la_bulk_req *msg; |
1056 | struct device *dev = &GET_DEV(ctx->inst->accel_dev); | ||
998 | int ret, ctr = 0; | 1057 | int ret, ctr = 0; |
999 | 1058 | ||
1059 | if (req->nbytes == 0) | ||
1060 | return 0; | ||
1061 | |||
1062 | qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE, | ||
1063 | &qat_req->iv_paddr, GFP_ATOMIC); | ||
1064 | if (!qat_req->iv) | ||
1065 | return -ENOMEM; | ||
1066 | |||
1000 | ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req); | 1067 | ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req); |
1001 | if (unlikely(ret)) | 1068 | if (unlikely(ret)) { |
1069 | dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv, | ||
1070 | qat_req->iv_paddr); | ||
1002 | return ret; | 1071 | return ret; |
1072 | } | ||
1003 | 1073 | ||
1004 | msg = &qat_req->req; | 1074 | msg = &qat_req->req; |
1005 | *msg = ctx->enc_fw_req; | 1075 | *msg = ctx->enc_fw_req; |
@@ -1012,18 +1082,29 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req) | |||
1012 | cipher_param = (void *)&qat_req->req.serv_specif_rqpars; | 1082 | cipher_param = (void *)&qat_req->req.serv_specif_rqpars; |
1013 | cipher_param->cipher_length = req->nbytes; | 1083 | cipher_param->cipher_length = req->nbytes; |
1014 | cipher_param->cipher_offset = 0; | 1084 | cipher_param->cipher_offset = 0; |
1015 | memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE); | 1085 | cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr; |
1086 | memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE); | ||
1016 | do { | 1087 | do { |
1017 | ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); | 1088 | ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); |
1018 | } while (ret == -EAGAIN && ctr++ < 10); | 1089 | } while (ret == -EAGAIN && ctr++ < 10); |
1019 | 1090 | ||
1020 | if (ret == -EAGAIN) { | 1091 | if (ret == -EAGAIN) { |
1021 | qat_alg_free_bufl(ctx->inst, qat_req); | 1092 | qat_alg_free_bufl(ctx->inst, qat_req); |
1093 | dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv, | ||
1094 | qat_req->iv_paddr); | ||
1022 | return -EBUSY; | 1095 | return -EBUSY; |
1023 | } | 1096 | } |
1024 | return -EINPROGRESS; | 1097 | return -EINPROGRESS; |
1025 | } | 1098 | } |
1026 | 1099 | ||
1100 | static int qat_alg_ablkcipher_blk_encrypt(struct ablkcipher_request *req) | ||
1101 | { | ||
1102 | if (req->nbytes % AES_BLOCK_SIZE != 0) | ||
1103 | return -EINVAL; | ||
1104 | |||
1105 | return qat_alg_ablkcipher_encrypt(req); | ||
1106 | } | ||
1107 | |||
1027 | static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req) | 1108 | static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req) |
1028 | { | 1109 | { |
1029 | struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); | 1110 | struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); |
@@ -1032,11 +1113,23 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req) | |||
1032 | struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req); | 1113 | struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req); |
1033 | struct icp_qat_fw_la_cipher_req_params *cipher_param; | 1114 | struct icp_qat_fw_la_cipher_req_params *cipher_param; |
1034 | struct icp_qat_fw_la_bulk_req *msg; | 1115 | struct icp_qat_fw_la_bulk_req *msg; |
1116 | struct device *dev = &GET_DEV(ctx->inst->accel_dev); | ||
1035 | int ret, ctr = 0; | 1117 | int ret, ctr = 0; |
1036 | 1118 | ||
1119 | if (req->nbytes == 0) | ||
1120 | return 0; | ||
1121 | |||
1122 | qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE, | ||
1123 | &qat_req->iv_paddr, GFP_ATOMIC); | ||
1124 | if (!qat_req->iv) | ||
1125 | return -ENOMEM; | ||
1126 | |||
1037 | ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req); | 1127 | ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req); |
1038 | if (unlikely(ret)) | 1128 | if (unlikely(ret)) { |
1129 | dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv, | ||
1130 | qat_req->iv_paddr); | ||
1039 | return ret; | 1131 | return ret; |
1132 | } | ||
1040 | 1133 | ||
1041 | msg = &qat_req->req; | 1134 | msg = &qat_req->req; |
1042 | *msg = ctx->dec_fw_req; | 1135 | *msg = ctx->dec_fw_req; |
@@ -1049,18 +1142,28 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req) | |||
1049 | cipher_param = (void *)&qat_req->req.serv_specif_rqpars; | 1142 | cipher_param = (void *)&qat_req->req.serv_specif_rqpars; |
1050 | cipher_param->cipher_length = req->nbytes; | 1143 | cipher_param->cipher_length = req->nbytes; |
1051 | cipher_param->cipher_offset = 0; | 1144 | cipher_param->cipher_offset = 0; |
1052 | memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE); | 1145 | cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr; |
1146 | memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE); | ||
1053 | do { | 1147 | do { |
1054 | ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); | 1148 | ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); |
1055 | } while (ret == -EAGAIN && ctr++ < 10); | 1149 | } while (ret == -EAGAIN && ctr++ < 10); |
1056 | 1150 | ||
1057 | if (ret == -EAGAIN) { | 1151 | if (ret == -EAGAIN) { |
1058 | qat_alg_free_bufl(ctx->inst, qat_req); | 1152 | qat_alg_free_bufl(ctx->inst, qat_req); |
1153 | dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv, | ||
1154 | qat_req->iv_paddr); | ||
1059 | return -EBUSY; | 1155 | return -EBUSY; |
1060 | } | 1156 | } |
1061 | return -EINPROGRESS; | 1157 | return -EINPROGRESS; |
1062 | } | 1158 | } |
1063 | 1159 | ||
1160 | static int qat_alg_ablkcipher_blk_decrypt(struct ablkcipher_request *req) | ||
1161 | { | ||
1162 | if (req->nbytes % AES_BLOCK_SIZE != 0) | ||
1163 | return -EINVAL; | ||
1164 | |||
1165 | return qat_alg_ablkcipher_decrypt(req); | ||
1166 | } | ||
1064 | static int qat_alg_aead_init(struct crypto_aead *tfm, | 1167 | static int qat_alg_aead_init(struct crypto_aead *tfm, |
1065 | enum icp_qat_hw_auth_algo hash, | 1168 | enum icp_qat_hw_auth_algo hash, |
1066 | const char *hash_name) | 1169 | const char *hash_name) |
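The ablkcipher paths now hand the firmware a 64-bit IV pointer with state update enabled (qat_alg_init_hdr_iv_updt) instead of embedding the IV in the request, so the device can write the final IV back; that is what lets the completion callback propagate it into areq->info, as CBC chaining requires. The per-request lifecycle, condensed from the hunks above::

    /* submit: the IV lives in DMA-coherent memory the device can update */
    qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
                                     &qat_req->iv_paddr, GFP_ATOMIC);
    if (!qat_req->iv)
            return -ENOMEM;
    memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
    cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;

    /* completion: propagate the updated IV, then release it */
    memcpy(areq->info, qat_req->iv, AES_BLOCK_SIZE);
    dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv, qat_req->iv_paddr);

The new _blk_encrypt/_blk_decrypt wrappers additionally reject lengths that are not a multiple of AES_BLOCK_SIZE for the block modes, while CTR drops cra_blocksize to 1, since a stream mode has no such restriction.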
@@ -1119,7 +1222,6 @@ static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm) | |||
1119 | { | 1222 | { |
1120 | struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); | 1223 | struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); |
1121 | 1224 | ||
1122 | spin_lock_init(&ctx->lock); | ||
1123 | tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request); | 1225 | tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request); |
1124 | ctx->tfm = tfm; | 1226 | ctx->tfm = tfm; |
1125 | return 0; | 1227 | return 0; |
@@ -1221,8 +1323,8 @@ static struct crypto_alg qat_algs[] = { { | |||
1221 | .cra_u = { | 1323 | .cra_u = { |
1222 | .ablkcipher = { | 1324 | .ablkcipher = { |
1223 | .setkey = qat_alg_ablkcipher_cbc_setkey, | 1325 | .setkey = qat_alg_ablkcipher_cbc_setkey, |
1224 | .decrypt = qat_alg_ablkcipher_decrypt, | 1326 | .decrypt = qat_alg_ablkcipher_blk_decrypt, |
1225 | .encrypt = qat_alg_ablkcipher_encrypt, | 1327 | .encrypt = qat_alg_ablkcipher_blk_encrypt, |
1226 | .min_keysize = AES_MIN_KEY_SIZE, | 1328 | .min_keysize = AES_MIN_KEY_SIZE, |
1227 | .max_keysize = AES_MAX_KEY_SIZE, | 1329 | .max_keysize = AES_MAX_KEY_SIZE, |
1228 | .ivsize = AES_BLOCK_SIZE, | 1330 | .ivsize = AES_BLOCK_SIZE, |
@@ -1233,7 +1335,7 @@ static struct crypto_alg qat_algs[] = { { | |||
1233 | .cra_driver_name = "qat_aes_ctr", | 1335 | .cra_driver_name = "qat_aes_ctr", |
1234 | .cra_priority = 4001, | 1336 | .cra_priority = 4001, |
1235 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 1337 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, |
1236 | .cra_blocksize = AES_BLOCK_SIZE, | 1338 | .cra_blocksize = 1, |
1237 | .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx), | 1339 | .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx), |
1238 | .cra_alignmask = 0, | 1340 | .cra_alignmask = 0, |
1239 | .cra_type = &crypto_ablkcipher_type, | 1341 | .cra_type = &crypto_ablkcipher_type, |
@@ -1265,8 +1367,8 @@ static struct crypto_alg qat_algs[] = { { | |||
1265 | .cra_u = { | 1367 | .cra_u = { |
1266 | .ablkcipher = { | 1368 | .ablkcipher = { |
1267 | .setkey = qat_alg_ablkcipher_xts_setkey, | 1369 | .setkey = qat_alg_ablkcipher_xts_setkey, |
1268 | .decrypt = qat_alg_ablkcipher_decrypt, | 1370 | .decrypt = qat_alg_ablkcipher_blk_decrypt, |
1269 | .encrypt = qat_alg_ablkcipher_encrypt, | 1371 | .encrypt = qat_alg_ablkcipher_blk_encrypt, |
1270 | .min_keysize = 2 * AES_MIN_KEY_SIZE, | 1372 | .min_keysize = 2 * AES_MIN_KEY_SIZE, |
1271 | .max_keysize = 2 * AES_MAX_KEY_SIZE, | 1373 | .max_keysize = 2 * AES_MAX_KEY_SIZE, |
1272 | .ivsize = AES_BLOCK_SIZE, | 1374 | .ivsize = AES_BLOCK_SIZE, |
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h index dc0273fe3620..c77a80020cde 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.h +++ b/drivers/crypto/qat/qat_common/qat_crypto.h | |||
@@ -88,6 +88,8 @@ struct qat_crypto_request { | |||
88 | struct qat_crypto_request_buffs buf; | 88 | struct qat_crypto_request_buffs buf; |
89 | void (*cb)(struct icp_qat_fw_la_resp *resp, | 89 | void (*cb)(struct icp_qat_fw_la_resp *resp, |
90 | struct qat_crypto_request *req); | 90 | struct qat_crypto_request *req); |
91 | void *iv; | ||
92 | dma_addr_t iv_paddr; | ||
91 | }; | 93 | }; |
92 | 94 | ||
93 | #endif | 95 | #endif |
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 6b498a90181e..b0b8e3d48aef 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c | |||
@@ -1384,7 +1384,6 @@ MODULE_DEVICE_TABLE(of, sahara_dt_ids); | |||
1384 | static int sahara_probe(struct platform_device *pdev) | 1384 | static int sahara_probe(struct platform_device *pdev) |
1385 | { | 1385 | { |
1386 | struct sahara_dev *dev; | 1386 | struct sahara_dev *dev; |
1387 | struct resource *res; | ||
1388 | u32 version; | 1387 | u32 version; |
1389 | int irq; | 1388 | int irq; |
1390 | int err; | 1389 | int err; |
@@ -1398,8 +1397,7 @@ static int sahara_probe(struct platform_device *pdev) | |||
1398 | platform_set_drvdata(pdev, dev); | 1397 | platform_set_drvdata(pdev, dev); |
1399 | 1398 | ||
1400 | /* Get the base address */ | 1399 | /* Get the base address */ |
1401 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1400 | dev->regs_base = devm_platform_ioremap_resource(pdev, 0); |
1402 | dev->regs_base = devm_ioremap_resource(&pdev->dev, res); | ||
1403 | if (IS_ERR(dev->regs_base)) | 1401 | if (IS_ERR(dev->regs_base)) |
1404 | return PTR_ERR(dev->regs_base); | 1402 | return PTR_ERR(dev->regs_base); |
1405 | 1403 | ||
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile index ce77e38c77e0..518e0e0b11a9 100644 --- a/drivers/crypto/stm32/Makefile +++ b/drivers/crypto/stm32/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0-only | 1 | # SPDX-License-Identifier: GPL-2.0-only |
2 | obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32_crc32.o | 2 | obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32-crc32.o |
3 | obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o | 3 | obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o |
4 | obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o | 4 | obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o |
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32-crc32.c index 440c9f1bd006..440c9f1bd006 100644 --- a/drivers/crypto/stm32/stm32_crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c | |||
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 29519d1c403f..23061f2bc74b 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c | |||
@@ -349,7 +349,7 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev, | |||
349 | return -ETIMEDOUT; | 349 | return -ETIMEDOUT; |
350 | 350 | ||
351 | if ((hdev->flags & HASH_FLAGS_HMAC) && | 351 | if ((hdev->flags & HASH_FLAGS_HMAC) && |
352 | (hdev->flags & ~HASH_FLAGS_HMAC_KEY)) { | 352 | (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) { |
353 | hdev->flags |= HASH_FLAGS_HMAC_KEY; | 353 | hdev->flags |= HASH_FLAGS_HMAC_KEY; |
354 | stm32_hash_write_key(hdev); | 354 | stm32_hash_write_key(hdev); |
355 | if (stm32_hash_wait_busy(hdev)) | 355 | if (stm32_hash_wait_busy(hdev)) |
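The old stm32-hash test was inverted in a classic way: flags & ~HASH_FLAGS_HMAC_KEY is nonzero whenever any bit other than HMAC_KEY is set, and HASH_FLAGS_HMAC itself is set on this path, so the branch was taken regardless of whether the key had already been written. A standalone illustration (bit positions assumed)::

    #include <stdio.h>

    #define FLAG_HMAC       (1u << 0)       /* illustrative positions */
    #define FLAG_HMAC_KEY   (1u << 1)

    int main(void)
    {
            unsigned int flags = FLAG_HMAC | FLAG_HMAC_KEY; /* key sent */

            /* buggy: FLAG_HMAC survives the mask, so this stays true */
            printf("buggy: %d\n", !!(flags & ~FLAG_HMAC_KEY));  /* 1 */
            /* fixed: true only while the key has not been written */
            printf("fixed: %d\n", !(flags & FLAG_HMAC_KEY));    /* 0 */
            return 0;
    }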
@@ -447,8 +447,8 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev, | |||
447 | 447 | ||
448 | dma_async_issue_pending(hdev->dma_lch); | 448 | dma_async_issue_pending(hdev->dma_lch); |
449 | 449 | ||
450 | if (!wait_for_completion_interruptible_timeout(&hdev->dma_completion, | 450 | if (!wait_for_completion_timeout(&hdev->dma_completion, |
451 | msecs_to_jiffies(100))) | 451 | msecs_to_jiffies(100))) |
452 | err = -ETIMEDOUT; | 452 | err = -ETIMEDOUT; |
453 | 453 | ||
454 | if (dma_async_is_tx_complete(hdev->dma_lch, cookie, | 454 | if (dma_async_is_tx_complete(hdev->dma_lch, cookie, |
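The companion fix matters because wait_for_completion_interruptible_timeout() returns -ERESTARTSYS when a signal arrives; that nonzero value passes the !wait_...() test unchanged, so an interrupted wait was indistinguishable from a completed DMA transfer. The non-interruptible variant can only return 0 (timeout) or the jiffies remaining, which the existing check handles correctly::

    /* returns 0 on timeout, otherwise time left; never negative */
    if (!wait_for_completion_timeout(&hdev->dma_completion,
                                     msecs_to_jiffies(100)))
            err = -ETIMEDOUT;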
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c index 7b0c42882830..4ab14d58e85b 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | |||
@@ -12,7 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | #include "sun4i-ss.h" | 13 | #include "sun4i-ss.h" |
14 | 14 | ||
15 | static int sun4i_ss_opti_poll(struct skcipher_request *areq) | 15 | static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq) |
16 | { | 16 | { |
17 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); | 17 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); |
18 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); | 18 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); |
@@ -114,6 +114,29 @@ release_ss: | |||
114 | return err; | 114 | return err; |
115 | } | 115 | } |
116 | 116 | ||
117 | |||
118 | static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq) | ||
119 | { | ||
120 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); | ||
121 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); | ||
122 | struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq); | ||
123 | SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm); | ||
124 | int err; | ||
125 | |||
126 | skcipher_request_set_sync_tfm(subreq, op->fallback_tfm); | ||
127 | skcipher_request_set_callback(subreq, areq->base.flags, NULL, | ||
128 | NULL); | ||
129 | skcipher_request_set_crypt(subreq, areq->src, areq->dst, | ||
130 | areq->cryptlen, areq->iv); | ||
131 | if (ctx->mode & SS_DECRYPTION) | ||
132 | err = crypto_skcipher_decrypt(subreq); | ||
133 | else | ||
134 | err = crypto_skcipher_encrypt(subreq); | ||
135 | skcipher_request_zero(subreq); | ||
136 | |||
137 | return err; | ||
138 | } | ||
139 | |||
117 | /* Generic function that supports SGs whose size is not a multiple of 4 */ | 140 |
118 | static int sun4i_ss_cipher_poll(struct skcipher_request *areq) | 141 | static int sun4i_ss_cipher_poll(struct skcipher_request *areq) |
119 | { | 142 | { |
@@ -140,8 +163,6 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) | |||
140 | unsigned int todo; | 163 | unsigned int todo; |
141 | struct sg_mapping_iter mi, mo; | 164 | struct sg_mapping_iter mi, mo; |
142 | unsigned int oi, oo; /* offset for in and out */ | 165 | unsigned int oi, oo; /* offset for in and out */ |
143 | char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */ | ||
144 | char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */ | ||
145 | unsigned int ob = 0; /* offset in buf */ | 166 | unsigned int ob = 0; /* offset in buf */ |
146 | unsigned int obo = 0; /* offset in bufo*/ | 167 | unsigned int obo = 0; /* offset in bufo*/ |
147 | unsigned int obl = 0; /* length of data in bufo */ | 168 | unsigned int obl = 0; /* length of data in bufo */ |
@@ -178,20 +199,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) | |||
178 | if (no_chunk == 1 && !need_fallback) | 199 | if (no_chunk == 1 && !need_fallback) |
179 | return sun4i_ss_opti_poll(areq); | 200 | return sun4i_ss_opti_poll(areq); |
180 | 201 | ||
181 | if (need_fallback) { | 202 | if (need_fallback) |
182 | SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm); | 203 | return sun4i_ss_cipher_poll_fallback(areq); |
183 | skcipher_request_set_sync_tfm(subreq, op->fallback_tfm); | ||
184 | skcipher_request_set_callback(subreq, areq->base.flags, NULL, | ||
185 | NULL); | ||
186 | skcipher_request_set_crypt(subreq, areq->src, areq->dst, | ||
187 | areq->cryptlen, areq->iv); | ||
188 | if (ctx->mode & SS_DECRYPTION) | ||
189 | err = crypto_skcipher_decrypt(subreq); | ||
190 | else | ||
191 | err = crypto_skcipher_encrypt(subreq); | ||
192 | skcipher_request_zero(subreq); | ||
193 | return err; | ||
194 | } | ||
195 | 204 | ||
196 | spin_lock_irqsave(&ss->slock, flags); | 205 | spin_lock_irqsave(&ss->slock, flags); |
197 | 206 | ||
@@ -224,6 +233,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) | |||
224 | 233 | ||
225 | while (oleft) { | 234 | while (oleft) { |
226 | if (ileft) { | 235 | if (ileft) { |
236 | char buf[4 * SS_RX_MAX]; /* buffer to linearize the SG src */ | ||
237 | |||
227 | /* | 238 | /* |
228 | * todo is the number of consecutive 4-byte words that we | 239 |
229 | * can read from the current SG | 240 |
@@ -281,6 +292,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) | |||
281 | oo = 0; | 292 | oo = 0; |
282 | } | 293 | } |
283 | } else { | 294 | } else { |
295 | char bufo[4 * SS_TX_MAX]; /* buffer to linearize the SG dst */ | ||
296 | |||
284 | /* | 297 | /* |
285 | * read obl bytes into bufo; read as much as possible to | 298 |
286 | * empty the device | 299 |
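Three coordinated changes shrink sun4i_ss_cipher_poll()'s stack frame: the fallback path moves into its own helper, the helpers are marked noinline_for_stack so their frames are not merged back into the caller, and the two large linearization buffers (4 * SS_RX_MAX and 4 * SS_TX_MAX bytes) move into the mutually exclusive branches that use them, which lets the compiler overlay their storage. The general shape, as a sketch with an assumed buffer size::

    #include <linux/compiler.h>
    #include <linux/string.h>

    static int noinline_for_stack slow_path(void)
    {
            char buf[512];                  /* charged to this frame only */

            memset(buf, 0, sizeof(buf));    /* ... work on buf ... */
            return buf[0];
    }

    static int dispatch(bool need_fallback)
    {
            if (need_fallback)
                    return slow_path();
            return 0;   /* fast path never reserves slow_path()'s buffer */
    }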
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index fbc7bf9d7380..c9d686a0e805 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -265,11 +265,11 @@ static int init_device(struct device *dev) | |||
265 | * callback must check err and feedback in descriptor header | 265 | * callback must check err and feedback in descriptor header |
266 | * for device processing status. | 266 | * for device processing status. |
267 | */ | 267 | */ |
268 | int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, | 268 | static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, |
269 | void (*callback)(struct device *dev, | 269 | void (*callback)(struct device *dev, |
270 | struct talitos_desc *desc, | 270 | struct talitos_desc *desc, |
271 | void *context, int error), | 271 | void *context, int error), |
272 | void *context) | 272 | void *context) |
273 | { | 273 | { |
274 | struct talitos_private *priv = dev_get_drvdata(dev); | 274 | struct talitos_private *priv = dev_get_drvdata(dev); |
275 | struct talitos_request *request; | 275 | struct talitos_request *request; |
@@ -319,7 +319,21 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, | |||
319 | 319 | ||
320 | return -EINPROGRESS; | 320 | return -EINPROGRESS; |
321 | } | 321 | } |
322 | EXPORT_SYMBOL(talitos_submit); | 322 | |
323 | static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1) | ||
324 | { | ||
325 | struct talitos_edesc *edesc; | ||
326 | |||
327 | if (!is_sec1) | ||
328 | return request->desc->hdr; | ||
329 | |||
330 | if (!request->desc->next_desc) | ||
331 | return request->desc->hdr1; | ||
332 | |||
333 | edesc = container_of(request->desc, struct talitos_edesc, desc); | ||
334 | |||
335 | return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1; | ||
336 | } | ||
323 | 337 | ||
324 | /* | 338 | /* |
325 | * process what was done, notify callback of error if not | 339 | * process what was done, notify callback of error if not |
@@ -342,12 +356,7 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) | |||
342 | 356 | ||
343 | /* descriptors with their done bits set don't get the error */ | 357 | /* descriptors with their done bits set don't get the error */ |
344 | rmb(); | 358 | rmb(); |
345 | if (!is_sec1) | 359 | hdr = get_request_hdr(request, is_sec1); |
346 | hdr = request->desc->hdr; | ||
347 | else if (request->desc->next_desc) | ||
348 | hdr = (request->desc + 1)->hdr1; | ||
349 | else | ||
350 | hdr = request->desc->hdr1; | ||
351 | 360 | ||
352 | if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE) | 361 | if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE) |
353 | status = 0; | 362 | status = 0; |
@@ -477,8 +486,14 @@ static u32 current_desc_hdr(struct device *dev, int ch) | |||
477 | } | 486 | } |
478 | } | 487 | } |
479 | 488 | ||
480 | if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) | 489 | if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) { |
481 | return (priv->chan[ch].fifo[iter].desc + 1)->hdr; | 490 | struct talitos_edesc *edesc; |
491 | |||
492 | edesc = container_of(priv->chan[ch].fifo[iter].desc, | ||
493 | struct talitos_edesc, desc); | ||
494 | return ((struct talitos_desc *) | ||
495 | (edesc->buf + edesc->dma_len))->hdr; | ||
496 | } | ||
482 | 497 | ||
483 | return priv->chan[ch].fifo[iter].desc->hdr; | 498 | return priv->chan[ch].fifo[iter].desc->hdr; |
484 | } | 499 | } |
@@ -824,7 +839,11 @@ static void talitos_unregister_rng(struct device *dev) | |||
824 | * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP | 839 | * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP |
825 | */ | 840 | */ |
826 | #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1) | 841 | #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1) |
842 | #ifdef CONFIG_CRYPTO_DEV_TALITOS2 | ||
827 | #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE) | 843 | #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE) |
844 | #else | ||
845 | #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE) | ||
846 | #endif | ||
828 | #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ | 847 | #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ |
829 | 848 | ||
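The new conditional shrinks the bounce-key bound for SEC1-only builds: without SEC2+ support, SHA-384/512 (and its 128-byte HMAC block) can never be requested, so the composite authenc key fits in AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE. The same compile-time choice could be written without the preprocessor ladder, for instance (a sketch, not the form the patch uses):

```c
#define EXAMPLE_MAX_KEY_SIZE						\
	(AES_MAX_KEY_SIZE +						\
	 (IS_ENABLED(CONFIG_CRYPTO_DEV_TALITOS2) ?			\
		SHA512_BLOCK_SIZE : SHA256_BLOCK_SIZE))
```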
830 | struct talitos_ctx { | 849 | struct talitos_ctx { |
@@ -948,36 +967,6 @@ badkey: | |||
948 | goto out; | 967 | goto out; |
949 | } | 968 | } |
950 | 969 | ||
951 | /* | ||
952 | * talitos_edesc - s/w-extended descriptor | ||
953 | * @src_nents: number of segments in input scatterlist | ||
954 | * @dst_nents: number of segments in output scatterlist | ||
955 | * @icv_ool: whether ICV is out-of-line | ||
956 | * @iv_dma: dma address of iv for checking continuity and link table | ||
957 | * @dma_len: length of dma mapped link_tbl space | ||
958 | * @dma_link_tbl: bus physical address of link_tbl/buf | ||
959 | * @desc: h/w descriptor | ||
960 | * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2) | ||
961 | * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1) | ||
962 | * | ||
963 | * if decrypting (with authcheck), or either one of src_nents or dst_nents | ||
964 | * is greater than 1, an integrity check value is concatenated to the end | ||
965 | * of link_tbl data | ||
966 | */ | ||
967 | struct talitos_edesc { | ||
968 | int src_nents; | ||
969 | int dst_nents; | ||
970 | bool icv_ool; | ||
971 | dma_addr_t iv_dma; | ||
972 | int dma_len; | ||
973 | dma_addr_t dma_link_tbl; | ||
974 | struct talitos_desc desc; | ||
975 | union { | ||
976 | struct talitos_ptr link_tbl[0]; | ||
977 | u8 buf[0]; | ||
978 | }; | ||
979 | }; | ||
980 | |||
981 | static void talitos_sg_unmap(struct device *dev, | 970 | static void talitos_sg_unmap(struct device *dev, |
982 | struct talitos_edesc *edesc, | 971 | struct talitos_edesc *edesc, |
983 | struct scatterlist *src, | 972 | struct scatterlist *src, |
@@ -1008,11 +997,13 @@ static void talitos_sg_unmap(struct device *dev, | |||
1008 | 997 | ||
1009 | static void ipsec_esp_unmap(struct device *dev, | 998 | static void ipsec_esp_unmap(struct device *dev, |
1010 | struct talitos_edesc *edesc, | 999 | struct talitos_edesc *edesc, |
1011 | struct aead_request *areq) | 1000 | struct aead_request *areq, bool encrypt) |
1012 | { | 1001 | { |
1013 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); | 1002 | struct crypto_aead *aead = crypto_aead_reqtfm(areq); |
1014 | struct talitos_ctx *ctx = crypto_aead_ctx(aead); | 1003 | struct talitos_ctx *ctx = crypto_aead_ctx(aead); |
1015 | unsigned int ivsize = crypto_aead_ivsize(aead); | 1004 | unsigned int ivsize = crypto_aead_ivsize(aead); |
1005 | unsigned int authsize = crypto_aead_authsize(aead); | ||
1006 | unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize); | ||
1016 | bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP; | 1007 | bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP; |
1017 | struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3]; | 1008 | struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3]; |
1018 | 1009 | ||
@@ -1021,8 +1012,8 @@ static void ipsec_esp_unmap(struct device *dev, | |||
1021 | DMA_FROM_DEVICE); | 1012 | DMA_FROM_DEVICE); |
1022 | unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE); | 1013 | unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE); |
1023 | 1014 | ||
1024 | talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, | 1015 | talitos_sg_unmap(dev, edesc, areq->src, areq->dst, |
1025 | areq->assoclen); | 1016 | cryptlen + authsize, areq->assoclen); |
1026 | 1017 | ||
1027 | if (edesc->dma_len) | 1018 | if (edesc->dma_len) |
1028 | dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, | 1019 | dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, |
@@ -1032,7 +1023,7 @@ static void ipsec_esp_unmap(struct device *dev, | |||
1032 | unsigned int dst_nents = edesc->dst_nents ? : 1; | 1023 | unsigned int dst_nents = edesc->dst_nents ? : 1; |
1033 | 1024 | ||
1034 | sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize, | 1025 | sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize, |
1035 | areq->assoclen + areq->cryptlen - ivsize); | 1026 | areq->assoclen + cryptlen - ivsize); |
1036 | } | 1027 | } |
1037 | } | 1028 | } |
1038 | 1029 | ||
@@ -1043,31 +1034,14 @@ static void ipsec_esp_encrypt_done(struct device *dev, | |||
1043 | struct talitos_desc *desc, void *context, | 1034 | struct talitos_desc *desc, void *context, |
1044 | int err) | 1035 | int err) |
1045 | { | 1036 | { |
1046 | struct talitos_private *priv = dev_get_drvdata(dev); | ||
1047 | bool is_sec1 = has_ftr_sec1(priv); | ||
1048 | struct aead_request *areq = context; | 1037 | struct aead_request *areq = context; |
1049 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); | 1038 | struct crypto_aead *authenc = crypto_aead_reqtfm(areq); |
1050 | unsigned int authsize = crypto_aead_authsize(authenc); | ||
1051 | unsigned int ivsize = crypto_aead_ivsize(authenc); | 1039 | unsigned int ivsize = crypto_aead_ivsize(authenc); |
1052 | struct talitos_edesc *edesc; | 1040 | struct talitos_edesc *edesc; |
1053 | struct scatterlist *sg; | ||
1054 | void *icvdata; | ||
1055 | 1041 | ||
1056 | edesc = container_of(desc, struct talitos_edesc, desc); | 1042 | edesc = container_of(desc, struct talitos_edesc, desc); |
1057 | 1043 | ||
1058 | ipsec_esp_unmap(dev, edesc, areq); | 1044 | ipsec_esp_unmap(dev, edesc, areq, true); |
1059 | |||
1060 | /* copy the generated ICV to dst */ | ||
1061 | if (edesc->icv_ool) { | ||
1062 | if (is_sec1) | ||
1063 | icvdata = edesc->buf + areq->assoclen + areq->cryptlen; | ||
1064 | else | ||
1065 | icvdata = &edesc->link_tbl[edesc->src_nents + | ||
1066 | edesc->dst_nents + 2]; | ||
1067 | sg = sg_last(areq->dst, edesc->dst_nents); | ||
1068 | memcpy((char *)sg_virt(sg) + sg->length - authsize, | ||
1069 | icvdata, authsize); | ||
1070 | } | ||
1071 | 1045 | ||
1072 | dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE); | 1046 | dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE); |
1073 | 1047 | ||
@@ -1084,32 +1058,16 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev, | |||
1084 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); | 1058 | struct crypto_aead *authenc = crypto_aead_reqtfm(req); |
1085 | unsigned int authsize = crypto_aead_authsize(authenc); | 1059 | unsigned int authsize = crypto_aead_authsize(authenc); |
1086 | struct talitos_edesc *edesc; | 1060 | struct talitos_edesc *edesc; |
1087 | struct scatterlist *sg; | ||
1088 | char *oicv, *icv; | 1061 | char *oicv, *icv; |
1089 | struct talitos_private *priv = dev_get_drvdata(dev); | ||
1090 | bool is_sec1 = has_ftr_sec1(priv); | ||
1091 | 1062 | ||
1092 | edesc = container_of(desc, struct talitos_edesc, desc); | 1063 | edesc = container_of(desc, struct talitos_edesc, desc); |
1093 | 1064 | ||
1094 | ipsec_esp_unmap(dev, edesc, req); | 1065 | ipsec_esp_unmap(dev, edesc, req, false); |
1095 | 1066 | ||
1096 | if (!err) { | 1067 | if (!err) { |
1097 | /* auth check */ | 1068 | /* auth check */ |
1098 | sg = sg_last(req->dst, edesc->dst_nents ? : 1); | 1069 | oicv = edesc->buf + edesc->dma_len; |
1099 | icv = (char *)sg_virt(sg) + sg->length - authsize; | 1070 | icv = oicv - authsize; |
1100 | |||
1101 | if (edesc->dma_len) { | ||
1102 | if (is_sec1) | ||
1103 | oicv = (char *)&edesc->dma_link_tbl + | ||
1104 | req->assoclen + req->cryptlen; | ||
1105 | else | ||
1106 | oicv = (char *) | ||
1107 | &edesc->link_tbl[edesc->src_nents + | ||
1108 | edesc->dst_nents + 2]; | ||
1109 | if (edesc->icv_ool) | ||
1110 | icv = oicv + authsize; | ||
1111 | } else | ||
1112 | oicv = (char *)&edesc->link_tbl[0]; | ||
1113 | 1071 | ||
1114 | err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0; | 1072 | err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0; |
1115 | } | 1073 | } |
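With both ICVs now stored contiguously at the tail of the mapped area (the hardware-generated one at edesc->buf + dma_len, the received one authsize bytes before it), the software auth check collapses to a single comparison. crypto_memneq() is used rather than memcmp() because its run time does not depend on where the first mismatching byte sits:

```c
#include <linux/errno.h>
#include <crypto/algapi.h>	/* crypto_memneq() */

/* Sketch: constant-time ICV verification. */
static int example_check_icv(const u8 *generated, const u8 *received,
			     unsigned int authsize)
{
	return crypto_memneq(generated, received, authsize) ? -EBADMSG : 0;
}
```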
@@ -1128,7 +1086,7 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev, | |||
1128 | 1086 | ||
1129 | edesc = container_of(desc, struct talitos_edesc, desc); | 1087 | edesc = container_of(desc, struct talitos_edesc, desc); |
1130 | 1088 | ||
1131 | ipsec_esp_unmap(dev, edesc, req); | 1089 | ipsec_esp_unmap(dev, edesc, req, false); |
1132 | 1090 | ||
1133 | /* check ICV auth status */ | 1091 | /* check ICV auth status */ |
1134 | if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) != | 1092 | if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) != |
@@ -1145,11 +1103,12 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev, | |||
1145 | * stop at cryptlen bytes | 1103 | * stop at cryptlen bytes |
1146 | */ | 1104 | */ |
1147 | static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count, | 1105 | static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count, |
1148 | unsigned int offset, int cryptlen, | 1106 | unsigned int offset, int datalen, int elen, |
1149 | struct talitos_ptr *link_tbl_ptr) | 1107 | struct talitos_ptr *link_tbl_ptr) |
1150 | { | 1108 | { |
1151 | int n_sg = sg_count; | 1109 | int n_sg = elen ? sg_count + 1 : sg_count; |
1152 | int count = 0; | 1110 | int count = 0; |
1111 | int cryptlen = datalen + elen; | ||
1153 | 1112 | ||
1154 | while (cryptlen && sg && n_sg--) { | 1113 | while (cryptlen && sg && n_sg--) { |
1155 | unsigned int len = sg_dma_len(sg); | 1114 | unsigned int len = sg_dma_len(sg); |
@@ -1164,11 +1123,20 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count, | |||
1164 | if (len > cryptlen) | 1123 | if (len > cryptlen) |
1165 | len = cryptlen; | 1124 | len = cryptlen; |
1166 | 1125 | ||
1126 | if (datalen > 0 && len > datalen) { | ||
1127 | to_talitos_ptr(link_tbl_ptr + count, | ||
1128 | sg_dma_address(sg) + offset, datalen, 0); | ||
1129 | to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0); | ||
1130 | count++; | ||
1131 | len -= datalen; | ||
1132 | offset += datalen; | ||
1133 | } | ||
1167 | to_talitos_ptr(link_tbl_ptr + count, | 1134 | to_talitos_ptr(link_tbl_ptr + count, |
1168 | sg_dma_address(sg) + offset, len, 0); | 1135 | sg_dma_address(sg) + offset, len, 0); |
1169 | to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0); | 1136 | to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0); |
1170 | count++; | 1137 | count++; |
1171 | cryptlen -= len; | 1138 | cryptlen -= len; |
1139 | datalen -= len; | ||
1172 | offset = 0; | 1140 | offset = 0; |
1173 | 1141 | ||
1174 | next: | 1142 | next: |
@@ -1178,7 +1146,7 @@ next: | |||
1178 | /* tag end of link table */ | 1146 | /* tag end of link table */ |
1179 | if (count > 0) | 1147 | if (count > 0) |
1180 | to_talitos_ptr_ext_set(link_tbl_ptr + count - 1, | 1148 | to_talitos_ptr_ext_set(link_tbl_ptr + count - 1, |
1181 | DESC_PTR_LNKTBL_RETURN, 0); | 1149 | DESC_PTR_LNKTBL_RET, 0); |
1182 | 1150 | ||
1183 | return count; | 1151 | return count; |
1184 | } | 1152 | } |
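Splitting the old cryptlen parameter into datalen and elen lets the loop break an SG entry that straddles the payload/ICV boundary into two link-table entries, so the extent bytes always get a pointer of their own. A worked example with assumed sizes:

```c
/*
 * Assume datalen = 32 (payload), elen = 16 (ICV extent) and a single
 * 48-byte SG entry. The loop above emits two entries:
 *
 *   entry 0: addr = sg_dma_address(sg),      len = 32   (payload)
 *   entry 1: addr = sg_dma_address(sg) + 32, len = 16   (ICV)
 *
 * and tags the final entry with DESC_PTR_LNKTBL_RET to terminate
 * the table.
 */
```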
@@ -1186,7 +1154,8 @@ next: | |||
1186 | static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src, | 1154 | static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src, |
1187 | unsigned int len, struct talitos_edesc *edesc, | 1155 | unsigned int len, struct talitos_edesc *edesc, |
1188 | struct talitos_ptr *ptr, int sg_count, | 1156 | struct talitos_ptr *ptr, int sg_count, |
1189 | unsigned int offset, int tbl_off, int elen) | 1157 | unsigned int offset, int tbl_off, int elen, |
1158 | bool force) | ||
1190 | { | 1159 | { |
1191 | struct talitos_private *priv = dev_get_drvdata(dev); | 1160 | struct talitos_private *priv = dev_get_drvdata(dev); |
1192 | bool is_sec1 = has_ftr_sec1(priv); | 1161 | bool is_sec1 = has_ftr_sec1(priv); |
@@ -1196,7 +1165,7 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src, | |||
1196 | return 1; | 1165 | return 1; |
1197 | } | 1166 | } |
1198 | to_talitos_ptr_ext_set(ptr, elen, is_sec1); | 1167 | to_talitos_ptr_ext_set(ptr, elen, is_sec1); |
1199 | if (sg_count == 1) { | 1168 | if (sg_count == 1 && !force) { |
1200 | to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1); | 1169 | to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1); |
1201 | return sg_count; | 1170 | return sg_count; |
1202 | } | 1171 | } |
@@ -1204,9 +1173,9 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src, | |||
1204 | to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1); | 1173 | to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1); |
1205 | return sg_count; | 1174 | return sg_count; |
1206 | } | 1175 | } |
1207 | sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen, | 1176 | sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen, |
1208 | &edesc->link_tbl[tbl_off]); | 1177 | &edesc->link_tbl[tbl_off]); |
1209 | if (sg_count == 1) { | 1178 | if (sg_count == 1 && !force) { |
1210 | /* Only one segment now, so no link tbl needed */ | 1179 | /* Only one segment now, so no link tbl needed */ |
1211 | copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1); | 1180 | copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1); |
1212 | return sg_count; | 1181 | return sg_count; |
@@ -1224,13 +1193,14 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src, | |||
1224 | unsigned int offset, int tbl_off) | 1193 | unsigned int offset, int tbl_off) |
1225 | { | 1194 | { |
1226 | return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset, | 1195 | return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset, |
1227 | tbl_off, 0); | 1196 | tbl_off, 0, false); |
1228 | } | 1197 | } |
1229 | 1198 | ||
1230 | /* | 1199 | /* |
1231 | * fill in and submit ipsec_esp descriptor | 1200 | * fill in and submit ipsec_esp descriptor |
1232 | */ | 1201 | */ |
1233 | static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | 1202 | static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, |
1203 | bool encrypt, | ||
1234 | void (*callback)(struct device *dev, | 1204 | void (*callback)(struct device *dev, |
1235 | struct talitos_desc *desc, | 1205 | struct talitos_desc *desc, |
1236 | void *context, int error)) | 1206 | void *context, int error)) |
@@ -1240,7 +1210,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1240 | struct talitos_ctx *ctx = crypto_aead_ctx(aead); | 1210 | struct talitos_ctx *ctx = crypto_aead_ctx(aead); |
1241 | struct device *dev = ctx->dev; | 1211 | struct device *dev = ctx->dev; |
1242 | struct talitos_desc *desc = &edesc->desc; | 1212 | struct talitos_desc *desc = &edesc->desc; |
1243 | unsigned int cryptlen = areq->cryptlen; | 1213 | unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize); |
1244 | unsigned int ivsize = crypto_aead_ivsize(aead); | 1214 | unsigned int ivsize = crypto_aead_ivsize(aead); |
1245 | int tbl_off = 0; | 1215 | int tbl_off = 0; |
1246 | int sg_count, ret; | 1216 | int sg_count, ret; |
@@ -1251,6 +1221,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1251 | bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP; | 1221 | bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP; |
1252 | struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3]; | 1222 | struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3]; |
1253 | struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2]; | 1223 | struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2]; |
1224 | dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize; | ||
1254 | 1225 | ||
1255 | /* hmac key */ | 1226 | /* hmac key */ |
1256 | to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1); | 1227 | to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1); |
@@ -1290,7 +1261,8 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1290 | elen = authsize; | 1261 | elen = authsize; |
1291 | 1262 | ||
1292 | ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4], | 1263 | ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4], |
1293 | sg_count, areq->assoclen, tbl_off, elen); | 1264 | sg_count, areq->assoclen, tbl_off, elen, |
1265 | false); | ||
1294 | 1266 | ||
1295 | if (ret > 1) { | 1267 | if (ret > 1) { |
1296 | tbl_off += ret; | 1268 | tbl_off += ret; |
@@ -1304,55 +1276,32 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1304 | dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE); | 1276 | dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE); |
1305 | } | 1277 | } |
1306 | 1278 | ||
1307 | ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5], | 1279 | if (is_ipsec_esp && encrypt) |
1308 | sg_count, areq->assoclen, tbl_off); | 1280 | elen = authsize; |
1309 | 1281 | else | |
1310 | if (is_ipsec_esp) | 1282 | elen = 0; |
1311 | to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1); | 1283 | ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5], |
1312 | 1284 | sg_count, areq->assoclen, tbl_off, elen, | |
1313 | /* ICV data */ | 1285 | is_ipsec_esp && !encrypt); |
1314 | if (ret > 1) { | 1286 | tbl_off += ret; |
1315 | tbl_off += ret; | ||
1316 | edesc->icv_ool = true; | ||
1317 | sync_needed = true; | ||
1318 | |||
1319 | if (is_ipsec_esp) { | ||
1320 | struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; | ||
1321 | int offset = (edesc->src_nents + edesc->dst_nents + 2) * | ||
1322 | sizeof(struct talitos_ptr) + authsize; | ||
1323 | |||
1324 | /* Add an entry to the link table for ICV data */ | ||
1325 | to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1); | ||
1326 | to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN, | ||
1327 | is_sec1); | ||
1328 | 1287 | ||
1329 | /* icv data follows link tables */ | 1288 | if (!encrypt && is_ipsec_esp) { |
1330 | to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset, | 1289 | struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; |
1331 | authsize, is_sec1); | ||
1332 | } else { | ||
1333 | dma_addr_t addr = edesc->dma_link_tbl; | ||
1334 | 1290 | ||
1335 | if (is_sec1) | 1291 | /* Add an entry to the link table for ICV data */ |
1336 | addr += areq->assoclen + cryptlen; | 1292 | to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1); |
1337 | else | 1293 | to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1); |
1338 | addr += sizeof(struct talitos_ptr) * tbl_off; | ||
1339 | 1294 | ||
1340 | to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1); | 1295 | /* icv data follows link tables */ |
1341 | } | 1296 | to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1); |
1297 | to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1); | ||
1298 | sync_needed = true; | ||
1299 | } else if (!encrypt) { | ||
1300 | to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1); | ||
1301 | sync_needed = true; | ||
1342 | } else if (!is_ipsec_esp) { | 1302 | } else if (!is_ipsec_esp) { |
1343 | ret = talitos_sg_map(dev, areq->dst, authsize, edesc, | 1303 | talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6], |
1344 | &desc->ptr[6], sg_count, areq->assoclen + | 1304 | sg_count, areq->assoclen + cryptlen, tbl_off); |
1345 | cryptlen, | ||
1346 | tbl_off); | ||
1347 | if (ret > 1) { | ||
1348 | tbl_off += ret; | ||
1349 | edesc->icv_ool = true; | ||
1350 | sync_needed = true; | ||
1351 | } else { | ||
1352 | edesc->icv_ool = false; | ||
1353 | } | ||
1354 | } else { | ||
1355 | edesc->icv_ool = false; | ||
1356 | } | 1305 | } |
1357 | 1306 | ||
1358 | /* iv out */ | 1307 | /* iv out */ |
@@ -1367,7 +1316,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1367 | 1316 | ||
1368 | ret = talitos_submit(dev, ctx->ch, desc, callback, areq); | 1317 | ret = talitos_submit(dev, ctx->ch, desc, callback, areq); |
1369 | if (ret != -EINPROGRESS) { | 1318 | if (ret != -EINPROGRESS) { |
1370 | ipsec_esp_unmap(dev, edesc, areq); | 1319 | ipsec_esp_unmap(dev, edesc, areq, encrypt); |
1371 | kfree(edesc); | 1320 | kfree(edesc); |
1372 | } | 1321 | } |
1373 | return ret; | 1322 | return ret; |
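A convention worth spelling out, since it drives much of this rework: the crypto API defines areq->cryptlen as the plaintext length on encryption but as ciphertext plus ICV on decryption. Rather than destructively rewriting req->cryptlen the way aead_decrypt() used to, each path now derives the payload length locally:

```c
/* payload length, excluding the ICV that decrypt requests append */
unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
```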
@@ -1435,18 +1384,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1435 | * and space for two sets of ICVs (stashed and generated) | 1384 | * and space for two sets of ICVs (stashed and generated) |
1436 | */ | 1385 | */ |
1437 | alloc_len = sizeof(struct talitos_edesc); | 1386 | alloc_len = sizeof(struct talitos_edesc); |
1438 | if (src_nents || dst_nents) { | 1387 | if (src_nents || dst_nents || !encrypt) { |
1439 | if (is_sec1) | 1388 | if (is_sec1) |
1440 | dma_len = (src_nents ? src_len : 0) + | 1389 | dma_len = (src_nents ? src_len : 0) + |
1441 | (dst_nents ? dst_len : 0); | 1390 | (dst_nents ? dst_len : 0) + authsize; |
1442 | else | 1391 | else |
1443 | dma_len = (src_nents + dst_nents + 2) * | 1392 | dma_len = (src_nents + dst_nents + 2) * |
1444 | sizeof(struct talitos_ptr) + authsize * 2; | 1393 | sizeof(struct talitos_ptr) + authsize; |
1445 | alloc_len += dma_len; | 1394 | alloc_len += dma_len; |
1446 | } else { | 1395 | } else { |
1447 | dma_len = 0; | 1396 | dma_len = 0; |
1448 | alloc_len += icv_stashing ? authsize : 0; | ||
1449 | } | 1397 | } |
1398 | alloc_len += icv_stashing ? authsize : 0; | ||
1450 | 1399 | ||
1451 | /* if it's an ahash, add space for a second desc next to the first one */ | 1400 | /* if it's an ahash, add space for a second desc next to the first one */ |
1452 | if (is_sec1 && !dst) | 1401 | if (is_sec1 && !dst) |
@@ -1466,15 +1415,11 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | |||
1466 | edesc->dst_nents = dst_nents; | 1415 | edesc->dst_nents = dst_nents; |
1467 | edesc->iv_dma = iv_dma; | 1416 | edesc->iv_dma = iv_dma; |
1468 | edesc->dma_len = dma_len; | 1417 | edesc->dma_len = dma_len; |
1469 | if (dma_len) { | 1418 | if (dma_len) |
1470 | void *addr = &edesc->link_tbl[0]; | 1419 | edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0], |
1471 | |||
1472 | if (is_sec1 && !dst) | ||
1473 | addr += sizeof(struct talitos_desc); | ||
1474 | edesc->dma_link_tbl = dma_map_single(dev, addr, | ||
1475 | edesc->dma_len, | 1420 | edesc->dma_len, |
1476 | DMA_BIDIRECTIONAL); | 1421 | DMA_BIDIRECTIONAL); |
1477 | } | 1422 | |
1478 | return edesc; | 1423 | return edesc; |
1479 | } | 1424 | } |
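Because the second ahash descriptor now lives past the mapped length instead of inside it, the mapping can always start at link_tbl[0]. A single DMA_BIDIRECTIONAL mapping covers the whole trailing area, since the device both reads it (link tables) and writes it (generated ICV). A sketch, with a mapping-error check added that the driver itself omits:

```c
edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
				     edesc->dma_len, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, edesc->dma_link_tbl)) {	/* assumed check */
	kfree(edesc);
	return ERR_PTR(-ENOMEM);
}
```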
1480 | 1425 | ||
@@ -1485,9 +1430,10 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, | |||
1485 | unsigned int authsize = crypto_aead_authsize(authenc); | 1430 | unsigned int authsize = crypto_aead_authsize(authenc); |
1486 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 1431 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
1487 | unsigned int ivsize = crypto_aead_ivsize(authenc); | 1432 | unsigned int ivsize = crypto_aead_ivsize(authenc); |
1433 | unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize); | ||
1488 | 1434 | ||
1489 | return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, | 1435 | return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, |
1490 | iv, areq->assoclen, areq->cryptlen, | 1436 | iv, areq->assoclen, cryptlen, |
1491 | authsize, ivsize, icv_stashing, | 1437 | authsize, ivsize, icv_stashing, |
1492 | areq->base.flags, encrypt); | 1438 | areq->base.flags, encrypt); |
1493 | } | 1439 | } |
@@ -1506,7 +1452,7 @@ static int aead_encrypt(struct aead_request *req) | |||
1506 | /* set encrypt */ | 1452 | /* set encrypt */ |
1507 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; | 1453 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; |
1508 | 1454 | ||
1509 | return ipsec_esp(edesc, req, ipsec_esp_encrypt_done); | 1455 | return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done); |
1510 | } | 1456 | } |
1511 | 1457 | ||
1512 | static int aead_decrypt(struct aead_request *req) | 1458 | static int aead_decrypt(struct aead_request *req) |
@@ -1516,17 +1462,15 @@ static int aead_decrypt(struct aead_request *req) | |||
1516 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 1462 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
1517 | struct talitos_private *priv = dev_get_drvdata(ctx->dev); | 1463 | struct talitos_private *priv = dev_get_drvdata(ctx->dev); |
1518 | struct talitos_edesc *edesc; | 1464 | struct talitos_edesc *edesc; |
1519 | struct scatterlist *sg; | ||
1520 | void *icvdata; | 1465 | void *icvdata; |
1521 | 1466 | ||
1522 | req->cryptlen -= authsize; | ||
1523 | |||
1524 | /* allocate extended descriptor */ | 1467 | /* allocate extended descriptor */ |
1525 | edesc = aead_edesc_alloc(req, req->iv, 1, false); | 1468 | edesc = aead_edesc_alloc(req, req->iv, 1, false); |
1526 | if (IS_ERR(edesc)) | 1469 | if (IS_ERR(edesc)) |
1527 | return PTR_ERR(edesc); | 1470 | return PTR_ERR(edesc); |
1528 | 1471 | ||
1529 | if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) && | 1472 | if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) && |
1473 | (priv->features & TALITOS_FTR_HW_AUTH_CHECK) && | ||
1530 | ((!edesc->src_nents && !edesc->dst_nents) || | 1474 | ((!edesc->src_nents && !edesc->dst_nents) || |
1531 | priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) { | 1475 | priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) { |
1532 | 1476 | ||
@@ -1537,24 +1481,20 @@ static int aead_decrypt(struct aead_request *req) | |||
1537 | 1481 | ||
1538 | /* reset integrity check result bits */ | 1482 | /* reset integrity check result bits */ |
1539 | 1483 | ||
1540 | return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done); | 1484 | return ipsec_esp(edesc, req, false, |
1485 | ipsec_esp_decrypt_hwauth_done); | ||
1541 | } | 1486 | } |
1542 | 1487 | ||
1543 | /* Have to check the ICV with software */ | 1488 | /* Have to check the ICV with software */ |
1544 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; | 1489 | edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; |
1545 | 1490 | ||
1546 | /* stash incoming ICV for later cmp with ICV generated by the h/w */ | 1491 | /* stash incoming ICV for later cmp with ICV generated by the h/w */ |
1547 | if (edesc->dma_len) | 1492 | icvdata = edesc->buf + edesc->dma_len; |
1548 | icvdata = (char *)&edesc->link_tbl[edesc->src_nents + | ||
1549 | edesc->dst_nents + 2]; | ||
1550 | else | ||
1551 | icvdata = &edesc->link_tbl[0]; | ||
1552 | 1493 | ||
1553 | sg = sg_last(req->src, edesc->src_nents ? : 1); | 1494 | sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize, |
1495 | req->assoclen + req->cryptlen - authsize); | ||
1554 | 1496 | ||
1555 | memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize); | 1497 | return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done); |
1556 | |||
1557 | return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done); | ||
1558 | } | 1498 | } |
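The ICV stash changes in the same spirit: sg_pcopy_to_buffer() replaces the old sg_virt(sg_last(...)) arithmetic, which silently assumed the ICV never straddled two SG entries. For reference, the helper's semantics:

```c
/*
 * sg_pcopy_to_buffer(sgl, nents, buf, buflen, skip) copies buflen
 * bytes into buf starting at byte offset 'skip' within the
 * scatterlist, crossing entry boundaries as needed. Here skip is
 * assoclen + cryptlen - authsize, i.e. the start of the received ICV.
 */
```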
1559 | 1499 | ||
1560 | static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, | 1500 | static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, |
@@ -1605,6 +1545,18 @@ static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher, | |||
1605 | return ablkcipher_setkey(cipher, key, keylen); | 1545 | return ablkcipher_setkey(cipher, key, keylen); |
1606 | } | 1546 | } |
1607 | 1547 | ||
1548 | static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher, | ||
1549 | const u8 *key, unsigned int keylen) | ||
1550 | { | ||
1551 | if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 || | ||
1552 | keylen == AES_KEYSIZE_256) | ||
1553 | return ablkcipher_setkey(cipher, key, keylen); | ||
1554 | |||
1555 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
1556 | |||
1557 | return -EINVAL; | ||
1558 | } | ||
1559 | |||
1608 | static void common_nonsnoop_unmap(struct device *dev, | 1560 | static void common_nonsnoop_unmap(struct device *dev, |
1609 | struct talitos_edesc *edesc, | 1561 | struct talitos_edesc *edesc, |
1610 | struct ablkcipher_request *areq) | 1562 | struct ablkcipher_request *areq) |
@@ -1624,11 +1576,15 @@ static void ablkcipher_done(struct device *dev, | |||
1624 | int err) | 1576 | int err) |
1625 | { | 1577 | { |
1626 | struct ablkcipher_request *areq = context; | 1578 | struct ablkcipher_request *areq = context; |
1579 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); | ||
1580 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | ||
1581 | unsigned int ivsize = crypto_ablkcipher_ivsize(cipher); | ||
1627 | struct talitos_edesc *edesc; | 1582 | struct talitos_edesc *edesc; |
1628 | 1583 | ||
1629 | edesc = container_of(desc, struct talitos_edesc, desc); | 1584 | edesc = container_of(desc, struct talitos_edesc, desc); |
1630 | 1585 | ||
1631 | common_nonsnoop_unmap(dev, edesc, areq); | 1586 | common_nonsnoop_unmap(dev, edesc, areq); |
1587 | memcpy(areq->info, ctx->iv, ivsize); | ||
1632 | 1588 | ||
1633 | kfree(edesc); | 1589 | kfree(edesc); |
1634 | 1590 | ||
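ablkcipher_done() now also copies the output IV back to the caller. The API contract for CBC-like modes is that req->info holds the IV needed to chain a follow-up request; the hardware leaves that value in ctx->iv, so it must be propagated on completion:

```c
/* sketch of the completion-side writeback added above */
common_nonsnoop_unmap(dev, edesc, areq);
memcpy(areq->info, ctx->iv, ivsize);	/* next IV, for request chaining */
```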
@@ -1723,6 +1679,14 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq) | |||
1723 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); | 1679 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); |
1724 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | 1680 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
1725 | struct talitos_edesc *edesc; | 1681 | struct talitos_edesc *edesc; |
1682 | unsigned int blocksize = | ||
1683 | crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher)); | ||
1684 | |||
1685 | if (!areq->nbytes) | ||
1686 | return 0; | ||
1687 | |||
1688 | if (areq->nbytes % blocksize) | ||
1689 | return -EINVAL; | ||
1726 | 1690 | ||
1727 | /* allocate extended descriptor */ | 1691 | /* allocate extended descriptor */ |
1728 | edesc = ablkcipher_edesc_alloc(areq, true); | 1692 | edesc = ablkcipher_edesc_alloc(areq, true); |
@@ -1740,6 +1704,14 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq) | |||
1740 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); | 1704 | struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); |
1741 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | 1705 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
1742 | struct talitos_edesc *edesc; | 1706 | struct talitos_edesc *edesc; |
1707 | unsigned int blocksize = | ||
1708 | crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher)); | ||
1709 | |||
1710 | if (!areq->nbytes) | ||
1711 | return 0; | ||
1712 | |||
1713 | if (areq->nbytes % blocksize) | ||
1714 | return -EINVAL; | ||
1743 | 1715 | ||
1744 | /* allocate extended descriptor */ | 1716 | /* allocate extended descriptor */ |
1745 | edesc = ablkcipher_edesc_alloc(areq, false); | 1717 | edesc = ablkcipher_edesc_alloc(areq, false); |
@@ -1759,14 +1731,16 @@ static void common_nonsnoop_hash_unmap(struct device *dev, | |||
1759 | struct talitos_private *priv = dev_get_drvdata(dev); | 1731 | struct talitos_private *priv = dev_get_drvdata(dev); |
1760 | bool is_sec1 = has_ftr_sec1(priv); | 1732 | bool is_sec1 = has_ftr_sec1(priv); |
1761 | struct talitos_desc *desc = &edesc->desc; | 1733 | struct talitos_desc *desc = &edesc->desc; |
1762 | struct talitos_desc *desc2 = desc + 1; | 1734 | struct talitos_desc *desc2 = (struct talitos_desc *) |
1735 | (edesc->buf + edesc->dma_len); | ||
1763 | 1736 | ||
1764 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); | 1737 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); |
1765 | if (desc->next_desc && | 1738 | if (desc->next_desc && |
1766 | desc->ptr[5].ptr != desc2->ptr[5].ptr) | 1739 | desc->ptr[5].ptr != desc2->ptr[5].ptr) |
1767 | unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE); | 1740 | unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE); |
1768 | 1741 | ||
1769 | talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0); | 1742 | if (req_ctx->psrc) |
1743 | talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0); | ||
1770 | 1744 | ||
1771 | /* When using hashctx-in, must unmap it. */ | 1745 | /* When using hashctx-in, must unmap it. */ |
1772 | if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1)) | 1746 | if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1)) |
@@ -1833,7 +1807,6 @@ static void talitos_handle_buggy_hash(struct talitos_ctx *ctx, | |||
1833 | 1807 | ||
1834 | static int common_nonsnoop_hash(struct talitos_edesc *edesc, | 1808 | static int common_nonsnoop_hash(struct talitos_edesc *edesc, |
1835 | struct ahash_request *areq, unsigned int length, | 1809 | struct ahash_request *areq, unsigned int length, |
1836 | unsigned int offset, | ||
1837 | void (*callback) (struct device *dev, | 1810 | void (*callback) (struct device *dev, |
1838 | struct talitos_desc *desc, | 1811 | struct talitos_desc *desc, |
1839 | void *context, int error)) | 1812 | void *context, int error)) |
@@ -1872,9 +1845,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, | |||
1872 | 1845 | ||
1873 | sg_count = edesc->src_nents ?: 1; | 1846 | sg_count = edesc->src_nents ?: 1; |
1874 | if (is_sec1 && sg_count > 1) | 1847 | if (is_sec1 && sg_count > 1) |
1875 | sg_pcopy_to_buffer(req_ctx->psrc, sg_count, | 1848 | sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length); |
1876 | edesc->buf + sizeof(struct talitos_desc), | ||
1877 | length, req_ctx->nbuf); | ||
1878 | else if (length) | 1849 | else if (length) |
1879 | sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count, | 1850 | sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count, |
1880 | DMA_TO_DEVICE); | 1851 | DMA_TO_DEVICE); |
@@ -1887,7 +1858,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, | |||
1887 | DMA_TO_DEVICE); | 1858 | DMA_TO_DEVICE); |
1888 | } else { | 1859 | } else { |
1889 | sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, | 1860 | sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, |
1890 | &desc->ptr[3], sg_count, offset, 0); | 1861 | &desc->ptr[3], sg_count, 0, 0); |
1891 | if (sg_count > 1) | 1862 | if (sg_count > 1) |
1892 | sync_needed = true; | 1863 | sync_needed = true; |
1893 | } | 1864 | } |
@@ -1911,7 +1882,8 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, | |||
1911 | talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]); | 1882 | talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]); |
1912 | 1883 | ||
1913 | if (is_sec1 && req_ctx->nbuf && length) { | 1884 | if (is_sec1 && req_ctx->nbuf && length) { |
1914 | struct talitos_desc *desc2 = desc + 1; | 1885 | struct talitos_desc *desc2 = (struct talitos_desc *) |
1886 | (edesc->buf + edesc->dma_len); | ||
1915 | dma_addr_t next_desc; | 1887 | dma_addr_t next_desc; |
1916 | 1888 | ||
1917 | memset(desc2, 0, sizeof(*desc2)); | 1889 | memset(desc2, 0, sizeof(*desc2)); |
@@ -1932,7 +1904,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, | |||
1932 | DMA_TO_DEVICE); | 1904 | DMA_TO_DEVICE); |
1933 | copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1); | 1905 | copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1); |
1934 | sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, | 1906 | sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, |
1935 | &desc2->ptr[3], sg_count, offset, 0); | 1907 | &desc2->ptr[3], sg_count, 0, 0); |
1936 | if (sg_count > 1) | 1908 | if (sg_count > 1) |
1937 | sync_needed = true; | 1909 | sync_needed = true; |
1938 | copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1); | 1910 | copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1); |
@@ -2043,7 +2015,6 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | |||
2043 | struct device *dev = ctx->dev; | 2015 | struct device *dev = ctx->dev; |
2044 | struct talitos_private *priv = dev_get_drvdata(dev); | 2016 | struct talitos_private *priv = dev_get_drvdata(dev); |
2045 | bool is_sec1 = has_ftr_sec1(priv); | 2017 | bool is_sec1 = has_ftr_sec1(priv); |
2046 | int offset = 0; | ||
2047 | u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx]; | 2018 | u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx]; |
2048 | 2019 | ||
2049 | if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { | 2020 | if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { |
@@ -2083,6 +2054,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | |||
2083 | sg_chain(req_ctx->bufsl, 2, areq->src); | 2054 | sg_chain(req_ctx->bufsl, 2, areq->src); |
2084 | req_ctx->psrc = req_ctx->bufsl; | 2055 | req_ctx->psrc = req_ctx->bufsl; |
2085 | } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) { | 2056 | } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) { |
2057 | int offset; | ||
2058 | |||
2086 | if (nbytes_to_hash > blocksize) | 2059 | if (nbytes_to_hash > blocksize) |
2087 | offset = blocksize - req_ctx->nbuf; | 2060 | offset = blocksize - req_ctx->nbuf; |
2088 | else | 2061 | else |
@@ -2095,7 +2068,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | |||
2095 | sg_copy_to_buffer(areq->src, nents, | 2068 | sg_copy_to_buffer(areq->src, nents, |
2096 | ctx_buf + req_ctx->nbuf, offset); | 2069 | ctx_buf + req_ctx->nbuf, offset); |
2097 | req_ctx->nbuf += offset; | 2070 | req_ctx->nbuf += offset; |
2098 | req_ctx->psrc = areq->src; | 2071 | req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src, |
2072 | offset); | ||
2099 | } else | 2073 | } else |
2100 | req_ctx->psrc = areq->src; | 2074 | req_ctx->psrc = areq->src; |
2101 | 2075 | ||
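scatterwalk_ffwd() makes the partial-block case self-contained: instead of threading an offset parameter through common_nonsnoop_hash(), the source list itself is fast-forwarded past the bytes already absorbed into the context buffer (req_ctx->bufsl, a two-entry array, doubles as the scratch list). The idiom in isolation:

```c
#include <crypto/scatterwalk.h>

/* Sketch: get a scatterlist view that skips 'offset' bytes of 'src'.
 * tmp[] is only written when the new start falls mid-entry. */
static struct scatterlist *skip_prefix(struct scatterlist tmp[2],
				       struct scatterlist *src,
				       unsigned int offset)
{
	return scatterwalk_ffwd(tmp, src, offset);
}
```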
@@ -2135,8 +2109,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | |||
2135 | if (ctx->keylen && (req_ctx->first || req_ctx->last)) | 2109 | if (ctx->keylen && (req_ctx->first || req_ctx->last)) |
2136 | edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC; | 2110 | edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC; |
2137 | 2111 | ||
2138 | return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset, | 2112 | return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done); |
2139 | ahash_done); | ||
2140 | } | 2113 | } |
2141 | 2114 | ||
2142 | static int ahash_update(struct ahash_request *areq) | 2115 | static int ahash_update(struct ahash_request *areq) |
@@ -2339,7 +2312,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2339 | .base = { | 2312 | .base = { |
2340 | .cra_name = "authenc(hmac(sha1),cbc(aes))", | 2313 | .cra_name = "authenc(hmac(sha1),cbc(aes))", |
2341 | .cra_driver_name = "authenc-hmac-sha1-" | 2314 | .cra_driver_name = "authenc-hmac-sha1-" |
2342 | "cbc-aes-talitos", | 2315 | "cbc-aes-talitos-hsna", |
2343 | .cra_blocksize = AES_BLOCK_SIZE, | 2316 | .cra_blocksize = AES_BLOCK_SIZE, |
2344 | .cra_flags = CRYPTO_ALG_ASYNC, | 2317 | .cra_flags = CRYPTO_ALG_ASYNC, |
2345 | }, | 2318 | }, |
@@ -2384,7 +2357,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2384 | .cra_name = "authenc(hmac(sha1)," | 2357 | .cra_name = "authenc(hmac(sha1)," |
2385 | "cbc(des3_ede))", | 2358 | "cbc(des3_ede))", |
2386 | .cra_driver_name = "authenc-hmac-sha1-" | 2359 | .cra_driver_name = "authenc-hmac-sha1-" |
2387 | "cbc-3des-talitos", | 2360 | "cbc-3des-talitos-hsna", |
2388 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2361 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2389 | .cra_flags = CRYPTO_ALG_ASYNC, | 2362 | .cra_flags = CRYPTO_ALG_ASYNC, |
2390 | }, | 2363 | }, |
@@ -2427,7 +2400,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2427 | .base = { | 2400 | .base = { |
2428 | .cra_name = "authenc(hmac(sha224),cbc(aes))", | 2401 | .cra_name = "authenc(hmac(sha224),cbc(aes))", |
2429 | .cra_driver_name = "authenc-hmac-sha224-" | 2402 | .cra_driver_name = "authenc-hmac-sha224-" |
2430 | "cbc-aes-talitos", | 2403 | "cbc-aes-talitos-hsna", |
2431 | .cra_blocksize = AES_BLOCK_SIZE, | 2404 | .cra_blocksize = AES_BLOCK_SIZE, |
2432 | .cra_flags = CRYPTO_ALG_ASYNC, | 2405 | .cra_flags = CRYPTO_ALG_ASYNC, |
2433 | }, | 2406 | }, |
@@ -2472,7 +2445,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2472 | .cra_name = "authenc(hmac(sha224)," | 2445 | .cra_name = "authenc(hmac(sha224)," |
2473 | "cbc(des3_ede))", | 2446 | "cbc(des3_ede))", |
2474 | .cra_driver_name = "authenc-hmac-sha224-" | 2447 | .cra_driver_name = "authenc-hmac-sha224-" |
2475 | "cbc-3des-talitos", | 2448 | "cbc-3des-talitos-hsna", |
2476 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2449 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2477 | .cra_flags = CRYPTO_ALG_ASYNC, | 2450 | .cra_flags = CRYPTO_ALG_ASYNC, |
2478 | }, | 2451 | }, |
@@ -2515,7 +2488,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2515 | .base = { | 2488 | .base = { |
2516 | .cra_name = "authenc(hmac(sha256),cbc(aes))", | 2489 | .cra_name = "authenc(hmac(sha256),cbc(aes))", |
2517 | .cra_driver_name = "authenc-hmac-sha256-" | 2490 | .cra_driver_name = "authenc-hmac-sha256-" |
2518 | "cbc-aes-talitos", | 2491 | "cbc-aes-talitos-hsna", |
2519 | .cra_blocksize = AES_BLOCK_SIZE, | 2492 | .cra_blocksize = AES_BLOCK_SIZE, |
2520 | .cra_flags = CRYPTO_ALG_ASYNC, | 2493 | .cra_flags = CRYPTO_ALG_ASYNC, |
2521 | }, | 2494 | }, |
@@ -2560,7 +2533,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2560 | .cra_name = "authenc(hmac(sha256)," | 2533 | .cra_name = "authenc(hmac(sha256)," |
2561 | "cbc(des3_ede))", | 2534 | "cbc(des3_ede))", |
2562 | .cra_driver_name = "authenc-hmac-sha256-" | 2535 | .cra_driver_name = "authenc-hmac-sha256-" |
2563 | "cbc-3des-talitos", | 2536 | "cbc-3des-talitos-hsna", |
2564 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2537 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2565 | .cra_flags = CRYPTO_ALG_ASYNC, | 2538 | .cra_flags = CRYPTO_ALG_ASYNC, |
2566 | }, | 2539 | }, |
@@ -2689,7 +2662,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2689 | .base = { | 2662 | .base = { |
2690 | .cra_name = "authenc(hmac(md5),cbc(aes))", | 2663 | .cra_name = "authenc(hmac(md5),cbc(aes))", |
2691 | .cra_driver_name = "authenc-hmac-md5-" | 2664 | .cra_driver_name = "authenc-hmac-md5-" |
2692 | "cbc-aes-talitos", | 2665 | "cbc-aes-talitos-hsna", |
2693 | .cra_blocksize = AES_BLOCK_SIZE, | 2666 | .cra_blocksize = AES_BLOCK_SIZE, |
2694 | .cra_flags = CRYPTO_ALG_ASYNC, | 2667 | .cra_flags = CRYPTO_ALG_ASYNC, |
2695 | }, | 2668 | }, |
@@ -2732,7 +2705,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2732 | .base = { | 2705 | .base = { |
2733 | .cra_name = "authenc(hmac(md5),cbc(des3_ede))", | 2706 | .cra_name = "authenc(hmac(md5),cbc(des3_ede))", |
2734 | .cra_driver_name = "authenc-hmac-md5-" | 2707 | .cra_driver_name = "authenc-hmac-md5-" |
2735 | "cbc-3des-talitos", | 2708 | "cbc-3des-talitos-hsna", |
2736 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 2709 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
2737 | .cra_flags = CRYPTO_ALG_ASYNC, | 2710 | .cra_flags = CRYPTO_ALG_ASYNC, |
2738 | }, | 2711 | }, |
@@ -2760,7 +2733,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2760 | .cra_ablkcipher = { | 2733 | .cra_ablkcipher = { |
2761 | .min_keysize = AES_MIN_KEY_SIZE, | 2734 | .min_keysize = AES_MIN_KEY_SIZE, |
2762 | .max_keysize = AES_MAX_KEY_SIZE, | 2735 | .max_keysize = AES_MAX_KEY_SIZE, |
2763 | .ivsize = AES_BLOCK_SIZE, | 2736 | .setkey = ablkcipher_aes_setkey, |
2764 | } | 2737 | } |
2765 | }, | 2738 | }, |
2766 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | 2739 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
@@ -2777,6 +2750,7 @@ static struct talitos_alg_template driver_algs[] = { | |||
2777 | .min_keysize = AES_MIN_KEY_SIZE, | 2750 | .min_keysize = AES_MIN_KEY_SIZE, |
2778 | .max_keysize = AES_MAX_KEY_SIZE, | 2751 | .max_keysize = AES_MAX_KEY_SIZE, |
2779 | .ivsize = AES_BLOCK_SIZE, | 2752 | .ivsize = AES_BLOCK_SIZE, |
2753 | .setkey = ablkcipher_aes_setkey, | ||
2780 | } | 2754 | } |
2781 | }, | 2755 | }, |
2782 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | | 2756 | .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | |
@@ -2787,13 +2761,14 @@ static struct talitos_alg_template driver_algs[] = { | |||
2787 | .alg.crypto = { | 2761 | .alg.crypto = { |
2788 | .cra_name = "ctr(aes)", | 2762 | .cra_name = "ctr(aes)", |
2789 | .cra_driver_name = "ctr-aes-talitos", | 2763 | .cra_driver_name = "ctr-aes-talitos", |
2790 | .cra_blocksize = AES_BLOCK_SIZE, | 2764 | .cra_blocksize = 1, |
2791 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | 2765 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | |
2792 | CRYPTO_ALG_ASYNC, | 2766 | CRYPTO_ALG_ASYNC, |
2793 | .cra_ablkcipher = { | 2767 | .cra_ablkcipher = { |
2794 | .min_keysize = AES_MIN_KEY_SIZE, | 2768 | .min_keysize = AES_MIN_KEY_SIZE, |
2795 | .max_keysize = AES_MAX_KEY_SIZE, | 2769 | .max_keysize = AES_MAX_KEY_SIZE, |
2796 | .ivsize = AES_BLOCK_SIZE, | 2770 | .ivsize = AES_BLOCK_SIZE, |
2771 | .setkey = ablkcipher_aes_setkey, | ||
2797 | } | 2772 | } |
2798 | }, | 2773 | }, |
2799 | .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP | | 2774 | .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP | |
@@ -2810,7 +2785,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2810 | .cra_ablkcipher = { | 2785 | .cra_ablkcipher = { |
2811 | .min_keysize = DES_KEY_SIZE, | 2786 | .min_keysize = DES_KEY_SIZE, |
2812 | .max_keysize = DES_KEY_SIZE, | 2787 | .max_keysize = DES_KEY_SIZE, |
2813 | .ivsize = DES_BLOCK_SIZE, | ||
2814 | .setkey = ablkcipher_des_setkey, | 2788 | .setkey = ablkcipher_des_setkey, |
2815 | } | 2789 | } |
2816 | }, | 2790 | }, |
@@ -2845,7 +2819,6 @@ static struct talitos_alg_template driver_algs[] = { | |||
2845 | .cra_ablkcipher = { | 2819 | .cra_ablkcipher = { |
2846 | .min_keysize = DES3_EDE_KEY_SIZE, | 2820 | .min_keysize = DES3_EDE_KEY_SIZE, |
2847 | .max_keysize = DES3_EDE_KEY_SIZE, | 2821 | .max_keysize = DES3_EDE_KEY_SIZE, |
2848 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
2849 | .setkey = ablkcipher_des3_setkey, | 2822 | .setkey = ablkcipher_des3_setkey, |
2850 | } | 2823 | } |
2851 | }, | 2824 | }, |
@@ -3270,7 +3243,10 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, | |||
3270 | alg->cra_priority = t_alg->algt.priority; | 3243 | alg->cra_priority = t_alg->algt.priority; |
3271 | else | 3244 | else |
3272 | alg->cra_priority = TALITOS_CRA_PRIORITY; | 3245 | alg->cra_priority = TALITOS_CRA_PRIORITY; |
3273 | alg->cra_alignmask = 0; | 3246 | if (has_ftr_sec1(priv)) |
3247 | alg->cra_alignmask = 3; | ||
3248 | else | ||
3249 | alg->cra_alignmask = 0; | ||
3274 | alg->cra_ctxsize = sizeof(struct talitos_ctx); | 3250 | alg->cra_ctxsize = sizeof(struct talitos_ctx); |
3275 | alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY; | 3251 | alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY; |
3276 | 3252 | ||
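The alignmask change encodes SEC1's word-oriented FIFO access: a cra_alignmask of 3 asks the crypto API to hand the driver 4-byte-aligned buffers (bouncing misaligned caller data through an aligned copy), while 0 leaves SEC2+ unconstrained:

```c
if (has_ftr_sec1(priv))
	alg->cra_alignmask = 3;	/* (1 << 2) - 1: 32-bit word alignment */
else
	alg->cra_alignmask = 0;	/* no alignment requirement */
```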
@@ -3418,7 +3394,7 @@ static int talitos_probe(struct platform_device *ofdev) | |||
3418 | if (err) | 3394 | if (err) |
3419 | goto err_out; | 3395 | goto err_out; |
3420 | 3396 | ||
3421 | if (of_device_is_compatible(np, "fsl,sec1.0")) { | 3397 | if (has_ftr_sec1(priv)) { |
3422 | if (priv->num_channels == 1) | 3398 | if (priv->num_channels == 1) |
3423 | tasklet_init(&priv->done_task[0], talitos1_done_ch0, | 3399 | tasklet_init(&priv->done_task[0], talitos1_done_ch0, |
3424 | (unsigned long)dev); | 3400 | (unsigned long)dev); |
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index a65a63e0d6c1..1469b956948a 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h | |||
@@ -1,31 +1,8 @@ | |||
1 | /* SPDX-License-Identifier: BSD-3-Clause */ | ||
1 | /* | 2 | /* |
2 | * Freescale SEC (talitos) device register and descriptor header defines | 3 | * Freescale SEC (talitos) device register and descriptor header defines |
3 | * | 4 | * |
4 | * Copyright (c) 2006-2011 Freescale Semiconductor, Inc. | 5 | * Copyright (c) 2006-2011 Freescale Semiconductor, Inc. |
5 | * | ||
6 | * Redistribution and use in source and binary forms, with or without | ||
7 | * modification, are permitted provided that the following conditions | ||
8 | * are met: | ||
9 | * | ||
10 | * 1. Redistributions of source code must retain the above copyright | ||
11 | * notice, this list of conditions and the following disclaimer. | ||
12 | * 2. Redistributions in binary form must reproduce the above copyright | ||
13 | * notice, this list of conditions and the following disclaimer in the | ||
14 | * documentation and/or other materials provided with the distribution. | ||
15 | * 3. The name of the author may not be used to endorse or promote products | ||
16 | * derived from this software without specific prior written permission. | ||
17 | * | ||
18 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | ||
19 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | ||
20 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | ||
21 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
22 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
23 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
24 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
25 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
26 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
27 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
28 | * | ||
29 | */ | 6 | */ |
30 | 7 | ||
31 | #define TALITOS_TIMEOUT 100000 | 8 | #define TALITOS_TIMEOUT 100000 |
@@ -65,6 +42,34 @@ struct talitos_desc { | |||
65 | 42 | ||
66 | #define TALITOS_DESC_SIZE (sizeof(struct talitos_desc) - sizeof(__be32)) | 43 | #define TALITOS_DESC_SIZE (sizeof(struct talitos_desc) - sizeof(__be32)) |
67 | 44 | ||
45 | /* | ||
46 | * talitos_edesc - s/w-extended descriptor | ||
47 | * @src_nents: number of segments in input scatterlist | ||
48 | * @dst_nents: number of segments in output scatterlist | ||
49 | * @iv_dma: dma address of iv for checking continuity and link table | ||
50 | * @dma_len: length of dma mapped link_tbl space | ||
51 | * @dma_link_tbl: bus physical address of link_tbl/buf | ||
52 | * @desc: h/w descriptor | ||
53 | * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2) | ||
54 | * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1) | ||
55 | * | ||
56 | * if decrypting (with authcheck), or if either src_nents or dst_nents | ||
57 | * is greater than 1, an integrity check value is concatenated to the end | ||
58 | * of the link_tbl data | ||
59 | */ | ||
60 | struct talitos_edesc { | ||
61 | int src_nents; | ||
62 | int dst_nents; | ||
63 | dma_addr_t iv_dma; | ||
64 | int dma_len; | ||
65 | dma_addr_t dma_link_tbl; | ||
66 | struct talitos_desc desc; | ||
67 | union { | ||
68 | struct talitos_ptr link_tbl[0]; | ||
69 | u8 buf[0]; | ||
70 | }; | ||
71 | }; | ||
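With talitos.c and the SEC1/SEC2 completion paths both needing it, the extended descriptor moves into the header. Its trailing anonymous union is a flexible array viewed two ways: SEC2+ indexes the storage as link-table pointers, SEC1 treats it as a flat byte buffer; either way it is sized at allocation time. A sketch of allocating one (sizes illustrative, SEC2+ rule):

```c
static struct talitos_edesc *example_edesc_alloc(int src_nents,
						 int dst_nents,
						 unsigned int authsize)
{
	/* src+dst link-table entries, two chaining spares, one ICV */
	size_t dma_len = (src_nents + dst_nents + 2) *
			 sizeof(struct talitos_ptr) + authsize;
	struct talitos_edesc *edesc;

	edesc = kzalloc(sizeof(*edesc) + dma_len, GFP_ATOMIC);
	if (!edesc)
		return ERR_PTR(-ENOMEM);
	edesc->dma_len = dma_len;	/* link_tbl[] and buf[] alias this */
	return edesc;
}
```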
72 | |||
68 | /** | 73 | /** |
69 | * talitos_request - descriptor submission request | 74 | * talitos_request - descriptor submission request |
70 | * @desc: descriptor pointer (kernel virtual) | 75 | * @desc: descriptor pointer (kernel virtual) |
@@ -150,12 +155,6 @@ struct talitos_private { | |||
150 | bool rng_registered; | 155 | bool rng_registered; |
151 | }; | 156 | }; |
152 | 157 | ||
153 | extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, | ||
154 | void (*callback)(struct device *dev, | ||
155 | struct talitos_desc *desc, | ||
156 | void *context, int error), | ||
157 | void *context); | ||
158 | |||
159 | /* .features flag */ | 158 | /* .features flag */ |
160 | #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 | 159 | #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 |
161 | #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 | 160 | #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 |
@@ -170,13 +169,11 @@ extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, | |||
170 | */ | 169 | */ |
171 | static inline bool has_ftr_sec1(struct talitos_private *priv) | 170 | static inline bool has_ftr_sec1(struct talitos_private *priv) |
172 | { | 171 | { |
173 | #if defined(CONFIG_CRYPTO_DEV_TALITOS1) && defined(CONFIG_CRYPTO_DEV_TALITOS2) | 172 | if (IS_ENABLED(CONFIG_CRYPTO_DEV_TALITOS1) && |
174 | return priv->features & TALITOS_FTR_SEC1 ? true : false; | 173 | IS_ENABLED(CONFIG_CRYPTO_DEV_TALITOS2)) |
175 | #elif defined(CONFIG_CRYPTO_DEV_TALITOS1) | 174 | return priv->features & TALITOS_FTR_SEC1; |
176 | return true; | 175 | |
177 | #else | 176 | return IS_ENABLED(CONFIG_CRYPTO_DEV_TALITOS1); |
178 | return false; | ||
179 | #endif | ||
180 | } | 177 | } |
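Rewriting has_ftr_sec1() with IS_ENABLED() means every branch is parsed and type-checked in every configuration, with constant folding then discarding the dead ones; the old #ifdef ladder never even compiled the other arms. The shape of the idiom, with placeholder config symbols:

```c
/* CONFIG_FOO, CONFIG_BAR and EXAMPLE_FTR_BIT are placeholders */
static inline bool example_has_feature(u32 features)
{
	if (IS_ENABLED(CONFIG_FOO) && IS_ENABLED(CONFIG_BAR))
		return features & EXAMPLE_FTR_BIT;	/* runtime check */

	return IS_ENABLED(CONFIG_FOO);		/* decided at build time */
}
```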
181 | 178 | ||
182 | /* | 179 | /* |
@@ -412,5 +409,5 @@ static inline bool has_ftr_sec1(struct talitos_private *priv) | |||
412 | 409 | ||
413 | /* link table extent field bits */ | 410 | /* link table extent field bits */ |
414 | #define DESC_PTR_LNKTBL_JUMP 0x80 | 411 | #define DESC_PTR_LNKTBL_JUMP 0x80 |
415 | #define DESC_PTR_LNKTBL_RETURN 0x02 | 412 | #define DESC_PTR_LNKTBL_RET 0x02 |
416 | #define DESC_PTR_LNKTBL_NEXT 0x01 | 413 | #define DESC_PTR_LNKTBL_NEXT 0x01 |
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index c7e515a1bc97..d88084447f1c 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c | |||
@@ -7,64 +7,52 @@ | |||
7 | * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com> | 7 | * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/err.h> | ||
12 | #include <linux/crypto.h> | ||
13 | #include <linux/delay.h> | ||
14 | #include <asm/simd.h> | 10 | #include <asm/simd.h> |
15 | #include <asm/switch_to.h> | 11 | #include <asm/switch_to.h> |
16 | #include <crypto/aes.h> | 12 | #include <crypto/aes.h> |
17 | #include <crypto/internal/simd.h> | 13 | #include <crypto/internal/simd.h> |
18 | #include <crypto/scatterwalk.h> | 14 | #include <crypto/internal/skcipher.h> |
19 | #include <crypto/skcipher.h> | ||
20 | 15 | ||
21 | #include "aesp8-ppc.h" | 16 | #include "aesp8-ppc.h" |
22 | 17 | ||
23 | struct p8_aes_cbc_ctx { | 18 | struct p8_aes_cbc_ctx { |
24 | struct crypto_sync_skcipher *fallback; | 19 | struct crypto_skcipher *fallback; |
25 | struct aes_key enc_key; | 20 | struct aes_key enc_key; |
26 | struct aes_key dec_key; | 21 | struct aes_key dec_key; |
27 | }; | 22 | }; |
28 | 23 | ||
29 | static int p8_aes_cbc_init(struct crypto_tfm *tfm) | 24 | static int p8_aes_cbc_init(struct crypto_skcipher *tfm) |
30 | { | 25 | { |
31 | const char *alg = crypto_tfm_alg_name(tfm); | 26 | struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); |
32 | struct crypto_sync_skcipher *fallback; | 27 | struct crypto_skcipher *fallback; |
33 | struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); | ||
34 | |||
35 | fallback = crypto_alloc_sync_skcipher(alg, 0, | ||
36 | CRYPTO_ALG_NEED_FALLBACK); | ||
37 | 28 | ||
29 | fallback = crypto_alloc_skcipher("cbc(aes)", 0, | ||
30 | CRYPTO_ALG_NEED_FALLBACK | | ||
31 | CRYPTO_ALG_ASYNC); | ||
38 | if (IS_ERR(fallback)) { | 32 | if (IS_ERR(fallback)) { |
39 | printk(KERN_ERR | 33 | pr_err("Failed to allocate cbc(aes) fallback: %ld\n", |
40 | "Failed to allocate transformation for '%s': %ld\n", | 34 | PTR_ERR(fallback)); |
41 | alg, PTR_ERR(fallback)); | ||
42 | return PTR_ERR(fallback); | 35 | return PTR_ERR(fallback); |
43 | } | 36 | } |
44 | 37 | ||
45 | crypto_sync_skcipher_set_flags( | 38 | crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) + |
46 | fallback, | 39 | crypto_skcipher_reqsize(fallback)); |
47 | crypto_skcipher_get_flags((struct crypto_skcipher *)tfm)); | ||
48 | ctx->fallback = fallback; | 40 | ctx->fallback = fallback; |
49 | |||
50 | return 0; | 41 | return 0; |
51 | } | 42 | } |
52 | 43 | ||
53 | static void p8_aes_cbc_exit(struct crypto_tfm *tfm) | 44 | static void p8_aes_cbc_exit(struct crypto_skcipher *tfm) |
54 | { | 45 | { |
55 | struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); | 46 | struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); |
56 | 47 | ||
57 | if (ctx->fallback) { | 48 | crypto_free_skcipher(ctx->fallback); |
58 | crypto_free_sync_skcipher(ctx->fallback); | ||
59 | ctx->fallback = NULL; | ||
60 | } | ||
61 | } | 49 | } |
62 | 50 | ||
63 | static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key, | 51 | static int p8_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key, |
64 | unsigned int keylen) | 52 | unsigned int keylen) |
65 | { | 53 | { |
54 | struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
66 | int ret; | 55 | int ret; |
67 | struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); | ||
68 | 56 | ||
69 | preempt_disable(); | 57 | preempt_disable(); |
70 | pagefault_disable(); | 58 | pagefault_disable(); |
@@ -75,108 +63,71 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key, | |||
75 | pagefault_enable(); | 63 | pagefault_enable(); |
76 | preempt_enable(); | 64 | preempt_enable(); |
77 | 65 | ||
78 | ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); | 66 | ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen); |
79 | 67 | ||
80 | return ret ? -EINVAL : 0; | 68 | return ret ? -EINVAL : 0; |
81 | } | 69 | } |
82 | 70 | ||
83 | static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, | 71 | static int p8_aes_cbc_crypt(struct skcipher_request *req, int enc) |
84 | struct scatterlist *dst, | ||
85 | struct scatterlist *src, unsigned int nbytes) | ||
86 | { | 72 | { |
73 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
74 | const struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
75 | struct skcipher_walk walk; | ||
76 | unsigned int nbytes; | ||
87 | int ret; | 77 | int ret; |
88 | struct blkcipher_walk walk; | ||
89 | struct p8_aes_cbc_ctx *ctx = | ||
90 | crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); | ||
91 | 78 | ||
92 | if (!crypto_simd_usable()) { | 79 | if (!crypto_simd_usable()) { |
93 | SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); | 80 | struct skcipher_request *subreq = skcipher_request_ctx(req); |
94 | skcipher_request_set_sync_tfm(req, ctx->fallback); | 81 | |
95 | skcipher_request_set_callback(req, desc->flags, NULL, NULL); | 82 | *subreq = *req; |
96 | skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); | 83 | skcipher_request_set_tfm(subreq, ctx->fallback); |
97 | ret = crypto_skcipher_encrypt(req); | 84 | return enc ? crypto_skcipher_encrypt(subreq) : |
98 | skcipher_request_zero(req); | 85 | crypto_skcipher_decrypt(subreq); |
99 | } else { | ||
100 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
101 | ret = blkcipher_walk_virt(desc, &walk); | ||
102 | while ((nbytes = walk.nbytes)) { | ||
103 | preempt_disable(); | ||
104 | pagefault_disable(); | ||
105 | enable_kernel_vsx(); | ||
106 | aes_p8_cbc_encrypt(walk.src.virt.addr, | ||
107 | walk.dst.virt.addr, | ||
108 | nbytes & AES_BLOCK_MASK, | ||
109 | &ctx->enc_key, walk.iv, 1); | ||
110 | disable_kernel_vsx(); | ||
111 | pagefault_enable(); | ||
112 | preempt_enable(); | ||
113 | |||
114 | nbytes &= AES_BLOCK_SIZE - 1; | ||
115 | ret = blkcipher_walk_done(desc, &walk, nbytes); | ||
116 | } | ||
117 | } | 86 | } |
118 | 87 | ||
88 | ret = skcipher_walk_virt(&walk, req, false); | ||
89 | while ((nbytes = walk.nbytes) != 0) { | ||
90 | preempt_disable(); | ||
91 | pagefault_disable(); | ||
92 | enable_kernel_vsx(); | ||
93 | aes_p8_cbc_encrypt(walk.src.virt.addr, | ||
94 | walk.dst.virt.addr, | ||
95 | round_down(nbytes, AES_BLOCK_SIZE), | ||
96 | enc ? &ctx->enc_key : &ctx->dec_key, | ||
97 | walk.iv, enc); | ||
98 | disable_kernel_vsx(); | ||
99 | pagefault_enable(); | ||
100 | preempt_enable(); | ||
101 | |||
102 | ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE); | ||
103 | } | ||
119 | return ret; | 104 | return ret; |
120 | } | 105 | } |
121 | 106 | ||
122 | static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, | 107 | static int p8_aes_cbc_encrypt(struct skcipher_request *req) |
123 | struct scatterlist *dst, | ||
124 | struct scatterlist *src, unsigned int nbytes) | ||
125 | { | 108 | { |
126 | int ret; | 109 | return p8_aes_cbc_crypt(req, 1); |
127 | struct blkcipher_walk walk; | ||
128 | struct p8_aes_cbc_ctx *ctx = | ||
129 | crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); | ||
130 | |||
131 | if (!crypto_simd_usable()) { | ||
132 | SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); | ||
133 | skcipher_request_set_sync_tfm(req, ctx->fallback); | ||
134 | skcipher_request_set_callback(req, desc->flags, NULL, NULL); | ||
135 | skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); | ||
136 | ret = crypto_skcipher_decrypt(req); | ||
137 | skcipher_request_zero(req); | ||
138 | } else { | ||
139 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
140 | ret = blkcipher_walk_virt(desc, &walk); | ||
141 | while ((nbytes = walk.nbytes)) { | ||
142 | preempt_disable(); | ||
143 | pagefault_disable(); | ||
144 | enable_kernel_vsx(); | ||
145 | aes_p8_cbc_encrypt(walk.src.virt.addr, | ||
146 | walk.dst.virt.addr, | ||
147 | nbytes & AES_BLOCK_MASK, | ||
148 | &ctx->dec_key, walk.iv, 0); | ||
149 | disable_kernel_vsx(); | ||
150 | pagefault_enable(); | ||
151 | preempt_enable(); | ||
152 | |||
153 | nbytes &= AES_BLOCK_SIZE - 1; | ||
154 | ret = blkcipher_walk_done(desc, &walk, nbytes); | ||
155 | } | ||
156 | } | ||
157 | |||
158 | return ret; | ||
159 | } | 110 | } |
160 | 111 | ||
112 | static int p8_aes_cbc_decrypt(struct skcipher_request *req) | ||
113 | { | ||
114 | return p8_aes_cbc_crypt(req, 0); | ||
115 | } | ||
161 | 116 | ||
162 | struct crypto_alg p8_aes_cbc_alg = { | 117 | struct skcipher_alg p8_aes_cbc_alg = { |
163 | .cra_name = "cbc(aes)", | 118 | .base.cra_name = "cbc(aes)", |
164 | .cra_driver_name = "p8_aes_cbc", | 119 | .base.cra_driver_name = "p8_aes_cbc", |
165 | .cra_module = THIS_MODULE, | 120 | .base.cra_module = THIS_MODULE, |
166 | .cra_priority = 2000, | 121 | .base.cra_priority = 2000, |
167 | .cra_type = &crypto_blkcipher_type, | 122 | .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK, |
168 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, | 123 | .base.cra_blocksize = AES_BLOCK_SIZE, |
169 | .cra_alignmask = 0, | 124 | .base.cra_ctxsize = sizeof(struct p8_aes_cbc_ctx), |
170 | .cra_blocksize = AES_BLOCK_SIZE, | 125 | .setkey = p8_aes_cbc_setkey, |
171 | .cra_ctxsize = sizeof(struct p8_aes_cbc_ctx), | 126 | .encrypt = p8_aes_cbc_encrypt, |
172 | .cra_init = p8_aes_cbc_init, | 127 | .decrypt = p8_aes_cbc_decrypt, |
173 | .cra_exit = p8_aes_cbc_exit, | 128 | .init = p8_aes_cbc_init, |
174 | .cra_blkcipher = { | 129 | .exit = p8_aes_cbc_exit, |
175 | .ivsize = AES_BLOCK_SIZE, | 130 | .min_keysize = AES_MIN_KEY_SIZE, |
176 | .min_keysize = AES_MIN_KEY_SIZE, | 131 | .max_keysize = AES_MAX_KEY_SIZE, |
177 | .max_keysize = AES_MAX_KEY_SIZE, | 132 | .ivsize = AES_BLOCK_SIZE, |
178 | .setkey = p8_aes_cbc_setkey, | ||
179 | .encrypt = p8_aes_cbc_encrypt, | ||
180 | .decrypt = p8_aes_cbc_decrypt, | ||
181 | }, | ||
182 | }; | 133 | }; |
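
All three vmx conversions (CBC here, then CTR and XTS below) share one fallback idiom. init() calls crypto_skcipher_set_reqsize() so the request context is large enough to embed a complete fallback request, keeping the !crypto_simd_usable() path allocation-free; `*subreq = *req` then copies the base request (completion callback, cryptlen, IV, src/dst scatterlists), and only the tfm pointer is repointed. Note also the allocation mask: with type 0, setting CRYPTO_ALG_ASYNC in the mask excludes asynchronous implementations, so the fallback is synchronous like the vmx algorithms themselves. The idiom condensed, as used verbatim in the crypt paths:

    if (!crypto_simd_usable()) {
            struct skcipher_request *subreq = skcipher_request_ctx(req);

            *subreq = *req;                 /* inherit callback, buffers, flags */
            skcipher_request_set_tfm(subreq, ctx->fallback);
            return enc ? crypto_skcipher_encrypt(subreq) :
                         crypto_skcipher_decrypt(subreq);
    }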
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index dd017ef42fa9..79ba062ee1c1 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c | |||
@@ -7,62 +7,51 @@ | |||
7 | * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com> | 7 | * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/err.h> | ||
12 | #include <linux/crypto.h> | ||
13 | #include <linux/delay.h> | ||
14 | #include <asm/simd.h> | 10 | #include <asm/simd.h> |
15 | #include <asm/switch_to.h> | 11 | #include <asm/switch_to.h> |
16 | #include <crypto/aes.h> | 12 | #include <crypto/aes.h> |
17 | #include <crypto/internal/simd.h> | 13 | #include <crypto/internal/simd.h> |
18 | #include <crypto/scatterwalk.h> | 14 | #include <crypto/internal/skcipher.h> |
19 | #include <crypto/skcipher.h> | ||
20 | 15 | ||
21 | #include "aesp8-ppc.h" | 16 | #include "aesp8-ppc.h" |
22 | 17 | ||
23 | struct p8_aes_ctr_ctx { | 18 | struct p8_aes_ctr_ctx { |
24 | struct crypto_sync_skcipher *fallback; | 19 | struct crypto_skcipher *fallback; |
25 | struct aes_key enc_key; | 20 | struct aes_key enc_key; |
26 | }; | 21 | }; |
27 | 22 | ||
28 | static int p8_aes_ctr_init(struct crypto_tfm *tfm) | 23 | static int p8_aes_ctr_init(struct crypto_skcipher *tfm) |
29 | { | 24 | { |
30 | const char *alg = crypto_tfm_alg_name(tfm); | 25 | struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); |
31 | struct crypto_sync_skcipher *fallback; | 26 | struct crypto_skcipher *fallback; |
32 | struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); | ||
33 | 27 | ||
34 | fallback = crypto_alloc_sync_skcipher(alg, 0, | 28 | fallback = crypto_alloc_skcipher("ctr(aes)", 0, |
35 | CRYPTO_ALG_NEED_FALLBACK); | 29 | CRYPTO_ALG_NEED_FALLBACK | |
30 | CRYPTO_ALG_ASYNC); | ||
36 | if (IS_ERR(fallback)) { | 31 | if (IS_ERR(fallback)) { |
37 | printk(KERN_ERR | 32 | pr_err("Failed to allocate ctr(aes) fallback: %ld\n", |
38 | "Failed to allocate transformation for '%s': %ld\n", | 33 | PTR_ERR(fallback)); |
39 | alg, PTR_ERR(fallback)); | ||
40 | return PTR_ERR(fallback); | 34 | return PTR_ERR(fallback); |
41 | } | 35 | } |
42 | 36 | ||
43 | crypto_sync_skcipher_set_flags( | 37 | crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) + |
44 | fallback, | 38 | crypto_skcipher_reqsize(fallback)); |
45 | crypto_skcipher_get_flags((struct crypto_skcipher *)tfm)); | ||
46 | ctx->fallback = fallback; | 39 | ctx->fallback = fallback; |
47 | |||
48 | return 0; | 40 | return 0; |
49 | } | 41 | } |
50 | 42 | ||
51 | static void p8_aes_ctr_exit(struct crypto_tfm *tfm) | 43 | static void p8_aes_ctr_exit(struct crypto_skcipher *tfm) |
52 | { | 44 | { |
53 | struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); | 45 | struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); |
54 | 46 | ||
55 | if (ctx->fallback) { | 47 | crypto_free_skcipher(ctx->fallback); |
56 | crypto_free_sync_skcipher(ctx->fallback); | ||
57 | ctx->fallback = NULL; | ||
58 | } | ||
59 | } | 48 | } |
60 | 49 | ||
61 | static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key, | 50 | static int p8_aes_ctr_setkey(struct crypto_skcipher *tfm, const u8 *key, |
62 | unsigned int keylen) | 51 | unsigned int keylen) |
63 | { | 52 | { |
53 | struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
64 | int ret; | 54 | int ret; |
65 | struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); | ||
66 | 55 | ||
67 | preempt_disable(); | 56 | preempt_disable(); |
68 | pagefault_disable(); | 57 | pagefault_disable(); |
@@ -72,13 +61,13 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key, | |||
72 | pagefault_enable(); | 61 | pagefault_enable(); |
73 | preempt_enable(); | 62 | preempt_enable(); |
74 | 63 | ||
75 | ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); | 64 | ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen); |
76 | 65 | ||
77 | return ret ? -EINVAL : 0; | 66 | return ret ? -EINVAL : 0; |
78 | } | 67 | } |
79 | 68 | ||
80 | static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx, | 69 | static void p8_aes_ctr_final(const struct p8_aes_ctr_ctx *ctx, |
81 | struct blkcipher_walk *walk) | 70 | struct skcipher_walk *walk) |
82 | { | 71 | { |
83 | u8 *ctrblk = walk->iv; | 72 | u8 *ctrblk = walk->iv; |
84 | u8 keystream[AES_BLOCK_SIZE]; | 73 | u8 keystream[AES_BLOCK_SIZE]; |
@@ -98,77 +87,63 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx, | |||
98 | crypto_inc(ctrblk, AES_BLOCK_SIZE); | 87 | crypto_inc(ctrblk, AES_BLOCK_SIZE); |
99 | } | 88 | } |
100 | 89 | ||
101 | static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, | 90 | static int p8_aes_ctr_crypt(struct skcipher_request *req) |
102 | struct scatterlist *dst, | ||
103 | struct scatterlist *src, unsigned int nbytes) | ||
104 | { | 91 | { |
92 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
93 | const struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
94 | struct skcipher_walk walk; | ||
95 | unsigned int nbytes; | ||
105 | int ret; | 96 | int ret; |
106 | u64 inc; | ||
107 | struct blkcipher_walk walk; | ||
108 | struct p8_aes_ctr_ctx *ctx = | ||
109 | crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); | ||
110 | 97 | ||
111 | if (!crypto_simd_usable()) { | 98 | if (!crypto_simd_usable()) { |
112 | SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); | 99 | struct skcipher_request *subreq = skcipher_request_ctx(req); |
113 | skcipher_request_set_sync_tfm(req, ctx->fallback); | 100 | |
114 | skcipher_request_set_callback(req, desc->flags, NULL, NULL); | 101 | *subreq = *req; |
115 | skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); | 102 | skcipher_request_set_tfm(subreq, ctx->fallback); |
116 | ret = crypto_skcipher_encrypt(req); | 103 | return crypto_skcipher_encrypt(subreq); |
117 | skcipher_request_zero(req); | ||
118 | } else { | ||
119 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
120 | ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); | ||
121 | while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { | ||
122 | preempt_disable(); | ||
123 | pagefault_disable(); | ||
124 | enable_kernel_vsx(); | ||
125 | aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, | ||
126 | walk.dst.virt.addr, | ||
127 | (nbytes & | ||
128 | AES_BLOCK_MASK) / | ||
129 | AES_BLOCK_SIZE, | ||
130 | &ctx->enc_key, | ||
131 | walk.iv); | ||
132 | disable_kernel_vsx(); | ||
133 | pagefault_enable(); | ||
134 | preempt_enable(); | ||
135 | |||
136 | /* We need to update IV mostly for last bytes/round */ | ||
137 | inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE; | ||
138 | if (inc > 0) | ||
139 | while (inc--) | ||
140 | crypto_inc(walk.iv, AES_BLOCK_SIZE); | ||
141 | |||
142 | nbytes &= AES_BLOCK_SIZE - 1; | ||
143 | ret = blkcipher_walk_done(desc, &walk, nbytes); | ||
144 | } | ||
145 | if (walk.nbytes) { | ||
146 | p8_aes_ctr_final(ctx, &walk); | ||
147 | ret = blkcipher_walk_done(desc, &walk, 0); | ||
148 | } | ||
149 | } | 104 | } |
150 | 105 | ||
106 | ret = skcipher_walk_virt(&walk, req, false); | ||
107 | while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { | ||
108 | preempt_disable(); | ||
109 | pagefault_disable(); | ||
110 | enable_kernel_vsx(); | ||
111 | aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, | ||
112 | walk.dst.virt.addr, | ||
113 | nbytes / AES_BLOCK_SIZE, | ||
114 | &ctx->enc_key, walk.iv); | ||
115 | disable_kernel_vsx(); | ||
116 | pagefault_enable(); | ||
117 | preempt_enable(); | ||
118 | |||
119 | do { | ||
120 | crypto_inc(walk.iv, AES_BLOCK_SIZE); | ||
121 | } while ((nbytes -= AES_BLOCK_SIZE) >= AES_BLOCK_SIZE); | ||
122 | |||
123 | ret = skcipher_walk_done(&walk, nbytes); | ||
124 | } | ||
125 | if (nbytes) { | ||
126 | p8_aes_ctr_final(ctx, &walk); | ||
127 | ret = skcipher_walk_done(&walk, 0); | ||
128 | } | ||
151 | return ret; | 129 | return ret; |
152 | } | 130 | } |
153 | 131 | ||
154 | struct crypto_alg p8_aes_ctr_alg = { | 132 | struct skcipher_alg p8_aes_ctr_alg = { |
155 | .cra_name = "ctr(aes)", | 133 | .base.cra_name = "ctr(aes)", |
156 | .cra_driver_name = "p8_aes_ctr", | 134 | .base.cra_driver_name = "p8_aes_ctr", |
157 | .cra_module = THIS_MODULE, | 135 | .base.cra_module = THIS_MODULE, |
158 | .cra_priority = 2000, | 136 | .base.cra_priority = 2000, |
159 | .cra_type = &crypto_blkcipher_type, | 137 | .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK, |
160 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, | 138 | .base.cra_blocksize = 1, |
161 | .cra_alignmask = 0, | 139 | .base.cra_ctxsize = sizeof(struct p8_aes_ctr_ctx), |
162 | .cra_blocksize = 1, | 140 | .setkey = p8_aes_ctr_setkey, |
163 | .cra_ctxsize = sizeof(struct p8_aes_ctr_ctx), | 141 | .encrypt = p8_aes_ctr_crypt, |
164 | .cra_init = p8_aes_ctr_init, | 142 | .decrypt = p8_aes_ctr_crypt, |
165 | .cra_exit = p8_aes_ctr_exit, | 143 | .init = p8_aes_ctr_init, |
166 | .cra_blkcipher = { | 144 | .exit = p8_aes_ctr_exit, |
167 | .ivsize = AES_BLOCK_SIZE, | 145 | .min_keysize = AES_MIN_KEY_SIZE, |
168 | .min_keysize = AES_MIN_KEY_SIZE, | 146 | .max_keysize = AES_MAX_KEY_SIZE, |
169 | .max_keysize = AES_MAX_KEY_SIZE, | 147 | .ivsize = AES_BLOCK_SIZE, |
170 | .setkey = p8_aes_ctr_setkey, | 148 | .chunksize = AES_BLOCK_SIZE, |
171 | .encrypt = p8_aes_ctr_crypt, | ||
172 | .decrypt = p8_aes_ctr_crypt, | ||
173 | }, | ||
174 | }; | 149 | }; |
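
Two details separate the CTR registration from CBC: cra_blocksize is 1, because CTR is a stream cipher, and the new chunksize field advertises the AES_BLOCK_SIZE keystream granularity to the skcipher walk code. The sub-block tail is handled by p8_aes_ctr_final(), whose body is elided from the hunk above; judging from the visible declarations (ctrblk, keystream) and the closing crypto_inc(), its shape is approximately:

    u8 keystream[AES_BLOCK_SIZE];
    u8 *ctrblk = walk->iv;

    /* one keystream block from the counter, XOR only the tail bytes
     * (the VSX enable/disable window around the encrypt is elided) */
    aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
    crypto_xor_cpy(walk->dst.virt.addr, keystream, walk->src.virt.addr,
                   walk->nbytes);
    crypto_inc(ctrblk, AES_BLOCK_SIZE);     /* 128-bit big-endian increment */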
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index 536167e737a0..49f7258045fa 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c | |||
@@ -7,67 +7,56 @@ | |||
7 | * Author: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com> | 7 | * Author: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/err.h> | ||
12 | #include <linux/crypto.h> | ||
13 | #include <linux/delay.h> | ||
14 | #include <asm/simd.h> | 10 | #include <asm/simd.h> |
15 | #include <asm/switch_to.h> | 11 | #include <asm/switch_to.h> |
16 | #include <crypto/aes.h> | 12 | #include <crypto/aes.h> |
17 | #include <crypto/internal/simd.h> | 13 | #include <crypto/internal/simd.h> |
18 | #include <crypto/scatterwalk.h> | 14 | #include <crypto/internal/skcipher.h> |
19 | #include <crypto/xts.h> | 15 | #include <crypto/xts.h> |
20 | #include <crypto/skcipher.h> | ||
21 | 16 | ||
22 | #include "aesp8-ppc.h" | 17 | #include "aesp8-ppc.h" |
23 | 18 | ||
24 | struct p8_aes_xts_ctx { | 19 | struct p8_aes_xts_ctx { |
25 | struct crypto_sync_skcipher *fallback; | 20 | struct crypto_skcipher *fallback; |
26 | struct aes_key enc_key; | 21 | struct aes_key enc_key; |
27 | struct aes_key dec_key; | 22 | struct aes_key dec_key; |
28 | struct aes_key tweak_key; | 23 | struct aes_key tweak_key; |
29 | }; | 24 | }; |
30 | 25 | ||
31 | static int p8_aes_xts_init(struct crypto_tfm *tfm) | 26 | static int p8_aes_xts_init(struct crypto_skcipher *tfm) |
32 | { | 27 | { |
33 | const char *alg = crypto_tfm_alg_name(tfm); | 28 | struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); |
34 | struct crypto_sync_skcipher *fallback; | 29 | struct crypto_skcipher *fallback; |
35 | struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm); | ||
36 | 30 | ||
37 | fallback = crypto_alloc_sync_skcipher(alg, 0, | 31 | fallback = crypto_alloc_skcipher("xts(aes)", 0, |
38 | CRYPTO_ALG_NEED_FALLBACK); | 32 | CRYPTO_ALG_NEED_FALLBACK | |
33 | CRYPTO_ALG_ASYNC); | ||
39 | if (IS_ERR(fallback)) { | 34 | if (IS_ERR(fallback)) { |
40 | printk(KERN_ERR | 35 | pr_err("Failed to allocate xts(aes) fallback: %ld\n", |
41 | "Failed to allocate transformation for '%s': %ld\n", | 36 | PTR_ERR(fallback)); |
42 | alg, PTR_ERR(fallback)); | ||
43 | return PTR_ERR(fallback); | 37 | return PTR_ERR(fallback); |
44 | } | 38 | } |
45 | 39 | ||
46 | crypto_sync_skcipher_set_flags( | 40 | crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) + |
47 | fallback, | 41 | crypto_skcipher_reqsize(fallback)); |
48 | crypto_skcipher_get_flags((struct crypto_skcipher *)tfm)); | ||
49 | ctx->fallback = fallback; | 42 | ctx->fallback = fallback; |
50 | |||
51 | return 0; | 43 | return 0; |
52 | } | 44 | } |
53 | 45 | ||
54 | static void p8_aes_xts_exit(struct crypto_tfm *tfm) | 46 | static void p8_aes_xts_exit(struct crypto_skcipher *tfm) |
55 | { | 47 | { |
56 | struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm); | 48 | struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); |
57 | 49 | ||
58 | if (ctx->fallback) { | 50 | crypto_free_skcipher(ctx->fallback); |
59 | crypto_free_sync_skcipher(ctx->fallback); | ||
60 | ctx->fallback = NULL; | ||
61 | } | ||
62 | } | 51 | } |
63 | 52 | ||
64 | static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key, | 53 | static int p8_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, |
65 | unsigned int keylen) | 54 | unsigned int keylen) |
66 | { | 55 | { |
56 | struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
67 | int ret; | 57 | int ret; |
68 | struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm); | ||
69 | 58 | ||
70 | ret = xts_check_key(tfm, key, keylen); | 59 | ret = xts_verify_key(tfm, key, keylen); |
71 | if (ret) | 60 | if (ret) |
72 | return ret; | 61 | return ret; |
73 | 62 | ||
@@ -81,100 +70,90 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key, | |||
81 | pagefault_enable(); | 70 | pagefault_enable(); |
82 | preempt_enable(); | 71 | preempt_enable(); |
83 | 72 | ||
84 | ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen); | 73 | ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen); |
85 | 74 | ||
86 | return ret ? -EINVAL : 0; | 75 | return ret ? -EINVAL : 0; |
87 | } | 76 | } |
88 | 77 | ||
89 | static int p8_aes_xts_crypt(struct blkcipher_desc *desc, | 78 | static int p8_aes_xts_crypt(struct skcipher_request *req, int enc) |
90 | struct scatterlist *dst, | ||
91 | struct scatterlist *src, | ||
92 | unsigned int nbytes, int enc) | ||
93 | { | 79 | { |
94 | int ret; | 80 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
81 | const struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); | ||
82 | struct skcipher_walk walk; | ||
83 | unsigned int nbytes; | ||
95 | u8 tweak[AES_BLOCK_SIZE]; | 84 | u8 tweak[AES_BLOCK_SIZE]; |
96 | u8 *iv; | 85 | int ret; |
97 | struct blkcipher_walk walk; | ||
98 | struct p8_aes_xts_ctx *ctx = | ||
99 | crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); | ||
100 | 86 | ||
101 | if (!crypto_simd_usable()) { | 87 | if (!crypto_simd_usable()) { |
102 | SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); | 88 | struct skcipher_request *subreq = skcipher_request_ctx(req); |
103 | skcipher_request_set_sync_tfm(req, ctx->fallback); | 89 | |
104 | skcipher_request_set_callback(req, desc->flags, NULL, NULL); | 90 | *subreq = *req; |
105 | skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); | 91 | skcipher_request_set_tfm(subreq, ctx->fallback); |
106 | ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); | 92 | return enc ? crypto_skcipher_encrypt(subreq) : |
107 | skcipher_request_zero(req); | 93 | crypto_skcipher_decrypt(subreq); |
108 | } else { | 94 | } |
109 | blkcipher_walk_init(&walk, dst, src, nbytes); | 95 | |
96 | ret = skcipher_walk_virt(&walk, req, false); | ||
97 | if (ret) | ||
98 | return ret; | ||
99 | |||
100 | preempt_disable(); | ||
101 | pagefault_disable(); | ||
102 | enable_kernel_vsx(); | ||
110 | 103 | ||
111 | ret = blkcipher_walk_virt(desc, &walk); | 104 | aes_p8_encrypt(walk.iv, tweak, &ctx->tweak_key); |
105 | |||
106 | disable_kernel_vsx(); | ||
107 | pagefault_enable(); | ||
108 | preempt_enable(); | ||
112 | 109 | ||
110 | while ((nbytes = walk.nbytes) != 0) { | ||
113 | preempt_disable(); | 111 | preempt_disable(); |
114 | pagefault_disable(); | 112 | pagefault_disable(); |
115 | enable_kernel_vsx(); | 113 | enable_kernel_vsx(); |
116 | 114 | if (enc) | |
117 | iv = walk.iv; | 115 | aes_p8_xts_encrypt(walk.src.virt.addr, |
118 | memset(tweak, 0, AES_BLOCK_SIZE); | 116 | walk.dst.virt.addr, |
119 | aes_p8_encrypt(iv, tweak, &ctx->tweak_key); | 117 | round_down(nbytes, AES_BLOCK_SIZE), |
120 | 118 | &ctx->enc_key, NULL, tweak); | |
119 | else | ||
120 | aes_p8_xts_decrypt(walk.src.virt.addr, | ||
121 | walk.dst.virt.addr, | ||
122 | round_down(nbytes, AES_BLOCK_SIZE), | ||
123 | &ctx->dec_key, NULL, tweak); | ||
121 | disable_kernel_vsx(); | 124 | disable_kernel_vsx(); |
122 | pagefault_enable(); | 125 | pagefault_enable(); |
123 | preempt_enable(); | 126 | preempt_enable(); |
124 | 127 | ||
125 | while ((nbytes = walk.nbytes)) { | 128 | ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE); |
126 | preempt_disable(); | ||
127 | pagefault_disable(); | ||
128 | enable_kernel_vsx(); | ||
129 | if (enc) | ||
130 | aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, | ||
131 | nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak); | ||
132 | else | ||
133 | aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, | ||
134 | nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak); | ||
135 | disable_kernel_vsx(); | ||
136 | pagefault_enable(); | ||
137 | preempt_enable(); | ||
138 | |||
139 | nbytes &= AES_BLOCK_SIZE - 1; | ||
140 | ret = blkcipher_walk_done(desc, &walk, nbytes); | ||
141 | } | ||
142 | } | 129 | } |
143 | return ret; | 130 | return ret; |
144 | } | 131 | } |
145 | 132 | ||
146 | static int p8_aes_xts_encrypt(struct blkcipher_desc *desc, | 133 | static int p8_aes_xts_encrypt(struct skcipher_request *req) |
147 | struct scatterlist *dst, | ||
148 | struct scatterlist *src, unsigned int nbytes) | ||
149 | { | 134 | { |
150 | return p8_aes_xts_crypt(desc, dst, src, nbytes, 1); | 135 | return p8_aes_xts_crypt(req, 1); |
151 | } | 136 | } |
152 | 137 | ||
153 | static int p8_aes_xts_decrypt(struct blkcipher_desc *desc, | 138 | static int p8_aes_xts_decrypt(struct skcipher_request *req) |
154 | struct scatterlist *dst, | ||
155 | struct scatterlist *src, unsigned int nbytes) | ||
156 | { | 139 | { |
157 | return p8_aes_xts_crypt(desc, dst, src, nbytes, 0); | 140 | return p8_aes_xts_crypt(req, 0); |
158 | } | 141 | } |
159 | 142 | ||
160 | struct crypto_alg p8_aes_xts_alg = { | 143 | struct skcipher_alg p8_aes_xts_alg = { |
161 | .cra_name = "xts(aes)", | 144 | .base.cra_name = "xts(aes)", |
162 | .cra_driver_name = "p8_aes_xts", | 145 | .base.cra_driver_name = "p8_aes_xts", |
163 | .cra_module = THIS_MODULE, | 146 | .base.cra_module = THIS_MODULE, |
164 | .cra_priority = 2000, | 147 | .base.cra_priority = 2000, |
165 | .cra_type = &crypto_blkcipher_type, | 148 | .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK, |
166 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, | 149 | .base.cra_blocksize = AES_BLOCK_SIZE, |
167 | .cra_alignmask = 0, | 150 | .base.cra_ctxsize = sizeof(struct p8_aes_xts_ctx), |
168 | .cra_blocksize = AES_BLOCK_SIZE, | 151 | .setkey = p8_aes_xts_setkey, |
169 | .cra_ctxsize = sizeof(struct p8_aes_xts_ctx), | 152 | .encrypt = p8_aes_xts_encrypt, |
170 | .cra_init = p8_aes_xts_init, | 153 | .decrypt = p8_aes_xts_decrypt, |
171 | .cra_exit = p8_aes_xts_exit, | 154 | .init = p8_aes_xts_init, |
172 | .cra_blkcipher = { | 155 | .exit = p8_aes_xts_exit, |
173 | .ivsize = AES_BLOCK_SIZE, | 156 | .min_keysize = 2 * AES_MIN_KEY_SIZE, |
174 | .min_keysize = 2 * AES_MIN_KEY_SIZE, | 157 | .max_keysize = 2 * AES_MAX_KEY_SIZE, |
175 | .max_keysize = 2 * AES_MAX_KEY_SIZE, | 158 | .ivsize = AES_BLOCK_SIZE, |
176 | .setkey = p8_aes_xts_setkey, | ||
177 | .encrypt = p8_aes_xts_encrypt, | ||
178 | .decrypt = p8_aes_xts_decrypt, | ||
179 | } | ||
180 | }; | 159 | }; |
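
Structurally, the XTS crypt path now does the tweak work exactly once: skcipher_walk_virt() initializes the walk (and its return value is finally checked) before walk.iv is read, the tweak T = E_Ktweak(IV) is derived in one short VSX-enabled window, and the old memset of the tweak buffer is gone because aes_p8_encrypt() overwrites all sixteen output bytes anyway. Inside the loop, the tweak buffer is handed to the assembler each iteration, which consumes and updates it, so the running tweak survives across walk chunks. Reduced to a skeleton:

    ret = skcipher_walk_virt(&walk, req, false);
    if (ret)
            return ret;

    aes_p8_encrypt(walk.iv, tweak, &ctx->tweak_key);  /* T = E_Ktweak(IV), once */

    while ((nbytes = walk.nbytes) != 0) {
            /* aes_p8_xts_{en,de}crypt(src, dst,
             *         round_down(nbytes, AES_BLOCK_SIZE),
             *         key, NULL, tweak) advances T for the next chunk */
            ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
    }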
diff --git a/drivers/crypto/vmx/aesp8-ppc.h b/drivers/crypto/vmx/aesp8-ppc.h index 349646b73754..01774a4d26a2 100644 --- a/drivers/crypto/vmx/aesp8-ppc.h +++ b/drivers/crypto/vmx/aesp8-ppc.h | |||
@@ -2,8 +2,6 @@ | |||
2 | #include <linux/types.h> | 2 | #include <linux/types.h> |
3 | #include <crypto/aes.h> | 3 | #include <crypto/aes.h> |
4 | 4 | ||
5 | #define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1)) | ||
6 | |||
7 | struct aes_key { | 5 | struct aes_key { |
8 | u8 key[AES_MAX_KEYLENGTH]; | 6 | u8 key[AES_MAX_KEYLENGTH]; |
9 | int rounds; | 7 | int rounds; |
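
AES_BLOCK_MASK can be deleted because its only use, `nbytes & AES_BLOCK_MASK`, is now spelled round_down(nbytes, AES_BLOCK_SIZE). For a power-of-two size the two are bit-for-bit identical, which a compile-time assertion (valid at function scope) makes concrete:

    #include <linux/kernel.h>               /* round_down() */

    BUILD_BUG_ON(round_down(123, AES_BLOCK_SIZE) !=
                 (123 & ~(AES_BLOCK_SIZE - 1)));    /* both yield 112 */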
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl index 9c6b5c1d6a1a..db874367b602 100644 --- a/drivers/crypto/vmx/aesp8-ppc.pl +++ b/drivers/crypto/vmx/aesp8-ppc.pl | |||
@@ -1286,6 +1286,24 @@ ___ | |||
1286 | 1286 | ||
1287 | ######################################################################### | 1287 | ######################################################################### |
1288 | {{{ # CTR procedure[s] # | 1288 | {{{ # CTR procedure[s] # |
1289 | |||
1290 | ####################### WARNING: Here be dragons! ####################### | ||
1291 | # | ||
1292 | # This code is written as 'ctr32', based on a 32-bit counter used | ||
1293 | # upstream. The kernel does *not* use a 32-bit counter. The kernel uses | ||
1294 | # a 128-bit counter. | ||
1295 | # | ||
1296 | # This leads to subtle changes from the upstream code: the counter | ||
1297 | # is incremented with vaddu_q_m rather than vaddu_w_m. This occurs in | ||
1298 | # both the bulk (8 blocks at a time) path, and in the individual block | ||
1299 | # path. Be aware of this when doing updates. | ||
1300 | # | ||
1301 | # See: | ||
1302 | # 1d4aa0b4c181 ("crypto: vmx - Fixing AES-CTR counter bug") | ||
1303 | # 009b30ac7444 ("crypto: vmx - CTR: always increment IV as quadword") | ||
1304 | # https://github.com/openssl/openssl/pull/8942 | ||
1305 | # | ||
1306 | ######################################################################### | ||
1289 | my ($inp,$out,$len,$key,$ivp,$x10,$rounds,$idx)=map("r$_",(3..10)); | 1307 | my ($inp,$out,$len,$key,$ivp,$x10,$rounds,$idx)=map("r$_",(3..10)); |
1290 | my ($rndkey0,$rndkey1,$inout,$tmp)= map("v$_",(0..3)); | 1308 | my ($rndkey0,$rndkey1,$inout,$tmp)= map("v$_",(0..3)); |
1291 | my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm,$one)= | 1309 | my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm,$one)= |
@@ -1357,7 +1375,7 @@ Loop_ctr32_enc: | |||
1357 | addi $idx,$idx,16 | 1375 | addi $idx,$idx,16 |
1358 | bdnz Loop_ctr32_enc | 1376 | bdnz Loop_ctr32_enc |
1359 | 1377 | ||
1360 | vadduqm $ivec,$ivec,$one | 1378 | vadduqm $ivec,$ivec,$one # Kernel change for 128-bit |
1361 | vmr $dat,$inptail | 1379 | vmr $dat,$inptail |
1362 | lvx $inptail,0,$inp | 1380 | lvx $inptail,0,$inp |
1363 | addi $inp,$inp,16 | 1381 | addi $inp,$inp,16 |
@@ -1501,7 +1519,7 @@ Load_ctr32_enc_key: | |||
1501 | $SHL $len,$len,4 | 1519 | $SHL $len,$len,4 |
1502 | 1520 | ||
1503 | vadduqm $out1,$ivec,$one # counter values ... | 1521 | vadduqm $out1,$ivec,$one # counter values ... |
1504 | vadduqm $out2,$ivec,$two | 1522 | vadduqm $out2,$ivec,$two # (do all ctr adds as 128-bit) |
1505 | vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0] | 1523 | vxor $out0,$ivec,$rndkey0 # ... xored with rndkey[0] |
1506 | le?li $idx,8 | 1524 | le?li $idx,8 |
1507 | vadduqm $out3,$out1,$two | 1525 | vadduqm $out3,$out1,$two |
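
The "here be dragons" comment is the substantive part of this hunk: the kernel's ctr(aes) semantics require a full 128-bit big-endian counter, matching what crypto_inc() does over a whole block, so the perl source adds counters with vadduqm (vector add unsigned quadword) instead of the upstream 32-bit word add. The two only diverge when the low 32 bits wrap, a carry crypto_inc() propagates naturally; a small C illustration:

    u8 ctr[AES_BLOCK_SIZE] = {
            [0 ... 11] = 0x00,
            [12 ... 15] = 0xff,     /* low 32-bit word about to overflow */
    };

    crypto_inc(ctr, AES_BLOCK_SIZE);
    /* 128-bit semantics: bytes 12..15 wrap to zero and the carry sets
     * byte 11 to 0x01.  A 32-bit add would wrap the low word back to
     * zero and repeat keystream, the bug fixed by commit 1d4aa0b4c181
     * referenced above. */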
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c index 6c4c77f4e159..3e0335fb406c 100644 --- a/drivers/crypto/vmx/vmx.c +++ b/drivers/crypto/vmx/vmx.c | |||
@@ -15,54 +15,58 @@ | |||
15 | #include <linux/crypto.h> | 15 | #include <linux/crypto.h> |
16 | #include <asm/cputable.h> | 16 | #include <asm/cputable.h> |
17 | #include <crypto/internal/hash.h> | 17 | #include <crypto/internal/hash.h> |
18 | #include <crypto/internal/skcipher.h> | ||
18 | 19 | ||
19 | extern struct shash_alg p8_ghash_alg; | 20 | extern struct shash_alg p8_ghash_alg; |
20 | extern struct crypto_alg p8_aes_alg; | 21 | extern struct crypto_alg p8_aes_alg; |
21 | extern struct crypto_alg p8_aes_cbc_alg; | 22 | extern struct skcipher_alg p8_aes_cbc_alg; |
22 | extern struct crypto_alg p8_aes_ctr_alg; | 23 | extern struct skcipher_alg p8_aes_ctr_alg; |
23 | extern struct crypto_alg p8_aes_xts_alg; | 24 | extern struct skcipher_alg p8_aes_xts_alg; |
24 | static struct crypto_alg *algs[] = { | ||
25 | &p8_aes_alg, | ||
26 | &p8_aes_cbc_alg, | ||
27 | &p8_aes_ctr_alg, | ||
28 | &p8_aes_xts_alg, | ||
29 | NULL, | ||
30 | }; | ||
31 | 25 | ||
32 | static int __init p8_init(void) | 26 | static int __init p8_init(void) |
33 | { | 27 | { |
34 | int ret = 0; | 28 | int ret; |
35 | struct crypto_alg **alg_it; | ||
36 | 29 | ||
37 | for (alg_it = algs; *alg_it; alg_it++) { | 30 | ret = crypto_register_shash(&p8_ghash_alg); |
38 | ret = crypto_register_alg(*alg_it); | ||
39 | printk(KERN_INFO "crypto_register_alg '%s' = %d\n", | ||
40 | (*alg_it)->cra_name, ret); | ||
41 | if (ret) { | ||
42 | for (alg_it--; alg_it >= algs; alg_it--) | ||
43 | crypto_unregister_alg(*alg_it); | ||
44 | break; | ||
45 | } | ||
46 | } | ||
47 | if (ret) | 31 | if (ret) |
48 | return ret; | 32 | goto err; |
49 | 33 | ||
50 | ret = crypto_register_shash(&p8_ghash_alg); | 34 | ret = crypto_register_alg(&p8_aes_alg); |
51 | if (ret) { | 35 | if (ret) |
52 | for (alg_it = algs; *alg_it; alg_it++) | 36 | goto err_unregister_ghash; |
53 | crypto_unregister_alg(*alg_it); | 37 | |
54 | } | 38 | ret = crypto_register_skcipher(&p8_aes_cbc_alg); |
39 | if (ret) | ||
40 | goto err_unregister_aes; | ||
41 | |||
42 | ret = crypto_register_skcipher(&p8_aes_ctr_alg); | ||
43 | if (ret) | ||
44 | goto err_unregister_aes_cbc; | ||
45 | |||
46 | ret = crypto_register_skcipher(&p8_aes_xts_alg); | ||
47 | if (ret) | ||
48 | goto err_unregister_aes_ctr; | ||
49 | |||
50 | return 0; | ||
51 | |||
52 | err_unregister_aes_ctr: | ||
53 | crypto_unregister_skcipher(&p8_aes_ctr_alg); | ||
54 | err_unregister_aes_cbc: | ||
55 | crypto_unregister_skcipher(&p8_aes_cbc_alg); | ||
56 | err_unregister_aes: | ||
57 | crypto_unregister_alg(&p8_aes_alg); | ||
58 | err_unregister_ghash: | ||
59 | crypto_unregister_shash(&p8_ghash_alg); | ||
60 | err: | ||
55 | return ret; | 61 | return ret; |
56 | } | 62 | } |
57 | 63 | ||
58 | static void __exit p8_exit(void) | 64 | static void __exit p8_exit(void) |
59 | { | 65 | { |
60 | struct crypto_alg **alg_it; | 66 | crypto_unregister_skcipher(&p8_aes_xts_alg); |
61 | 67 | crypto_unregister_skcipher(&p8_aes_ctr_alg); | |
62 | for (alg_it = algs; *alg_it; alg_it++) { | 68 | crypto_unregister_skcipher(&p8_aes_cbc_alg); |
63 | printk(KERN_INFO "Removing '%s'\n", (*alg_it)->cra_name); | 69 | crypto_unregister_alg(&p8_aes_alg); |
64 | crypto_unregister_alg(*alg_it); | ||
65 | } | ||
66 | crypto_unregister_shash(&p8_ghash_alg); | 70 | crypto_unregister_shash(&p8_ghash_alg); |
67 | } | 71 | } |
68 | 72 | ||
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index d84095591e45..f86065e16772 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c | |||
@@ -111,8 +111,7 @@ static int i2c_acpi_do_lookup(struct acpi_device *adev, | |||
111 | struct list_head resource_list; | 111 | struct list_head resource_list; |
112 | int ret; | 112 | int ret; |
113 | 113 | ||
114 | if (acpi_bus_get_status(adev) || !adev->status.present || | 114 | if (acpi_bus_get_status(adev) || !adev->status.present) |
115 | acpi_device_enumerated(adev)) | ||
116 | return -EINVAL; | 115 | return -EINVAL; |
117 | 116 | ||
118 | if (acpi_match_device_ids(adev, i2c_acpi_ignored_device_ids) == 0) | 117 | if (acpi_match_device_ids(adev, i2c_acpi_ignored_device_ids) == 0) |
@@ -147,6 +146,9 @@ static int i2c_acpi_get_info(struct acpi_device *adev, | |||
147 | lookup.info = info; | 146 | lookup.info = info; |
148 | lookup.index = -1; | 147 | lookup.index = -1; |
149 | 148 | ||
149 | if (acpi_device_enumerated(adev)) | ||
150 | return -EINVAL; | ||
151 | |||
150 | ret = i2c_acpi_do_lookup(adev, &lookup); | 152 | ret = i2c_acpi_do_lookup(adev, &lookup); |
151 | if (ret) | 153 | if (ret) |
152 | return ret; | 154 | return ret; |
diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig index bf395df3bb37..1a2e2f7629f3 100644 --- a/drivers/net/ppp/Kconfig +++ b/drivers/net/ppp/Kconfig | |||
@@ -87,8 +87,7 @@ config PPP_MPPE | |||
87 | depends on PPP | 87 | depends on PPP |
88 | select CRYPTO | 88 | select CRYPTO |
89 | select CRYPTO_SHA1 | 89 | select CRYPTO_SHA1 |
90 | select CRYPTO_ARC4 | 90 | select CRYPTO_LIB_ARC4 |
91 | select CRYPTO_ECB | ||
92 | ---help--- | 91 | ---help--- |
93 | Support for the MPPE Encryption protocol, as employed by the | 92 | Support for the MPPE Encryption protocol, as employed by the |
94 | Microsoft Point-to-Point Tunneling Protocol. | 93 | Microsoft Point-to-Point Tunneling Protocol. |
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c index 66c8e65f6872..bd3c80b0bc77 100644 --- a/drivers/net/ppp/ppp_mppe.c +++ b/drivers/net/ppp/ppp_mppe.c | |||
@@ -42,9 +42,10 @@ | |||
42 | * deprecated in 2.6 | 42 | * deprecated in 2.6 |
43 | */ | 43 | */ |
44 | 44 | ||
45 | #include <crypto/arc4.h> | ||
45 | #include <crypto/hash.h> | 46 | #include <crypto/hash.h> |
46 | #include <crypto/skcipher.h> | ||
47 | #include <linux/err.h> | 47 | #include <linux/err.h> |
48 | #include <linux/fips.h> | ||
48 | #include <linux/module.h> | 49 | #include <linux/module.h> |
49 | #include <linux/kernel.h> | 50 | #include <linux/kernel.h> |
50 | #include <linux/init.h> | 51 | #include <linux/init.h> |
@@ -66,13 +67,6 @@ MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE)); | |||
66 | MODULE_SOFTDEP("pre: arc4"); | 67 | MODULE_SOFTDEP("pre: arc4"); |
67 | MODULE_VERSION("1.0.2"); | 68 | MODULE_VERSION("1.0.2"); |
68 | 69 | ||
69 | static unsigned int | ||
70 | setup_sg(struct scatterlist *sg, const void *address, unsigned int length) | ||
71 | { | ||
72 | sg_set_buf(sg, address, length); | ||
73 | return length; | ||
74 | } | ||
75 | |||
76 | #define SHA1_PAD_SIZE 40 | 70 | #define SHA1_PAD_SIZE 40 |
77 | 71 | ||
78 | /* | 72 | /* |
@@ -96,7 +90,7 @@ static inline void sha_pad_init(struct sha_pad *shapad) | |||
96 | * State for an MPPE (de)compressor. | 90 | * State for an MPPE (de)compressor. |
97 | */ | 91 | */ |
98 | struct ppp_mppe_state { | 92 | struct ppp_mppe_state { |
99 | struct crypto_sync_skcipher *arc4; | 93 | struct arc4_ctx arc4; |
100 | struct shash_desc *sha1; | 94 | struct shash_desc *sha1; |
101 | unsigned char *sha1_digest; | 95 | unsigned char *sha1_digest; |
102 | unsigned char master_key[MPPE_MAX_KEY_LEN]; | 96 | unsigned char master_key[MPPE_MAX_KEY_LEN]; |
@@ -155,24 +149,11 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state) | |||
155 | */ | 149 | */ |
156 | static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) | 150 | static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) |
157 | { | 151 | { |
158 | struct scatterlist sg_in[1], sg_out[1]; | ||
159 | SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4); | ||
160 | |||
161 | skcipher_request_set_sync_tfm(req, state->arc4); | ||
162 | skcipher_request_set_callback(req, 0, NULL, NULL); | ||
163 | |||
164 | get_new_key_from_sha(state); | 152 | get_new_key_from_sha(state); |
165 | if (!initial_key) { | 153 | if (!initial_key) { |
166 | crypto_sync_skcipher_setkey(state->arc4, state->sha1_digest, | 154 | arc4_setkey(&state->arc4, state->sha1_digest, state->keylen); |
167 | state->keylen); | 155 | arc4_crypt(&state->arc4, state->session_key, state->sha1_digest, |
168 | sg_init_table(sg_in, 1); | 156 | state->keylen); |
169 | sg_init_table(sg_out, 1); | ||
170 | setup_sg(sg_in, state->sha1_digest, state->keylen); | ||
171 | setup_sg(sg_out, state->session_key, state->keylen); | ||
172 | skcipher_request_set_crypt(req, sg_in, sg_out, state->keylen, | ||
173 | NULL); | ||
174 | if (crypto_skcipher_encrypt(req)) | ||
175 | printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n"); | ||
176 | } else { | 157 | } else { |
177 | memcpy(state->session_key, state->sha1_digest, state->keylen); | 158 | memcpy(state->session_key, state->sha1_digest, state->keylen); |
178 | } | 159 | } |
@@ -182,9 +163,7 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) | |||
182 | state->session_key[1] = 0x26; | 163 | state->session_key[1] = 0x26; |
183 | state->session_key[2] = 0x9e; | 164 | state->session_key[2] = 0x9e; |
184 | } | 165 | } |
185 | crypto_sync_skcipher_setkey(state->arc4, state->session_key, | 166 | arc4_setkey(&state->arc4, state->session_key, state->keylen); |
186 | state->keylen); | ||
187 | skcipher_request_zero(req); | ||
188 | } | 167 | } |
189 | 168 | ||
190 | /* | 169 | /* |
@@ -197,7 +176,8 @@ static void *mppe_alloc(unsigned char *options, int optlen) | |||
197 | unsigned int digestsize; | 176 | unsigned int digestsize; |
198 | 177 | ||
199 | if (optlen != CILEN_MPPE + sizeof(state->master_key) || | 178 | if (optlen != CILEN_MPPE + sizeof(state->master_key) || |
200 | options[0] != CI_MPPE || options[1] != CILEN_MPPE) | 179 | options[0] != CI_MPPE || options[1] != CILEN_MPPE || |
180 | fips_enabled) | ||
201 | goto out; | 181 | goto out; |
202 | 182 | ||
203 | state = kzalloc(sizeof(*state), GFP_KERNEL); | 183 | state = kzalloc(sizeof(*state), GFP_KERNEL); |
@@ -205,12 +185,6 @@ static void *mppe_alloc(unsigned char *options, int optlen) | |||
205 | goto out; | 185 | goto out; |
206 | 186 | ||
207 | 187 | ||
208 | state->arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0); | ||
209 | if (IS_ERR(state->arc4)) { | ||
210 | state->arc4 = NULL; | ||
211 | goto out_free; | ||
212 | } | ||
213 | |||
214 | shash = crypto_alloc_shash("sha1", 0, 0); | 188 | shash = crypto_alloc_shash("sha1", 0, 0); |
215 | if (IS_ERR(shash)) | 189 | if (IS_ERR(shash)) |
216 | goto out_free; | 190 | goto out_free; |
@@ -251,7 +225,6 @@ out_free: | |||
251 | crypto_free_shash(state->sha1->tfm); | 225 | crypto_free_shash(state->sha1->tfm); |
252 | kzfree(state->sha1); | 226 | kzfree(state->sha1); |
253 | } | 227 | } |
254 | crypto_free_sync_skcipher(state->arc4); | ||
255 | kfree(state); | 228 | kfree(state); |
256 | out: | 229 | out: |
257 | return NULL; | 230 | return NULL; |
@@ -267,8 +240,7 @@ static void mppe_free(void *arg) | |||
267 | kfree(state->sha1_digest); | 240 | kfree(state->sha1_digest); |
268 | crypto_free_shash(state->sha1->tfm); | 241 | crypto_free_shash(state->sha1->tfm); |
269 | kzfree(state->sha1); | 242 | kzfree(state->sha1); |
270 | crypto_free_sync_skcipher(state->arc4); | 243 | kzfree(state); |
271 | kfree(state); | ||
272 | } | 244 | } |
273 | } | 245 | } |
274 | 246 | ||
@@ -367,10 +339,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf, | |||
367 | int isize, int osize) | 339 | int isize, int osize) |
368 | { | 340 | { |
369 | struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; | 341 | struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; |
370 | SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4); | ||
371 | int proto; | 342 | int proto; |
372 | int err; | ||
373 | struct scatterlist sg_in[1], sg_out[1]; | ||
374 | 343 | ||
375 | /* | 344 | /* |
376 | * Check that the protocol is in the range we handle. | 345 | * Check that the protocol is in the range we handle. |
@@ -421,21 +390,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf, | |||
421 | ibuf += 2; /* skip to proto field */ | 390 | ibuf += 2; /* skip to proto field */ |
422 | isize -= 2; | 391 | isize -= 2; |
423 | 392 | ||
424 | /* Encrypt packet */ | 393 | arc4_crypt(&state->arc4, obuf, ibuf, isize); |
425 | sg_init_table(sg_in, 1); | ||
426 | sg_init_table(sg_out, 1); | ||
427 | setup_sg(sg_in, ibuf, isize); | ||
428 | setup_sg(sg_out, obuf, osize); | ||
429 | |||
430 | skcipher_request_set_sync_tfm(req, state->arc4); | ||
431 | skcipher_request_set_callback(req, 0, NULL, NULL); | ||
432 | skcipher_request_set_crypt(req, sg_in, sg_out, isize, NULL); | ||
433 | err = crypto_skcipher_encrypt(req); | ||
434 | skcipher_request_zero(req); | ||
435 | if (err) { | ||
436 | printk(KERN_DEBUG "crypto_cypher_encrypt failed\n"); | ||
437 | return -1; | ||
438 | } | ||
439 | 394 | ||
440 | state->stats.unc_bytes += isize; | 395 | state->stats.unc_bytes += isize; |
441 | state->stats.unc_packets++; | 396 | state->stats.unc_packets++; |
@@ -481,10 +436,8 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, | |||
481 | int osize) | 436 | int osize) |
482 | { | 437 | { |
483 | struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; | 438 | struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; |
484 | SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4); | ||
485 | unsigned ccount; | 439 | unsigned ccount; |
486 | int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; | 440 | int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; |
487 | struct scatterlist sg_in[1], sg_out[1]; | ||
488 | 441 | ||
489 | if (isize <= PPP_HDRLEN + MPPE_OVHD) { | 442 | if (isize <= PPP_HDRLEN + MPPE_OVHD) { |
490 | if (state->debug) | 443 | if (state->debug) |
@@ -611,19 +564,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, | |||
611 | * Decrypt the first byte in order to check if it is | 564 | * Decrypt the first byte in order to check if it is |
612 | * a compressed or uncompressed protocol field. | 565 | * a compressed or uncompressed protocol field. |
613 | */ | 566 | */ |
614 | sg_init_table(sg_in, 1); | 567 | arc4_crypt(&state->arc4, obuf, ibuf, 1); |
615 | sg_init_table(sg_out, 1); | ||
616 | setup_sg(sg_in, ibuf, 1); | ||
617 | setup_sg(sg_out, obuf, 1); | ||
618 | |||
619 | skcipher_request_set_sync_tfm(req, state->arc4); | ||
620 | skcipher_request_set_callback(req, 0, NULL, NULL); | ||
621 | skcipher_request_set_crypt(req, sg_in, sg_out, 1, NULL); | ||
622 | if (crypto_skcipher_decrypt(req)) { | ||
623 | printk(KERN_DEBUG "crypto_cypher_decrypt failed\n"); | ||
624 | osize = DECOMP_ERROR; | ||
625 | goto out_zap_req; | ||
626 | } | ||
627 | 568 | ||
628 | /* | 569 | /* |
629 | * Do PFC decompression. | 570 | * Do PFC decompression. |
@@ -638,14 +579,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, | |||
638 | } | 579 | } |
639 | 580 | ||
640 | /* And finally, decrypt the rest of the packet. */ | 581 | /* And finally, decrypt the rest of the packet. */ |
641 | setup_sg(sg_in, ibuf + 1, isize - 1); | 582 | arc4_crypt(&state->arc4, obuf + 1, ibuf + 1, isize - 1); |
642 | setup_sg(sg_out, obuf + 1, osize - 1); | ||
643 | skcipher_request_set_crypt(req, sg_in, sg_out, isize - 1, NULL); | ||
644 | if (crypto_skcipher_decrypt(req)) { | ||
645 | printk(KERN_DEBUG "crypto_cypher_decrypt failed\n"); | ||
646 | osize = DECOMP_ERROR; | ||
647 | goto out_zap_req; | ||
648 | } | ||
649 | 583 | ||
650 | state->stats.unc_bytes += osize; | 584 | state->stats.unc_bytes += osize; |
651 | state->stats.unc_packets++; | 585 | state->stats.unc_packets++; |
@@ -655,8 +589,6 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, | |||
655 | /* good packet credit */ | 589 | /* good packet credit */ |
656 | state->sanity_errors >>= 1; | 590 | state->sanity_errors >>= 1; |
657 | 591 | ||
658 | out_zap_req: | ||
659 | skcipher_request_zero(req); | ||
660 | return osize; | 592 | return osize; |
661 | 593 | ||
662 | sanity_error: | 594 | sanity_error: |
@@ -729,8 +661,7 @@ static struct compressor ppp_mppe = { | |||
729 | static int __init ppp_mppe_init(void) | 661 | static int __init ppp_mppe_init(void) |
730 | { | 662 | { |
731 | int answer; | 663 | int answer; |
732 | if (!(crypto_has_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) && | 664 | if (fips_enabled || !crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC)) |
733 | crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC))) | ||
734 | return -ENODEV; | 665 | return -ENODEV; |
735 | 666 | ||
736 | sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL); | 667 | sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL); |
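
The scatterlist and request plumbing disappears because arc4_crypt() is a bare stream-cipher step: the arc4_ctx carries the keystream position, so consecutive calls continue exactly where the previous one stopped. That property is what makes the two-stage decrypt in mppe_decompress() (one byte to peek at the protocol field, then the remaining isize - 1 bytes) equivalent to a single pass:

    arc4_crypt(&state->arc4, obuf, ibuf, 1);
    arc4_crypt(&state->arc4, obuf + 1, ibuf + 1, isize - 1);
    /* produces the same bytes as:
     * arc4_crypt(&state->arc4, obuf, ibuf, isize); */

The fips_enabled checks in mppe_alloc() and ppp_mppe_init() are also new: the crypto API's FIPS gating no longer applies to a direct library call, so the driver must reject RC4 under FIPS itself.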
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index aae2b8b2adf5..523e9ea78a28 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig | |||
@@ -10,7 +10,7 @@ config CIFS | |||
10 | select CRYPTO_SHA512 | 10 | select CRYPTO_SHA512 |
11 | select CRYPTO_CMAC | 11 | select CRYPTO_CMAC |
12 | select CRYPTO_HMAC | 12 | select CRYPTO_HMAC |
13 | select CRYPTO_ARC4 | 13 | select CRYPTO_LIB_ARC4 |
14 | select CRYPTO_AEAD2 | 14 | select CRYPTO_AEAD2 |
15 | select CRYPTO_CCM | 15 | select CRYPTO_CCM |
16 | select CRYPTO_ECB | 16 | select CRYPTO_ECB |
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index d2a05e46d6f5..97b7497c13ef 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c | |||
@@ -33,7 +33,8 @@ | |||
33 | #include <linux/ctype.h> | 33 | #include <linux/ctype.h> |
34 | #include <linux/random.h> | 34 | #include <linux/random.h> |
35 | #include <linux/highmem.h> | 35 | #include <linux/highmem.h> |
36 | #include <crypto/skcipher.h> | 36 | #include <linux/fips.h> |
37 | #include <crypto/arc4.h> | ||
37 | #include <crypto/aead.h> | 38 | #include <crypto/aead.h> |
38 | 39 | ||
39 | int __cifs_calc_signature(struct smb_rqst *rqst, | 40 | int __cifs_calc_signature(struct smb_rqst *rqst, |
@@ -772,63 +773,32 @@ setup_ntlmv2_rsp_ret: | |||
772 | int | 773 | int |
773 | calc_seckey(struct cifs_ses *ses) | 774 | calc_seckey(struct cifs_ses *ses) |
774 | { | 775 | { |
775 | int rc; | 776 | unsigned char sec_key[CIFS_SESS_KEY_SIZE]; /* a nonce */ |
776 | struct crypto_skcipher *tfm_arc4; | 777 | struct arc4_ctx *ctx_arc4; |
777 | struct scatterlist sgin, sgout; | ||
778 | struct skcipher_request *req; | ||
779 | unsigned char *sec_key; | ||
780 | 778 | ||
781 | sec_key = kmalloc(CIFS_SESS_KEY_SIZE, GFP_KERNEL); | 779 | if (fips_enabled) |
782 | if (sec_key == NULL) | 780 | return -ENODEV; |
783 | return -ENOMEM; | ||
784 | 781 | ||
785 | get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE); | 782 | get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE); |
786 | 783 | ||
787 | tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); | 784 | ctx_arc4 = kmalloc(sizeof(*ctx_arc4), GFP_KERNEL); |
788 | if (IS_ERR(tfm_arc4)) { | 785 | if (!ctx_arc4) { |
789 | rc = PTR_ERR(tfm_arc4); | 786 | cifs_dbg(VFS, "could not allocate arc4 context\n"); |
790 | cifs_dbg(VFS, "could not allocate crypto API arc4\n"); | 787 | return -ENOMEM; |
791 | goto out; | ||
792 | } | ||
793 | |||
794 | rc = crypto_skcipher_setkey(tfm_arc4, ses->auth_key.response, | ||
795 | CIFS_SESS_KEY_SIZE); | ||
796 | if (rc) { | ||
797 | cifs_dbg(VFS, "%s: Could not set response as a key\n", | ||
798 | __func__); | ||
799 | goto out_free_cipher; | ||
800 | } | ||
801 | |||
802 | req = skcipher_request_alloc(tfm_arc4, GFP_KERNEL); | ||
803 | if (!req) { | ||
804 | rc = -ENOMEM; | ||
805 | cifs_dbg(VFS, "could not allocate crypto API arc4 request\n"); | ||
806 | goto out_free_cipher; | ||
807 | } | 788 | } |
808 | 789 | ||
809 | sg_init_one(&sgin, sec_key, CIFS_SESS_KEY_SIZE); | 790 | arc4_setkey(ctx_arc4, ses->auth_key.response, CIFS_SESS_KEY_SIZE); |
810 | sg_init_one(&sgout, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE); | 791 | arc4_crypt(ctx_arc4, ses->ntlmssp->ciphertext, sec_key, |
811 | 792 | CIFS_CPHTXT_SIZE); | |
812 | skcipher_request_set_callback(req, 0, NULL, NULL); | ||
813 | skcipher_request_set_crypt(req, &sgin, &sgout, CIFS_CPHTXT_SIZE, NULL); | ||
814 | |||
815 | rc = crypto_skcipher_encrypt(req); | ||
816 | skcipher_request_free(req); | ||
817 | if (rc) { | ||
818 | cifs_dbg(VFS, "could not encrypt session key rc: %d\n", rc); | ||
819 | goto out_free_cipher; | ||
820 | } | ||
821 | 793 | ||
822 | /* make secondary_key/nonce as session key */ | 794 | /* make secondary_key/nonce as session key */ |
823 | memcpy(ses->auth_key.response, sec_key, CIFS_SESS_KEY_SIZE); | 795 | memcpy(ses->auth_key.response, sec_key, CIFS_SESS_KEY_SIZE); |
824 | /* and make len as that of session key only */ | 796 | /* and make len as that of session key only */ |
825 | ses->auth_key.len = CIFS_SESS_KEY_SIZE; | 797 | ses->auth_key.len = CIFS_SESS_KEY_SIZE; |
826 | 798 | ||
827 | out_free_cipher: | 799 | memzero_explicit(sec_key, CIFS_SESS_KEY_SIZE); |
828 | crypto_free_skcipher(tfm_arc4); | 800 | kzfree(ctx_arc4); |
829 | out: | 801 | return 0; |
830 | kfree(sec_key); | ||
831 | return rc; | ||
832 | } | 802 | } |
833 | 803 | ||
834 | void | 804 | void |
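
Two hygiene points in the new calc_seckey() are easy to miss: the nonce moved from kmalloc to the stack, and both copies of secret state are wiped rather than merely freed. memzero_explicit() keeps the compiler from discarding the "dead" final store to the stack buffer, and kzfree() zeroes the heap-allocated RC4 state before kfree():

    memzero_explicit(sec_key, CIFS_SESS_KEY_SIZE);  /* stack nonce */
    kzfree(ctx_arc4);                               /* zero, then free */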
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 65d9771e49f9..72db1c89bf5a 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -1591,7 +1591,6 @@ MODULE_DESCRIPTION | |||
1591 | ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and " | 1591 | ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and " |
1592 | "also older servers complying with the SNIA CIFS Specification)"); | 1592 | "also older servers complying with the SNIA CIFS Specification)"); |
1593 | MODULE_VERSION(CIFS_VERSION); | 1593 | MODULE_VERSION(CIFS_VERSION); |
1594 | MODULE_SOFTDEP("pre: arc4"); | ||
1595 | MODULE_SOFTDEP("pre: des"); | 1594 | MODULE_SOFTDEP("pre: des"); |
1596 | MODULE_SOFTDEP("pre: ecb"); | 1595 | MODULE_SOFTDEP("pre: ecb"); |
1597 | MODULE_SOFTDEP("pre: hmac"); | 1596 | MODULE_SOFTDEP("pre: hmac"); |
diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 61bb10490492..3c245b1859e7 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h | |||
@@ -317,21 +317,7 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) | |||
317 | * | 317 | * |
318 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | 318 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred |
319 | */ | 319 | */ |
320 | static inline int crypto_aead_encrypt(struct aead_request *req) | 320 | int crypto_aead_encrypt(struct aead_request *req); |
321 | { | ||
322 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
323 | struct crypto_alg *alg = aead->base.__crt_alg; | ||
324 | unsigned int cryptlen = req->cryptlen; | ||
325 | int ret; | ||
326 | |||
327 | crypto_stats_get(alg); | ||
328 | if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) | ||
329 | ret = -ENOKEY; | ||
330 | else | ||
331 | ret = crypto_aead_alg(aead)->encrypt(req); | ||
332 | crypto_stats_aead_encrypt(cryptlen, alg, ret); | ||
333 | return ret; | ||
334 | } | ||
335 | 321 | ||
336 | /** | 322 | /** |
337 | * crypto_aead_decrypt() - decrypt ciphertext | 323 | * crypto_aead_decrypt() - decrypt ciphertext |
@@ -355,23 +341,7 @@ static inline int crypto_aead_encrypt(struct aead_request *req) | |||
355 | * integrity of the ciphertext or the associated data was violated); | 341 | * integrity of the ciphertext or the associated data was violated); |
356 | * < 0 if an error occurred. | 342 | * < 0 if an error occurred. |
357 | */ | 343 | */ |
358 | static inline int crypto_aead_decrypt(struct aead_request *req) | 344 | int crypto_aead_decrypt(struct aead_request *req); |
359 | { | ||
360 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
361 | struct crypto_alg *alg = aead->base.__crt_alg; | ||
362 | unsigned int cryptlen = req->cryptlen; | ||
363 | int ret; | ||
364 | |||
365 | crypto_stats_get(alg); | ||
366 | if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) | ||
367 | ret = -ENOKEY; | ||
368 | else if (req->cryptlen < crypto_aead_authsize(aead)) | ||
369 | ret = -EINVAL; | ||
370 | else | ||
371 | ret = crypto_aead_alg(aead)->decrypt(req); | ||
372 | crypto_stats_aead_decrypt(cryptlen, alg, ret); | ||
373 | return ret; | ||
374 | } | ||
375 | 345 | ||
376 | /** | 346 | /** |
377 | * DOC: Asynchronous AEAD Request Handle | 347 | * DOC: Asynchronous AEAD Request Handle |
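
Un-inlining crypto_aead_encrypt()/crypto_aead_decrypt() (the bodies, stats bookkeeping included, move out of line into crypto/aead.c) changes nothing for callers. For reference, a minimal synchronous-wait caller sketch; error handling is elided, and key, iv, sg, assoclen and cryptlen are assumed to be set up elsewhere:

    struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
    struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
    DECLARE_CRYPTO_WAIT(wait);
    int err;

    crypto_aead_setkey(tfm, key, 16);
    crypto_aead_setauthsize(tfm, 16);
    aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                              crypto_req_done, &wait);
    aead_request_set_ad(req, assoclen);
    aead_request_set_crypt(req, sg, sg, cryptlen, iv);   /* in-place */
    err = crypto_wait_req(crypto_aead_encrypt(req), &wait);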
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 743d626479ef..dc1106af95c3 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h | |||
@@ -189,7 +189,6 @@ void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); | |||
189 | int crypto_enqueue_request(struct crypto_queue *queue, | 189 | int crypto_enqueue_request(struct crypto_queue *queue, |
190 | struct crypto_async_request *request); | 190 | struct crypto_async_request *request); |
191 | struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); | 191 | struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); |
192 | int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm); | ||
193 | static inline unsigned int crypto_queue_len(struct crypto_queue *queue) | 192 | static inline unsigned int crypto_queue_len(struct crypto_queue *queue) |
194 | { | 193 | { |
195 | return queue->qlen; | 194 | return queue->qlen; |
@@ -371,12 +370,6 @@ static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req) | |||
371 | return req->__ctx; | 370 | return req->__ctx; |
372 | } | 371 | } |
373 | 372 | ||
374 | static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue, | ||
375 | struct crypto_ablkcipher *tfm) | ||
376 | { | ||
377 | return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm)); | ||
378 | } | ||
379 | |||
380 | static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, | 373 | static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, |
381 | u32 type, u32 mask) | 374 | u32 type, u32 mask) |
382 | { | 375 | { |
diff --git a/include/crypto/arc4.h b/include/crypto/arc4.h index 5b2c24ab0139..f3c22fe01704 100644 --- a/include/crypto/arc4.h +++ b/include/crypto/arc4.h | |||
@@ -6,8 +6,18 @@ | |||
6 | #ifndef _CRYPTO_ARC4_H | 6 | #ifndef _CRYPTO_ARC4_H |
7 | #define _CRYPTO_ARC4_H | 7 | #define _CRYPTO_ARC4_H |
8 | 8 | ||
9 | #include <linux/types.h> | ||
10 | |||
9 | #define ARC4_MIN_KEY_SIZE 1 | 11 | #define ARC4_MIN_KEY_SIZE 1 |
10 | #define ARC4_MAX_KEY_SIZE 256 | 12 | #define ARC4_MAX_KEY_SIZE 256 |
11 | #define ARC4_BLOCK_SIZE 1 | 13 | #define ARC4_BLOCK_SIZE 1 |
12 | 14 | ||
15 | struct arc4_ctx { | ||
16 | u32 S[256]; | ||
17 | u32 x, y; | ||
18 | }; | ||
19 | |||
20 | int arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len); | ||
21 | void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len); | ||
22 | |||
13 | #endif /* _CRYPTO_ARC4_H */ | 23 | #endif /* _CRYPTO_ARC4_H */ |
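With the context struct and the two library calls now public, former crypto_cipher "arc4" users can key and run RC4 directly. A minimal round-trip sketch, assuming an in-place buffer and a throwaway 5-byte key (both illustrative)::

    #include <crypto/arc4.h>
    #include <linux/string.h>   /* memzero_explicit() */

    static void example_arc4_roundtrip(void)
    {
        static const u8 key[5] = { 0x01, 0x02, 0x03, 0x04, 0x05 };
        struct arc4_ctx ctx;
        u8 buf[16] = "example payload";

        arc4_setkey(&ctx, key, sizeof(key));
        arc4_crypt(&ctx, buf, buf, sizeof(buf));    /* encrypt in place */

        /* RC4 is a plain stream cipher: rekeying replays the same
         * keystream, so a second pass decrypts the buffer again. */
        arc4_setkey(&ctx, key, sizeof(key));
        arc4_crypt(&ctx, buf, buf, sizeof(buf));

        /* unlike a crypto tfm, the expanded key schedule now lives in
         * plain memory, so wipe it when done */
        memzero_explicit(&ctx, sizeof(ctx));
    }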
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h index 1fc70a69d550..d1e723c6a37d 100644 --- a/include/crypto/chacha.h +++ b/include/crypto/chacha.h | |||
@@ -41,7 +41,7 @@ static inline void chacha20_block(u32 *state, u8 *stream) | |||
41 | } | 41 | } |
42 | void hchacha_block(const u32 *in, u32 *out, int nrounds); | 42 | void hchacha_block(const u32 *in, u32 *out, int nrounds); |
43 | 43 | ||
44 | void crypto_chacha_init(u32 *state, struct chacha_ctx *ctx, u8 *iv); | 44 | void crypto_chacha_init(u32 *state, const struct chacha_ctx *ctx, const u8 *iv); |
45 | 45 | ||
46 | int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, | 46 | int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, |
47 | unsigned int keysize); | 47 | unsigned int keysize); |
diff --git a/include/crypto/crypto_wq.h b/include/crypto/crypto_wq.h deleted file mode 100644 index 23114746ac08..000000000000 --- a/include/crypto/crypto_wq.h +++ /dev/null | |||
@@ -1,8 +0,0 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | #ifndef CRYPTO_WQ_H | ||
3 | #define CRYPTO_WQ_H | ||
4 | |||
5 | #include <linux/workqueue.h> | ||
6 | |||
7 | extern struct workqueue_struct *kcrypto_wq; | ||
8 | #endif | ||
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h index 3fb581bf3b87..8c9af21efce1 100644 --- a/include/crypto/drbg.h +++ b/include/crypto/drbg.h | |||
@@ -129,6 +129,8 @@ struct drbg_state { | |||
129 | 129 | ||
130 | bool seeded; /* DRBG fully seeded? */ | 130 | bool seeded; /* DRBG fully seeded? */ |
131 | bool pr; /* Prediction resistance enabled? */ | 131 | bool pr; /* Prediction resistance enabled? */ |
132 | bool fips_primed; /* Continuous test primed? */ | ||
133 | unsigned char *prev; /* FIPS 140-2 continuous test value */ | ||
132 | struct work_struct seed_work; /* asynchronous seeding support */ | 134 | struct work_struct seed_work; /* asynchronous seeding support */ |
133 | struct crypto_rng *jent; | 135 | struct crypto_rng *jent; |
134 | const struct drbg_state_ops *d_ops; | 136 | const struct drbg_state_ops *d_ops; |
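The two new drbg_state fields back the continuous self-test on the noise source mentioned in the merge summary: ->prev holds the previously seen entropy block and ->fips_primed records whether it is valid yet. A hedged sketch of the comparison such a test performs; the helper name and the error-code policy are illustrative, not a quote of crypto/drbg.c::

    #include <crypto/drbg.h>
    #include <linux/string.h>

    static int example_continuous_test(struct drbg_state *drbg,
                                       const unsigned char *entropy,
                                       size_t entropylen)
    {
        if (!drbg->fips_primed) {
            /* first block only primes ->prev, nothing to compare */
            memcpy(drbg->prev, entropy, entropylen);
            drbg->fips_primed = true;
            return -EAGAIN; /* caller must fetch a fresh block */
        }

        /* two identical consecutive blocks indicate a stuck source */
        if (!memcmp(drbg->prev, entropy, entropylen))
            return -EFAULT;

        memcpy(drbg->prev, entropy, entropylen);
        return 0;
    }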
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 31e0662fa429..bfc9db7b100d 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h | |||
@@ -196,12 +196,6 @@ static inline struct ahash_request *ahash_dequeue_request( | |||
196 | return ahash_request_cast(crypto_dequeue_request(queue)); | 196 | return ahash_request_cast(crypto_dequeue_request(queue)); |
197 | } | 197 | } |
198 | 198 | ||
199 | static inline int ahash_tfm_in_queue(struct crypto_queue *queue, | ||
200 | struct crypto_ahash *tfm) | ||
201 | { | ||
202 | return crypto_tfm_in_queue(queue, crypto_ahash_tfm(tfm)); | ||
203 | } | ||
204 | |||
205 | static inline void *crypto_shash_ctx(struct crypto_shash *tfm) | 199 | static inline void *crypto_shash_ctx(struct crypto_shash *tfm) |
206 | { | 200 | { |
207 | return crypto_tfm_ctx(&tfm->base); | 201 | return crypto_tfm_ctx(&tfm->base); |
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index fe0376d5a471..d68faa5759ad 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h | |||
@@ -200,6 +200,66 @@ static inline unsigned int crypto_skcipher_alg_max_keysize( | |||
200 | return alg->max_keysize; | 200 | return alg->max_keysize; |
201 | } | 201 | } |
202 | 202 | ||
203 | static inline unsigned int crypto_skcipher_alg_chunksize( | ||
204 | struct skcipher_alg *alg) | ||
205 | { | ||
206 | if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == | ||
207 | CRYPTO_ALG_TYPE_BLKCIPHER) | ||
208 | return alg->base.cra_blocksize; | ||
209 | |||
210 | if (alg->base.cra_ablkcipher.encrypt) | ||
211 | return alg->base.cra_blocksize; | ||
212 | |||
213 | return alg->chunksize; | ||
214 | } | ||
215 | |||
216 | static inline unsigned int crypto_skcipher_alg_walksize( | ||
217 | struct skcipher_alg *alg) | ||
218 | { | ||
219 | if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == | ||
220 | CRYPTO_ALG_TYPE_BLKCIPHER) | ||
221 | return alg->base.cra_blocksize; | ||
222 | |||
223 | if (alg->base.cra_ablkcipher.encrypt) | ||
224 | return alg->base.cra_blocksize; | ||
225 | |||
226 | return alg->walksize; | ||
227 | } | ||
228 | |||
229 | /** | ||
230 | * crypto_skcipher_chunksize() - obtain chunk size | ||
231 | * @tfm: cipher handle | ||
232 | * | ||
233 | * The block size is set to one for ciphers such as CTR. However, | ||
234 | * you still need to provide incremental updates in multiples of | ||
235 | * the underlying block size as the IV does not have sub-block | ||
236 | * granularity. This is known in this API as the chunk size. | ||
237 | * | ||
238 | * Return: chunk size in bytes | ||
239 | */ | ||
240 | static inline unsigned int crypto_skcipher_chunksize( | ||
241 | struct crypto_skcipher *tfm) | ||
242 | { | ||
243 | return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); | ||
244 | } | ||
245 | |||
246 | /** | ||
247 | * crypto_skcipher_walksize() - obtain walk size | ||
248 | * @tfm: cipher handle | ||
249 | * | ||
250 | * In some cases, algorithms can only perform optimally when operating on | ||
251 | * multiple blocks in parallel. This is reflected by the walksize, which | ||
252 | * must be a multiple of the chunksize (or equal if the concern does not | ||
253 | * apply). | ||
254 | * | ||
255 | * Return: walk size in bytes | ||
256 | */ | ||
257 | static inline unsigned int crypto_skcipher_walksize( | ||
258 | struct crypto_skcipher *tfm) | ||
259 | { | ||
260 | return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm)); | ||
261 | } | ||
262 | |||
203 | /* Helpers for simple block cipher modes of operation */ | 263 | /* Helpers for simple block cipher modes of operation */ |
204 | struct skcipher_ctx_simple { | 264 | struct skcipher_ctx_simple { |
205 | struct crypto_cipher *cipher; /* underlying block cipher */ | 265 | struct crypto_cipher *cipher; /* underlying block cipher */ |
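crypto_skcipher_chunksize(), crypto_skcipher_walksize() and their _alg_ helpers move here from the public skcipher.h (see the matching removal below), since they are driver-facing. Per the kernel-doc above, a stream mode such as CTR reports a block size of one yet only accepts partial updates in chunk-size multiples. A hedged sketch of sizing one incremental step that way; the helper name and the 4 KiB cap are illustrative::

    #include <crypto/internal/skcipher.h>
    #include <linux/kernel.h>   /* rounddown(), min() */

    static unsigned int example_step_len(struct crypto_skcipher *tfm,
                                         unsigned int remaining)
    {
        unsigned int chunk = crypto_skcipher_chunksize(tfm);
        unsigned int step = min(remaining, 4096U);

        /* every update except the final one must be a multiple of
         * the chunk size, or the IV loses track of its position */
        if (step < remaining)
            step = rounddown(step, chunk);
        return step;
    }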
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index ce7fa0973580..37c164234d97 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h | |||
@@ -288,66 +288,6 @@ static inline unsigned int crypto_sync_skcipher_ivsize( | |||
288 | return crypto_skcipher_ivsize(&tfm->base); | 288 | return crypto_skcipher_ivsize(&tfm->base); |
289 | } | 289 | } |
290 | 290 | ||
291 | static inline unsigned int crypto_skcipher_alg_chunksize( | ||
292 | struct skcipher_alg *alg) | ||
293 | { | ||
294 | if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == | ||
295 | CRYPTO_ALG_TYPE_BLKCIPHER) | ||
296 | return alg->base.cra_blocksize; | ||
297 | |||
298 | if (alg->base.cra_ablkcipher.encrypt) | ||
299 | return alg->base.cra_blocksize; | ||
300 | |||
301 | return alg->chunksize; | ||
302 | } | ||
303 | |||
304 | static inline unsigned int crypto_skcipher_alg_walksize( | ||
305 | struct skcipher_alg *alg) | ||
306 | { | ||
307 | if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == | ||
308 | CRYPTO_ALG_TYPE_BLKCIPHER) | ||
309 | return alg->base.cra_blocksize; | ||
310 | |||
311 | if (alg->base.cra_ablkcipher.encrypt) | ||
312 | return alg->base.cra_blocksize; | ||
313 | |||
314 | return alg->walksize; | ||
315 | } | ||
316 | |||
317 | /** | ||
318 | * crypto_skcipher_chunksize() - obtain chunk size | ||
319 | * @tfm: cipher handle | ||
320 | * | ||
321 | * The block size is set to one for ciphers such as CTR. However, | ||
322 | * you still need to provide incremental updates in multiples of | ||
323 | * the underlying block size as the IV does not have sub-block | ||
324 | * granularity. This is known in this API as the chunk size. | ||
325 | * | ||
326 | * Return: chunk size in bytes | ||
327 | */ | ||
328 | static inline unsigned int crypto_skcipher_chunksize( | ||
329 | struct crypto_skcipher *tfm) | ||
330 | { | ||
331 | return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); | ||
332 | } | ||
333 | |||
334 | /** | ||
335 | * crypto_skcipher_walksize() - obtain walk size | ||
336 | * @tfm: cipher handle | ||
337 | * | ||
338 | * In some cases, algorithms can only perform optimally when operating on | ||
339 | * multiple blocks in parallel. This is reflected by the walksize, which | ||
340 | * must be a multiple of the chunksize (or equal if the concern does not | ||
341 | * apply). | ||
342 | * | ||
343 | * Return: walk size in bytes | ||
344 | */ | ||
345 | static inline unsigned int crypto_skcipher_walksize( | ||
346 | struct crypto_skcipher *tfm) | ||
347 | { | ||
348 | return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm)); | ||
349 | } | ||
350 | |||
351 | /** | 291 | /** |
352 | * crypto_skcipher_blocksize() - obtain block size of cipher | 292 | * crypto_skcipher_blocksize() - obtain block size of cipher |
353 | * @tfm: cipher handle | 293 | * @tfm: cipher handle |
@@ -479,21 +419,7 @@ static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm( | |||
479 | * | 419 | * |
480 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | 420 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred |
481 | */ | 421 | */ |
482 | static inline int crypto_skcipher_encrypt(struct skcipher_request *req) | 422 | int crypto_skcipher_encrypt(struct skcipher_request *req); |
483 | { | ||
484 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
485 | struct crypto_alg *alg = tfm->base.__crt_alg; | ||
486 | unsigned int cryptlen = req->cryptlen; | ||
487 | int ret; | ||
488 | |||
489 | crypto_stats_get(alg); | ||
490 | if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) | ||
491 | ret = -ENOKEY; | ||
492 | else | ||
493 | ret = tfm->encrypt(req); | ||
494 | crypto_stats_skcipher_encrypt(cryptlen, ret, alg); | ||
495 | return ret; | ||
496 | } | ||
497 | 423 | ||
498 | /** | 424 | /** |
499 | * crypto_skcipher_decrypt() - decrypt ciphertext | 425 | * crypto_skcipher_decrypt() - decrypt ciphertext |
@@ -506,21 +432,7 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req) | |||
506 | * | 432 | * |
507 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred | 433 | * Return: 0 if the cipher operation was successful; < 0 if an error occurred |
508 | */ | 434 | */ |
509 | static inline int crypto_skcipher_decrypt(struct skcipher_request *req) | 435 | int crypto_skcipher_decrypt(struct skcipher_request *req); |
510 | { | ||
511 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | ||
512 | struct crypto_alg *alg = tfm->base.__crt_alg; | ||
513 | unsigned int cryptlen = req->cryptlen; | ||
514 | int ret; | ||
515 | |||
516 | crypto_stats_get(alg); | ||
517 | if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) | ||
518 | ret = -ENOKEY; | ||
519 | else | ||
520 | ret = tfm->decrypt(req); | ||
521 | crypto_stats_skcipher_decrypt(cryptlen, ret, alg); | ||
522 | return ret; | ||
523 | } | ||
524 | 436 | ||
525 | /** | 437 | /** |
526 | * DOC: Symmetric Key Cipher Request Handle | 438 | * DOC: Symmetric Key Cipher Request Handle |
diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 9cf8f3ce0e50..19ea3a371d7b 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h | |||
@@ -49,7 +49,6 @@ | |||
49 | #define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b | 49 | #define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b |
50 | #define CRYPTO_ALG_TYPE_RNG 0x0000000c | 50 | #define CRYPTO_ALG_TYPE_RNG 0x0000000c |
51 | #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d | 51 | #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d |
52 | #define CRYPTO_ALG_TYPE_DIGEST 0x0000000e | ||
53 | #define CRYPTO_ALG_TYPE_HASH 0x0000000e | 52 | #define CRYPTO_ALG_TYPE_HASH 0x0000000e |
54 | #define CRYPTO_ALG_TYPE_SHASH 0x0000000e | 53 | #define CRYPTO_ALG_TYPE_SHASH 0x0000000e |
55 | #define CRYPTO_ALG_TYPE_AHASH 0x0000000f | 54 | #define CRYPTO_ALG_TYPE_AHASH 0x0000000f |
@@ -323,6 +322,17 @@ struct cipher_alg { | |||
323 | void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); | 322 | void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); |
324 | }; | 323 | }; |
325 | 324 | ||
325 | /** | ||
326 | * struct compress_alg - compression/decompression algorithm | ||
327 | * @coa_compress: Compress a buffer of specified length, storing the resulting | ||
328 | * data in the specified buffer. Return the length of the | ||
329 | * compressed data in dlen. | ||
330 | * @coa_decompress: Decompress the source buffer, storing the uncompressed | ||
331 | * data in the specified buffer. The length of the data is | ||
332 | * returned in dlen. | ||
333 | * | ||
334 | * All fields are mandatory. | ||
335 | */ | ||
326 | struct compress_alg { | 336 | struct compress_alg { |
327 | int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src, | 337 | int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src, |
328 | unsigned int slen, u8 *dst, unsigned int *dlen); | 338 | unsigned int slen, u8 *dst, unsigned int *dlen); |
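The new kernel-doc documents the legacy compression hooks: both callbacks consume slen bytes of src and report the produced length back through dlen. A hedged skeleton of a conforming pair, using a pass-through transform for brevity; the names, the dlen-as-capacity-on-entry assumption, and the omitted registration boilerplate (cra_blocksize, crypto_register_alg() and friends) are illustrative::

    static int example_copy(struct crypto_tfm *tfm, const u8 *src,
                            unsigned int slen, u8 *dst, unsigned int *dlen)
    {
        if (*dlen < slen)
            return -EINVAL;     /* assumed: *dlen is dst capacity on entry */
        memcpy(dst, src, slen);
        *dlen = slen;           /* report the output length via dlen */
        return 0;
    }

    static struct crypto_alg example_alg = {
        .cra_name  = "example-copy",
        .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
        .cra_u     = { .compress = {
            .coa_compress   = example_copy, /* both fields are mandatory */
            .coa_decompress = example_copy,
        } },
    };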
diff --git a/lib/Makefile b/lib/Makefile index fb7697031a79..d3daedf93c5a 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -102,7 +102,7 @@ endif | |||
102 | obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o | 102 | obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o |
103 | CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any) | 103 | CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any) |
104 | 104 | ||
105 | obj-y += math/ | 105 | obj-y += math/ crypto/ |
106 | 106 | ||
107 | obj-$(CONFIG_GENERIC_IOMAP) += iomap.o | 107 | obj-$(CONFIG_GENERIC_IOMAP) += iomap.o |
108 | obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o | 108 | obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o |
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile new file mode 100644 index 000000000000..88195c34932d --- /dev/null +++ b/lib/crypto/Makefile | |||
@@ -0,0 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
2 | |||
3 | obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o | ||
4 | libarc4-y := arc4.o | ||
diff --git a/lib/crypto/arc4.c b/lib/crypto/arc4.c new file mode 100644 index 000000000000..c2020f19c652 --- /dev/null +++ b/lib/crypto/arc4.c | |||
@@ -0,0 +1,74 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Cryptographic API | ||
4 | * | ||
5 | * ARC4 Cipher Algorithm | ||
6 | * | ||
7 | * Jon Oberheide <jon@oberheide.org> | ||
8 | */ | ||
9 | |||
10 | #include <crypto/arc4.h> | ||
11 | #include <linux/module.h> | ||
12 | |||
13 | int arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len) | ||
14 | { | ||
15 | int i, j = 0, k = 0; | ||
16 | |||
17 | ctx->x = 1; | ||
18 | ctx->y = 0; | ||
19 | |||
20 | for (i = 0; i < 256; i++) | ||
21 | ctx->S[i] = i; | ||
22 | |||
23 | for (i = 0; i < 256; i++) { | ||
24 | u32 a = ctx->S[i]; | ||
25 | |||
26 | j = (j + in_key[k] + a) & 0xff; | ||
27 | ctx->S[i] = ctx->S[j]; | ||
28 | ctx->S[j] = a; | ||
29 | if (++k >= key_len) | ||
30 | k = 0; | ||
31 | } | ||
32 | |||
33 | return 0; | ||
34 | } | ||
35 | EXPORT_SYMBOL(arc4_setkey); | ||
36 | |||
37 | void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len) | ||
38 | { | ||
39 | u32 *const S = ctx->S; | ||
40 | u32 x, y, a, b; | ||
41 | u32 ty, ta, tb; | ||
42 | |||
43 | if (len == 0) | ||
44 | return; | ||
45 | |||
46 | x = ctx->x; | ||
47 | y = ctx->y; | ||
48 | |||
49 | a = S[x]; | ||
50 | y = (y + a) & 0xff; | ||
51 | b = S[y]; | ||
52 | |||
53 | do { | ||
54 | S[y] = a; | ||
55 | a = (a + b) & 0xff; | ||
56 | S[x] = b; | ||
57 | x = (x + 1) & 0xff; | ||
58 | ta = S[x]; | ||
59 | ty = (y + ta) & 0xff; | ||
60 | tb = S[ty]; | ||
61 | *out++ = *in++ ^ S[a]; | ||
62 | if (--len == 0) | ||
63 | break; | ||
64 | y = ty; | ||
65 | a = ta; | ||
66 | b = tb; | ||
67 | } while (true); | ||
68 | |||
69 | ctx->x = x; | ||
70 | ctx->y = y; | ||
71 | } | ||
72 | EXPORT_SYMBOL(arc4_crypt); | ||
73 | |||
74 | MODULE_LICENSE("GPL"); | ||
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 2882d9ba6607..eacb82468437 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
@@ -676,17 +676,18 @@ static bool sg_miter_get_next_page(struct sg_mapping_iter *miter) | |||
676 | { | 676 | { |
677 | if (!miter->__remaining) { | 677 | if (!miter->__remaining) { |
678 | struct scatterlist *sg; | 678 | struct scatterlist *sg; |
679 | unsigned long pgoffset; | ||
680 | 679 | ||
681 | if (!__sg_page_iter_next(&miter->piter)) | 680 | if (!__sg_page_iter_next(&miter->piter)) |
682 | return false; | 681 | return false; |
683 | 682 | ||
684 | sg = miter->piter.sg; | 683 | sg = miter->piter.sg; |
685 | pgoffset = miter->piter.sg_pgoffset; | ||
686 | 684 | ||
687 | miter->__offset = pgoffset ? 0 : sg->offset; | 685 | miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset; |
686 | miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT; | ||
687 | miter->__offset &= PAGE_SIZE - 1; | ||
688 | miter->__remaining = sg->offset + sg->length - | 688 | miter->__remaining = sg->offset + sg->length - |
689 | (pgoffset << PAGE_SHIFT) - miter->__offset; | 689 | (miter->piter.sg_pgoffset << PAGE_SHIFT) - |
690 | miter->__offset; | ||
690 | miter->__remaining = min_t(unsigned long, miter->__remaining, | 691 | miter->__remaining = min_t(unsigned long, miter->__remaining, |
691 | PAGE_SIZE - miter->__offset); | 692 | PAGE_SIZE - miter->__offset); |
692 | } | 693 | } |
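The scatterlist fix folds any whole pages hidden in a large sg->offset into the page iterator's sg_pgoffset instead of leaving them in miter->__offset, which previously made the first mapped page run past PAGE_SIZE. A worked example with 4 KiB pages::

    sg->offset = 5000, sg->length = 2000
    old: __offset = 5000                          (beyond the kmapped page)
    new: sg_pgoffset += 5000 >> PAGE_SHIFT        (advances one page)
         __offset     = 5000 & (PAGE_SIZE - 1)  = 904
         __remaining  = 5000 + 2000 - (1 << PAGE_SHIFT) - 904 = 2000,
                        then capped to PAGE_SIZE - 904 (no effect here)

so the iterator now maps the second page at offset 904 rather than indexing 5000 bytes into the first.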
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index 0227cce9685e..0c93b1b7a826 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig | |||
@@ -3,7 +3,7 @@ config MAC80211 | |||
3 | tristate "Generic IEEE 802.11 Networking Stack (mac80211)" | 3 | tristate "Generic IEEE 802.11 Networking Stack (mac80211)" |
4 | depends on CFG80211 | 4 | depends on CFG80211 |
5 | select CRYPTO | 5 | select CRYPTO |
6 | select CRYPTO_ARC4 | 6 | select CRYPTO_LIB_ARC4 |
7 | select CRYPTO_AES | 7 | select CRYPTO_AES |
8 | select CRYPTO_CCM | 8 | select CRYPTO_CCM |
9 | select CRYPTO_GCM | 9 | select CRYPTO_GCM |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index a1973a26c7fc..3fae902937fd 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <net/net_namespace.h> | 15 | #include <net/net_namespace.h> |
16 | #include <linux/rcupdate.h> | 16 | #include <linux/rcupdate.h> |
17 | #include <linux/fips.h> | ||
17 | #include <linux/if_ether.h> | 18 | #include <linux/if_ether.h> |
18 | #include <net/cfg80211.h> | 19 | #include <net/cfg80211.h> |
19 | #include "ieee80211_i.h" | 20 | #include "ieee80211_i.h" |
@@ -402,9 +403,8 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, | |||
402 | case WLAN_CIPHER_SUITE_WEP40: | 403 | case WLAN_CIPHER_SUITE_WEP40: |
403 | case WLAN_CIPHER_SUITE_TKIP: | 404 | case WLAN_CIPHER_SUITE_TKIP: |
404 | case WLAN_CIPHER_SUITE_WEP104: | 405 | case WLAN_CIPHER_SUITE_WEP104: |
405 | if (IS_ERR(local->wep_tx_tfm)) | 406 | if (WARN_ON_ONCE(fips_enabled)) |
406 | return -EINVAL; | 407 | return -EINVAL; |
407 | break; | ||
408 | case WLAN_CIPHER_SUITE_CCMP: | 408 | case WLAN_CIPHER_SUITE_CCMP: |
409 | case WLAN_CIPHER_SUITE_CCMP_256: | 409 | case WLAN_CIPHER_SUITE_CCMP_256: |
410 | case WLAN_CIPHER_SUITE_AES_CMAC: | 410 | case WLAN_CIPHER_SUITE_AES_CMAC: |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 6396d46a9a71..004e2e3adb88 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -1255,8 +1255,8 @@ struct ieee80211_local { | |||
1255 | 1255 | ||
1256 | struct rate_control_ref *rate_ctrl; | 1256 | struct rate_control_ref *rate_ctrl; |
1257 | 1257 | ||
1258 | struct crypto_cipher *wep_tx_tfm; | 1258 | struct arc4_ctx wep_tx_ctx; |
1259 | struct crypto_cipher *wep_rx_tfm; | 1259 | struct arc4_ctx wep_rx_ctx; |
1260 | u32 wep_iv; | 1260 | u32 wep_iv; |
1261 | 1261 | ||
1262 | /* see iface.c */ | 1262 | /* see iface.c */ |
diff --git a/net/mac80211/key.h b/net/mac80211/key.h index be118c39433f..b8b9cd743bf4 100644 --- a/net/mac80211/key.h +++ b/net/mac80211/key.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/list.h> | 11 | #include <linux/list.h> |
12 | #include <linux/crypto.h> | 12 | #include <linux/crypto.h> |
13 | #include <linux/rcupdate.h> | 13 | #include <linux/rcupdate.h> |
14 | #include <crypto/arc4.h> | ||
14 | #include <net/mac80211.h> | 15 | #include <net/mac80211.h> |
15 | 16 | ||
16 | #define NUM_DEFAULT_KEYS 4 | 17 | #define NUM_DEFAULT_KEYS 4 |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 55583b71ffaf..087c90e461b5 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -10,6 +10,7 @@ | |||
10 | 10 | ||
11 | #include <net/mac80211.h> | 11 | #include <net/mac80211.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/fips.h> | ||
13 | #include <linux/init.h> | 14 | #include <linux/init.h> |
14 | #include <linux/netdevice.h> | 15 | #include <linux/netdevice.h> |
15 | #include <linux/types.h> | 16 | #include <linux/types.h> |
@@ -730,8 +731,7 @@ EXPORT_SYMBOL(ieee80211_alloc_hw_nm); | |||
730 | 731 | ||
731 | static int ieee80211_init_cipher_suites(struct ieee80211_local *local) | 732 | static int ieee80211_init_cipher_suites(struct ieee80211_local *local) |
732 | { | 733 | { |
733 | bool have_wep = !(IS_ERR(local->wep_tx_tfm) || | 734 | bool have_wep = !fips_enabled; /* FIPS does not permit the use of RC4 */ |
734 | IS_ERR(local->wep_rx_tfm)); | ||
735 | bool have_mfp = ieee80211_hw_check(&local->hw, MFP_CAPABLE); | 735 | bool have_mfp = ieee80211_hw_check(&local->hw, MFP_CAPABLE); |
736 | int n_suites = 0, r = 0, w = 0; | 736 | int n_suites = 0, r = 0, w = 0; |
737 | u32 *suites; | 737 | u32 *suites; |
@@ -1298,7 +1298,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
1298 | fail_rate: | 1298 | fail_rate: |
1299 | rtnl_unlock(); | 1299 | rtnl_unlock(); |
1300 | ieee80211_led_exit(local); | 1300 | ieee80211_led_exit(local); |
1301 | ieee80211_wep_free(local); | ||
1302 | fail_flows: | 1301 | fail_flows: |
1303 | destroy_workqueue(local->workqueue); | 1302 | destroy_workqueue(local->workqueue); |
1304 | fail_workqueue: | 1303 | fail_workqueue: |
@@ -1355,7 +1354,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) | |||
1355 | 1354 | ||
1356 | destroy_workqueue(local->workqueue); | 1355 | destroy_workqueue(local->workqueue); |
1357 | wiphy_unregister(local->hw.wiphy); | 1356 | wiphy_unregister(local->hw.wiphy); |
1358 | ieee80211_wep_free(local); | ||
1359 | ieee80211_led_exit(local); | 1357 | ieee80211_led_exit(local); |
1360 | kfree(local->int_scan_req); | 1358 | kfree(local->int_scan_req); |
1361 | } | 1359 | } |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 379d2ab6d327..a00b703bfdfd 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -12,6 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
15 | #include <linux/fips.h> | ||
15 | #include <linux/if_ether.h> | 16 | #include <linux/if_ether.h> |
16 | #include <linux/skbuff.h> | 17 | #include <linux/skbuff.h> |
17 | #include <linux/if_arp.h> | 18 | #include <linux/if_arp.h> |
@@ -5045,7 +5046,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, | |||
5045 | auth_alg = WLAN_AUTH_OPEN; | 5046 | auth_alg = WLAN_AUTH_OPEN; |
5046 | break; | 5047 | break; |
5047 | case NL80211_AUTHTYPE_SHARED_KEY: | 5048 | case NL80211_AUTHTYPE_SHARED_KEY: |
5048 | if (IS_ERR(local->wep_tx_tfm)) | 5049 | if (fips_enabled) |
5049 | return -EOPNOTSUPP; | 5050 | return -EOPNOTSUPP; |
5050 | auth_alg = WLAN_AUTH_SHARED_KEY; | 5051 | auth_alg = WLAN_AUTH_SHARED_KEY; |
5051 | break; | 5052 | break; |
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c index 7914b8e3ce8c..727dc9f3f3b3 100644 --- a/net/mac80211/tkip.c +++ b/net/mac80211/tkip.c | |||
@@ -219,7 +219,7 @@ EXPORT_SYMBOL(ieee80211_get_tkip_p2k); | |||
219 | * @payload_len is the length of payload (_not_ including IV/ICV length). | 219 | * @payload_len is the length of payload (_not_ including IV/ICV length). |
220 | * @ta is the transmitter address. | 220 | * @ta is the transmitter address. |
221 | */ | 221 | */ |
222 | int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm, | 222 | int ieee80211_tkip_encrypt_data(struct arc4_ctx *ctx, |
223 | struct ieee80211_key *key, | 223 | struct ieee80211_key *key, |
224 | struct sk_buff *skb, | 224 | struct sk_buff *skb, |
225 | u8 *payload, size_t payload_len) | 225 | u8 *payload, size_t payload_len) |
@@ -228,7 +228,7 @@ int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm, | |||
228 | 228 | ||
229 | ieee80211_get_tkip_p2k(&key->conf, skb, rc4key); | 229 | ieee80211_get_tkip_p2k(&key->conf, skb, rc4key); |
230 | 230 | ||
231 | return ieee80211_wep_encrypt_data(tfm, rc4key, 16, | 231 | return ieee80211_wep_encrypt_data(ctx, rc4key, 16, |
232 | payload, payload_len); | 232 | payload, payload_len); |
233 | } | 233 | } |
234 | 234 | ||
@@ -236,7 +236,7 @@ int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm, | |||
236 | * beginning of the buffer containing IEEE 802.11 header payload, i.e., | 236 | * beginning of the buffer containing IEEE 802.11 header payload, i.e., |
237 | * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the | 237 | * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the |
238 | * length of payload, including IV, Ext. IV, MIC, ICV. */ | 238 | * length of payload, including IV, Ext. IV, MIC, ICV. */ |
239 | int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm, | 239 | int ieee80211_tkip_decrypt_data(struct arc4_ctx *ctx, |
240 | struct ieee80211_key *key, | 240 | struct ieee80211_key *key, |
241 | u8 *payload, size_t payload_len, u8 *ta, | 241 | u8 *payload, size_t payload_len, u8 *ta, |
242 | u8 *ra, int only_iv, int queue, | 242 | u8 *ra, int only_iv, int queue, |
@@ -294,7 +294,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm, | |||
294 | 294 | ||
295 | tkip_mixing_phase2(tk, &rx_ctx->ctx, iv16, rc4key); | 295 | tkip_mixing_phase2(tk, &rx_ctx->ctx, iv16, rc4key); |
296 | 296 | ||
297 | res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12); | 297 | res = ieee80211_wep_decrypt_data(ctx, rc4key, 16, pos, payload_len - 12); |
298 | done: | 298 | done: |
299 | if (res == TKIP_DECRYPT_OK) { | 299 | if (res == TKIP_DECRYPT_OK) { |
300 | /* | 300 | /* |
diff --git a/net/mac80211/tkip.h b/net/mac80211/tkip.h index 676a7babdf5d..9d2f8bd36cc7 100644 --- a/net/mac80211/tkip.h +++ b/net/mac80211/tkip.h | |||
@@ -10,7 +10,7 @@ | |||
10 | #include <linux/crypto.h> | 10 | #include <linux/crypto.h> |
11 | #include "key.h" | 11 | #include "key.h" |
12 | 12 | ||
13 | int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm, | 13 | int ieee80211_tkip_encrypt_data(struct arc4_ctx *ctx, |
14 | struct ieee80211_key *key, | 14 | struct ieee80211_key *key, |
15 | struct sk_buff *skb, | 15 | struct sk_buff *skb, |
16 | u8 *payload, size_t payload_len); | 16 | u8 *payload, size_t payload_len); |
@@ -21,7 +21,7 @@ enum { | |||
21 | TKIP_DECRYPT_INVALID_KEYIDX = -2, | 21 | TKIP_DECRYPT_INVALID_KEYIDX = -2, |
22 | TKIP_DECRYPT_REPLAY = -3, | 22 | TKIP_DECRYPT_REPLAY = -3, |
23 | }; | 23 | }; |
24 | int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm, | 24 | int ieee80211_tkip_decrypt_data(struct arc4_ctx *ctx, |
25 | struct ieee80211_key *key, | 25 | struct ieee80211_key *key, |
26 | u8 *payload, size_t payload_len, u8 *ta, | 26 | u8 *payload, size_t payload_len, u8 *ta, |
27 | u8 *ra, int only_iv, int queue, | 27 | u8 *ra, int only_iv, int queue, |
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c index 3d9e92867ef0..b75c2c54e665 100644 --- a/net/mac80211/wep.c +++ b/net/mac80211/wep.c | |||
@@ -27,30 +27,9 @@ int ieee80211_wep_init(struct ieee80211_local *local) | |||
27 | /* start WEP IV from a random value */ | 27 | /* start WEP IV from a random value */ |
28 | get_random_bytes(&local->wep_iv, IEEE80211_WEP_IV_LEN); | 28 | get_random_bytes(&local->wep_iv, IEEE80211_WEP_IV_LEN); |
29 | 29 | ||
30 | local->wep_tx_tfm = crypto_alloc_cipher("arc4", 0, 0); | ||
31 | if (IS_ERR(local->wep_tx_tfm)) { | ||
32 | local->wep_rx_tfm = ERR_PTR(-EINVAL); | ||
33 | return PTR_ERR(local->wep_tx_tfm); | ||
34 | } | ||
35 | |||
36 | local->wep_rx_tfm = crypto_alloc_cipher("arc4", 0, 0); | ||
37 | if (IS_ERR(local->wep_rx_tfm)) { | ||
38 | crypto_free_cipher(local->wep_tx_tfm); | ||
39 | local->wep_tx_tfm = ERR_PTR(-EINVAL); | ||
40 | return PTR_ERR(local->wep_rx_tfm); | ||
41 | } | ||
42 | |||
43 | return 0; | 30 | return 0; |
44 | } | 31 | } |
45 | 32 | ||
46 | void ieee80211_wep_free(struct ieee80211_local *local) | ||
47 | { | ||
48 | if (!IS_ERR(local->wep_tx_tfm)) | ||
49 | crypto_free_cipher(local->wep_tx_tfm); | ||
50 | if (!IS_ERR(local->wep_rx_tfm)) | ||
51 | crypto_free_cipher(local->wep_rx_tfm); | ||
52 | } | ||
53 | |||
54 | static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen) | 33 | static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen) |
55 | { | 34 | { |
56 | /* | 35 | /* |
@@ -128,21 +107,17 @@ static void ieee80211_wep_remove_iv(struct ieee80211_local *local, | |||
128 | /* Perform WEP encryption using given key. data buffer must have tailroom | 107 | /* Perform WEP encryption using given key. data buffer must have tailroom |
129 | * for 4-byte ICV. data_len must not include this ICV. Note: this function | 108 | * for 4-byte ICV. data_len must not include this ICV. Note: this function |
130 | * does _not_ add IV. data = RC4(data | CRC32(data)) */ | 109 | * does _not_ add IV. data = RC4(data | CRC32(data)) */ |
131 | int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key, | 110 | int ieee80211_wep_encrypt_data(struct arc4_ctx *ctx, u8 *rc4key, |
132 | size_t klen, u8 *data, size_t data_len) | 111 | size_t klen, u8 *data, size_t data_len) |
133 | { | 112 | { |
134 | __le32 icv; | 113 | __le32 icv; |
135 | int i; | ||
136 | |||
137 | if (IS_ERR(tfm)) | ||
138 | return -1; | ||
139 | 114 | ||
140 | icv = cpu_to_le32(~crc32_le(~0, data, data_len)); | 115 | icv = cpu_to_le32(~crc32_le(~0, data, data_len)); |
141 | put_unaligned(icv, (__le32 *)(data + data_len)); | 116 | put_unaligned(icv, (__le32 *)(data + data_len)); |
142 | 117 | ||
143 | crypto_cipher_setkey(tfm, rc4key, klen); | 118 | arc4_setkey(ctx, rc4key, klen); |
144 | for (i = 0; i < data_len + IEEE80211_WEP_ICV_LEN; i++) | 119 | arc4_crypt(ctx, data, data, data_len + IEEE80211_WEP_ICV_LEN); |
145 | crypto_cipher_encrypt_one(tfm, data + i, data + i); | 120 | memzero_explicit(ctx, sizeof(*ctx)); |
146 | 121 | ||
147 | return 0; | 122 | return 0; |
148 | } | 123 | } |
@@ -181,7 +156,7 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local, | |||
181 | /* Add room for ICV */ | 156 | /* Add room for ICV */ |
182 | skb_put(skb, IEEE80211_WEP_ICV_LEN); | 157 | skb_put(skb, IEEE80211_WEP_ICV_LEN); |
183 | 158 | ||
184 | return ieee80211_wep_encrypt_data(local->wep_tx_tfm, rc4key, keylen + 3, | 159 | return ieee80211_wep_encrypt_data(&local->wep_tx_ctx, rc4key, keylen + 3, |
185 | iv + IEEE80211_WEP_IV_LEN, len); | 160 | iv + IEEE80211_WEP_IV_LEN, len); |
186 | } | 161 | } |
187 | 162 | ||
@@ -189,18 +164,14 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local, | |||
189 | /* Perform WEP decryption using given key. data buffer includes encrypted | 164 | /* Perform WEP decryption using given key. data buffer includes encrypted |
190 | * payload, including 4-byte ICV, but _not_ IV. data_len must not include ICV. | 165 | * payload, including 4-byte ICV, but _not_ IV. data_len must not include ICV. |
191 | * Return 0 on success and -1 on ICV mismatch. */ | 166 | * Return 0 on success and -1 on ICV mismatch. */ |
192 | int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key, | 167 | int ieee80211_wep_decrypt_data(struct arc4_ctx *ctx, u8 *rc4key, |
193 | size_t klen, u8 *data, size_t data_len) | 168 | size_t klen, u8 *data, size_t data_len) |
194 | { | 169 | { |
195 | __le32 crc; | 170 | __le32 crc; |
196 | int i; | ||
197 | |||
198 | if (IS_ERR(tfm)) | ||
199 | return -1; | ||
200 | 171 | ||
201 | crypto_cipher_setkey(tfm, rc4key, klen); | 172 | arc4_setkey(ctx, rc4key, klen); |
202 | for (i = 0; i < data_len + IEEE80211_WEP_ICV_LEN; i++) | 173 | arc4_crypt(ctx, data, data, data_len + IEEE80211_WEP_ICV_LEN); |
203 | crypto_cipher_decrypt_one(tfm, data + i, data + i); | 174 | memzero_explicit(ctx, sizeof(*ctx)); |
204 | 175 | ||
205 | crc = cpu_to_le32(~crc32_le(~0, data, data_len)); | 176 | crc = cpu_to_le32(~crc32_le(~0, data, data_len)); |
206 | if (memcmp(&crc, data + data_len, IEEE80211_WEP_ICV_LEN) != 0) | 177 | if (memcmp(&crc, data + data_len, IEEE80211_WEP_ICV_LEN) != 0) |
@@ -253,7 +224,7 @@ static int ieee80211_wep_decrypt(struct ieee80211_local *local, | |||
253 | /* Copy rest of the WEP key (the secret part) */ | 224 | /* Copy rest of the WEP key (the secret part) */ |
254 | memcpy(rc4key + 3, key->conf.key, key->conf.keylen); | 225 | memcpy(rc4key + 3, key->conf.key, key->conf.keylen); |
255 | 226 | ||
256 | if (ieee80211_wep_decrypt_data(local->wep_rx_tfm, rc4key, klen, | 227 | if (ieee80211_wep_decrypt_data(&local->wep_rx_ctx, rc4key, klen, |
257 | skb->data + hdrlen + | 228 | skb->data + hdrlen + |
258 | IEEE80211_WEP_IV_LEN, len)) | 229 | IEEE80211_WEP_IV_LEN, len)) |
259 | ret = -1; | 230 | ret = -1; |
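After this conversion, each WEP (and TKIP) operation keys a per-direction arc4_ctx, processes the whole buffer in a single arc4_crypt() call, and wipes the context with memzero_explicit(), since the expanded key schedule now sits in plain memory rather than inside a crypto tfm. The on-air layout the two helpers above assume, with sizes taken from the IEEE80211_WEP_* constants::

    | IV (3) | key idx (1) |    payload (data_len)    | ICV (4) |
                           \___ RC4-encrypted span: payload + ICV ___/

    ICV = ~crc32_le(~0, payload, data_len), stored little-endian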
diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h index 866a6798c9ef..997a034233c2 100644 --- a/net/mac80211/wep.h +++ b/net/mac80211/wep.h | |||
@@ -14,13 +14,12 @@ | |||
14 | #include "key.h" | 14 | #include "key.h" |
15 | 15 | ||
16 | int ieee80211_wep_init(struct ieee80211_local *local); | 16 | int ieee80211_wep_init(struct ieee80211_local *local); |
17 | void ieee80211_wep_free(struct ieee80211_local *local); | 17 | int ieee80211_wep_encrypt_data(struct arc4_ctx *ctx, u8 *rc4key, |
18 | int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key, | ||
19 | size_t klen, u8 *data, size_t data_len); | 18 | size_t klen, u8 *data, size_t data_len); |
20 | int ieee80211_wep_encrypt(struct ieee80211_local *local, | 19 | int ieee80211_wep_encrypt(struct ieee80211_local *local, |
21 | struct sk_buff *skb, | 20 | struct sk_buff *skb, |
22 | const u8 *key, int keylen, int keyidx); | 21 | const u8 *key, int keylen, int keyidx); |
23 | int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key, | 22 | int ieee80211_wep_decrypt_data(struct arc4_ctx *ctx, u8 *rc4key, |
24 | size_t klen, u8 *data, size_t data_len); | 23 | size_t klen, u8 *data, size_t data_len); |
25 | 24 | ||
26 | ieee80211_rx_result | 25 | ieee80211_rx_result |
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index a51c7909366e..ee72779729e5 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c | |||
@@ -239,7 +239,7 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) | |||
239 | /* Add room for ICV */ | 239 | /* Add room for ICV */ |
240 | skb_put(skb, IEEE80211_TKIP_ICV_LEN); | 240 | skb_put(skb, IEEE80211_TKIP_ICV_LEN); |
241 | 241 | ||
242 | return ieee80211_tkip_encrypt_data(tx->local->wep_tx_tfm, | 242 | return ieee80211_tkip_encrypt_data(&tx->local->wep_tx_ctx, |
243 | key, skb, pos, len); | 243 | key, skb, pos, len); |
244 | } | 244 | } |
245 | 245 | ||
@@ -290,7 +290,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) | |||
290 | if (status->flag & RX_FLAG_DECRYPTED) | 290 | if (status->flag & RX_FLAG_DECRYPTED) |
291 | hwaccel = 1; | 291 | hwaccel = 1; |
292 | 292 | ||
293 | res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm, | 293 | res = ieee80211_tkip_decrypt_data(&rx->local->wep_rx_ctx, |
294 | key, skb->data + hdrlen, | 294 | key, skb->data + hdrlen, |
295 | skb->len - hdrlen, rx->sta->sta.addr, | 295 | skb->len - hdrlen, rx->sta->sta.addr, |
296 | hdr->addr1, hwaccel, rx->security_idx, | 296 | hdr->addr1, hwaccel, rx->security_idx, |
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index 6310ddede220..578cce4fbe6c 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig | |||
@@ -213,12 +213,14 @@ config LIB80211 | |||
213 | 213 | ||
214 | config LIB80211_CRYPT_WEP | 214 | config LIB80211_CRYPT_WEP |
215 | tristate | 215 | tristate |
216 | select CRYPTO_LIB_ARC4 | ||
216 | 217 | ||
217 | config LIB80211_CRYPT_CCMP | 218 | config LIB80211_CRYPT_CCMP |
218 | tristate | 219 | tristate |
219 | 220 | ||
220 | config LIB80211_CRYPT_TKIP | 221 | config LIB80211_CRYPT_TKIP |
221 | tristate | 222 | tristate |
223 | select CRYPTO_LIB_ARC4 | ||
222 | 224 | ||
223 | config LIB80211_DEBUG | 225 | config LIB80211_DEBUG |
224 | bool "lib80211 debugging messages" | 226 | bool "lib80211 debugging messages" |
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c index 62edf5b01953..f5e842ba7673 100644 --- a/net/wireless/lib80211_crypt_tkip.c +++ b/net/wireless/lib80211_crypt_tkip.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
10 | 10 | ||
11 | #include <linux/err.h> | 11 | #include <linux/err.h> |
12 | #include <linux/fips.h> | ||
12 | #include <linux/module.h> | 13 | #include <linux/module.h> |
13 | #include <linux/init.h> | 14 | #include <linux/init.h> |
14 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
@@ -25,6 +26,7 @@ | |||
25 | #include <linux/ieee80211.h> | 26 | #include <linux/ieee80211.h> |
26 | #include <net/iw_handler.h> | 27 | #include <net/iw_handler.h> |
27 | 28 | ||
29 | #include <crypto/arc4.h> | ||
28 | #include <crypto/hash.h> | 30 | #include <crypto/hash.h> |
29 | #include <linux/crypto.h> | 31 | #include <linux/crypto.h> |
30 | #include <linux/crc32.h> | 32 | #include <linux/crc32.h> |
@@ -60,9 +62,9 @@ struct lib80211_tkip_data { | |||
60 | 62 | ||
61 | int key_idx; | 63 | int key_idx; |
62 | 64 | ||
63 | struct crypto_cipher *rx_tfm_arc4; | 65 | struct arc4_ctx rx_ctx_arc4; |
66 | struct arc4_ctx tx_ctx_arc4; | ||
64 | struct crypto_shash *rx_tfm_michael; | 67 | struct crypto_shash *rx_tfm_michael; |
65 | struct crypto_cipher *tx_tfm_arc4; | ||
66 | struct crypto_shash *tx_tfm_michael; | 68 | struct crypto_shash *tx_tfm_michael; |
67 | 69 | ||
68 | /* scratch buffers for virt_to_page() (crypto API) */ | 70 | /* scratch buffers for virt_to_page() (crypto API) */ |
@@ -89,30 +91,21 @@ static void *lib80211_tkip_init(int key_idx) | |||
89 | { | 91 | { |
90 | struct lib80211_tkip_data *priv; | 92 | struct lib80211_tkip_data *priv; |
91 | 93 | ||
94 | if (fips_enabled) | ||
95 | return NULL; | ||
96 | |||
92 | priv = kzalloc(sizeof(*priv), GFP_ATOMIC); | 97 | priv = kzalloc(sizeof(*priv), GFP_ATOMIC); |
93 | if (priv == NULL) | 98 | if (priv == NULL) |
94 | goto fail; | 99 | goto fail; |
95 | 100 | ||
96 | priv->key_idx = key_idx; | 101 | priv->key_idx = key_idx; |
97 | 102 | ||
98 | priv->tx_tfm_arc4 = crypto_alloc_cipher("arc4", 0, 0); | ||
99 | if (IS_ERR(priv->tx_tfm_arc4)) { | ||
100 | priv->tx_tfm_arc4 = NULL; | ||
101 | goto fail; | ||
102 | } | ||
103 | |||
104 | priv->tx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0); | 103 | priv->tx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0); |
105 | if (IS_ERR(priv->tx_tfm_michael)) { | 104 | if (IS_ERR(priv->tx_tfm_michael)) { |
106 | priv->tx_tfm_michael = NULL; | 105 | priv->tx_tfm_michael = NULL; |
107 | goto fail; | 106 | goto fail; |
108 | } | 107 | } |
109 | 108 | ||
110 | priv->rx_tfm_arc4 = crypto_alloc_cipher("arc4", 0, 0); | ||
111 | if (IS_ERR(priv->rx_tfm_arc4)) { | ||
112 | priv->rx_tfm_arc4 = NULL; | ||
113 | goto fail; | ||
114 | } | ||
115 | |||
116 | priv->rx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0); | 109 | priv->rx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0); |
117 | if (IS_ERR(priv->rx_tfm_michael)) { | 110 | if (IS_ERR(priv->rx_tfm_michael)) { |
118 | priv->rx_tfm_michael = NULL; | 111 | priv->rx_tfm_michael = NULL; |
@@ -124,9 +117,7 @@ static void *lib80211_tkip_init(int key_idx) | |||
124 | fail: | 117 | fail: |
125 | if (priv) { | 118 | if (priv) { |
126 | crypto_free_shash(priv->tx_tfm_michael); | 119 | crypto_free_shash(priv->tx_tfm_michael); |
127 | crypto_free_cipher(priv->tx_tfm_arc4); | ||
128 | crypto_free_shash(priv->rx_tfm_michael); | 120 | crypto_free_shash(priv->rx_tfm_michael); |
129 | crypto_free_cipher(priv->rx_tfm_arc4); | ||
130 | kfree(priv); | 121 | kfree(priv); |
131 | } | 122 | } |
132 | 123 | ||
@@ -138,11 +129,9 @@ static void lib80211_tkip_deinit(void *priv) | |||
138 | struct lib80211_tkip_data *_priv = priv; | 129 | struct lib80211_tkip_data *_priv = priv; |
139 | if (_priv) { | 130 | if (_priv) { |
140 | crypto_free_shash(_priv->tx_tfm_michael); | 131 | crypto_free_shash(_priv->tx_tfm_michael); |
141 | crypto_free_cipher(_priv->tx_tfm_arc4); | ||
142 | crypto_free_shash(_priv->rx_tfm_michael); | 132 | crypto_free_shash(_priv->rx_tfm_michael); |
143 | crypto_free_cipher(_priv->rx_tfm_arc4); | ||
144 | } | 133 | } |
145 | kfree(priv); | 134 | kzfree(priv); |
146 | } | 135 | } |
147 | 136 | ||
148 | static inline u16 RotR1(u16 val) | 137 | static inline u16 RotR1(u16 val) |
@@ -341,7 +330,6 @@ static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
341 | int len; | 330 | int len; |
342 | u8 rc4key[16], *pos, *icv; | 331 | u8 rc4key[16], *pos, *icv; |
343 | u32 crc; | 332 | u32 crc; |
344 | int i; | ||
345 | 333 | ||
346 | if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { | 334 | if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { |
347 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 335 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
@@ -366,9 +354,9 @@ static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
366 | icv[2] = crc >> 16; | 354 | icv[2] = crc >> 16; |
367 | icv[3] = crc >> 24; | 355 | icv[3] = crc >> 24; |
368 | 356 | ||
369 | crypto_cipher_setkey(tkey->tx_tfm_arc4, rc4key, 16); | 357 | arc4_setkey(&tkey->tx_ctx_arc4, rc4key, 16); |
370 | for (i = 0; i < len + 4; i++) | 358 | arc4_crypt(&tkey->tx_ctx_arc4, pos, pos, len + 4); |
371 | crypto_cipher_encrypt_one(tkey->tx_tfm_arc4, pos + i, pos + i); | 359 | |
372 | return 0; | 360 | return 0; |
373 | } | 361 | } |
374 | 362 | ||
@@ -396,7 +384,6 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
396 | u8 icv[4]; | 384 | u8 icv[4]; |
397 | u32 crc; | 385 | u32 crc; |
398 | int plen; | 386 | int plen; |
399 | int i; | ||
400 | 387 | ||
401 | hdr = (struct ieee80211_hdr *)skb->data; | 388 | hdr = (struct ieee80211_hdr *)skb->data; |
402 | 389 | ||
@@ -449,9 +436,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
449 | 436 | ||
450 | plen = skb->len - hdr_len - 12; | 437 | plen = skb->len - hdr_len - 12; |
451 | 438 | ||
452 | crypto_cipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); | 439 | arc4_setkey(&tkey->rx_ctx_arc4, rc4key, 16); |
453 | for (i = 0; i < plen + 4; i++) | 440 | arc4_crypt(&tkey->rx_ctx_arc4, pos, pos, plen + 4); |
454 | crypto_cipher_decrypt_one(tkey->rx_tfm_arc4, pos + i, pos + i); | ||
455 | 441 | ||
456 | crc = ~crc32_le(~0, pos, plen); | 442 | crc = ~crc32_le(~0, pos, plen); |
457 | icv[0] = crc; | 443 | icv[0] = crc; |
@@ -636,17 +622,17 @@ static int lib80211_tkip_set_key(void *key, int len, u8 * seq, void *priv) | |||
636 | struct lib80211_tkip_data *tkey = priv; | 622 | struct lib80211_tkip_data *tkey = priv; |
637 | int keyidx; | 623 | int keyidx; |
638 | struct crypto_shash *tfm = tkey->tx_tfm_michael; | 624 | struct crypto_shash *tfm = tkey->tx_tfm_michael; |
639 | struct crypto_cipher *tfm2 = tkey->tx_tfm_arc4; | 625 | struct arc4_ctx *tfm2 = &tkey->tx_ctx_arc4; |
640 | struct crypto_shash *tfm3 = tkey->rx_tfm_michael; | 626 | struct crypto_shash *tfm3 = tkey->rx_tfm_michael; |
641 | struct crypto_cipher *tfm4 = tkey->rx_tfm_arc4; | 627 | struct arc4_ctx *tfm4 = &tkey->rx_ctx_arc4; |
642 | 628 | ||
643 | keyidx = tkey->key_idx; | 629 | keyidx = tkey->key_idx; |
644 | memset(tkey, 0, sizeof(*tkey)); | 630 | memset(tkey, 0, sizeof(*tkey)); |
645 | tkey->key_idx = keyidx; | 631 | tkey->key_idx = keyidx; |
646 | tkey->tx_tfm_michael = tfm; | 632 | tkey->tx_tfm_michael = tfm; |
647 | tkey->tx_tfm_arc4 = tfm2; | 633 | tkey->tx_ctx_arc4 = *tfm2; |
648 | tkey->rx_tfm_michael = tfm3; | 634 | tkey->rx_tfm_michael = tfm3; |
649 | tkey->rx_tfm_arc4 = tfm4; | 635 | tkey->rx_ctx_arc4 = *tfm4; |
650 | if (len == TKIP_KEY_LEN) { | 636 | if (len == TKIP_KEY_LEN) { |
651 | memcpy(tkey->key, key, TKIP_KEY_LEN); | 637 | memcpy(tkey->key, key, TKIP_KEY_LEN); |
652 | tkey->key_set = 1; | 638 | tkey->key_set = 1; |
diff --git a/net/wireless/lib80211_crypt_wep.c b/net/wireless/lib80211_crypt_wep.c index e127b6f7fc9f..dafc6f3571db 100644 --- a/net/wireless/lib80211_crypt_wep.c +++ b/net/wireless/lib80211_crypt_wep.c | |||
@@ -7,6 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/err.h> | 9 | #include <linux/err.h> |
10 | #include <linux/fips.h> | ||
10 | #include <linux/module.h> | 11 | #include <linux/module.h> |
11 | #include <linux/init.h> | 12 | #include <linux/init.h> |
12 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
@@ -18,7 +19,7 @@ | |||
18 | 19 | ||
19 | #include <net/lib80211.h> | 20 | #include <net/lib80211.h> |
20 | 21 | ||
21 | #include <linux/crypto.h> | 22 | #include <crypto/arc4.h> |
22 | #include <linux/crc32.h> | 23 | #include <linux/crc32.h> |
23 | 24 | ||
24 | MODULE_AUTHOR("Jouni Malinen"); | 25 | MODULE_AUTHOR("Jouni Malinen"); |
@@ -31,52 +32,31 @@ struct lib80211_wep_data { | |||
31 | u8 key[WEP_KEY_LEN + 1]; | 32 | u8 key[WEP_KEY_LEN + 1]; |
32 | u8 key_len; | 33 | u8 key_len; |
33 | u8 key_idx; | 34 | u8 key_idx; |
34 | struct crypto_cipher *tx_tfm; | 35 | struct arc4_ctx tx_ctx; |
35 | struct crypto_cipher *rx_tfm; | 36 | struct arc4_ctx rx_ctx; |
36 | }; | 37 | }; |
37 | 38 | ||
38 | static void *lib80211_wep_init(int keyidx) | 39 | static void *lib80211_wep_init(int keyidx) |
39 | { | 40 | { |
40 | struct lib80211_wep_data *priv; | 41 | struct lib80211_wep_data *priv; |
41 | 42 | ||
43 | if (fips_enabled) | ||
44 | return NULL; | ||
45 | |||
42 | priv = kzalloc(sizeof(*priv), GFP_ATOMIC); | 46 | priv = kzalloc(sizeof(*priv), GFP_ATOMIC); |
43 | if (priv == NULL) | 47 | if (priv == NULL) |
44 | goto fail; | 48 | return NULL; |
45 | priv->key_idx = keyidx; | 49 | priv->key_idx = keyidx; |
46 | 50 | ||
47 | priv->tx_tfm = crypto_alloc_cipher("arc4", 0, 0); | ||
48 | if (IS_ERR(priv->tx_tfm)) { | ||
49 | priv->tx_tfm = NULL; | ||
50 | goto fail; | ||
51 | } | ||
52 | |||
53 | priv->rx_tfm = crypto_alloc_cipher("arc4", 0, 0); | ||
54 | if (IS_ERR(priv->rx_tfm)) { | ||
55 | priv->rx_tfm = NULL; | ||
56 | goto fail; | ||
57 | } | ||
58 | /* start WEP IV from a random value */ | 51 | /* start WEP IV from a random value */ |
59 | get_random_bytes(&priv->iv, 4); | 52 | get_random_bytes(&priv->iv, 4); |
60 | 53 | ||
61 | return priv; | 54 | return priv; |
62 | |||
63 | fail: | ||
64 | if (priv) { | ||
65 | crypto_free_cipher(priv->tx_tfm); | ||
66 | crypto_free_cipher(priv->rx_tfm); | ||
67 | kfree(priv); | ||
68 | } | ||
69 | return NULL; | ||
70 | } | 55 | } |
71 | 56 | ||
72 | static void lib80211_wep_deinit(void *priv) | 57 | static void lib80211_wep_deinit(void *priv) |
73 | { | 58 | { |
74 | struct lib80211_wep_data *_priv = priv; | 59 | kzfree(priv); |
75 | if (_priv) { | ||
76 | crypto_free_cipher(_priv->tx_tfm); | ||
77 | crypto_free_cipher(_priv->rx_tfm); | ||
78 | } | ||
79 | kfree(priv); | ||
80 | } | 60 | } |
81 | 61 | ||
82 | /* Add WEP IV/key info to a frame that has at least 4 bytes of headroom */ | 62 | /* Add WEP IV/key info to a frame that has at least 4 bytes of headroom */ |
@@ -128,7 +108,6 @@ static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
128 | u32 crc, klen, len; | 108 | u32 crc, klen, len; |
129 | u8 *pos, *icv; | 109 | u8 *pos, *icv; |
130 | u8 key[WEP_KEY_LEN + 3]; | 110 | u8 key[WEP_KEY_LEN + 3]; |
131 | int i; | ||
132 | 111 | ||
133 | /* other checks are in lib80211_wep_build_iv */ | 112 | /* other checks are in lib80211_wep_build_iv */ |
134 | if (skb_tailroom(skb) < 4) | 113 | if (skb_tailroom(skb) < 4) |
@@ -156,10 +135,8 @@ static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
156 | icv[2] = crc >> 16; | 135 | icv[2] = crc >> 16; |
157 | icv[3] = crc >> 24; | 136 | icv[3] = crc >> 24; |
158 | 137 | ||
159 | crypto_cipher_setkey(wep->tx_tfm, key, klen); | 138 | arc4_setkey(&wep->tx_ctx, key, klen); |
160 | 139 | arc4_crypt(&wep->tx_ctx, pos, pos, len + 4); | |
161 | for (i = 0; i < len + 4; i++) | ||
162 | crypto_cipher_encrypt_one(wep->tx_tfm, pos + i, pos + i); | ||
163 | 140 | ||
164 | return 0; | 141 | return 0; |
165 | } | 142 | } |
@@ -177,7 +154,6 @@ static int lib80211_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
177 | u32 crc, klen, plen; | 154 | u32 crc, klen, plen; |
178 | u8 key[WEP_KEY_LEN + 3]; | 155 | u8 key[WEP_KEY_LEN + 3]; |
179 | u8 keyidx, *pos, icv[4]; | 156 | u8 keyidx, *pos, icv[4]; |
180 | int i; | ||
181 | 157 | ||
182 | if (skb->len < hdr_len + 8) | 158 | if (skb->len < hdr_len + 8) |
183 | return -1; | 159 | return -1; |
@@ -198,9 +174,8 @@ static int lib80211_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv) | |||
198 | /* Apply RC4 to data and compute CRC32 over decrypted data */ | 174 | /* Apply RC4 to data and compute CRC32 over decrypted data */ |
199 | plen = skb->len - hdr_len - 8; | 175 | plen = skb->len - hdr_len - 8; |
200 | 176 | ||
201 | crypto_cipher_setkey(wep->rx_tfm, key, klen); | 177 | arc4_setkey(&wep->rx_ctx, key, klen); |
202 | for (i = 0; i < plen + 4; i++) | 178 | arc4_crypt(&wep->rx_ctx, pos, pos, plen + 4); |
203 | crypto_cipher_decrypt_one(wep->rx_tfm, pos + i, pos + i); | ||
204 | 179 | ||
205 | crc = ~crc32_le(~0, pos, plen); | 180 | crc = ~crc32_le(~0, pos, plen); |
206 | icv[0] = crc; | 181 | icv[0] = crc; |