author      Linus Torvalds <torvalds@linux-foundation.org>  2017-07-05 15:22:23 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>  2017-07-05 15:22:23 -0400
commit      8ad06e56dcbc1984ef0ff8f6e3c19982c5809f73 (patch)
tree        74bc746a4f18eeddfc085b76c776ddcf2c798cfa
parent      59005b0c59a164101b0273e4bda212c809dc2246 (diff)
parent      035f901eac4d2d0fd40f3055026355d55d46949f (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"Algorithms:
- add private key generation to ecdh
Drivers:
- add generic gcm(aes) to aesni-intel
- add SafeXcel EIP197 crypto engine driver
- add ecb(aes), cfb(aes) and ecb(des3_ede) to cavium
- add support for CNN55XX adapters in cavium
- add ctr mode to chcr
- add support for gcm(aes) to omap"
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (140 commits)
crypto: testmgr - Reenable sha1/aes in FIPS mode
crypto: ccp - Release locks before returning
crypto: cavium/nitrox - dma_mapping_error() returns bool
crypto: doc - fix typo in docs
Documentation/bindings: Document the SafeXel cryptographic engine driver
crypto: caam - fix gfp allocation flags (part II)
crypto: caam - fix gfp allocation flags (part I)
crypto: drbg - Fixes panic in wait_for_completion call
crypto: caam - make of_device_ids const.
crypto: vmx - remove unnecessary check
crypto: n2 - make of_device_ids const
crypto: inside-secure - use the base_end pointer in ring rollback
crypto: inside-secure - increase the batch size
crypto: inside-secure - only dequeue when needed
crypto: inside-secure - get the backlog before dequeueing the request
crypto: inside-secure - stop requeueing failed requests
crypto: inside-secure - use one queue per hw ring
crypto: inside-secure - update the context and request later
crypto: inside-secure - align the cipher and hash send functions
crypto: inside-secure - optimize DSE bufferability control
...
127 files changed, 12841 insertions, 1535 deletions
diff --git a/Documentation/crypto/api-samples.rst b/Documentation/crypto/api-samples.rst
index d021fd96a76d..2531948db89f 100644
--- a/Documentation/crypto/api-samples.rst
+++ b/Documentation/crypto/api-samples.rst
@@ -155,9 +155,9 @@ Code Example For Use of Operational State Memory With SHASH | |||
155 | char ctx[]; | 155 | char ctx[]; |
156 | }; | 156 | }; |
157 | 157 | ||
158 | static struct sdesc init_sdesc(struct crypto_shash *alg) | 158 | static struct sdesc *init_sdesc(struct crypto_shash *alg) |
159 | { | 159 | { |
160 | struct sdesc sdesc; | 160 | struct sdesc *sdesc; |
161 | int size; | 161 | int size; |
162 | 162 | ||
163 | size = sizeof(struct shash_desc) + crypto_shash_descsize(alg); | 163 | size = sizeof(struct shash_desc) + crypto_shash_descsize(alg); |
@@ -169,15 +169,16 @@ Code Example For Use of Operational State Memory With SHASH | |||
169 | return sdesc; | 169 | return sdesc; |
170 | } | 170 | } |
171 | 171 | ||
172 | static int calc_hash(struct crypto_shashalg, | 172 | static int calc_hash(struct crypto_shash *alg, |
173 | const unsigned chardata, unsigned int datalen, | 173 | const unsigned char *data, unsigned int datalen, |
174 | unsigned chardigest) { | 174 | unsigned char *digest) |
175 | struct sdesc sdesc; | 175 | { |
176 | struct sdesc *sdesc; | ||
176 | int ret; | 177 | int ret; |
177 | 178 | ||
178 | sdesc = init_sdesc(alg); | 179 | sdesc = init_sdesc(alg); |
179 | if (IS_ERR(sdesc)) { | 180 | if (IS_ERR(sdesc)) { |
180 | pr_info("trusted_key: can't alloc %s\n", hash_alg); | 181 | pr_info("can't alloc sdesc\n"); |
181 | return PTR_ERR(sdesc); | 182 | return PTR_ERR(sdesc); |
182 | } | 183 | } |
183 | 184 | ||
@@ -186,6 +187,23 @@ Code Example For Use of Operational State Memory With SHASH | |||
186 | return ret; | 187 | return ret; |
187 | } | 188 | } |
188 | 189 | ||
190 | static int test_hash(const unsigned char *data, unsigned int datalen, | ||
191 | unsigned char *digest) | ||
192 | { | ||
193 | struct crypto_shash *alg; | ||
194 | char *hash_alg_name = "sha1-padlock-nano"; | ||
195 | int ret; | ||
196 | |||
197 | alg = crypto_alloc_shash(hash_alg_name, CRYPTO_ALG_TYPE_SHASH, 0); | ||
198 | if (IS_ERR(alg)) { | ||
199 | pr_info("can't alloc alg %s\n", hash_alg_name); | ||
200 | return PTR_ERR(alg); | ||
201 | } | ||
202 | ret = calc_hash(alg, data, datalen, digest); | ||
203 | crypto_free_shash(alg); | ||
204 | return ret; | ||
205 | } | ||
206 | |||
189 | 207 | ||
190 | Code Example For Random Number Generator Usage | 208 | Code Example For Random Number Generator Usage |
191 | ---------------------------------------------- | 209 | ---------------------------------------------- |
@@ -195,8 +213,8 @@ Code Example For Random Number Generator Usage | |||
195 | 213 | ||
196 | static int get_random_numbers(u8 *buf, unsigned int len) | 214 | static int get_random_numbers(u8 *buf, unsigned int len) |
197 | { | 215 | { |
198 | struct crypto_rngrng = NULL; | 216 | struct crypto_rng *rng = NULL; |
199 | chardrbg = "drbg_nopr_sha256"; /* Hash DRBG with SHA-256, no PR */ | 217 | char *drbg = "drbg_nopr_sha256"; /* Hash DRBG with SHA-256, no PR */ |
200 | int ret; | 218 | int ret; |
201 | 219 | ||
202 | if (!buf || !len) { | 220 | if (!buf || !len) { |
@@ -207,7 +225,7 @@ Code Example For Random Number Generator Usage | |||
207 | rng = crypto_alloc_rng(drbg, 0, 0); | 225 | rng = crypto_alloc_rng(drbg, 0, 0); |
208 | if (IS_ERR(rng)) { | 226 | if (IS_ERR(rng)) { |
209 | pr_debug("could not allocate RNG handle for %s\n", drbg); | 227 | pr_debug("could not allocate RNG handle for %s\n", drbg); |
210 | return -PTR_ERR(rng); | 228 | return PTR_ERR(rng); |
211 | } | 229 | } |
212 | 230 | ||
213 | ret = crypto_rng_get_bytes(rng, buf, len); | 231 | ret = crypto_rng_get_bytes(rng, buf, len); |
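Read together, the api-samples.rst hunks above convert the SHASH example to pointer-based helpers with proper error handling and add a test_hash() wrapper. The following consolidated sketch shows how the corrected helpers fit together; the lines hidden by the hunk context (the kmalloc()-based allocation in init_sdesc() and the crypto_shash_digest()/kfree() body of calc_hash()) are filled in as assumptions about the surrounding documentation rather than quoted from the patch:

    /* Sketch of the patched example; needs <crypto/hash.h> and <linux/slab.h>. */
    static struct sdesc *init_sdesc(struct crypto_shash *alg)
    {
        struct sdesc *sdesc;
        int size;

        size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
        sdesc = kmalloc(size, GFP_KERNEL);      /* assumed, not shown in the hunk */
        if (!sdesc)
            return ERR_PTR(-ENOMEM);
        sdesc->shash.tfm = alg;
        return sdesc;
    }

    static int calc_hash(struct crypto_shash *alg,
                         const unsigned char *data, unsigned int datalen,
                         unsigned char *digest)
    {
        struct sdesc *sdesc;
        int ret;

        sdesc = init_sdesc(alg);
        if (IS_ERR(sdesc)) {
            pr_info("can't alloc sdesc\n");
            return PTR_ERR(sdesc);
        }

        ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
        kfree(sdesc);
        return ret;
    }

    static int test_hash(const unsigned char *data, unsigned int datalen,
                         unsigned char *digest)
    {
        struct crypto_shash *alg;
        char *hash_alg_name = "sha1-padlock-nano";
        int ret;

        alg = crypto_alloc_shash(hash_alg_name, CRYPTO_ALG_TYPE_SHASH, 0);
        if (IS_ERR(alg)) {
            pr_info("can't alloc alg %s\n", hash_alg_name);
            return PTR_ERR(alg);
        }
        ret = calc_hash(alg, data, datalen, digest);
        crypto_free_shash(alg);
        return ret;
    }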
diff --git a/Documentation/crypto/userspace-if.rst b/Documentation/crypto/userspace-if.rst
index de5a72e32bc9..ff86befa61e0 100644
--- a/Documentation/crypto/userspace-if.rst
+++ b/Documentation/crypto/userspace-if.rst
@@ -327,7 +327,7 @@ boundary. Non-aligned data can be used as well, but may require more | |||
327 | operations of the kernel which would defeat the speed gains obtained | 327 | operations of the kernel which would defeat the speed gains obtained |
328 | from the zero-copy interface. | 328 | from the zero-copy interface. |
329 | 329 | ||
330 | The system-interent limit for the size of one zero-copy operation is 16 | 330 | The system-inherent limit for the size of one zero-copy operation is 16 |
331 | pages. If more data is to be sent to AF_ALG, user space must slice the | 331 | pages. If more data is to be sent to AF_ALG, user space must slice the |
332 | input into segments with a maximum size of 16 pages. | 332 | input into segments with a maximum size of 16 pages. |
333 | 333 | ||
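In practice user space drives the zero-copy path with vmsplice()/splice(), so the 16-page cap above translates into a chunking loop. A minimal sketch under stated assumptions: opfd is an accepted and configured AF_ALG operation socket, pipefd is an open pipe, and the sendmsg()-based cipher setup plus any "more data" signalling between slices are omitted:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sys/uio.h>
    #include <unistd.h>

    /* Push len bytes through the zero-copy path in slices of at most 16 pages. */
    static int send_zero_copy(int opfd, int pipefd[2], const char *buf, size_t len)
    {
        size_t limit = 16 * (size_t)sysconf(_SC_PAGESIZE);
        size_t off = 0;

        while (off < len) {
            struct iovec iov = {
                .iov_base = (void *)(buf + off),
                .iov_len  = (len - off > limit) ? limit : len - off,
            };

            /* hand the user pages to the pipe ... */
            if (vmsplice(pipefd[1], &iov, 1, SPLICE_F_GIFT) < 0)
                return -1;
            /* ... and move them into the algorithm socket */
            if (splice(pipefd[0], NULL, opfd, NULL, iov.iov_len, 0) < 0)
                return -1;

            off += iov.iov_len;
        }
        return 0;
    }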
diff --git a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
new file mode 100644
index 000000000000..f69773f4252b
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
@@ -0,0 +1,29 @@ | |||
1 | Inside Secure SafeXcel cryptographic engine | ||
2 | |||
3 | Required properties: | ||
4 | - compatible: Should be "inside-secure,safexcel-eip197". | ||
5 | - reg: Base physical address of the engine and length of memory mapped region. | ||
6 | - interrupts: Interrupt numbers for the rings and engine. | ||
7 | - interrupt-names: Should be "ring0", "ring1", "ring2", "ring3", "eip", "mem". | ||
8 | |||
9 | Optional properties: | ||
10 | - clocks: Reference to the crypto engine clock. | ||
11 | - dma-mask: The address mask limitation. Defaults to 64. | ||
12 | |||
13 | Example: | ||
14 | |||
15 | crypto: crypto@800000 { | ||
16 | compatible = "inside-secure,safexcel-eip197"; | ||
17 | reg = <0x800000 0x200000>; | ||
18 | interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, | ||
19 | <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>, | ||
20 | <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>, | ||
21 | <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>, | ||
22 | <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>, | ||
23 | <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>; | ||
24 | interrupt-names = "mem", "ring0", "ring1", "ring2", "ring3", | ||
25 | "eip"; | ||
26 | clocks = <&cpm_syscon0 1 26>; | ||
27 | dma-mask = <0xff 0xffffffff>; | ||
28 | status = "disabled"; | ||
29 | }; | ||
diff --git a/Documentation/devicetree/bindings/crypto/mediatek-crypto.txt b/Documentation/devicetree/bindings/crypto/mediatek-crypto.txt
index c204725e5873..450da3661cad 100644
--- a/Documentation/devicetree/bindings/crypto/mediatek-crypto.txt
+++ b/Documentation/devicetree/bindings/crypto/mediatek-crypto.txt
@@ -6,8 +6,7 @@ Required properties: | |||
6 | - interrupts: Should contain the five crypto engines interrupts in numeric | 6 | - interrupts: Should contain the five crypto engines interrupts in numeric |
7 | order. These are global system and four descriptor rings. | 7 | order. These are global system and four descriptor rings. |
8 | - clocks: the clock used by the core | 8 | - clocks: the clock used by the core |
9 | - clock-names: the names of the clock listed in the clocks property. These are | 9 | - clock-names: Must contain "cryp". |
10 | "ethif", "cryp" | ||
11 | - power-domains: Must contain a reference to the PM domain. | 10 | - power-domains: Must contain a reference to the PM domain. |
12 | 11 | ||
13 | 12 | ||
@@ -20,8 +19,7 @@ Example: | |||
20 | <GIC_SPI 84 IRQ_TYPE_LEVEL_LOW>, | 19 | <GIC_SPI 84 IRQ_TYPE_LEVEL_LOW>, |
21 | <GIC_SPI 91 IRQ_TYPE_LEVEL_LOW>, | 20 | <GIC_SPI 91 IRQ_TYPE_LEVEL_LOW>, |
22 | <GIC_SPI 97 IRQ_TYPE_LEVEL_LOW>; | 21 | <GIC_SPI 97 IRQ_TYPE_LEVEL_LOW>; |
23 | clocks = <&topckgen CLK_TOP_ETHIF_SEL>, | 22 | clocks = <ðsys CLK_ETHSYS_CRYPTO>; |
24 | <ðsys CLK_ETHSYS_CRYPTO>; | 23 | clock-names = "cryp"; |
25 | clock-names = "ethif","cryp"; | ||
26 | power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>; | 24 | power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>; |
27 | }; | 25 | }; |
diff --git a/Documentation/devicetree/bindings/rng/mtk-rng.txt b/Documentation/devicetree/bindings/rng/mtk-rng.txt
index a6d62a2abd39..366b99bff8cd 100644
--- a/Documentation/devicetree/bindings/rng/mtk-rng.txt
+++ b/Documentation/devicetree/bindings/rng/mtk-rng.txt
@@ -2,7 +2,9 @@ Device-Tree bindings for Mediatek random number generator | |||
2 | found in Mediatek SoC family | 2 | found in Mediatek SoC family |
3 | 3 | ||
4 | Required properties: | 4 | Required properties: |
5 | - compatible : Should be "mediatek,mt7623-rng" | 5 | - compatible : Should be |
6 | "mediatek,mt7622-rng", "mediatek,mt7623-rng" : for MT7622 | ||
7 | "mediatek,mt7623-rng" : for MT7623 | ||
6 | - clocks : list of clock specifiers, corresponding to | 8 | - clocks : list of clock specifiers, corresponding to |
7 | entries in clock-names property; | 9 | entries in clock-names property; |
8 | - clock-names : Should contain "rng" entries; | 10 | - clock-names : Should contain "rng" entries; |
diff --git a/Documentation/devicetree/bindings/rng/timeriomem_rng.txt b/Documentation/devicetree/bindings/rng/timeriomem_rng.txt
index 6616d15866a3..214940093b55 100644
--- a/Documentation/devicetree/bindings/rng/timeriomem_rng.txt
+++ b/Documentation/devicetree/bindings/rng/timeriomem_rng.txt
@@ -5,6 +5,13 @@ Required properties: | |||
5 | - reg : base address to sample from | 5 | - reg : base address to sample from |
6 | - period : wait time in microseconds to use between samples | 6 | - period : wait time in microseconds to use between samples |
7 | 7 | ||
8 | Optional properties: | ||
9 | - quality : estimated number of bits of true entropy per 1024 bits read from the | ||
10 | rng. Defaults to zero which causes the kernel's default quality to | ||
11 | be used instead. Note that the default quality is usually zero | ||
12 | which disables using this rng to automatically fill the kernel's | ||
13 | entropy pool. | ||
14 | |||
8 | N.B. currently 'reg' must be four bytes wide and aligned | 15 | N.B. currently 'reg' must be four bytes wide and aligned |
9 | 16 | ||
10 | Example: | 17 | Example: |
diff --git a/MAINTAINERS b/MAINTAINERS
index 61d0ac6f535c..df62ee151b62 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3746,6 +3746,13 @@ S: Supported | |||
3746 | F: drivers/infiniband/hw/cxgb4/ | 3746 | F: drivers/infiniband/hw/cxgb4/ |
3747 | F: include/uapi/rdma/cxgb4-abi.h | 3747 | F: include/uapi/rdma/cxgb4-abi.h |
3748 | 3748 | ||
3749 | CXGB4 CRYPTO DRIVER (chcr) | ||
3750 | M: Harsh Jain <harsh@chelsio.com> | ||
3751 | L: linux-crypto@vger.kernel.org | ||
3752 | W: http://www.chelsio.com | ||
3753 | S: Supported | ||
3754 | F: drivers/crypto/chelsio | ||
3755 | |||
3749 | CXGB4VF ETHERNET DRIVER (CXGB4VF) | 3756 | CXGB4VF ETHERNET DRIVER (CXGB4VF) |
3750 | M: Casey Leedom <leedom@chelsio.com> | 3757 | M: Casey Leedom <leedom@chelsio.com> |
3751 | L: netdev@vger.kernel.org | 3758 | L: netdev@vger.kernel.org |
@@ -6647,6 +6654,12 @@ F: Documentation/input/multi-touch-protocol.rst | |||
6647 | F: drivers/input/input-mt.c | 6654 | F: drivers/input/input-mt.c |
6648 | K: \b(ABS|SYN)_MT_ | 6655 | K: \b(ABS|SYN)_MT_ |
6649 | 6656 | ||
6657 | INSIDE SECURE CRYPTO DRIVER | ||
6658 | M: Antoine Tenart <antoine.tenart@free-electrons.com> | ||
6659 | F: drivers/crypto/inside-secure/ | ||
6660 | S: Maintained | ||
6661 | L: linux-crypto@vger.kernel.org | ||
6662 | |||
6650 | INTEL ASoC BDW/HSW DRIVERS | 6663 | INTEL ASoC BDW/HSW DRIVERS |
6651 | M: Jie Yang <yang.jie@linux.intel.com> | 6664 | M: Jie Yang <yang.jie@linux.intel.com> |
6652 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 6665 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
@@ -8306,6 +8319,11 @@ L: linux-wireless@vger.kernel.org | |||
8306 | S: Maintained | 8319 | S: Maintained |
8307 | F: drivers/net/wireless/mediatek/mt7601u/ | 8320 | F: drivers/net/wireless/mediatek/mt7601u/ |
8308 | 8321 | ||
8322 | MEDIATEK RANDOM NUMBER GENERATOR SUPPORT | ||
8323 | M: Sean Wang <sean.wang@mediatek.com> | ||
8324 | S: Maintained | ||
8325 | F: drivers/char/hw_random/mtk-rng.c | ||
8326 | |||
8309 | MEGACHIPS STDPXXXX-GE-B850V3-FW LVDS/DP++ BRIDGES | 8327 | MEGACHIPS STDPXXXX-GE-B850V3-FW LVDS/DP++ BRIDGES |
8310 | M: Peter Senna Tschudin <peter.senna@collabora.com> | 8328 | M: Peter Senna Tschudin <peter.senna@collabora.com> |
8311 | M: Martin Donnelly <martin.donnelly@ge.com> | 8329 | M: Martin Donnelly <martin.donnelly@ge.com> |
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index 883b84d828c5..0f966a8ca1ce 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -14,6 +14,7 @@ | |||
14 | #include <crypto/aes.h> | 14 | #include <crypto/aes.h> |
15 | #include <crypto/internal/simd.h> | 15 | #include <crypto/internal/simd.h> |
16 | #include <crypto/internal/skcipher.h> | 16 | #include <crypto/internal/skcipher.h> |
17 | #include <linux/cpufeature.h> | ||
17 | #include <linux/module.h> | 18 | #include <linux/module.h> |
18 | #include <crypto/xts.h> | 19 | #include <crypto/xts.h> |
19 | 20 | ||
@@ -425,9 +426,6 @@ static int __init aes_init(void) | |||
425 | int err; | 426 | int err; |
426 | int i; | 427 | int i; |
427 | 428 | ||
428 | if (!(elf_hwcap2 & HWCAP2_AES)) | ||
429 | return -ENODEV; | ||
430 | |||
431 | err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); | 429 | err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); |
432 | if (err) | 430 | if (err) |
433 | return err; | 431 | return err; |
@@ -451,5 +449,5 @@ unregister_simds: | |||
451 | return err; | 449 | return err; |
452 | } | 450 | } |
453 | 451 | ||
454 | module_init(aes_init); | 452 | module_cpu_feature_match(AES, aes_init); |
455 | module_exit(aes_exit); | 453 | module_exit(aes_exit); |
diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c
index e1566bec1016..1b0e0e86ee9c 100644
--- a/arch/arm/crypto/crc32-ce-glue.c
+++ b/arch/arm/crypto/crc32-ce-glue.c
@@ -8,6 +8,7 @@ | |||
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/cpufeature.h> | ||
11 | #include <linux/crc32.h> | 12 | #include <linux/crc32.h> |
12 | #include <linux/init.h> | 13 | #include <linux/init.h> |
13 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
@@ -233,6 +234,11 @@ static void __exit crc32_pmull_mod_exit(void) | |||
233 | ARRAY_SIZE(crc32_pmull_algs)); | 234 | ARRAY_SIZE(crc32_pmull_algs)); |
234 | } | 235 | } |
235 | 236 | ||
237 | static const struct cpu_feature crc32_cpu_feature[] = { | ||
238 | { cpu_feature(CRC32) }, { cpu_feature(PMULL) }, { } | ||
239 | }; | ||
240 | MODULE_DEVICE_TABLE(cpu, crc32_cpu_feature); | ||
241 | |||
236 | module_init(crc32_pmull_mod_init); | 242 | module_init(crc32_pmull_mod_init); |
237 | module_exit(crc32_pmull_mod_exit); | 243 | module_exit(crc32_pmull_mod_exit); |
238 | 244 | ||
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index 7546b3c02466..6bac8bea9f1e 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -15,6 +15,7 @@ | |||
15 | #include <crypto/cryptd.h> | 15 | #include <crypto/cryptd.h> |
16 | #include <crypto/internal/hash.h> | 16 | #include <crypto/internal/hash.h> |
17 | #include <crypto/gf128mul.h> | 17 | #include <crypto/gf128mul.h> |
18 | #include <linux/cpufeature.h> | ||
18 | #include <linux/crypto.h> | 19 | #include <linux/crypto.h> |
19 | #include <linux/module.h> | 20 | #include <linux/module.h> |
20 | 21 | ||
@@ -311,9 +312,6 @@ static int __init ghash_ce_mod_init(void) | |||
311 | { | 312 | { |
312 | int err; | 313 | int err; |
313 | 314 | ||
314 | if (!(elf_hwcap2 & HWCAP2_PMULL)) | ||
315 | return -ENODEV; | ||
316 | |||
317 | err = crypto_register_shash(&ghash_alg); | 315 | err = crypto_register_shash(&ghash_alg); |
318 | if (err) | 316 | if (err) |
319 | return err; | 317 | return err; |
@@ -334,5 +332,5 @@ static void __exit ghash_ce_mod_exit(void) | |||
334 | crypto_unregister_shash(&ghash_alg); | 332 | crypto_unregister_shash(&ghash_alg); |
335 | } | 333 | } |
336 | 334 | ||
337 | module_init(ghash_ce_mod_init); | 335 | module_cpu_feature_match(PMULL, ghash_ce_mod_init); |
338 | module_exit(ghash_ce_mod_exit); | 336 | module_exit(ghash_ce_mod_exit); |
diff --git a/arch/arm/crypto/sha1-ce-glue.c b/arch/arm/crypto/sha1-ce-glue.c
index 80bc2fcd241a..555f72b5e659 100644
--- a/arch/arm/crypto/sha1-ce-glue.c
+++ b/arch/arm/crypto/sha1-ce-glue.c
@@ -11,6 +11,7 @@ | |||
11 | #include <crypto/internal/hash.h> | 11 | #include <crypto/internal/hash.h> |
12 | #include <crypto/sha.h> | 12 | #include <crypto/sha.h> |
13 | #include <crypto/sha1_base.h> | 13 | #include <crypto/sha1_base.h> |
14 | #include <linux/cpufeature.h> | ||
14 | #include <linux/crypto.h> | 15 | #include <linux/crypto.h> |
15 | #include <linux/module.h> | 16 | #include <linux/module.h> |
16 | 17 | ||
@@ -82,8 +83,6 @@ static struct shash_alg alg = { | |||
82 | 83 | ||
83 | static int __init sha1_ce_mod_init(void) | 84 | static int __init sha1_ce_mod_init(void) |
84 | { | 85 | { |
85 | if (!(elf_hwcap2 & HWCAP2_SHA1)) | ||
86 | return -ENODEV; | ||
87 | return crypto_register_shash(&alg); | 86 | return crypto_register_shash(&alg); |
88 | } | 87 | } |
89 | 88 | ||
@@ -92,5 +91,5 @@ static void __exit sha1_ce_mod_fini(void) | |||
92 | crypto_unregister_shash(&alg); | 91 | crypto_unregister_shash(&alg); |
93 | } | 92 | } |
94 | 93 | ||
95 | module_init(sha1_ce_mod_init); | 94 | module_cpu_feature_match(SHA1, sha1_ce_mod_init); |
96 | module_exit(sha1_ce_mod_fini); | 95 | module_exit(sha1_ce_mod_fini); |
diff --git a/arch/arm/crypto/sha2-ce-glue.c b/arch/arm/crypto/sha2-ce-glue.c
index 0755b2d657f3..df4dcef054ae 100644
--- a/arch/arm/crypto/sha2-ce-glue.c
+++ b/arch/arm/crypto/sha2-ce-glue.c
@@ -11,6 +11,7 @@ | |||
11 | #include <crypto/internal/hash.h> | 11 | #include <crypto/internal/hash.h> |
12 | #include <crypto/sha.h> | 12 | #include <crypto/sha.h> |
13 | #include <crypto/sha256_base.h> | 13 | #include <crypto/sha256_base.h> |
14 | #include <linux/cpufeature.h> | ||
14 | #include <linux/crypto.h> | 15 | #include <linux/crypto.h> |
15 | #include <linux/module.h> | 16 | #include <linux/module.h> |
16 | 17 | ||
@@ -100,8 +101,6 @@ static struct shash_alg algs[] = { { | |||
100 | 101 | ||
101 | static int __init sha2_ce_mod_init(void) | 102 | static int __init sha2_ce_mod_init(void) |
102 | { | 103 | { |
103 | if (!(elf_hwcap2 & HWCAP2_SHA2)) | ||
104 | return -ENODEV; | ||
105 | return crypto_register_shashes(algs, ARRAY_SIZE(algs)); | 104 | return crypto_register_shashes(algs, ARRAY_SIZE(algs)); |
106 | } | 105 | } |
107 | 106 | ||
@@ -110,5 +109,5 @@ static void __exit sha2_ce_mod_fini(void) | |||
110 | crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); | 109 | crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); |
111 | } | 110 | } |
112 | 111 | ||
113 | module_init(sha2_ce_mod_init); | 112 | module_cpu_feature_match(SHA2, sha2_ce_mod_init); |
114 | module_exit(sha2_ce_mod_fini); | 113 | module_exit(sha2_ce_mod_fini); |
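The arm/crypto glue changes above all drop the open-coded elf_hwcap2 tests in favour of the <linux/cpufeature.h> helpers: module_cpu_feature_match() (or, for the crc32 driver, an explicit cpu_feature device table) both gates initialisation on the CPU extension and emits the modalias used for automatic module loading. A rough sketch of the resulting shape, abridged from the sha1-ce case with the shash_alg definition elided:

    #include <crypto/internal/hash.h>
    #include <linux/cpufeature.h>
    #include <linux/module.h>

    static struct shash_alg alg = {
        /* ... SHA-1 shash implementation elided ... */
    };

    static int __init sha1_ce_mod_init(void)
    {
        /* no manual elf_hwcap2 / HWCAP2_SHA1 check needed any more */
        return crypto_register_shash(&alg);
    }

    static void __exit sha1_ce_mod_fini(void)
    {
        crypto_unregister_shash(&alg);
    }

    /* emits the cpu feature table and only runs the init on capable CPUs */
    module_cpu_feature_match(SHA1, sha1_ce_mod_init);
    module_exit(sha1_ce_mod_fini);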
diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
index c98e7e849f06..8550408735a0 100644
--- a/arch/arm64/crypto/sha1-ce-core.S
+++ b/arch/arm64/crypto/sha1-ce-core.S
@@ -82,7 +82,8 @@ ENTRY(sha1_ce_transform) | |||
82 | ldr dgb, [x0, #16] | 82 | ldr dgb, [x0, #16] |
83 | 83 | ||
84 | /* load sha1_ce_state::finalize */ | 84 | /* load sha1_ce_state::finalize */ |
85 | ldr w4, [x0, #:lo12:sha1_ce_offsetof_finalize] | 85 | ldr_l w4, sha1_ce_offsetof_finalize, x4 |
86 | ldr w4, [x0, x4] | ||
86 | 87 | ||
87 | /* load input */ | 88 | /* load input */ |
88 | 0: ld1 {v8.4s-v11.4s}, [x1], #64 | 89 | 0: ld1 {v8.4s-v11.4s}, [x1], #64 |
@@ -132,7 +133,8 @@ CPU_LE( rev32 v11.16b, v11.16b ) | |||
132 | * the padding is handled by the C code in that case. | 133 | * the padding is handled by the C code in that case. |
133 | */ | 134 | */ |
134 | cbz x4, 3f | 135 | cbz x4, 3f |
135 | ldr x4, [x0, #:lo12:sha1_ce_offsetof_count] | 136 | ldr_l w4, sha1_ce_offsetof_count, x4 |
137 | ldr x4, [x0, x4] | ||
136 | movi v9.2d, #0 | 138 | movi v9.2d, #0 |
137 | mov x8, #0x80000000 | 139 | mov x8, #0x80000000 |
138 | movi v10.2d, #0 | 140 | movi v10.2d, #0 |
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index aefda9868627..ea319c055f5d 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -17,9 +17,6 @@ | |||
17 | #include <linux/crypto.h> | 17 | #include <linux/crypto.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | 19 | ||
20 | #define ASM_EXPORT(sym, val) \ | ||
21 | asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val)); | ||
22 | |||
23 | MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions"); | 20 | MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions"); |
24 | MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); | 21 | MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); |
25 | MODULE_LICENSE("GPL v2"); | 22 | MODULE_LICENSE("GPL v2"); |
@@ -32,6 +29,9 @@ struct sha1_ce_state { | |||
32 | asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src, | 29 | asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src, |
33 | int blocks); | 30 | int blocks); |
34 | 31 | ||
32 | const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count); | ||
33 | const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize); | ||
34 | |||
35 | static int sha1_ce_update(struct shash_desc *desc, const u8 *data, | 35 | static int sha1_ce_update(struct shash_desc *desc, const u8 *data, |
36 | unsigned int len) | 36 | unsigned int len) |
37 | { | 37 | { |
@@ -52,11 +52,6 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data, | |||
52 | struct sha1_ce_state *sctx = shash_desc_ctx(desc); | 52 | struct sha1_ce_state *sctx = shash_desc_ctx(desc); |
53 | bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE); | 53 | bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE); |
54 | 54 | ||
55 | ASM_EXPORT(sha1_ce_offsetof_count, | ||
56 | offsetof(struct sha1_ce_state, sst.count)); | ||
57 | ASM_EXPORT(sha1_ce_offsetof_finalize, | ||
58 | offsetof(struct sha1_ce_state, finalize)); | ||
59 | |||
60 | /* | 55 | /* |
61 | * Allow the asm code to perform the finalization if there is no | 56 | * Allow the asm code to perform the finalization if there is no |
62 | * partial data and the input is a round multiple of the block size. | 57 | * partial data and the input is a round multiple of the block size. |
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
index 01cfee066837..679c6c002f4f 100644
--- a/arch/arm64/crypto/sha2-ce-core.S
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -88,7 +88,8 @@ ENTRY(sha2_ce_transform) | |||
88 | ld1 {dgav.4s, dgbv.4s}, [x0] | 88 | ld1 {dgav.4s, dgbv.4s}, [x0] |
89 | 89 | ||
90 | /* load sha256_ce_state::finalize */ | 90 | /* load sha256_ce_state::finalize */ |
91 | ldr w4, [x0, #:lo12:sha256_ce_offsetof_finalize] | 91 | ldr_l w4, sha256_ce_offsetof_finalize, x4 |
92 | ldr w4, [x0, x4] | ||
92 | 93 | ||
93 | /* load input */ | 94 | /* load input */ |
94 | 0: ld1 {v16.4s-v19.4s}, [x1], #64 | 95 | 0: ld1 {v16.4s-v19.4s}, [x1], #64 |
@@ -136,7 +137,8 @@ CPU_LE( rev32 v19.16b, v19.16b ) | |||
136 | * the padding is handled by the C code in that case. | 137 | * the padding is handled by the C code in that case. |
137 | */ | 138 | */ |
138 | cbz x4, 3f | 139 | cbz x4, 3f |
139 | ldr x4, [x0, #:lo12:sha256_ce_offsetof_count] | 140 | ldr_l w4, sha256_ce_offsetof_count, x4 |
141 | ldr x4, [x0, x4] | ||
140 | movi v17.2d, #0 | 142 | movi v17.2d, #0 |
141 | mov x8, #0x80000000 | 143 | mov x8, #0x80000000 |
142 | movi v18.2d, #0 | 144 | movi v18.2d, #0 |
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 7cd587564a41..0ed9486f75dd 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -17,9 +17,6 @@ | |||
17 | #include <linux/crypto.h> | 17 | #include <linux/crypto.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | 19 | ||
20 | #define ASM_EXPORT(sym, val) \ | ||
21 | asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val)); | ||
22 | |||
23 | MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions"); | 20 | MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions"); |
24 | MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); | 21 | MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); |
25 | MODULE_LICENSE("GPL v2"); | 22 | MODULE_LICENSE("GPL v2"); |
@@ -32,6 +29,11 @@ struct sha256_ce_state { | |||
32 | asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src, | 29 | asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src, |
33 | int blocks); | 30 | int blocks); |
34 | 31 | ||
32 | const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state, | ||
33 | sst.count); | ||
34 | const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state, | ||
35 | finalize); | ||
36 | |||
35 | static int sha256_ce_update(struct shash_desc *desc, const u8 *data, | 37 | static int sha256_ce_update(struct shash_desc *desc, const u8 *data, |
36 | unsigned int len) | 38 | unsigned int len) |
37 | { | 39 | { |
@@ -52,11 +54,6 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data, | |||
52 | struct sha256_ce_state *sctx = shash_desc_ctx(desc); | 54 | struct sha256_ce_state *sctx = shash_desc_ctx(desc); |
53 | bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE); | 55 | bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE); |
54 | 56 | ||
55 | ASM_EXPORT(sha256_ce_offsetof_count, | ||
56 | offsetof(struct sha256_ce_state, sst.count)); | ||
57 | ASM_EXPORT(sha256_ce_offsetof_finalize, | ||
58 | offsetof(struct sha256_ce_state, finalize)); | ||
59 | |||
60 | /* | 57 | /* |
61 | * Allow the asm code to perform the finalization if there is no | 58 | * Allow the asm code to perform the finalization if there is no |
62 | * partial data and the input is a round multiple of the block size. | 59 | * partial data and the input is a round multiple of the block size. |
diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
index 910565547163..8739cf7795de 100644
--- a/arch/x86/crypto/aes-x86_64-asm_64.S
+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
@@ -42,17 +42,15 @@ | |||
42 | #define R5E %esi | 42 | #define R5E %esi |
43 | #define R6 %rdi | 43 | #define R6 %rdi |
44 | #define R6E %edi | 44 | #define R6E %edi |
45 | #define R7 %rbp | 45 | #define R7 %r9 /* don't use %rbp; it breaks stack traces */ |
46 | #define R7E %ebp | 46 | #define R7E %r9d |
47 | #define R8 %r8 | 47 | #define R8 %r8 |
48 | #define R9 %r9 | ||
49 | #define R10 %r10 | 48 | #define R10 %r10 |
50 | #define R11 %r11 | 49 | #define R11 %r11 |
51 | 50 | ||
52 | #define prologue(FUNC,KEY,B128,B192,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11) \ | 51 | #define prologue(FUNC,KEY,B128,B192,r1,r2,r5,r6,r7,r8,r9,r10,r11) \ |
53 | ENTRY(FUNC); \ | 52 | ENTRY(FUNC); \ |
54 | movq r1,r2; \ | 53 | movq r1,r2; \ |
55 | movq r3,r4; \ | ||
56 | leaq KEY+48(r8),r9; \ | 54 | leaq KEY+48(r8),r9; \ |
57 | movq r10,r11; \ | 55 | movq r10,r11; \ |
58 | movl (r7),r5 ## E; \ | 56 | movl (r7),r5 ## E; \ |
@@ -70,9 +68,8 @@ | |||
70 | je B192; \ | 68 | je B192; \ |
71 | leaq 32(r9),r9; | 69 | leaq 32(r9),r9; |
72 | 70 | ||
73 | #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \ | 71 | #define epilogue(FUNC,r1,r2,r5,r6,r7,r8,r9) \ |
74 | movq r1,r2; \ | 72 | movq r1,r2; \ |
75 | movq r3,r4; \ | ||
76 | movl r5 ## E,(r9); \ | 73 | movl r5 ## E,(r9); \ |
77 | movl r6 ## E,4(r9); \ | 74 | movl r6 ## E,4(r9); \ |
78 | movl r7 ## E,8(r9); \ | 75 | movl r7 ## E,8(r9); \ |
@@ -88,12 +85,12 @@ | |||
88 | movl TAB(,r6,4),r6 ## E; \ | 85 | movl TAB(,r6,4),r6 ## E; \ |
89 | roll $16,r2 ## E; \ | 86 | roll $16,r2 ## E; \ |
90 | shrl $16,r4 ## E; \ | 87 | shrl $16,r4 ## E; \ |
91 | movzbl r4 ## H,r7 ## E; \ | 88 | movzbl r4 ## L,r7 ## E; \ |
92 | movzbl r4 ## L,r4 ## E; \ | 89 | movzbl r4 ## H,r4 ## E; \ |
93 | xorl OFFSET(r8),ra ## E; \ | 90 | xorl OFFSET(r8),ra ## E; \ |
94 | xorl OFFSET+4(r8),rb ## E; \ | 91 | xorl OFFSET+4(r8),rb ## E; \ |
95 | xorl TAB+3072(,r7,4),r5 ## E;\ | 92 | xorl TAB+3072(,r4,4),r5 ## E;\ |
96 | xorl TAB+2048(,r4,4),r6 ## E;\ | 93 | xorl TAB+2048(,r7,4),r6 ## E;\ |
97 | movzbl r1 ## L,r7 ## E; \ | 94 | movzbl r1 ## L,r7 ## E; \ |
98 | movzbl r1 ## H,r4 ## E; \ | 95 | movzbl r1 ## H,r4 ## E; \ |
99 | movl TAB+1024(,r4,4),r4 ## E;\ | 96 | movl TAB+1024(,r4,4),r4 ## E;\ |
@@ -101,19 +98,19 @@ | |||
101 | roll $16,r1 ## E; \ | 98 | roll $16,r1 ## E; \ |
102 | shrl $16,r3 ## E; \ | 99 | shrl $16,r3 ## E; \ |
103 | xorl TAB(,r7,4),r5 ## E; \ | 100 | xorl TAB(,r7,4),r5 ## E; \ |
104 | movzbl r3 ## H,r7 ## E; \ | 101 | movzbl r3 ## L,r7 ## E; \ |
105 | movzbl r3 ## L,r3 ## E; \ | 102 | movzbl r3 ## H,r3 ## E; \ |
106 | xorl TAB+3072(,r7,4),r4 ## E;\ | 103 | xorl TAB+3072(,r3,4),r4 ## E;\ |
107 | xorl TAB+2048(,r3,4),r5 ## E;\ | 104 | xorl TAB+2048(,r7,4),r5 ## E;\ |
108 | movzbl r1 ## H,r7 ## E; \ | 105 | movzbl r1 ## L,r7 ## E; \ |
109 | movzbl r1 ## L,r3 ## E; \ | 106 | movzbl r1 ## H,r3 ## E; \ |
110 | shrl $16,r1 ## E; \ | 107 | shrl $16,r1 ## E; \ |
111 | xorl TAB+3072(,r7,4),r6 ## E;\ | 108 | xorl TAB+3072(,r3,4),r6 ## E;\ |
112 | movl TAB+2048(,r3,4),r3 ## E;\ | 109 | movl TAB+2048(,r7,4),r3 ## E;\ |
113 | movzbl r1 ## H,r7 ## E; \ | 110 | movzbl r1 ## L,r7 ## E; \ |
114 | movzbl r1 ## L,r1 ## E; \ | 111 | movzbl r1 ## H,r1 ## E; \ |
115 | xorl TAB+1024(,r7,4),r6 ## E;\ | 112 | xorl TAB+1024(,r1,4),r6 ## E;\ |
116 | xorl TAB(,r1,4),r3 ## E; \ | 113 | xorl TAB(,r7,4),r3 ## E; \ |
117 | movzbl r2 ## H,r1 ## E; \ | 114 | movzbl r2 ## H,r1 ## E; \ |
118 | movzbl r2 ## L,r7 ## E; \ | 115 | movzbl r2 ## L,r7 ## E; \ |
119 | shrl $16,r2 ## E; \ | 116 | shrl $16,r2 ## E; \ |
@@ -131,9 +128,9 @@ | |||
131 | movl r4 ## E,r2 ## E; | 128 | movl r4 ## E,r2 ## E; |
132 | 129 | ||
133 | #define entry(FUNC,KEY,B128,B192) \ | 130 | #define entry(FUNC,KEY,B128,B192) \ |
134 | prologue(FUNC,KEY,B128,B192,R2,R8,R7,R9,R1,R3,R4,R6,R10,R5,R11) | 131 | prologue(FUNC,KEY,B128,B192,R2,R8,R1,R3,R4,R6,R10,R5,R11) |
135 | 132 | ||
136 | #define return(FUNC) epilogue(FUNC,R8,R2,R9,R7,R5,R6,R3,R4,R11) | 133 | #define return(FUNC) epilogue(FUNC,R8,R2,R5,R6,R3,R4,R11) |
137 | 134 | ||
138 | #define encrypt_round(TAB,OFFSET) \ | 135 | #define encrypt_round(TAB,OFFSET) \ |
139 | round(TAB,OFFSET,R1,R2,R3,R4,R5,R6,R7,R10,R5,R6,R3,R4) \ | 136 | round(TAB,OFFSET,R1,R2,R3,R4,R5,R6,R7,R10,R5,R6,R3,R4) \ |
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 3c465184ff8a..16627fec80b2 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -89,6 +89,29 @@ SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100 | |||
89 | ALL_F: .octa 0xffffffffffffffffffffffffffffffff | 89 | ALL_F: .octa 0xffffffffffffffffffffffffffffffff |
90 | .octa 0x00000000000000000000000000000000 | 90 | .octa 0x00000000000000000000000000000000 |
91 | 91 | ||
92 | .section .rodata | ||
93 | .align 16 | ||
94 | .type aad_shift_arr, @object | ||
95 | .size aad_shift_arr, 272 | ||
96 | aad_shift_arr: | ||
97 | .octa 0xffffffffffffffffffffffffffffffff | ||
98 | .octa 0xffffffffffffffffffffffffffffff0C | ||
99 | .octa 0xffffffffffffffffffffffffffff0D0C | ||
100 | .octa 0xffffffffffffffffffffffffff0E0D0C | ||
101 | .octa 0xffffffffffffffffffffffff0F0E0D0C | ||
102 | .octa 0xffffffffffffffffffffff0C0B0A0908 | ||
103 | .octa 0xffffffffffffffffffff0D0C0B0A0908 | ||
104 | .octa 0xffffffffffffffffff0E0D0C0B0A0908 | ||
105 | .octa 0xffffffffffffffff0F0E0D0C0B0A0908 | ||
106 | .octa 0xffffffffffffff0C0B0A090807060504 | ||
107 | .octa 0xffffffffffff0D0C0B0A090807060504 | ||
108 | .octa 0xffffffffff0E0D0C0B0A090807060504 | ||
109 | .octa 0xffffffff0F0E0D0C0B0A090807060504 | ||
110 | .octa 0xffffff0C0B0A09080706050403020100 | ||
111 | .octa 0xffff0D0C0B0A09080706050403020100 | ||
112 | .octa 0xff0E0D0C0B0A09080706050403020100 | ||
113 | .octa 0x0F0E0D0C0B0A09080706050403020100 | ||
114 | |||
92 | 115 | ||
93 | .text | 116 | .text |
94 | 117 | ||
@@ -252,32 +275,66 @@ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation | |||
252 | mov arg8, %r12 # %r12 = aadLen | 275 | mov arg8, %r12 # %r12 = aadLen |
253 | mov %r12, %r11 | 276 | mov %r12, %r11 |
254 | pxor %xmm\i, %xmm\i | 277 | pxor %xmm\i, %xmm\i |
278 | pxor \XMM2, \XMM2 | ||
255 | 279 | ||
256 | _get_AAD_loop\num_initial_blocks\operation: | 280 | cmp $16, %r11 |
257 | movd (%r10), \TMP1 | 281 | jl _get_AAD_rest8\num_initial_blocks\operation |
258 | pslldq $12, \TMP1 | 282 | _get_AAD_blocks\num_initial_blocks\operation: |
259 | psrldq $4, %xmm\i | 283 | movdqu (%r10), %xmm\i |
284 | PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data | ||
285 | pxor %xmm\i, \XMM2 | ||
286 | GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 | ||
287 | add $16, %r10 | ||
288 | sub $16, %r12 | ||
289 | sub $16, %r11 | ||
290 | cmp $16, %r11 | ||
291 | jge _get_AAD_blocks\num_initial_blocks\operation | ||
292 | |||
293 | movdqu \XMM2, %xmm\i | ||
294 | cmp $0, %r11 | ||
295 | je _get_AAD_done\num_initial_blocks\operation | ||
296 | |||
297 | pxor %xmm\i,%xmm\i | ||
298 | |||
299 | /* read the last <16B of AAD. since we have at least 4B of | ||
300 | data right after the AAD (the ICV, and maybe some CT), we can | ||
301 | read 4B/8B blocks safely, and then get rid of the extra stuff */ | ||
302 | _get_AAD_rest8\num_initial_blocks\operation: | ||
303 | cmp $4, %r11 | ||
304 | jle _get_AAD_rest4\num_initial_blocks\operation | ||
305 | movq (%r10), \TMP1 | ||
306 | add $8, %r10 | ||
307 | sub $8, %r11 | ||
308 | pslldq $8, \TMP1 | ||
309 | psrldq $8, %xmm\i | ||
260 | pxor \TMP1, %xmm\i | 310 | pxor \TMP1, %xmm\i |
311 | jmp _get_AAD_rest8\num_initial_blocks\operation | ||
312 | _get_AAD_rest4\num_initial_blocks\operation: | ||
313 | cmp $0, %r11 | ||
314 | jle _get_AAD_rest0\num_initial_blocks\operation | ||
315 | mov (%r10), %eax | ||
316 | movq %rax, \TMP1 | ||
261 | add $4, %r10 | 317 | add $4, %r10 |
262 | sub $4, %r12 | 318 | sub $4, %r10 |
263 | jne _get_AAD_loop\num_initial_blocks\operation | 319 | pslldq $12, \TMP1 |
264 | |||
265 | cmp $16, %r11 | ||
266 | je _get_AAD_loop2_done\num_initial_blocks\operation | ||
267 | |||
268 | mov $16, %r12 | ||
269 | _get_AAD_loop2\num_initial_blocks\operation: | ||
270 | psrldq $4, %xmm\i | 320 | psrldq $4, %xmm\i |
271 | sub $4, %r12 | 321 | pxor \TMP1, %xmm\i |
272 | cmp %r11, %r12 | 322 | _get_AAD_rest0\num_initial_blocks\operation: |
273 | jne _get_AAD_loop2\num_initial_blocks\operation | 323 | /* finalize: shift out the extra bytes we read, and align |
274 | 324 | left. since pslldq can only shift by an immediate, we use | |
275 | _get_AAD_loop2_done\num_initial_blocks\operation: | 325 | vpshufb and an array of shuffle masks */ |
326 | movq %r12, %r11 | ||
327 | salq $4, %r11 | ||
328 | movdqu aad_shift_arr(%r11), \TMP1 | ||
329 | PSHUFB_XMM \TMP1, %xmm\i | ||
330 | _get_AAD_rest_final\num_initial_blocks\operation: | ||
276 | PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data | 331 | PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data |
332 | pxor \XMM2, %xmm\i | ||
333 | GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 | ||
277 | 334 | ||
335 | _get_AAD_done\num_initial_blocks\operation: | ||
278 | xor %r11, %r11 # initialise the data pointer offset as zero | 336 | xor %r11, %r11 # initialise the data pointer offset as zero |
279 | 337 | # start AES for num_initial_blocks blocks | |
280 | # start AES for num_initial_blocks blocks | ||
281 | 338 | ||
282 | mov %arg5, %rax # %rax = *Y0 | 339 | mov %arg5, %rax # %rax = *Y0 |
283 | movdqu (%rax), \XMM0 # XMM0 = Y0 | 340 | movdqu (%rax), \XMM0 # XMM0 = Y0 |
@@ -322,7 +379,7 @@ aes_loop_initial_dec\num_initial_blocks: | |||
322 | # prepare plaintext/ciphertext for GHASH computation | 379 | # prepare plaintext/ciphertext for GHASH computation |
323 | .endr | 380 | .endr |
324 | .endif | 381 | .endif |
325 | GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 | 382 | |
326 | # apply GHASH on num_initial_blocks blocks | 383 | # apply GHASH on num_initial_blocks blocks |
327 | 384 | ||
328 | .if \i == 5 | 385 | .if \i == 5 |
@@ -477,28 +534,66 @@ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation | |||
477 | mov arg8, %r12 # %r12 = aadLen | 534 | mov arg8, %r12 # %r12 = aadLen |
478 | mov %r12, %r11 | 535 | mov %r12, %r11 |
479 | pxor %xmm\i, %xmm\i | 536 | pxor %xmm\i, %xmm\i |
480 | _get_AAD_loop\num_initial_blocks\operation: | 537 | pxor \XMM2, \XMM2 |
481 | movd (%r10), \TMP1 | 538 | |
482 | pslldq $12, \TMP1 | 539 | cmp $16, %r11 |
483 | psrldq $4, %xmm\i | 540 | jl _get_AAD_rest8\num_initial_blocks\operation |
541 | _get_AAD_blocks\num_initial_blocks\operation: | ||
542 | movdqu (%r10), %xmm\i | ||
543 | PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data | ||
544 | pxor %xmm\i, \XMM2 | ||
545 | GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 | ||
546 | add $16, %r10 | ||
547 | sub $16, %r12 | ||
548 | sub $16, %r11 | ||
549 | cmp $16, %r11 | ||
550 | jge _get_AAD_blocks\num_initial_blocks\operation | ||
551 | |||
552 | movdqu \XMM2, %xmm\i | ||
553 | cmp $0, %r11 | ||
554 | je _get_AAD_done\num_initial_blocks\operation | ||
555 | |||
556 | pxor %xmm\i,%xmm\i | ||
557 | |||
558 | /* read the last <16B of AAD. since we have at least 4B of | ||
559 | data right after the AAD (the ICV, and maybe some PT), we can | ||
560 | read 4B/8B blocks safely, and then get rid of the extra stuff */ | ||
561 | _get_AAD_rest8\num_initial_blocks\operation: | ||
562 | cmp $4, %r11 | ||
563 | jle _get_AAD_rest4\num_initial_blocks\operation | ||
564 | movq (%r10), \TMP1 | ||
565 | add $8, %r10 | ||
566 | sub $8, %r11 | ||
567 | pslldq $8, \TMP1 | ||
568 | psrldq $8, %xmm\i | ||
484 | pxor \TMP1, %xmm\i | 569 | pxor \TMP1, %xmm\i |
570 | jmp _get_AAD_rest8\num_initial_blocks\operation | ||
571 | _get_AAD_rest4\num_initial_blocks\operation: | ||
572 | cmp $0, %r11 | ||
573 | jle _get_AAD_rest0\num_initial_blocks\operation | ||
574 | mov (%r10), %eax | ||
575 | movq %rax, \TMP1 | ||
485 | add $4, %r10 | 576 | add $4, %r10 |
486 | sub $4, %r12 | 577 | sub $4, %r10 |
487 | jne _get_AAD_loop\num_initial_blocks\operation | 578 | pslldq $12, \TMP1 |
488 | cmp $16, %r11 | ||
489 | je _get_AAD_loop2_done\num_initial_blocks\operation | ||
490 | mov $16, %r12 | ||
491 | _get_AAD_loop2\num_initial_blocks\operation: | ||
492 | psrldq $4, %xmm\i | 579 | psrldq $4, %xmm\i |
493 | sub $4, %r12 | 580 | pxor \TMP1, %xmm\i |
494 | cmp %r11, %r12 | 581 | _get_AAD_rest0\num_initial_blocks\operation: |
495 | jne _get_AAD_loop2\num_initial_blocks\operation | 582 | /* finalize: shift out the extra bytes we read, and align |
496 | _get_AAD_loop2_done\num_initial_blocks\operation: | 583 | left. since pslldq can only shift by an immediate, we use |
584 | vpshufb and an array of shuffle masks */ | ||
585 | movq %r12, %r11 | ||
586 | salq $4, %r11 | ||
587 | movdqu aad_shift_arr(%r11), \TMP1 | ||
588 | PSHUFB_XMM \TMP1, %xmm\i | ||
589 | _get_AAD_rest_final\num_initial_blocks\operation: | ||
497 | PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data | 590 | PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data |
591 | pxor \XMM2, %xmm\i | ||
592 | GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 | ||
498 | 593 | ||
594 | _get_AAD_done\num_initial_blocks\operation: | ||
499 | xor %r11, %r11 # initialise the data pointer offset as zero | 595 | xor %r11, %r11 # initialise the data pointer offset as zero |
500 | 596 | # start AES for num_initial_blocks blocks | |
501 | # start AES for num_initial_blocks blocks | ||
502 | 597 | ||
503 | mov %arg5, %rax # %rax = *Y0 | 598 | mov %arg5, %rax # %rax = *Y0 |
504 | movdqu (%rax), \XMM0 # XMM0 = Y0 | 599 | movdqu (%rax), \XMM0 # XMM0 = Y0 |
@@ -543,7 +638,7 @@ aes_loop_initial_enc\num_initial_blocks: | |||
543 | # prepare plaintext/ciphertext for GHASH computation | 638 | # prepare plaintext/ciphertext for GHASH computation |
544 | .endr | 639 | .endr |
545 | .endif | 640 | .endif |
546 | GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 | 641 | |
547 | # apply GHASH on num_initial_blocks blocks | 642 | # apply GHASH on num_initial_blocks blocks |
548 | 643 | ||
549 | .if \i == 5 | 644 | .if \i == 5 |
@@ -1454,18 +1549,35 @@ _return_T_decrypt: | |||
1454 | mov arg10, %r11 # %r11 = auth_tag_len | 1549 | mov arg10, %r11 # %r11 = auth_tag_len |
1455 | cmp $16, %r11 | 1550 | cmp $16, %r11 |
1456 | je _T_16_decrypt | 1551 | je _T_16_decrypt |
1457 | cmp $12, %r11 | 1552 | cmp $8, %r11 |
1458 | je _T_12_decrypt | 1553 | jl _T_4_decrypt |
1459 | _T_8_decrypt: | 1554 | _T_8_decrypt: |
1460 | MOVQ_R64_XMM %xmm0, %rax | 1555 | MOVQ_R64_XMM %xmm0, %rax |
1461 | mov %rax, (%r10) | 1556 | mov %rax, (%r10) |
1462 | jmp _return_T_done_decrypt | 1557 | add $8, %r10 |
1463 | _T_12_decrypt: | 1558 | sub $8, %r11 |
1464 | MOVQ_R64_XMM %xmm0, %rax | ||
1465 | mov %rax, (%r10) | ||
1466 | psrldq $8, %xmm0 | 1559 | psrldq $8, %xmm0 |
1560 | cmp $0, %r11 | ||
1561 | je _return_T_done_decrypt | ||
1562 | _T_4_decrypt: | ||
1563 | movd %xmm0, %eax | ||
1564 | mov %eax, (%r10) | ||
1565 | add $4, %r10 | ||
1566 | sub $4, %r11 | ||
1567 | psrldq $4, %xmm0 | ||
1568 | cmp $0, %r11 | ||
1569 | je _return_T_done_decrypt | ||
1570 | _T_123_decrypt: | ||
1467 | movd %xmm0, %eax | 1571 | movd %xmm0, %eax |
1468 | mov %eax, 8(%r10) | 1572 | cmp $2, %r11 |
1573 | jl _T_1_decrypt | ||
1574 | mov %ax, (%r10) | ||
1575 | cmp $2, %r11 | ||
1576 | je _return_T_done_decrypt | ||
1577 | add $2, %r10 | ||
1578 | sar $16, %eax | ||
1579 | _T_1_decrypt: | ||
1580 | mov %al, (%r10) | ||
1469 | jmp _return_T_done_decrypt | 1581 | jmp _return_T_done_decrypt |
1470 | _T_16_decrypt: | 1582 | _T_16_decrypt: |
1471 | movdqu %xmm0, (%r10) | 1583 | movdqu %xmm0, (%r10) |
@@ -1718,18 +1830,35 @@ _return_T_encrypt: | |||
1718 | mov arg10, %r11 # %r11 = auth_tag_len | 1830 | mov arg10, %r11 # %r11 = auth_tag_len |
1719 | cmp $16, %r11 | 1831 | cmp $16, %r11 |
1720 | je _T_16_encrypt | 1832 | je _T_16_encrypt |
1721 | cmp $12, %r11 | 1833 | cmp $8, %r11 |
1722 | je _T_12_encrypt | 1834 | jl _T_4_encrypt |
1723 | _T_8_encrypt: | 1835 | _T_8_encrypt: |
1724 | MOVQ_R64_XMM %xmm0, %rax | 1836 | MOVQ_R64_XMM %xmm0, %rax |
1725 | mov %rax, (%r10) | 1837 | mov %rax, (%r10) |
1726 | jmp _return_T_done_encrypt | 1838 | add $8, %r10 |
1727 | _T_12_encrypt: | 1839 | sub $8, %r11 |
1728 | MOVQ_R64_XMM %xmm0, %rax | ||
1729 | mov %rax, (%r10) | ||
1730 | psrldq $8, %xmm0 | 1840 | psrldq $8, %xmm0 |
1841 | cmp $0, %r11 | ||
1842 | je _return_T_done_encrypt | ||
1843 | _T_4_encrypt: | ||
1844 | movd %xmm0, %eax | ||
1845 | mov %eax, (%r10) | ||
1846 | add $4, %r10 | ||
1847 | sub $4, %r11 | ||
1848 | psrldq $4, %xmm0 | ||
1849 | cmp $0, %r11 | ||
1850 | je _return_T_done_encrypt | ||
1851 | _T_123_encrypt: | ||
1731 | movd %xmm0, %eax | 1852 | movd %xmm0, %eax |
1732 | mov %eax, 8(%r10) | 1853 | cmp $2, %r11 |
1854 | jl _T_1_encrypt | ||
1855 | mov %ax, (%r10) | ||
1856 | cmp $2, %r11 | ||
1857 | je _return_T_done_encrypt | ||
1858 | add $2, %r10 | ||
1859 | sar $16, %eax | ||
1860 | _T_1_encrypt: | ||
1861 | mov %al, (%r10) | ||
1733 | jmp _return_T_done_encrypt | 1862 | jmp _return_T_done_encrypt |
1734 | _T_16_encrypt: | 1863 | _T_16_encrypt: |
1735 | movdqu %xmm0, (%r10) | 1864 | movdqu %xmm0, (%r10) |
diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index d664382c6e56..faecb1518bf8 100644
--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -155,6 +155,30 @@ SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100 | |||
155 | ALL_F: .octa 0xffffffffffffffffffffffffffffffff | 155 | ALL_F: .octa 0xffffffffffffffffffffffffffffffff |
156 | .octa 0x00000000000000000000000000000000 | 156 | .octa 0x00000000000000000000000000000000 |
157 | 157 | ||
158 | .section .rodata | ||
159 | .align 16 | ||
160 | .type aad_shift_arr, @object | ||
161 | .size aad_shift_arr, 272 | ||
162 | aad_shift_arr: | ||
163 | .octa 0xffffffffffffffffffffffffffffffff | ||
164 | .octa 0xffffffffffffffffffffffffffffff0C | ||
165 | .octa 0xffffffffffffffffffffffffffff0D0C | ||
166 | .octa 0xffffffffffffffffffffffffff0E0D0C | ||
167 | .octa 0xffffffffffffffffffffffff0F0E0D0C | ||
168 | .octa 0xffffffffffffffffffffff0C0B0A0908 | ||
169 | .octa 0xffffffffffffffffffff0D0C0B0A0908 | ||
170 | .octa 0xffffffffffffffffff0E0D0C0B0A0908 | ||
171 | .octa 0xffffffffffffffff0F0E0D0C0B0A0908 | ||
172 | .octa 0xffffffffffffff0C0B0A090807060504 | ||
173 | .octa 0xffffffffffff0D0C0B0A090807060504 | ||
174 | .octa 0xffffffffff0E0D0C0B0A090807060504 | ||
175 | .octa 0xffffffff0F0E0D0C0B0A090807060504 | ||
176 | .octa 0xffffff0C0B0A09080706050403020100 | ||
177 | .octa 0xffff0D0C0B0A09080706050403020100 | ||
178 | .octa 0xff0E0D0C0B0A09080706050403020100 | ||
179 | .octa 0x0F0E0D0C0B0A09080706050403020100 | ||
180 | |||
181 | |||
158 | .text | 182 | .text |
159 | 183 | ||
160 | 184 | ||
@@ -372,41 +396,72 @@ VARIABLE_OFFSET = 16*8 | |||
372 | 396 | ||
373 | .macro INITIAL_BLOCKS_AVX num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC | 397 | .macro INITIAL_BLOCKS_AVX num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC |
374 | i = (8-\num_initial_blocks) | 398 | i = (8-\num_initial_blocks) |
399 | j = 0 | ||
375 | setreg | 400 | setreg |
376 | 401 | ||
377 | mov arg6, %r10 # r10 = AAD | 402 | mov arg6, %r10 # r10 = AAD |
378 | mov arg7, %r12 # r12 = aadLen | 403 | mov arg7, %r12 # r12 = aadLen |
379 | 404 | ||
380 | 405 | ||
381 | mov %r12, %r11 | 406 | mov %r12, %r11 |
382 | 407 | ||
383 | vpxor reg_i, reg_i, reg_i | 408 | vpxor reg_j, reg_j, reg_j |
384 | _get_AAD_loop\@: | 409 | vpxor reg_i, reg_i, reg_i |
385 | vmovd (%r10), \T1 | 410 | cmp $16, %r11 |
386 | vpslldq $12, \T1, \T1 | 411 | jl _get_AAD_rest8\@ |
387 | vpsrldq $4, reg_i, reg_i | 412 | _get_AAD_blocks\@: |
388 | vpxor \T1, reg_i, reg_i | 413 | vmovdqu (%r10), reg_i |
389 | 414 | vpshufb SHUF_MASK(%rip), reg_i, reg_i | |
390 | add $4, %r10 | 415 | vpxor reg_i, reg_j, reg_j |
391 | sub $4, %r12 | 416 | GHASH_MUL_AVX reg_j, \T2, \T1, \T3, \T4, \T5, \T6 |
392 | jg _get_AAD_loop\@ | 417 | add $16, %r10 |
393 | 418 | sub $16, %r12 | |
394 | 419 | sub $16, %r11 | |
395 | cmp $16, %r11 | 420 | cmp $16, %r11 |
396 | je _get_AAD_loop2_done\@ | 421 | jge _get_AAD_blocks\@ |
397 | mov $16, %r12 | 422 | vmovdqu reg_j, reg_i |
398 | 423 | cmp $0, %r11 | |
399 | _get_AAD_loop2\@: | 424 | je _get_AAD_done\@ |
400 | vpsrldq $4, reg_i, reg_i | 425 | |
401 | sub $4, %r12 | 426 | vpxor reg_i, reg_i, reg_i |
402 | cmp %r11, %r12 | 427 | |
403 | jg _get_AAD_loop2\@ | 428 | /* read the last <16B of AAD. since we have at least 4B of |
404 | 429 | data right after the AAD (the ICV, and maybe some CT), we can | |
405 | _get_AAD_loop2_done\@: | 430 | read 4B/8B blocks safely, and then get rid of the extra stuff */ |
406 | 431 | _get_AAD_rest8\@: | |
407 | #byte-reflect the AAD data | 432 | cmp $4, %r11 |
408 | vpshufb SHUF_MASK(%rip), reg_i, reg_i | 433 | jle _get_AAD_rest4\@ |
409 | 434 | movq (%r10), \T1 | |
435 | add $8, %r10 | ||
436 | sub $8, %r11 | ||
437 | vpslldq $8, \T1, \T1 | ||
438 | vpsrldq $8, reg_i, reg_i | ||
439 | vpxor \T1, reg_i, reg_i | ||
440 | jmp _get_AAD_rest8\@ | ||
441 | _get_AAD_rest4\@: | ||
442 | cmp $0, %r11 | ||
443 | jle _get_AAD_rest0\@ | ||
444 | mov (%r10), %eax | ||
445 | movq %rax, \T1 | ||
446 | add $4, %r10 | ||
447 | sub $4, %r11 | ||
448 | vpslldq $12, \T1, \T1 | ||
449 | vpsrldq $4, reg_i, reg_i | ||
450 | vpxor \T1, reg_i, reg_i | ||
451 | _get_AAD_rest0\@: | ||
452 | /* finalize: shift out the extra bytes we read, and align | ||
453 | left. since pslldq can only shift by an immediate, we use | ||
454 | vpshufb and an array of shuffle masks */ | ||
455 | movq %r12, %r11 | ||
456 | salq $4, %r11 | ||
457 | movdqu aad_shift_arr(%r11), \T1 | ||
458 | vpshufb \T1, reg_i, reg_i | ||
459 | _get_AAD_rest_final\@: | ||
460 | vpshufb SHUF_MASK(%rip), reg_i, reg_i | ||
461 | vpxor reg_j, reg_i, reg_i | ||
462 | GHASH_MUL_AVX reg_i, \T2, \T1, \T3, \T4, \T5, \T6 | ||
463 | |||
464 | _get_AAD_done\@: | ||
410 | # initialize the data pointer offset as zero | 465 | # initialize the data pointer offset as zero |
411 | xor %r11, %r11 | 466 | xor %r11, %r11 |
412 | 467 | ||
@@ -480,7 +535,6 @@ _get_AAD_loop2_done\@: | |||
480 | i = (8-\num_initial_blocks) | 535 | i = (8-\num_initial_blocks) |
481 | j = (9-\num_initial_blocks) | 536 | j = (9-\num_initial_blocks) |
482 | setreg | 537 | setreg |
483 | GHASH_MUL_AVX reg_i, \T2, \T1, \T3, \T4, \T5, \T6 | ||
484 | 538 | ||
485 | .rep \num_initial_blocks | 539 | .rep \num_initial_blocks |
486 | vpxor reg_i, reg_j, reg_j | 540 | vpxor reg_i, reg_j, reg_j |
@@ -1427,19 +1481,36 @@ _return_T\@: | |||
1427 | cmp $16, %r11 | 1481 | cmp $16, %r11 |
1428 | je _T_16\@ | 1482 | je _T_16\@ |
1429 | 1483 | ||
1430 | cmp $12, %r11 | 1484 | cmp $8, %r11 |
1431 | je _T_12\@ | 1485 | jl _T_4\@ |
1432 | 1486 | ||
1433 | _T_8\@: | 1487 | _T_8\@: |
1434 | vmovq %xmm9, %rax | 1488 | vmovq %xmm9, %rax |
1435 | mov %rax, (%r10) | 1489 | mov %rax, (%r10) |
1436 | jmp _return_T_done\@ | 1490 | add $8, %r10 |
1437 | _T_12\@: | 1491 | sub $8, %r11 |
1438 | vmovq %xmm9, %rax | ||
1439 | mov %rax, (%r10) | ||
1440 | vpsrldq $8, %xmm9, %xmm9 | 1492 | vpsrldq $8, %xmm9, %xmm9 |
1493 | cmp $0, %r11 | ||
1494 | je _return_T_done\@ | ||
1495 | _T_4\@: | ||
1441 | vmovd %xmm9, %eax | 1496 | vmovd %xmm9, %eax |
1442 | mov %eax, 8(%r10) | 1497 | mov %eax, (%r10) |
1498 | add $4, %r10 | ||
1499 | sub $4, %r11 | ||
1500 | vpsrldq $4, %xmm9, %xmm9 | ||
1501 | cmp $0, %r11 | ||
1502 | je _return_T_done\@ | ||
1503 | _T_123\@: | ||
1504 | vmovd %xmm9, %eax | ||
1505 | cmp $2, %r11 | ||
1506 | jl _T_1\@ | ||
1507 | mov %ax, (%r10) | ||
1508 | cmp $2, %r11 | ||
1509 | je _return_T_done\@ | ||
1510 | add $2, %r10 | ||
1511 | sar $16, %eax | ||
1512 | _T_1\@: | ||
1513 | mov %al, (%r10) | ||
1443 | jmp _return_T_done\@ | 1514 | jmp _return_T_done\@ |
1444 | 1515 | ||
1445 | _T_16\@: | 1516 | _T_16\@: |
@@ -1631,41 +1702,73 @@ ENDPROC(aesni_gcm_dec_avx_gen2) | |||
1631 | 1702 | ||
1632 | .macro INITIAL_BLOCKS_AVX2 num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC VER | 1703 | .macro INITIAL_BLOCKS_AVX2 num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC VER |
1633 | i = (8-\num_initial_blocks) | 1704 | i = (8-\num_initial_blocks) |
1705 | j = 0 | ||
1634 | setreg | 1706 | setreg |
1635 | 1707 | ||
1636 | mov arg6, %r10 # r10 = AAD | 1708 | mov arg6, %r10 # r10 = AAD |
1637 | mov arg7, %r12 # r12 = aadLen | 1709 | mov arg7, %r12 # r12 = aadLen |
1638 | 1710 | ||
1639 | 1711 | ||
1640 | mov %r12, %r11 | 1712 | mov %r12, %r11 |
1641 | 1713 | ||
1642 | vpxor reg_i, reg_i, reg_i | 1714 | vpxor reg_j, reg_j, reg_j |
1643 | _get_AAD_loop\@: | 1715 | vpxor reg_i, reg_i, reg_i |
1644 | vmovd (%r10), \T1 | 1716 | |
1645 | vpslldq $12, \T1, \T1 | 1717 | cmp $16, %r11 |
1646 | vpsrldq $4, reg_i, reg_i | 1718 | jl _get_AAD_rest8\@ |
1647 | vpxor \T1, reg_i, reg_i | 1719 | _get_AAD_blocks\@: |
1648 | 1720 | vmovdqu (%r10), reg_i | |
1649 | add $4, %r10 | 1721 | vpshufb SHUF_MASK(%rip), reg_i, reg_i |
1650 | sub $4, %r12 | 1722 | vpxor reg_i, reg_j, reg_j |
1651 | jg _get_AAD_loop\@ | 1723 | GHASH_MUL_AVX2 reg_j, \T2, \T1, \T3, \T4, \T5, \T6 |
1652 | 1724 | add $16, %r10 | |
1653 | 1725 | sub $16, %r12 | |
1654 | cmp $16, %r11 | 1726 | sub $16, %r11 |
1655 | je _get_AAD_loop2_done\@ | 1727 | cmp $16, %r11 |
1656 | mov $16, %r12 | 1728 | jge _get_AAD_blocks\@ |
1657 | 1729 | vmovdqu reg_j, reg_i | |
1658 | _get_AAD_loop2\@: | 1730 | cmp $0, %r11 |
1659 | vpsrldq $4, reg_i, reg_i | 1731 | je _get_AAD_done\@ |
1660 | sub $4, %r12 | 1732 | |
1661 | cmp %r11, %r12 | 1733 | vpxor reg_i, reg_i, reg_i |
1662 | jg _get_AAD_loop2\@ | 1734 | |
1663 | 1735 | /* read the last <16B of AAD. since we have at least 4B of | |
1664 | _get_AAD_loop2_done\@: | 1736 | data right after the AAD (the ICV, and maybe some CT), we can |
1665 | 1737 | read 4B/8B blocks safely, and then get rid of the extra stuff */ | |
1666 | #byte-reflect the AAD data | 1738 | _get_AAD_rest8\@: |
1667 | vpshufb SHUF_MASK(%rip), reg_i, reg_i | 1739 | cmp $4, %r11 |
1668 | 1740 | jle _get_AAD_rest4\@ | |
1741 | movq (%r10), \T1 | ||
1742 | add $8, %r10 | ||
1743 | sub $8, %r11 | ||
1744 | vpslldq $8, \T1, \T1 | ||
1745 | vpsrldq $8, reg_i, reg_i | ||
1746 | vpxor \T1, reg_i, reg_i | ||
1747 | jmp _get_AAD_rest8\@ | ||
1748 | _get_AAD_rest4\@: | ||
1749 | cmp $0, %r11 | ||
1750 | jle _get_AAD_rest0\@ | ||
1751 | mov (%r10), %eax | ||
1752 | movq %rax, \T1 | ||
1753 | add $4, %r10 | ||
1754 | sub $4, %r11 | ||
1755 | vpslldq $12, \T1, \T1 | ||
1756 | vpsrldq $4, reg_i, reg_i | ||
1757 | vpxor \T1, reg_i, reg_i | ||
1758 | _get_AAD_rest0\@: | ||
1759 | /* finalize: shift out the extra bytes we read, and align | ||
1760 | left. since pslldq can only shift by an immediate, we use | ||
1761 | vpshufb and an array of shuffle masks */ | ||
1762 | movq %r12, %r11 | ||
1763 | salq $4, %r11 | ||
1764 | movdqu aad_shift_arr(%r11), \T1 | ||
1765 | vpshufb \T1, reg_i, reg_i | ||
1766 | _get_AAD_rest_final\@: | ||
1767 | vpshufb SHUF_MASK(%rip), reg_i, reg_i | ||
1768 | vpxor reg_j, reg_i, reg_i | ||
1769 | GHASH_MUL_AVX2 reg_i, \T2, \T1, \T3, \T4, \T5, \T6 | ||
1770 | |||
1771 | _get_AAD_done\@: | ||
1669 | # initialize the data pointer offset as zero | 1772 | # initialize the data pointer offset as zero |
1670 | xor %r11, %r11 | 1773 | xor %r11, %r11 |
1671 | 1774 | ||
@@ -1740,7 +1843,6 @@ _get_AAD_loop2_done\@: | |||
1740 | i = (8-\num_initial_blocks) | 1843 | i = (8-\num_initial_blocks) |
1741 | j = (9-\num_initial_blocks) | 1844 | j = (9-\num_initial_blocks) |
1742 | setreg | 1845 | setreg |
1743 | GHASH_MUL_AVX2 reg_i, \T2, \T1, \T3, \T4, \T5, \T6 | ||
1744 | 1846 | ||
1745 | .rep \num_initial_blocks | 1847 | .rep \num_initial_blocks |
1746 | vpxor reg_i, reg_j, reg_j | 1848 | vpxor reg_i, reg_j, reg_j |
@@ -2702,19 +2804,36 @@ _return_T\@: | |||
2702 | cmp $16, %r11 | 2804 | cmp $16, %r11 |
2703 | je _T_16\@ | 2805 | je _T_16\@ |
2704 | 2806 | ||
2705 | cmp $12, %r11 | 2807 | cmp $8, %r11 |
2706 | je _T_12\@ | 2808 | jl _T_4\@ |
2707 | 2809 | ||
2708 | _T_8\@: | 2810 | _T_8\@: |
2709 | vmovq %xmm9, %rax | 2811 | vmovq %xmm9, %rax |
2710 | mov %rax, (%r10) | 2812 | mov %rax, (%r10) |
2711 | jmp _return_T_done\@ | 2813 | add $8, %r10 |
2712 | _T_12\@: | 2814 | sub $8, %r11 |
2713 | vmovq %xmm9, %rax | ||
2714 | mov %rax, (%r10) | ||
2715 | vpsrldq $8, %xmm9, %xmm9 | 2815 | vpsrldq $8, %xmm9, %xmm9 |
2816 | cmp $0, %r11 | ||
2817 | je _return_T_done\@ | ||
2818 | _T_4\@: | ||
2716 | vmovd %xmm9, %eax | 2819 | vmovd %xmm9, %eax |
2717 | mov %eax, 8(%r10) | 2820 | mov %eax, (%r10) |
2821 | add $4, %r10 | ||
2822 | sub $4, %r11 | ||
2823 | vpsrldq $4, %xmm9, %xmm9 | ||
2824 | cmp $0, %r11 | ||
2825 | je _return_T_done\@ | ||
2826 | _T_123\@: | ||
2827 | vmovd %xmm9, %eax | ||
2828 | cmp $2, %r11 | ||
2829 | jl _T_1\@ | ||
2830 | mov %ax, (%r10) | ||
2831 | cmp $2, %r11 | ||
2832 | je _return_T_done\@ | ||
2833 | add $2, %r10 | ||
2834 | sar $16, %eax | ||
2835 | _T_1\@: | ||
2836 | mov %al, (%r10) | ||
2718 | jmp _return_T_done\@ | 2837 | jmp _return_T_done\@ |
2719 | 2838 | ||
2720 | _T_16\@: | 2839 | _T_16\@: |
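The reworked _return_T path above writes out a tag of any length from 1 to 16 bytes using 8/4/2/1-byte stores, instead of supporting only 8, 12 and 16. A plain C equivalent of that cascade, as a sketch (copy_tag() is illustrative, not a function in the patch):

    #include <stdint.h>
    #include <string.h>

    /* Write auth_tag_len (1..16) bytes of the computed tag, mirroring the
     * _T_16 / _T_8 / _T_4 / _T_123 / _T_1 labels above. */
    static void copy_tag(uint8_t *dst, const uint8_t tag[16], size_t auth_tag_len)
    {
            size_t off = 0;

            if (auth_tag_len == 16) {               /* _T_16: one full-block store */
                    memcpy(dst, tag, 16);
                    return;
            }
            if (auth_tag_len - off >= 8) {          /* _T_8: vmovq */
                    memcpy(dst + off, tag + off, 8);
                    off += 8;
            }
            if (auth_tag_len - off >= 4) {          /* _T_4: vmovd */
                    memcpy(dst + off, tag + off, 4);
                    off += 4;
            }
            if (auth_tag_len - off >= 2) {          /* _T_123: 16-bit store */
                    memcpy(dst + off, tag + off, 2);
                    off += 2;
            }
            if (auth_tag_len - off)                 /* _T_1: final byte */
                    dst[off] = tag[off];
    }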
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 93de8ea51548..4a55cdcdc008 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
@@ -61,6 +61,11 @@ struct aesni_rfc4106_gcm_ctx { | |||
61 | u8 nonce[4]; | 61 | u8 nonce[4]; |
62 | }; | 62 | }; |
63 | 63 | ||
64 | struct generic_gcmaes_ctx { | ||
65 | u8 hash_subkey[16] AESNI_ALIGN_ATTR; | ||
66 | struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR; | ||
67 | }; | ||
68 | |||
64 | struct aesni_xts_ctx { | 69 | struct aesni_xts_ctx { |
65 | u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR; | 70 | u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR; |
66 | u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR; | 71 | u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR; |
@@ -102,13 +107,11 @@ asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out, | |||
102 | * u8 *out, Ciphertext output. Encrypt in-place is allowed. | 107 | * u8 *out, Ciphertext output. Encrypt in-place is allowed. |
103 | * const u8 *in, Plaintext input | 108 | * const u8 *in, Plaintext input |
104 | * unsigned long plaintext_len, Length of data in bytes for encryption. | 109 | * unsigned long plaintext_len, Length of data in bytes for encryption. |
105 | * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association) | 110 | * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001. |
106 | * concatenated with 8 byte Initialisation Vector (from IPSec ESP | 111 | * 16-byte aligned pointer. |
107 | * Payload) concatenated with 0x00000001. 16-byte aligned pointer. | ||
108 | * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary. | 112 | * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary. |
109 | * const u8 *aad, Additional Authentication Data (AAD) | 113 | * const u8 *aad, Additional Authentication Data (AAD) |
110 | * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this | 114 | * unsigned long aad_len, Length of AAD in bytes. |
111 | * is going to be 8 or 12 bytes | ||
112 | * u8 *auth_tag, Authenticated Tag output. | 115 | * u8 *auth_tag, Authenticated Tag output. |
113 | * unsigned long auth_tag_len), Authenticated Tag Length in bytes. | 116 | * unsigned long auth_tag_len), Authenticated Tag Length in bytes. |
114 | * Valid values are 16 (most likely), 12 or 8. | 117 | * Valid values are 16 (most likely), 12 or 8. |
@@ -123,9 +126,8 @@ asmlinkage void aesni_gcm_enc(void *ctx, u8 *out, | |||
123 | * u8 *out, Plaintext output. Decrypt in-place is allowed. | 126 | * u8 *out, Plaintext output. Decrypt in-place is allowed. |
124 | * const u8 *in, Ciphertext input | 127 | * const u8 *in, Ciphertext input |
125 | * unsigned long ciphertext_len, Length of data in bytes for decryption. | 128 | * unsigned long ciphertext_len, Length of data in bytes for decryption. |
126 | * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association) | 129 | * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001. |
127 | * concatenated with 8 byte Initialisation Vector (from IPSec ESP | 130 | * 16-byte aligned pointer. |
128 | * Payload) concatenated with 0x00000001. 16-byte aligned pointer. | ||
129 | * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary. | 131 | * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary. |
130 | * const u8 *aad, Additional Authentication Data (AAD) | 132 | * const u8 *aad, Additional Authentication Data (AAD) |
131 | * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going | 133 | * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going |
@@ -275,6 +277,16 @@ aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm) | |||
275 | align = 1; | 277 | align = 1; |
276 | return PTR_ALIGN(crypto_aead_ctx(tfm), align); | 278 | return PTR_ALIGN(crypto_aead_ctx(tfm), align); |
277 | } | 279 | } |
280 | |||
281 | static inline struct | ||
282 | generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm) | ||
283 | { | ||
284 | unsigned long align = AESNI_ALIGN; | ||
285 | |||
286 | if (align <= crypto_tfm_ctx_alignment()) | ||
287 | align = 1; | ||
288 | return PTR_ALIGN(crypto_aead_ctx(tfm), align); | ||
289 | } | ||
278 | #endif | 290 | #endif |
279 | 291 | ||
280 | static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx) | 292 | static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx) |
@@ -712,32 +724,34 @@ static int rfc4106_set_authsize(struct crypto_aead *parent, | |||
712 | return crypto_aead_setauthsize(&cryptd_tfm->base, authsize); | 724 | return crypto_aead_setauthsize(&cryptd_tfm->base, authsize); |
713 | } | 725 | } |
714 | 726 | ||
715 | static int helper_rfc4106_encrypt(struct aead_request *req) | 727 | static int generic_gcmaes_set_authsize(struct crypto_aead *tfm, |
728 | unsigned int authsize) | ||
729 | { | ||
730 | switch (authsize) { | ||
731 | case 4: | ||
732 | case 8: | ||
733 | case 12: | ||
734 | case 13: | ||
735 | case 14: | ||
736 | case 15: | ||
737 | case 16: | ||
738 | break; | ||
739 | default: | ||
740 | return -EINVAL; | ||
741 | } | ||
742 | |||
743 | return 0; | ||
744 | } | ||
745 | |||
746 | static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen, | ||
747 | u8 *hash_subkey, u8 *iv, void *aes_ctx) | ||
716 | { | 748 | { |
717 | u8 one_entry_in_sg = 0; | 749 | u8 one_entry_in_sg = 0; |
718 | u8 *src, *dst, *assoc; | 750 | u8 *src, *dst, *assoc; |
719 | __be32 counter = cpu_to_be32(1); | ||
720 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 751 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
721 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); | ||
722 | void *aes_ctx = &(ctx->aes_key_expanded); | ||
723 | unsigned long auth_tag_len = crypto_aead_authsize(tfm); | 752 | unsigned long auth_tag_len = crypto_aead_authsize(tfm); |
724 | u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); | ||
725 | struct scatter_walk src_sg_walk; | 753 | struct scatter_walk src_sg_walk; |
726 | struct scatter_walk dst_sg_walk = {}; | 754 | struct scatter_walk dst_sg_walk = {}; |
727 | unsigned int i; | ||
728 | |||
729 | /* Assuming we are supporting rfc4106 64-bit extended */ | ||
730 | /* sequence numbers We need to have the AAD length equal */ | ||
731 | /* to 16 or 20 bytes */ | ||
732 | if (unlikely(req->assoclen != 16 && req->assoclen != 20)) | ||
733 | return -EINVAL; | ||
734 | |||
735 | /* IV below built */ | ||
736 | for (i = 0; i < 4; i++) | ||
737 | *(iv+i) = ctx->nonce[i]; | ||
738 | for (i = 0; i < 8; i++) | ||
739 | *(iv+4+i) = req->iv[i]; | ||
740 | *((__be32 *)(iv+12)) = counter; | ||
741 | 755 | ||
742 | if (sg_is_last(req->src) && | 756 | if (sg_is_last(req->src) && |
743 | (!PageHighMem(sg_page(req->src)) || | 757 | (!PageHighMem(sg_page(req->src)) || |
@@ -768,7 +782,7 @@ static int helper_rfc4106_encrypt(struct aead_request *req) | |||
768 | 782 | ||
769 | kernel_fpu_begin(); | 783 | kernel_fpu_begin(); |
770 | aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv, | 784 | aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv, |
771 | ctx->hash_subkey, assoc, req->assoclen - 8, | 785 | hash_subkey, assoc, assoclen, |
772 | dst + req->cryptlen, auth_tag_len); | 786 | dst + req->cryptlen, auth_tag_len); |
773 | kernel_fpu_end(); | 787 | kernel_fpu_end(); |
774 | 788 | ||
@@ -791,37 +805,20 @@ static int helper_rfc4106_encrypt(struct aead_request *req) | |||
791 | return 0; | 805 | return 0; |
792 | } | 806 | } |
793 | 807 | ||
794 | static int helper_rfc4106_decrypt(struct aead_request *req) | 808 | static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen, |
809 | u8 *hash_subkey, u8 *iv, void *aes_ctx) | ||
795 | { | 810 | { |
796 | u8 one_entry_in_sg = 0; | 811 | u8 one_entry_in_sg = 0; |
797 | u8 *src, *dst, *assoc; | 812 | u8 *src, *dst, *assoc; |
798 | unsigned long tempCipherLen = 0; | 813 | unsigned long tempCipherLen = 0; |
799 | __be32 counter = cpu_to_be32(1); | ||
800 | int retval = 0; | ||
801 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 814 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
802 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); | ||
803 | void *aes_ctx = &(ctx->aes_key_expanded); | ||
804 | unsigned long auth_tag_len = crypto_aead_authsize(tfm); | 815 | unsigned long auth_tag_len = crypto_aead_authsize(tfm); |
805 | u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); | ||
806 | u8 authTag[16]; | 816 | u8 authTag[16]; |
807 | struct scatter_walk src_sg_walk; | 817 | struct scatter_walk src_sg_walk; |
808 | struct scatter_walk dst_sg_walk = {}; | 818 | struct scatter_walk dst_sg_walk = {}; |
809 | unsigned int i; | 819 | int retval = 0; |
810 | |||
811 | if (unlikely(req->assoclen != 16 && req->assoclen != 20)) | ||
812 | return -EINVAL; | ||
813 | |||
814 | /* Assuming we are supporting rfc4106 64-bit extended */ | ||
815 | /* sequence numbers We need to have the AAD length */ | ||
816 | /* equal to 16 or 20 bytes */ | ||
817 | 820 | ||
818 | tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len); | 821 | tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len); |
819 | /* IV below built */ | ||
820 | for (i = 0; i < 4; i++) | ||
821 | *(iv+i) = ctx->nonce[i]; | ||
822 | for (i = 0; i < 8; i++) | ||
823 | *(iv+4+i) = req->iv[i]; | ||
824 | *((__be32 *)(iv+12)) = counter; | ||
825 | 822 | ||
826 | if (sg_is_last(req->src) && | 823 | if (sg_is_last(req->src) && |
827 | (!PageHighMem(sg_page(req->src)) || | 824 | (!PageHighMem(sg_page(req->src)) || |
@@ -838,7 +835,6 @@ static int helper_rfc4106_decrypt(struct aead_request *req) | |||
838 | scatterwalk_start(&dst_sg_walk, req->dst); | 835 | scatterwalk_start(&dst_sg_walk, req->dst); |
839 | dst = scatterwalk_map(&dst_sg_walk) + req->assoclen; | 836 | dst = scatterwalk_map(&dst_sg_walk) + req->assoclen; |
840 | } | 837 | } |
841 | |||
842 | } else { | 838 | } else { |
843 | /* Allocate memory for src, dst, assoc */ | 839 | /* Allocate memory for src, dst, assoc */ |
844 | assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC); | 840 | assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC); |
@@ -850,9 +846,10 @@ static int helper_rfc4106_decrypt(struct aead_request *req) | |||
850 | dst = src; | 846 | dst = src; |
851 | } | 847 | } |
852 | 848 | ||
849 | |||
853 | kernel_fpu_begin(); | 850 | kernel_fpu_begin(); |
854 | aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv, | 851 | aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv, |
855 | ctx->hash_subkey, assoc, req->assoclen - 8, | 852 | hash_subkey, assoc, assoclen, |
856 | authTag, auth_tag_len); | 853 | authTag, auth_tag_len); |
857 | kernel_fpu_end(); | 854 | kernel_fpu_end(); |
858 | 855 | ||
@@ -875,6 +872,60 @@ static int helper_rfc4106_decrypt(struct aead_request *req) | |||
875 | kfree(assoc); | 872 | kfree(assoc); |
876 | } | 873 | } |
877 | return retval; | 874 | return retval; |
875 | |||
876 | } | ||
877 | |||
878 | static int helper_rfc4106_encrypt(struct aead_request *req) | ||
879 | { | ||
880 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
881 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); | ||
882 | void *aes_ctx = &(ctx->aes_key_expanded); | ||
883 | u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); | ||
884 | unsigned int i; | ||
885 | __be32 counter = cpu_to_be32(1); | ||
886 | |||
887 | /* Assuming we are supporting rfc4106 64-bit extended */ | ||
888 | /* sequence numbers We need to have the AAD length equal */ | ||
889 | /* to 16 or 20 bytes */ | ||
890 | if (unlikely(req->assoclen != 16 && req->assoclen != 20)) | ||
891 | return -EINVAL; | ||
892 | |||
893 | /* IV below built */ | ||
894 | for (i = 0; i < 4; i++) | ||
895 | *(iv+i) = ctx->nonce[i]; | ||
896 | for (i = 0; i < 8; i++) | ||
897 | *(iv+4+i) = req->iv[i]; | ||
898 | *((__be32 *)(iv+12)) = counter; | ||
899 | |||
900 | return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv, | ||
901 | aes_ctx); | ||
902 | } | ||
903 | |||
904 | static int helper_rfc4106_decrypt(struct aead_request *req) | ||
905 | { | ||
906 | __be32 counter = cpu_to_be32(1); | ||
907 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
908 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); | ||
909 | void *aes_ctx = &(ctx->aes_key_expanded); | ||
910 | u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); | ||
911 | unsigned int i; | ||
912 | |||
913 | if (unlikely(req->assoclen != 16 && req->assoclen != 20)) | ||
914 | return -EINVAL; | ||
915 | |||
916 | /* Assuming we are supporting rfc4106 64-bit extended */ | ||
917 | /* sequence numbers We need to have the AAD length */ | ||
918 | /* equal to 16 or 20 bytes */ | ||
919 | |||
920 | /* IV below built */ | ||
921 | for (i = 0; i < 4; i++) | ||
922 | *(iv+i) = ctx->nonce[i]; | ||
923 | for (i = 0; i < 8; i++) | ||
924 | *(iv+4+i) = req->iv[i]; | ||
925 | *((__be32 *)(iv+12)) = counter; | ||
926 | |||
927 | return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv, | ||
928 | aes_ctx); | ||
878 | } | 929 | } |
879 | 930 | ||
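After the refactor, the two thin wrappers above only assemble the RFC 4106 pre-counter block j0 (salt from the security association, explicit per-packet IV, block counter 1) before delegating to the shared gcmaes_encrypt()/gcmaes_decrypt(). A stand-alone restatement of that layout as a sketch (rfc4106_build_j0() is illustrative, not part of the patch):

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>   /* htonl() for the big-endian counter */

    static void rfc4106_build_j0(uint8_t iv[16], const uint8_t nonce[4],
                                 const uint8_t explicit_iv[8])
    {
            uint32_t ctr = htonl(1);

            memcpy(iv, nonce, 4);           /* 4-byte salt from the SA         */
            memcpy(iv + 4, explicit_iv, 8); /* 8-byte per-packet IV (req->iv)  */
            memcpy(iv + 12, &ctr, 4);       /* counter starts at 1, big-endian */
    }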
880 | static int rfc4106_encrypt(struct aead_request *req) | 931 | static int rfc4106_encrypt(struct aead_request *req) |
@@ -1035,6 +1086,46 @@ struct { | |||
1035 | }; | 1086 | }; |
1036 | 1087 | ||
1037 | #ifdef CONFIG_X86_64 | 1088 | #ifdef CONFIG_X86_64 |
1089 | static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key, | ||
1090 | unsigned int key_len) | ||
1091 | { | ||
1092 | struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead); | ||
1093 | |||
1094 | return aes_set_key_common(crypto_aead_tfm(aead), | ||
1095 | &ctx->aes_key_expanded, key, key_len) ?: | ||
1096 | rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); | ||
1097 | } | ||
1098 | |||
1099 | static int generic_gcmaes_encrypt(struct aead_request *req) | ||
1100 | { | ||
1101 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
1102 | struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm); | ||
1103 | void *aes_ctx = &(ctx->aes_key_expanded); | ||
1104 | u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); | ||
1105 | __be32 counter = cpu_to_be32(1); | ||
1106 | |||
1107 | memcpy(iv, req->iv, 12); | ||
1108 | *((__be32 *)(iv+12)) = counter; | ||
1109 | |||
1110 | return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv, | ||
1111 | aes_ctx); | ||
1112 | } | ||
1113 | |||
1114 | static int generic_gcmaes_decrypt(struct aead_request *req) | ||
1115 | { | ||
1116 | __be32 counter = cpu_to_be32(1); | ||
1117 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | ||
1118 | struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); | ||
1119 | void *aes_ctx = &(ctx->aes_key_expanded); | ||
1120 | u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); | ||
1121 | |||
1122 | memcpy(iv, req->iv, 12); | ||
1123 | *((__be32 *)(iv+12)) = counter; | ||
1124 | |||
1125 | return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv, | ||
1126 | aes_ctx); | ||
1127 | } | ||
1128 | |||
1038 | static struct aead_alg aesni_aead_algs[] = { { | 1129 | static struct aead_alg aesni_aead_algs[] = { { |
1039 | .setkey = common_rfc4106_set_key, | 1130 | .setkey = common_rfc4106_set_key, |
1040 | .setauthsize = common_rfc4106_set_authsize, | 1131 | .setauthsize = common_rfc4106_set_authsize, |
@@ -1069,6 +1160,23 @@ static struct aead_alg aesni_aead_algs[] = { { | |||
1069 | .cra_ctxsize = sizeof(struct cryptd_aead *), | 1160 | .cra_ctxsize = sizeof(struct cryptd_aead *), |
1070 | .cra_module = THIS_MODULE, | 1161 | .cra_module = THIS_MODULE, |
1071 | }, | 1162 | }, |
1163 | }, { | ||
1164 | .setkey = generic_gcmaes_set_key, | ||
1165 | .setauthsize = generic_gcmaes_set_authsize, | ||
1166 | .encrypt = generic_gcmaes_encrypt, | ||
1167 | .decrypt = generic_gcmaes_decrypt, | ||
1168 | .ivsize = 12, | ||
1169 | .maxauthsize = 16, | ||
1170 | .base = { | ||
1171 | .cra_name = "gcm(aes)", | ||
1172 | .cra_driver_name = "generic-gcm-aesni", | ||
1173 | .cra_priority = 400, | ||
1174 | .cra_flags = CRYPTO_ALG_ASYNC, | ||
1175 | .cra_blocksize = 1, | ||
1176 | .cra_ctxsize = sizeof(struct generic_gcmaes_ctx), | ||
1177 | .cra_alignmask = AESNI_ALIGN - 1, | ||
1178 | .cra_module = THIS_MODULE, | ||
1179 | }, | ||
1072 | } }; | 1180 | } }; |
1073 | #else | 1181 | #else |
1074 | static struct aead_alg aesni_aead_algs[0]; | 1182 | static struct aead_alg aesni_aead_algs[0]; |
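With the generic_gcmaes_* entry points registered above, "gcm(aes)" can now be requested from aesni-intel directly, without the RFC 4106 nonce and AAD-length constraints. A rough caller-side sketch, assuming a single flat buffer laid out as AAD || plaintext with room for the tag, and with error paths trimmed; asynchronous completion is not handled here, a real caller sets a callback with aead_request_set_callback() and waits:

    #include <crypto/aead.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static int gcm_aes_encrypt_once(const u8 *key, unsigned int keylen, u8 iv[12],
                                    u8 *buf, unsigned int assoclen, unsigned int ptlen)
    {
            struct crypto_aead *tfm;
            struct aead_request *req;
            struct scatterlist sg;
            int err;

            tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_aead_setkey(tfm, key, keylen) ?:
                  crypto_aead_setauthsize(tfm, 16);
            if (err)
                    goto out_free_tfm;

            req = aead_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    err = -ENOMEM;
                    goto out_free_tfm;
            }

            /* buf: assoclen bytes of AAD, ptlen bytes of plaintext, 16 spare
             * bytes at the end where the tag is appended in place. */
            sg_init_one(&sg, buf, assoclen + ptlen + 16);
            aead_request_set_ad(req, assoclen);
            aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

            err = crypto_aead_encrypt(req); /* may be -EINPROGRESS on this driver */

            aead_request_free(req);
    out_free_tfm:
            crypto_free_aead(tfm);
            return err;
    }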
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c index 24ac9fad832d..d61e57960fe0 100644 --- a/arch/x86/crypto/glue_helper.c +++ b/arch/x86/crypto/glue_helper.c | |||
@@ -176,9 +176,6 @@ __glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx, | |||
176 | src -= 1; | 176 | src -= 1; |
177 | dst -= 1; | 177 | dst -= 1; |
178 | } while (nbytes >= func_bytes); | 178 | } while (nbytes >= func_bytes); |
179 | |||
180 | if (nbytes < bsize) | ||
181 | goto done; | ||
182 | } | 179 | } |
183 | } | 180 | } |
184 | 181 | ||
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c index 2dd3674b5a1e..458409b7568d 100644 --- a/arch/x86/crypto/sha512-mb/sha512_mb.c +++ b/arch/x86/crypto/sha512-mb/sha512_mb.c | |||
@@ -269,19 +269,19 @@ static struct sha512_hash_ctx | |||
269 | * LAST | 269 | * LAST |
270 | */ | 270 | */ |
271 | ctx->error = HASH_CTX_ERROR_INVALID_FLAGS; | 271 | ctx->error = HASH_CTX_ERROR_INVALID_FLAGS; |
272 | return ctx; | 272 | goto unlock; |
273 | } | 273 | } |
274 | 274 | ||
275 | if (ctx->status & HASH_CTX_STS_PROCESSING) { | 275 | if (ctx->status & HASH_CTX_STS_PROCESSING) { |
276 | /* Cannot submit to a currently processing job. */ | 276 | /* Cannot submit to a currently processing job. */ |
277 | ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING; | 277 | ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING; |
278 | return ctx; | 278 | goto unlock; |
279 | } | 279 | } |
280 | 280 | ||
281 | if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) { | 281 | if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) { |
282 | /* Cannot update a finished job. */ | 282 | /* Cannot update a finished job. */ |
283 | ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED; | 283 | ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED; |
284 | return ctx; | 284 | goto unlock; |
285 | } | 285 | } |
286 | 286 | ||
287 | 287 | ||
@@ -363,6 +363,7 @@ static struct sha512_hash_ctx | |||
363 | } | 363 | } |
364 | 364 | ||
365 | ctx = sha512_ctx_mgr_resubmit(mgr, ctx); | 365 | ctx = sha512_ctx_mgr_resubmit(mgr, ctx); |
366 | unlock: | ||
366 | spin_unlock_irqrestore(&cstate->work_lock, irqflags); | 367 | spin_unlock_irqrestore(&cstate->work_lock, irqflags); |
367 | return ctx; | 368 | return ctx; |
368 | } | 369 | } |
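The three converted returns above were the bug: the submit path takes cstate->work_lock at entry, so returning directly from the error checks left the spinlock held. The fix funnels every exit through a single label, the usual kernel pattern sketched below with placeholder names rather than the actual sha512-mb structures:

    #include <linux/spinlock.h>

    struct mgr_state { spinlock_t work_lock; };
    struct job_ctx  { int error; };

    static struct job_ctx *submit(struct mgr_state *cstate, struct job_ctx *ctx,
                                  int bad_flags)
    {
            unsigned long irqflags;

            spin_lock_irqsave(&cstate->work_lock, irqflags);

            if (bad_flags) {
                    ctx->error = -EINVAL;
                    goto unlock;            /* previously: return ctx (lock still held) */
            }

            /* normal submission work, still under work_lock */

    unlock:
            spin_unlock_irqrestore(&cstate->work_lock, irqflags);
            return ctx;
    }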
diff --git a/crypto/Kconfig b/crypto/Kconfig index aac4bc90a138..caa770e535a2 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
@@ -130,6 +130,7 @@ config CRYPTO_DH | |||
130 | config CRYPTO_ECDH | 130 | config CRYPTO_ECDH |
131 | tristate "ECDH algorithm" | 131 | tristate "ECDH algorithm" |
132 | select CRYTPO_KPP | 132 | select CRYTPO_KPP |
133 | select CRYPTO_RNG_DEFAULT | ||
133 | help | 134 | help |
134 | Generic implementation of the ECDH algorithm | 135 | Generic implementation of the ECDH algorithm |
135 | 136 | ||
diff --git a/crypto/Makefile b/crypto/Makefile index 8a44057240d5..d41f0331b085 100644 --- a/crypto/Makefile +++ b/crypto/Makefile | |||
@@ -33,10 +33,6 @@ obj-$(CONFIG_CRYPTO_KPP2) += kpp.o | |||
33 | dh_generic-y := dh.o | 33 | dh_generic-y := dh.o |
34 | dh_generic-y += dh_helper.o | 34 | dh_generic-y += dh_helper.o |
35 | obj-$(CONFIG_CRYPTO_DH) += dh_generic.o | 35 | obj-$(CONFIG_CRYPTO_DH) += dh_generic.o |
36 | ecdh_generic-y := ecc.o | ||
37 | ecdh_generic-y += ecdh.o | ||
38 | ecdh_generic-y += ecdh_helper.o | ||
39 | obj-$(CONFIG_CRYPTO_ECDH) += ecdh_generic.o | ||
40 | 36 | ||
41 | $(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h | 37 | $(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h |
42 | $(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h | 38 | $(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h |
@@ -138,6 +134,11 @@ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o | |||
138 | obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o | 134 | obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o |
139 | obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o | 135 | obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o |
140 | 136 | ||
137 | ecdh_generic-y := ecc.o | ||
138 | ecdh_generic-y += ecdh.o | ||
139 | ecdh_generic-y += ecdh_helper.o | ||
140 | obj-$(CONFIG_CRYPTO_ECDH) += ecdh_generic.o | ||
141 | |||
141 | # | 142 | # |
142 | # generic algorithms and the async_tx api | 143 | # generic algorithms and the async_tx api |
143 | # | 144 | # |
diff --git a/crypto/aes_ti.c b/crypto/aes_ti.c index 92644fd1ac19..03023b2290e8 100644 --- a/crypto/aes_ti.c +++ b/crypto/aes_ti.c | |||
@@ -114,7 +114,7 @@ static u32 mix_columns(u32 x) | |||
114 | * | 0x2 0x3 0x1 0x1 | | x[0] | | 114 | * | 0x2 0x3 0x1 0x1 | | x[0] | |
115 | * | 0x1 0x2 0x3 0x1 | | x[1] | | 115 | * | 0x1 0x2 0x3 0x1 | | x[1] | |
116 | * | 0x1 0x1 0x2 0x3 | x | x[2] | | 116 | * | 0x1 0x1 0x2 0x3 | x | x[2] | |
117 | * | 0x3 0x1 0x1 0x3 | | x[3] | | 117 | * | 0x3 0x1 0x1 0x2 | | x[3] | |
118 | */ | 118 | */ |
119 | u32 y = mul_by_x(x) ^ ror32(x, 16); | 119 | u32 y = mul_by_x(x) ^ ror32(x, 16); |
120 | 120 | ||
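The one-character fix above corrects the last row of the MixColumns matrix in the comment to {0x3, 0x1, 0x1, 0x2}; the code was already right. For reference, a stand-alone sketch of the same transform written the conventional byte-wise way (xtime()/mix_one_column() are illustrative and differ from aes_ti.c, which operates on packed 32-bit columns):

    #include <stdint.h>

    /* Multiply by x (i.e. {02}) in GF(2^8) modulo the AES polynomial 0x11b. */
    static uint8_t xtime(uint8_t b)
    {
            return (uint8_t)((b << 1) ^ ((b >> 7) * 0x1b));
    }

    /* One MixColumns column:
     *   y0 = 2*x0 + 3*x1 + 1*x2 + 1*x3
     *   y1 = 1*x0 + 2*x1 + 3*x2 + 1*x3
     *   y2 = 1*x0 + 1*x1 + 2*x2 + 3*x3
     *   y3 = 3*x0 + 1*x1 + 1*x2 + 2*x3   (the row the comment fix restores)
     */
    static void mix_one_column(uint8_t x[4])
    {
            uint8_t t = x[0] ^ x[1] ^ x[2] ^ x[3];
            uint8_t x0 = x[0];

            x[0] ^= t ^ xtime(x[0] ^ x[1]);
            x[1] ^= t ^ xtime(x[1] ^ x[2]);
            x[2] ^= t ^ xtime(x[2] ^ x[3]);
            x[3] ^= t ^ xtime(x[3] ^ x0);
    }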
diff --git a/crypto/algapi.c b/crypto/algapi.c index 9eed4ef9c971..e4cc7615a139 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c | |||
@@ -260,7 +260,7 @@ void crypto_alg_tested(const char *name, int err) | |||
260 | goto found; | 260 | goto found; |
261 | } | 261 | } |
262 | 262 | ||
263 | printk(KERN_ERR "alg: Unexpected test result for %s: %d\n", name, err); | 263 | pr_err("alg: Unexpected test result for %s: %d\n", name, err); |
264 | goto unlock; | 264 | goto unlock; |
265 | 265 | ||
266 | found: | 266 | found: |
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index 727bd5c3569e..61e7c4e02fd2 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c | |||
@@ -70,7 +70,7 @@ static void crypto_pump_requests(struct crypto_engine *engine, | |||
70 | 70 | ||
71 | if (engine->unprepare_crypt_hardware && | 71 | if (engine->unprepare_crypt_hardware && |
72 | engine->unprepare_crypt_hardware(engine)) | 72 | engine->unprepare_crypt_hardware(engine)) |
73 | pr_err("failed to unprepare crypt hardware\n"); | 73 | dev_err(engine->dev, "failed to unprepare crypt hardware\n"); |
74 | 74 | ||
75 | spin_lock_irqsave(&engine->queue_lock, flags); | 75 | spin_lock_irqsave(&engine->queue_lock, flags); |
76 | engine->idling = false; | 76 | engine->idling = false; |
@@ -99,7 +99,7 @@ static void crypto_pump_requests(struct crypto_engine *engine, | |||
99 | if (!was_busy && engine->prepare_crypt_hardware) { | 99 | if (!was_busy && engine->prepare_crypt_hardware) { |
100 | ret = engine->prepare_crypt_hardware(engine); | 100 | ret = engine->prepare_crypt_hardware(engine); |
101 | if (ret) { | 101 | if (ret) { |
102 | pr_err("failed to prepare crypt hardware\n"); | 102 | dev_err(engine->dev, "failed to prepare crypt hardware\n"); |
103 | goto req_err; | 103 | goto req_err; |
104 | } | 104 | } |
105 | } | 105 | } |
@@ -110,14 +110,15 @@ static void crypto_pump_requests(struct crypto_engine *engine, | |||
110 | if (engine->prepare_hash_request) { | 110 | if (engine->prepare_hash_request) { |
111 | ret = engine->prepare_hash_request(engine, hreq); | 111 | ret = engine->prepare_hash_request(engine, hreq); |
112 | if (ret) { | 112 | if (ret) { |
113 | pr_err("failed to prepare request: %d\n", ret); | 113 | dev_err(engine->dev, "failed to prepare request: %d\n", |
114 | ret); | ||
114 | goto req_err; | 115 | goto req_err; |
115 | } | 116 | } |
116 | engine->cur_req_prepared = true; | 117 | engine->cur_req_prepared = true; |
117 | } | 118 | } |
118 | ret = engine->hash_one_request(engine, hreq); | 119 | ret = engine->hash_one_request(engine, hreq); |
119 | if (ret) { | 120 | if (ret) { |
120 | pr_err("failed to hash one request from queue\n"); | 121 | dev_err(engine->dev, "failed to hash one request from queue\n"); |
121 | goto req_err; | 122 | goto req_err; |
122 | } | 123 | } |
123 | return; | 124 | return; |
@@ -126,19 +127,20 @@ static void crypto_pump_requests(struct crypto_engine *engine, | |||
126 | if (engine->prepare_cipher_request) { | 127 | if (engine->prepare_cipher_request) { |
127 | ret = engine->prepare_cipher_request(engine, breq); | 128 | ret = engine->prepare_cipher_request(engine, breq); |
128 | if (ret) { | 129 | if (ret) { |
129 | pr_err("failed to prepare request: %d\n", ret); | 130 | dev_err(engine->dev, "failed to prepare request: %d\n", |
131 | ret); | ||
130 | goto req_err; | 132 | goto req_err; |
131 | } | 133 | } |
132 | engine->cur_req_prepared = true; | 134 | engine->cur_req_prepared = true; |
133 | } | 135 | } |
134 | ret = engine->cipher_one_request(engine, breq); | 136 | ret = engine->cipher_one_request(engine, breq); |
135 | if (ret) { | 137 | if (ret) { |
136 | pr_err("failed to cipher one request from queue\n"); | 138 | dev_err(engine->dev, "failed to cipher one request from queue\n"); |
137 | goto req_err; | 139 | goto req_err; |
138 | } | 140 | } |
139 | return; | 141 | return; |
140 | default: | 142 | default: |
141 | pr_err("failed to prepare request of unknown type\n"); | 143 | dev_err(engine->dev, "failed to prepare request of unknown type\n"); |
142 | return; | 144 | return; |
143 | } | 145 | } |
144 | 146 | ||
@@ -275,7 +277,7 @@ void crypto_finalize_cipher_request(struct crypto_engine *engine, | |||
275 | engine->unprepare_cipher_request) { | 277 | engine->unprepare_cipher_request) { |
276 | ret = engine->unprepare_cipher_request(engine, req); | 278 | ret = engine->unprepare_cipher_request(engine, req); |
277 | if (ret) | 279 | if (ret) |
278 | pr_err("failed to unprepare request\n"); | 280 | dev_err(engine->dev, "failed to unprepare request\n"); |
279 | } | 281 | } |
280 | spin_lock_irqsave(&engine->queue_lock, flags); | 282 | spin_lock_irqsave(&engine->queue_lock, flags); |
281 | engine->cur_req = NULL; | 283 | engine->cur_req = NULL; |
@@ -312,7 +314,7 @@ void crypto_finalize_hash_request(struct crypto_engine *engine, | |||
312 | engine->unprepare_hash_request) { | 314 | engine->unprepare_hash_request) { |
313 | ret = engine->unprepare_hash_request(engine, req); | 315 | ret = engine->unprepare_hash_request(engine, req); |
314 | if (ret) | 316 | if (ret) |
315 | pr_err("failed to unprepare request\n"); | 317 | dev_err(engine->dev, "failed to unprepare request\n"); |
316 | } | 318 | } |
317 | spin_lock_irqsave(&engine->queue_lock, flags); | 319 | spin_lock_irqsave(&engine->queue_lock, flags); |
318 | engine->cur_req = NULL; | 320 | engine->cur_req = NULL; |
@@ -384,7 +386,7 @@ int crypto_engine_stop(struct crypto_engine *engine) | |||
384 | spin_unlock_irqrestore(&engine->queue_lock, flags); | 386 | spin_unlock_irqrestore(&engine->queue_lock, flags); |
385 | 387 | ||
386 | if (ret) | 388 | if (ret) |
387 | pr_warn("could not stop engine\n"); | 389 | dev_warn(engine->dev, "could not stop engine\n"); |
388 | 390 | ||
389 | return ret; | 391 | return ret; |
390 | } | 392 | } |
@@ -411,6 +413,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) | |||
411 | if (!engine) | 413 | if (!engine) |
412 | return NULL; | 414 | return NULL; |
413 | 415 | ||
416 | engine->dev = dev; | ||
414 | engine->rt = rt; | 417 | engine->rt = rt; |
415 | engine->running = false; | 418 | engine->running = false; |
416 | engine->busy = false; | 419 | engine->busy = false; |
diff --git a/crypto/dh.c b/crypto/dh.c index 87e3542cf1b8..b1032a5c1bfa 100644 --- a/crypto/dh.c +++ b/crypto/dh.c | |||
@@ -4,9 +4,9 @@ | |||
4 | * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> | 4 | * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public Licence | 7 | * modify it under the terms of the GNU General Public License |
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the Licence, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
@@ -85,6 +85,9 @@ static int dh_set_secret(struct crypto_kpp *tfm, const void *buf, | |||
85 | struct dh_ctx *ctx = dh_get_ctx(tfm); | 85 | struct dh_ctx *ctx = dh_get_ctx(tfm); |
86 | struct dh params; | 86 | struct dh params; |
87 | 87 | ||
88 | /* Free the old MPI key if any */ | ||
89 | dh_free_ctx(ctx); | ||
90 | |||
88 | if (crypto_dh_decode_key(buf, len, ¶ms) < 0) | 91 | if (crypto_dh_decode_key(buf, len, ¶ms) < 0) |
89 | return -EINVAL; | 92 | return -EINVAL; |
90 | 93 | ||
@@ -144,7 +147,7 @@ err_free_val: | |||
144 | return ret; | 147 | return ret; |
145 | } | 148 | } |
146 | 149 | ||
147 | static int dh_max_size(struct crypto_kpp *tfm) | 150 | static unsigned int dh_max_size(struct crypto_kpp *tfm) |
148 | { | 151 | { |
149 | struct dh_ctx *ctx = dh_get_ctx(tfm); | 152 | struct dh_ctx *ctx = dh_get_ctx(tfm); |
150 | 153 | ||
diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c index 02db76b20d00..8ba8a3f82620 100644 --- a/crypto/dh_helper.c +++ b/crypto/dh_helper.c | |||
@@ -3,9 +3,9 @@ | |||
3 | * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> | 3 | * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU General Public Licence | 6 | * modify it under the terms of the GNU General Public License |
7 | * as published by the Free Software Foundation; either version | 7 | * as published by the Free Software Foundation; either version |
8 | * 2 of the Licence, or (at your option) any later version. | 8 | * 2 of the License, or (at your option) any later version. |
9 | */ | 9 | */ |
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/export.h> | 11 | #include <linux/export.h> |
diff --git a/crypto/drbg.c b/crypto/drbg.c index cdb27ac4b226..633a88e93ab0 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c | |||
@@ -1691,6 +1691,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg) | |||
1691 | return PTR_ERR(sk_tfm); | 1691 | return PTR_ERR(sk_tfm); |
1692 | } | 1692 | } |
1693 | drbg->ctr_handle = sk_tfm; | 1693 | drbg->ctr_handle = sk_tfm; |
1694 | init_completion(&drbg->ctr_completion); | ||
1694 | 1695 | ||
1695 | req = skcipher_request_alloc(sk_tfm, GFP_KERNEL); | 1696 | req = skcipher_request_alloc(sk_tfm, GFP_KERNEL); |
1696 | if (!req) { | 1697 | if (!req) { |
diff --git a/crypto/ecc.c b/crypto/ecc.c index 414c78a9c214..633a9bcdc574 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/swab.h> | 29 | #include <linux/swab.h> |
30 | #include <linux/fips.h> | 30 | #include <linux/fips.h> |
31 | #include <crypto/ecdh.h> | 31 | #include <crypto/ecdh.h> |
32 | #include <crypto/rng.h> | ||
32 | 33 | ||
33 | #include "ecc.h" | 34 | #include "ecc.h" |
34 | #include "ecc_curve_defs.h" | 35 | #include "ecc_curve_defs.h" |
@@ -904,7 +905,7 @@ static inline void ecc_swap_digits(const u64 *in, u64 *out, | |||
904 | } | 905 | } |
905 | 906 | ||
906 | int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits, | 907 | int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits, |
907 | const u8 *private_key, unsigned int private_key_len) | 908 | const u64 *private_key, unsigned int private_key_len) |
908 | { | 909 | { |
909 | int nbytes; | 910 | int nbytes; |
910 | const struct ecc_curve *curve = ecc_get_curve(curve_id); | 911 | const struct ecc_curve *curve = ecc_get_curve(curve_id); |
@@ -917,24 +918,77 @@ int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits, | |||
917 | if (private_key_len != nbytes) | 918 | if (private_key_len != nbytes) |
918 | return -EINVAL; | 919 | return -EINVAL; |
919 | 920 | ||
920 | if (vli_is_zero((const u64 *)&private_key[0], ndigits)) | 921 | if (vli_is_zero(private_key, ndigits)) |
921 | return -EINVAL; | 922 | return -EINVAL; |
922 | 923 | ||
923 | /* Make sure the private key is in the range [1, n-1]. */ | 924 | /* Make sure the private key is in the range [1, n-1]. */ |
924 | if (vli_cmp(curve->n, (const u64 *)&private_key[0], ndigits) != 1) | 925 | if (vli_cmp(curve->n, private_key, ndigits) != 1) |
925 | return -EINVAL; | 926 | return -EINVAL; |
926 | 927 | ||
927 | return 0; | 928 | return 0; |
928 | } | 929 | } |
929 | 930 | ||
930 | int ecdh_make_pub_key(unsigned int curve_id, unsigned int ndigits, | 931 | /* |
931 | const u8 *private_key, unsigned int private_key_len, | 932 | * ECC private keys are generated using the method of extra random bits, |
932 | u8 *public_key, unsigned int public_key_len) | 933 | * equivalent to that described in FIPS 186-4, Appendix B.4.1. |
934 | * | ||
935 | * d = (c mod(n–1)) + 1 where c is a string of random bits, 64 bits longer | ||
936 | * than requested | ||
937 | * 0 <= c mod(n-1) <= n-2 and implies that | ||
938 | * 1 <= d <= n-1 | ||
939 | * | ||
940 | * This method generates a private key uniformly distributed in the range | ||
941 | * [1, n-1]. | ||
942 | */ | ||
943 | int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey) | ||
944 | { | ||
945 | const struct ecc_curve *curve = ecc_get_curve(curve_id); | ||
946 | u64 priv[ndigits]; | ||
947 | unsigned int nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT; | ||
948 | unsigned int nbits = vli_num_bits(curve->n, ndigits); | ||
949 | int err; | ||
950 | |||
951 | /* Check that N is included in Table 1 of FIPS 186-4, section 6.1.1 */ | ||
952 | if (nbits < 160) | ||
953 | return -EINVAL; | ||
954 | |||
955 | /* | ||
956 | * FIPS 186-4 recommends that the private key should be obtained from a | ||
957 | * RBG with a security strength equal to or greater than the security | ||
958 | * strength associated with N. | ||
959 | * | ||
960 | * The maximum security strength identified by NIST SP800-57pt1r4 for | ||
961 | * ECC is 256 (N >= 512). | ||
962 | * | ||
963 | * This condition is met by the default RNG because it selects a favored | ||
964 | * DRBG with a security strength of 256. | ||
965 | */ | ||
966 | if (crypto_get_default_rng()) | ||
967 | err = -EFAULT; | ||
968 | |||
969 | err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes); | ||
970 | crypto_put_default_rng(); | ||
971 | if (err) | ||
972 | return err; | ||
973 | |||
974 | if (vli_is_zero(priv, ndigits)) | ||
975 | return -EINVAL; | ||
976 | |||
977 | /* Make sure the private key is in the range [1, n-1]. */ | ||
978 | if (vli_cmp(curve->n, priv, ndigits) != 1) | ||
979 | return -EINVAL; | ||
980 | |||
981 | ecc_swap_digits(priv, privkey, ndigits); | ||
982 | |||
983 | return 0; | ||
984 | } | ||
985 | |||
986 | int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits, | ||
987 | const u64 *private_key, u64 *public_key) | ||
933 | { | 988 | { |
934 | int ret = 0; | 989 | int ret = 0; |
935 | struct ecc_point *pk; | 990 | struct ecc_point *pk; |
936 | u64 priv[ndigits]; | 991 | u64 priv[ndigits]; |
937 | unsigned int nbytes; | ||
938 | const struct ecc_curve *curve = ecc_get_curve(curve_id); | 992 | const struct ecc_curve *curve = ecc_get_curve(curve_id); |
939 | 993 | ||
940 | if (!private_key || !curve) { | 994 | if (!private_key || !curve) { |
@@ -942,7 +996,7 @@ int ecdh_make_pub_key(unsigned int curve_id, unsigned int ndigits, | |||
942 | goto out; | 996 | goto out; |
943 | } | 997 | } |
944 | 998 | ||
945 | ecc_swap_digits((const u64 *)private_key, priv, ndigits); | 999 | ecc_swap_digits(private_key, priv, ndigits); |
946 | 1000 | ||
947 | pk = ecc_alloc_point(ndigits); | 1001 | pk = ecc_alloc_point(ndigits); |
948 | if (!pk) { | 1002 | if (!pk) { |
@@ -956,9 +1010,8 @@ int ecdh_make_pub_key(unsigned int curve_id, unsigned int ndigits, | |||
956 | goto err_free_point; | 1010 | goto err_free_point; |
957 | } | 1011 | } |
958 | 1012 | ||
959 | nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT; | 1013 | ecc_swap_digits(pk->x, public_key, ndigits); |
960 | ecc_swap_digits(pk->x, (u64 *)public_key, ndigits); | 1014 | ecc_swap_digits(pk->y, &public_key[ndigits], ndigits); |
961 | ecc_swap_digits(pk->y, (u64 *)&public_key[nbytes], ndigits); | ||
962 | 1015 | ||
963 | err_free_point: | 1016 | err_free_point: |
964 | ecc_free_point(pk); | 1017 | ecc_free_point(pk); |
@@ -967,9 +1020,8 @@ out: | |||
967 | } | 1020 | } |
968 | 1021 | ||
969 | int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, | 1022 | int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, |
970 | const u8 *private_key, unsigned int private_key_len, | 1023 | const u64 *private_key, const u64 *public_key, |
971 | const u8 *public_key, unsigned int public_key_len, | 1024 | u64 *secret) |
972 | u8 *secret, unsigned int secret_len) | ||
973 | { | 1025 | { |
974 | int ret = 0; | 1026 | int ret = 0; |
975 | struct ecc_point *product, *pk; | 1027 | struct ecc_point *product, *pk; |
@@ -999,13 +1051,13 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, | |||
999 | goto err_alloc_product; | 1051 | goto err_alloc_product; |
1000 | } | 1052 | } |
1001 | 1053 | ||
1002 | ecc_swap_digits((const u64 *)public_key, pk->x, ndigits); | 1054 | ecc_swap_digits(public_key, pk->x, ndigits); |
1003 | ecc_swap_digits((const u64 *)&public_key[nbytes], pk->y, ndigits); | 1055 | ecc_swap_digits(&public_key[ndigits], pk->y, ndigits); |
1004 | ecc_swap_digits((const u64 *)private_key, priv, ndigits); | 1056 | ecc_swap_digits(private_key, priv, ndigits); |
1005 | 1057 | ||
1006 | ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits); | 1058 | ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits); |
1007 | 1059 | ||
1008 | ecc_swap_digits(product->x, (u64 *)secret, ndigits); | 1060 | ecc_swap_digits(product->x, secret, ndigits); |
1009 | 1061 | ||
1010 | if (ecc_point_is_zero(product)) | 1062 | if (ecc_point_is_zero(product)) |
1011 | ret = -EFAULT; | 1063 | ret = -EFAULT; |
diff --git a/crypto/ecc.h b/crypto/ecc.h index 663d598c7406..e4fd4492c765 100644 --- a/crypto/ecc.h +++ b/crypto/ecc.h | |||
@@ -34,41 +34,51 @@ | |||
34 | * ecc_is_key_valid() - Validate a given ECDH private key | 34 | * ecc_is_key_valid() - Validate a given ECDH private key |
35 | * | 35 | * |
36 | * @curve_id: id representing the curve to use | 36 | * @curve_id: id representing the curve to use |
37 | * @ndigits: curve number of digits | 37 | * @ndigits: curve's number of digits |
38 | * @private_key: private key to be used for the given curve | 38 | * @private_key: private key to be used for the given curve |
39 | * @private_key_len: private key len | 39 | * @private_key_len: private key length |
40 | * | 40 | * |
41 | * Returns 0 if the key is acceptable, a negative value otherwise | 41 | * Returns 0 if the key is acceptable, a negative value otherwise |
42 | */ | 42 | */ |
43 | int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits, | 43 | int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits, |
44 | const u8 *private_key, unsigned int private_key_len); | 44 | const u64 *private_key, unsigned int private_key_len); |
45 | |||
46 | /** | ||
47 | * ecc_gen_privkey() - Generates an ECC private key. | ||
48 | * The private key is a random integer in the range 0 < random < n, where n is a | ||
49 | * prime that is the order of the cyclic subgroup generated by the distinguished | ||
50 | * point G. | ||
51 | * @curve_id: id representing the curve to use | ||
52 | * @ndigits: curve number of digits | ||
53 | * @private_key: buffer for storing the generated private key | ||
54 | * | ||
55 | * Returns 0 if the private key was generated successfully, a negative value | ||
56 | * if an error occurred. | ||
57 | */ | ||
58 | int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey); | ||
45 | 59 | ||
46 | /** | 60 | /** |
47 | * ecdh_make_pub_key() - Compute an ECC public key | 61 | * ecc_make_pub_key() - Compute an ECC public key |
48 | * | 62 | * |
49 | * @curve_id: id representing the curve to use | 63 | * @curve_id: id representing the curve to use |
64 | * @ndigits: curve's number of digits | ||
50 | * @private_key: pregenerated private key for the given curve | 65 | * @private_key: pregenerated private key for the given curve |
51 | * @private_key_len: length of private_key | 66 | * @public_key: buffer for storing the generated public key |
52 | * @public_key: buffer for storing the public key generated | ||
53 | * @public_key_len: length of the public_key buffer | ||
54 | * | 67 | * |
55 | * Returns 0 if the public key was generated successfully, a negative value | 68 | * Returns 0 if the public key was generated successfully, a negative value |
56 | * if an error occurred. | 69 | * if an error occurred. |
57 | */ | 70 | */ |
58 | int ecdh_make_pub_key(const unsigned int curve_id, unsigned int ndigits, | 71 | int ecc_make_pub_key(const unsigned int curve_id, unsigned int ndigits, |
59 | const u8 *private_key, unsigned int private_key_len, | 72 | const u64 *private_key, u64 *public_key); |
60 | u8 *public_key, unsigned int public_key_len); | ||
61 | 73 | ||
62 | /** | 74 | /** |
63 | * crypto_ecdh_shared_secret() - Compute a shared secret | 75 | * crypto_ecdh_shared_secret() - Compute a shared secret |
64 | * | 76 | * |
65 | * @curve_id: id representing the curve to use | 77 | * @curve_id: id representing the curve to use |
78 | * @ndigits: curve's number of digits | ||
66 | * @private_key: private key of part A | 79 | * @private_key: private key of part A |
67 | * @private_key_len: length of private_key | ||
68 | * @public_key: public key of counterpart B | 80 | * @public_key: public key of counterpart B |
69 | * @public_key_len: length of public_key | ||
70 | * @secret: buffer for storing the calculated shared secret | 81 | * @secret: buffer for storing the calculated shared secret |
71 | * @secret_len: length of the secret buffer | ||
72 | * | 82 | * |
73 | * Note: It is recommended that you hash the result of crypto_ecdh_shared_secret | 83 | * Note: It is recommended that you hash the result of crypto_ecdh_shared_secret |
74 | * before using it for symmetric encryption or HMAC. | 84 | * before using it for symmetric encryption or HMAC. |
@@ -77,7 +87,6 @@ int ecdh_make_pub_key(const unsigned int curve_id, unsigned int ndigits, | |||
77 | * if an error occurred. | 87 | * if an error occurred. |
78 | */ | 88 | */ |
79 | int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, | 89 | int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits, |
80 | const u8 *private_key, unsigned int private_key_len, | 90 | const u64 *private_key, const u64 *public_key, |
81 | const u8 *public_key, unsigned int public_key_len, | 91 | u64 *secret); |
82 | u8 *secret, unsigned int secret_len); | ||
83 | #endif | 92 | #endif |
diff --git a/crypto/ecdh.c b/crypto/ecdh.c index 63ca33771e4e..61c7708905d0 100644 --- a/crypto/ecdh.c +++ b/crypto/ecdh.c | |||
@@ -4,9 +4,9 @@ | |||
4 | * Authors: Salvator Benedetto <salvatore.benedetto@intel.com> | 4 | * Authors: Salvator Benedetto <salvatore.benedetto@intel.com> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public Licence | 7 | * modify it under the terms of the GNU General Public License |
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the Licence, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
@@ -55,8 +55,12 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, | |||
55 | ctx->curve_id = params.curve_id; | 55 | ctx->curve_id = params.curve_id; |
56 | ctx->ndigits = ndigits; | 56 | ctx->ndigits = ndigits; |
57 | 57 | ||
58 | if (!params.key || !params.key_size) | ||
59 | return ecc_gen_privkey(ctx->curve_id, ctx->ndigits, | ||
60 | ctx->private_key); | ||
61 | |||
58 | if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits, | 62 | if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits, |
59 | (const u8 *)params.key, params.key_size) < 0) | 63 | (const u64 *)params.key, params.key_size) < 0) |
60 | return -EINVAL; | 64 | return -EINVAL; |
61 | 65 | ||
62 | memcpy(ctx->private_key, params.key, params.key_size); | 66 | memcpy(ctx->private_key, params.key, params.key_size); |
@@ -81,16 +85,14 @@ static int ecdh_compute_value(struct kpp_request *req) | |||
81 | return -EINVAL; | 85 | return -EINVAL; |
82 | 86 | ||
83 | ret = crypto_ecdh_shared_secret(ctx->curve_id, ctx->ndigits, | 87 | ret = crypto_ecdh_shared_secret(ctx->curve_id, ctx->ndigits, |
84 | (const u8 *)ctx->private_key, nbytes, | 88 | ctx->private_key, |
85 | (const u8 *)ctx->public_key, 2 * nbytes, | 89 | ctx->public_key, |
86 | (u8 *)ctx->shared_secret, nbytes); | 90 | ctx->shared_secret); |
87 | 91 | ||
88 | buf = ctx->shared_secret; | 92 | buf = ctx->shared_secret; |
89 | } else { | 93 | } else { |
90 | ret = ecdh_make_pub_key(ctx->curve_id, ctx->ndigits, | 94 | ret = ecc_make_pub_key(ctx->curve_id, ctx->ndigits, |
91 | (const u8 *)ctx->private_key, nbytes, | 95 | ctx->private_key, ctx->public_key); |
92 | (u8 *)ctx->public_key, | ||
93 | sizeof(ctx->public_key)); | ||
94 | buf = ctx->public_key; | 96 | buf = ctx->public_key; |
95 | /* Public part is a point thus it has both coordinates */ | 97 | /* Public part is a point thus it has both coordinates */ |
96 | nbytes *= 2; | 98 | nbytes *= 2; |
@@ -106,13 +108,12 @@ static int ecdh_compute_value(struct kpp_request *req) | |||
106 | return ret; | 108 | return ret; |
107 | } | 109 | } |
108 | 110 | ||
109 | static int ecdh_max_size(struct crypto_kpp *tfm) | 111 | static unsigned int ecdh_max_size(struct crypto_kpp *tfm) |
110 | { | 112 | { |
111 | struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); | 113 | struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); |
112 | int nbytes = ctx->ndigits << ECC_DIGITS_TO_BYTES_SHIFT; | ||
113 | 114 | ||
114 | /* Public key is made of two coordinates */ | 115 | /* Public key is made of two coordinates, add one to the left shift */ |
115 | return 2 * nbytes; | 116 | return ctx->ndigits << (ECC_DIGITS_TO_BYTES_SHIFT + 1); |
116 | } | 117 | } |
117 | 118 | ||
118 | static void no_exit_tfm(struct crypto_kpp *tfm) | 119 | static void no_exit_tfm(struct crypto_kpp *tfm) |
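The ecdh_set_secret() change above is the user-visible half of the new private-key generation: encoding the ECDH parameters with an empty key now makes the implementation call ecc_gen_privkey() instead of failing validation. A hedged caller-side sketch (error handling trimmed; the 64-byte buffer is just an assumption that comfortably fits the encoded parameters):

    #include <crypto/ecdh.h>
    #include <crypto/kpp.h>

    static int ecdh_use_generated_key(struct crypto_kpp *tfm)
    {
            struct ecdh p = {
                    .curve_id = ECC_CURVE_NIST_P256,
                    .key      = NULL,   /* no key supplied ...            */
                    .key_size = 0,      /* ... so ecc_gen_privkey() runs  */
            };
            char buf[64];
            unsigned int len = crypto_ecdh_key_len(&p);
            int err;

            if (len > sizeof(buf))
                    return -EINVAL;

            err = crypto_ecdh_encode_key(buf, len, &p);
            if (err)
                    return err;

            return crypto_kpp_set_secret(tfm, buf, len);
    }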
diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c index 3cd8a2414e60..f05bea5fd257 100644 --- a/crypto/ecdh_helper.c +++ b/crypto/ecdh_helper.c | |||
@@ -3,9 +3,9 @@ | |||
3 | * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> | 3 | * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU General Public Licence | 6 | * modify it under the terms of the GNU General Public License |
7 | * as published by the Free Software Foundation; either version | 7 | * as published by the Free Software Foundation; either version |
8 | * 2 of the Licence, or (at your option) any later version. | 8 | * 2 of the License, or (at your option) any later version. |
9 | */ | 9 | */ |
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/export.h> | 11 | #include <linux/export.h> |
diff --git a/crypto/hmac.c b/crypto/hmac.c index 72e38c098bb3..92871dc2a63e 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c | |||
@@ -16,6 +16,7 @@ | |||
16 | * | 16 | * |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <crypto/hmac.h> | ||
19 | #include <crypto/internal/hash.h> | 20 | #include <crypto/internal/hash.h> |
20 | #include <crypto/scatterwalk.h> | 21 | #include <crypto/scatterwalk.h> |
21 | #include <linux/err.h> | 22 | #include <linux/err.h> |
@@ -74,8 +75,8 @@ static int hmac_setkey(struct crypto_shash *parent, | |||
74 | memcpy(opad, ipad, bs); | 75 | memcpy(opad, ipad, bs); |
75 | 76 | ||
76 | for (i = 0; i < bs; i++) { | 77 | for (i = 0; i < bs; i++) { |
77 | ipad[i] ^= 0x36; | 78 | ipad[i] ^= HMAC_IPAD_VALUE; |
78 | opad[i] ^= 0x5c; | 79 | opad[i] ^= HMAC_OPAD_VALUE; |
79 | } | 80 | } |
80 | 81 | ||
81 | return crypto_shash_init(shash) ?: | 82 | return crypto_shash_init(shash) ?: |
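The hunk above only replaces the magic 0x36/0x5c with the HMAC_IPAD_VALUE/HMAC_OPAD_VALUE constants from the new <crypto/hmac.h>, so drivers that compute ipad/opad themselves can share them. For context, a stand-alone sketch of the pad derivation those constants feed into, assuming the key has already been hashed down to at most one block as hmac_setkey() guarantees:

    #include <stdint.h>
    #include <string.h>

    #define HMAC_IPAD_VALUE 0x36    /* same values as <crypto/hmac.h> */
    #define HMAC_OPAD_VALUE 0x5c

    /* HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m)); build the two padded
     * keys from a key K' of keylen <= bs bytes, where bs is the hash block size. */
    static void hmac_make_pads(uint8_t *ipad, uint8_t *opad,
                               const uint8_t *key, size_t keylen, size_t bs)
    {
            size_t i;

            memset(ipad, 0, bs);
            memcpy(ipad, key, keylen);      /* zero-padded key */
            memcpy(opad, ipad, bs);

            for (i = 0; i < bs; i++) {
                    ipad[i] ^= HMAC_IPAD_VALUE;
                    opad[i] ^= HMAC_OPAD_VALUE;
            }
    }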
diff --git a/crypto/rng.c b/crypto/rng.c index f46dac5288b9..5e8469244960 100644 --- a/crypto/rng.c +++ b/crypto/rng.c | |||
@@ -33,11 +33,6 @@ struct crypto_rng *crypto_default_rng; | |||
33 | EXPORT_SYMBOL_GPL(crypto_default_rng); | 33 | EXPORT_SYMBOL_GPL(crypto_default_rng); |
34 | static int crypto_default_rng_refcnt; | 34 | static int crypto_default_rng_refcnt; |
35 | 35 | ||
36 | static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm) | ||
37 | { | ||
38 | return container_of(tfm, struct crypto_rng, base); | ||
39 | } | ||
40 | |||
41 | int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) | 36 | int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) |
42 | { | 37 | { |
43 | u8 *buf = NULL; | 38 | u8 *buf = NULL; |
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index 8baab4307f7b..407c64bdcdd9 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c | |||
@@ -120,9 +120,6 @@ static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key, | |||
120 | 120 | ||
121 | /* Find out new modulus size from rsa implementation */ | 121 | /* Find out new modulus size from rsa implementation */ |
122 | err = crypto_akcipher_maxsize(ctx->child); | 122 | err = crypto_akcipher_maxsize(ctx->child); |
123 | if (err < 0) | ||
124 | return err; | ||
125 | |||
126 | if (err > PAGE_SIZE) | 123 | if (err > PAGE_SIZE) |
127 | return -ENOTSUPP; | 124 | return -ENOTSUPP; |
128 | 125 | ||
@@ -144,9 +141,6 @@ static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key, | |||
144 | 141 | ||
145 | /* Find out new modulus size from rsa implementation */ | 142 | /* Find out new modulus size from rsa implementation */ |
146 | err = crypto_akcipher_maxsize(ctx->child); | 143 | err = crypto_akcipher_maxsize(ctx->child); |
147 | if (err < 0) | ||
148 | return err; | ||
149 | |||
150 | if (err > PAGE_SIZE) | 144 | if (err > PAGE_SIZE) |
151 | return -ENOTSUPP; | 145 | return -ENOTSUPP; |
152 | 146 | ||
@@ -154,7 +148,7 @@ static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key, | |||
154 | return 0; | 148 | return 0; |
155 | } | 149 | } |
156 | 150 | ||
157 | static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm) | 151 | static unsigned int pkcs1pad_get_max_size(struct crypto_akcipher *tfm) |
158 | { | 152 | { |
159 | struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); | 153 | struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); |
160 | 154 | ||
@@ -164,7 +158,7 @@ static int pkcs1pad_get_max_size(struct crypto_akcipher *tfm) | |||
164 | * decrypt/verify. | 158 | * decrypt/verify. |
165 | */ | 159 | */ |
166 | 160 | ||
167 | return ctx->key_size ?: -EINVAL; | 161 | return ctx->key_size; |
168 | } | 162 | } |
169 | 163 | ||
170 | static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len, | 164 | static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len, |
@@ -496,7 +490,7 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) | |||
496 | goto done; | 490 | goto done; |
497 | pos++; | 491 | pos++; |
498 | 492 | ||
499 | if (memcmp(out_buf + pos, digest_info->data, digest_info->size)) | 493 | if (crypto_memneq(out_buf + pos, digest_info->data, digest_info->size)) |
500 | goto done; | 494 | goto done; |
501 | 495 | ||
502 | pos += digest_info->size; | 496 | pos += digest_info->size; |
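The memcmp() to crypto_memneq() switch above matters because memcmp() may return as soon as the first byte differs, letting a caller time how much of the expected DigestInfo prefix it guessed correctly; crypto_memneq() inspects every byte regardless. A small kernel-style sketch of the intended use (digest_info_matches() is illustrative, not part of the patch):

    #include <crypto/algapi.h>      /* crypto_memneq() */
    #include <linux/types.h>

    static bool digest_info_matches(const u8 *buf, const u8 *expect, unsigned int len)
    {
            /* constant-time: runtime does not depend on where the mismatch is */
            return !crypto_memneq(buf, expect, len);
    }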
diff --git a/crypto/rsa.c b/crypto/rsa.c index 4c280b6a3ea9..b067f3a93880 100644 --- a/crypto/rsa.c +++ b/crypto/rsa.c | |||
@@ -337,11 +337,11 @@ err: | |||
337 | return -ENOMEM; | 337 | return -ENOMEM; |
338 | } | 338 | } |
339 | 339 | ||
340 | static int rsa_max_size(struct crypto_akcipher *tfm) | 340 | static unsigned int rsa_max_size(struct crypto_akcipher *tfm) |
341 | { | 341 | { |
342 | struct rsa_mpi_key *pkey = akcipher_tfm_ctx(tfm); | 342 | struct rsa_mpi_key *pkey = akcipher_tfm_ctx(tfm); |
343 | 343 | ||
344 | return pkey->n ? mpi_get_size(pkey->n) : -EINVAL; | 344 | return mpi_get_size(pkey->n); |
345 | } | 345 | } |
346 | 346 | ||
347 | static void rsa_exit_tfm(struct crypto_akcipher *tfm) | 347 | static void rsa_exit_tfm(struct crypto_akcipher *tfm) |
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 9a11f3c2bf98..0dd6a432d6ca 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c | |||
@@ -138,8 +138,6 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen) | |||
138 | int ret = 0; | 138 | int ret = 0; |
139 | int i; | 139 | int i; |
140 | 140 | ||
141 | local_irq_disable(); | ||
142 | |||
143 | /* Warm-up run. */ | 141 | /* Warm-up run. */ |
144 | for (i = 0; i < 4; i++) { | 142 | for (i = 0; i < 4; i++) { |
145 | if (enc) | 143 | if (enc) |
@@ -169,8 +167,6 @@ static int test_aead_cycles(struct aead_request *req, int enc, int blen) | |||
169 | } | 167 | } |
170 | 168 | ||
171 | out: | 169 | out: |
172 | local_irq_enable(); | ||
173 | |||
174 | if (ret == 0) | 170 | if (ret == 0) |
175 | printk("1 operation in %lu cycles (%d bytes)\n", | 171 | printk("1 operation in %lu cycles (%d bytes)\n", |
176 | (cycles + 4) / 8, blen); | 172 | (cycles + 4) / 8, blen); |
diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 6f5f3ed8376c..7125ba3880af 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c | |||
@@ -218,14 +218,14 @@ static int ahash_partial_update(struct ahash_request **preq, | |||
218 | crypto_ahash_reqtfm(req)); | 218 | crypto_ahash_reqtfm(req)); |
219 | state = kmalloc(statesize + sizeof(guard), GFP_KERNEL); | 219 | state = kmalloc(statesize + sizeof(guard), GFP_KERNEL); |
220 | if (!state) { | 220 | if (!state) { |
221 | pr_err("alt: hash: Failed to alloc state for %s\n", algo); | 221 | pr_err("alg: hash: Failed to alloc state for %s\n", algo); |
222 | goto out_nostate; | 222 | goto out_nostate; |
223 | } | 223 | } |
224 | memcpy(state + statesize, guard, sizeof(guard)); | 224 | memcpy(state + statesize, guard, sizeof(guard)); |
225 | ret = crypto_ahash_export(req, state); | 225 | ret = crypto_ahash_export(req, state); |
226 | WARN_ON(memcmp(state + statesize, guard, sizeof(guard))); | 226 | WARN_ON(memcmp(state + statesize, guard, sizeof(guard))); |
227 | if (ret) { | 227 | if (ret) { |
228 | pr_err("alt: hash: Failed to export() for %s\n", algo); | 228 | pr_err("alg: hash: Failed to export() for %s\n", algo); |
229 | goto out; | 229 | goto out; |
230 | } | 230 | } |
231 | ahash_request_free(req); | 231 | ahash_request_free(req); |
@@ -344,19 +344,19 @@ static int __test_hash(struct crypto_ahash *tfm, | |||
344 | } else { | 344 | } else { |
345 | ret = wait_async_op(&tresult, crypto_ahash_init(req)); | 345 | ret = wait_async_op(&tresult, crypto_ahash_init(req)); |
346 | if (ret) { | 346 | if (ret) { |
347 | pr_err("alt: hash: init failed on test %d " | 347 | pr_err("alg: hash: init failed on test %d " |
348 | "for %s: ret=%d\n", j, algo, -ret); | 348 | "for %s: ret=%d\n", j, algo, -ret); |
349 | goto out; | 349 | goto out; |
350 | } | 350 | } |
351 | ret = wait_async_op(&tresult, crypto_ahash_update(req)); | 351 | ret = wait_async_op(&tresult, crypto_ahash_update(req)); |
352 | if (ret) { | 352 | if (ret) { |
353 | pr_err("alt: hash: update failed on test %d " | 353 | pr_err("alg: hash: update failed on test %d " |
354 | "for %s: ret=%d\n", j, algo, -ret); | 354 | "for %s: ret=%d\n", j, algo, -ret); |
355 | goto out; | 355 | goto out; |
356 | } | 356 | } |
357 | ret = wait_async_op(&tresult, crypto_ahash_final(req)); | 357 | ret = wait_async_op(&tresult, crypto_ahash_final(req)); |
358 | if (ret) { | 358 | if (ret) { |
359 | pr_err("alt: hash: final failed on test %d " | 359 | pr_err("alg: hash: final failed on test %d " |
360 | "for %s: ret=%d\n", j, algo, -ret); | 360 | "for %s: ret=%d\n", j, algo, -ret); |
361 | goto out; | 361 | goto out; |
362 | } | 362 | } |
@@ -488,13 +488,13 @@ static int __test_hash(struct crypto_ahash *tfm, | |||
488 | ahash_request_set_crypt(req, sg, result, template[i].tap[0]); | 488 | ahash_request_set_crypt(req, sg, result, template[i].tap[0]); |
489 | ret = wait_async_op(&tresult, crypto_ahash_init(req)); | 489 | ret = wait_async_op(&tresult, crypto_ahash_init(req)); |
490 | if (ret) { | 490 | if (ret) { |
491 | pr_err("alt: hash: init failed on test %d for %s: ret=%d\n", | 491 | pr_err("alg: hash: init failed on test %d for %s: ret=%d\n", |
492 | j, algo, -ret); | 492 | j, algo, -ret); |
493 | goto out; | 493 | goto out; |
494 | } | 494 | } |
495 | ret = wait_async_op(&tresult, crypto_ahash_update(req)); | 495 | ret = wait_async_op(&tresult, crypto_ahash_update(req)); |
496 | if (ret) { | 496 | if (ret) { |
497 | pr_err("alt: hash: update failed on test %d for %s: ret=%d\n", | 497 | pr_err("alg: hash: update failed on test %d for %s: ret=%d\n", |
498 | j, algo, -ret); | 498 | j, algo, -ret); |
499 | goto out; | 499 | goto out; |
500 | } | 500 | } |
@@ -505,7 +505,7 @@ static int __test_hash(struct crypto_ahash *tfm, | |||
505 | hash_buff, k, temp, &sg[0], algo, result, | 505 | hash_buff, k, temp, &sg[0], algo, result, |
506 | &tresult); | 506 | &tresult); |
507 | if (ret) { | 507 | if (ret) { |
508 | pr_err("hash: partial update failed on test %d for %s: ret=%d\n", | 508 | pr_err("alg: hash: partial update failed on test %d for %s: ret=%d\n", |
509 | j, algo, -ret); | 509 | j, algo, -ret); |
510 | goto out_noreq; | 510 | goto out_noreq; |
511 | } | 511 | } |
@@ -513,7 +513,7 @@ static int __test_hash(struct crypto_ahash *tfm, | |||
513 | } | 513 | } |
514 | ret = wait_async_op(&tresult, crypto_ahash_final(req)); | 514 | ret = wait_async_op(&tresult, crypto_ahash_final(req)); |
515 | if (ret) { | 515 | if (ret) { |
516 | pr_err("alt: hash: final failed on test %d for %s: ret=%d\n", | 516 | pr_err("alg: hash: final failed on test %d for %s: ret=%d\n", |
517 | j, algo, -ret); | 517 | j, algo, -ret); |
518 | goto out; | 518 | goto out; |
519 | } | 519 | } |
@@ -1997,6 +1997,9 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, | |||
1997 | struct kpp_request *req; | 1997 | struct kpp_request *req; |
1998 | void *input_buf = NULL; | 1998 | void *input_buf = NULL; |
1999 | void *output_buf = NULL; | 1999 | void *output_buf = NULL; |
2000 | void *a_public = NULL; | ||
2001 | void *a_ss = NULL; | ||
2002 | void *shared_secret = NULL; | ||
2000 | struct tcrypt_result result; | 2003 | struct tcrypt_result result; |
2001 | unsigned int out_len_max; | 2004 | unsigned int out_len_max; |
2002 | int err = -ENOMEM; | 2005 | int err = -ENOMEM; |
@@ -2026,20 +2029,31 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, | |||
2026 | kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 2029 | kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
2027 | tcrypt_complete, &result); | 2030 | tcrypt_complete, &result); |
2028 | 2031 | ||
2029 | /* Compute public key */ | 2032 | /* Compute party A's public key */ |
2030 | err = wait_async_op(&result, crypto_kpp_generate_public_key(req)); | 2033 | err = wait_async_op(&result, crypto_kpp_generate_public_key(req)); |
2031 | if (err) { | 2034 | if (err) { |
2032 | pr_err("alg: %s: generate public key test failed. err %d\n", | 2035 | pr_err("alg: %s: Party A: generate public key test failed. err %d\n", |
2033 | alg, err); | 2036 | alg, err); |
2034 | goto free_output; | 2037 | goto free_output; |
2035 | } | 2038 | } |
2036 | /* Verify calculated public key */ | 2039 | |
2037 | if (memcmp(vec->expected_a_public, sg_virt(req->dst), | 2040 | if (vec->genkey) { |
2038 | vec->expected_a_public_size)) { | 2041 | /* Save party A's public key */ |
2039 | pr_err("alg: %s: generate public key test failed. Invalid output\n", | 2042 | a_public = kzalloc(out_len_max, GFP_KERNEL); |
2040 | alg); | 2043 | if (!a_public) { |
2041 | err = -EINVAL; | 2044 | err = -ENOMEM; |
2042 | goto free_output; | 2045 | goto free_output; |
2046 | } | ||
2047 | memcpy(a_public, sg_virt(req->dst), out_len_max); | ||
2048 | } else { | ||
2049 | /* Verify calculated public key */ | ||
2050 | if (memcmp(vec->expected_a_public, sg_virt(req->dst), | ||
2051 | vec->expected_a_public_size)) { | ||
2052 | pr_err("alg: %s: Party A: generate public key test failed. Invalid output\n", | ||
2053 | alg); | ||
2054 | err = -EINVAL; | ||
2055 | goto free_output; | ||
2056 | } | ||
2043 | } | 2057 | } |
2044 | 2058 | ||
2045 | /* Calculate shared secret key by using counter part (b) public key. */ | 2059 | /* Calculate shared secret key by using counter part (b) public key. */ |
@@ -2058,15 +2072,53 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, | |||
2058 | tcrypt_complete, &result); | 2072 | tcrypt_complete, &result); |
2059 | err = wait_async_op(&result, crypto_kpp_compute_shared_secret(req)); | 2073 | err = wait_async_op(&result, crypto_kpp_compute_shared_secret(req)); |
2060 | if (err) { | 2074 | if (err) { |
2061 | pr_err("alg: %s: compute shard secret test failed. err %d\n", | 2075 | pr_err("alg: %s: Party A: compute shared secret test failed. err %d\n", |
2062 | alg, err); | 2076 | alg, err); |
2063 | goto free_all; | 2077 | goto free_all; |
2064 | } | 2078 | } |
2079 | |||
2080 | if (vec->genkey) { | ||
2081 | /* Save the shared secret obtained by party A */ | ||
2082 | a_ss = kzalloc(vec->expected_ss_size, GFP_KERNEL); | ||
2083 | if (!a_ss) { | ||
2084 | err = -ENOMEM; | ||
2085 | goto free_all; | ||
2086 | } | ||
2087 | memcpy(a_ss, sg_virt(req->dst), vec->expected_ss_size); | ||
2088 | |||
2089 | /* | ||
2090 | * Calculate party B's shared secret by using party A's | ||
2091 | * public key. | ||
2092 | */ | ||
2093 | err = crypto_kpp_set_secret(tfm, vec->b_secret, | ||
2094 | vec->b_secret_size); | ||
2095 | if (err < 0) | ||
2096 | goto free_all; | ||
2097 | |||
2098 | sg_init_one(&src, a_public, vec->expected_a_public_size); | ||
2099 | sg_init_one(&dst, output_buf, out_len_max); | ||
2100 | kpp_request_set_input(req, &src, vec->expected_a_public_size); | ||
2101 | kpp_request_set_output(req, &dst, out_len_max); | ||
2102 | kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
2103 | tcrypt_complete, &result); | ||
2104 | err = wait_async_op(&result, | ||
2105 | crypto_kpp_compute_shared_secret(req)); | ||
2106 | if (err) { | ||
2107 | pr_err("alg: %s: Party B: compute shared secret failed. err %d\n", | ||
2108 | alg, err); | ||
2109 | goto free_all; | ||
2110 | } | ||
2111 | |||
2112 | shared_secret = a_ss; | ||
2113 | } else { | ||
2114 | shared_secret = (void *)vec->expected_ss; | ||
2115 | } | ||
2116 | |||
2065 | /* | 2117 | /* |
2066 | * verify shared secret from which the user will derive | 2118 | * verify shared secret from which the user will derive |
2067 | * secret key by executing whatever hash it has chosen | 2119 | * secret key by executing whatever hash it has chosen |
2068 | */ | 2120 | */ |
2069 | if (memcmp(vec->expected_ss, sg_virt(req->dst), | 2121 | if (memcmp(shared_secret, sg_virt(req->dst), |
2070 | vec->expected_ss_size)) { | 2122 | vec->expected_ss_size)) { |
2071 | pr_err("alg: %s: compute shared secret test failed. Invalid output\n", | 2123 | pr_err("alg: %s: compute shared secret test failed. Invalid output\n", |
2072 | alg); | 2124 | alg); |
@@ -2074,8 +2126,10 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, | |||
2074 | } | 2126 | } |
2075 | 2127 | ||
2076 | free_all: | 2128 | free_all: |
2129 | kfree(a_ss); | ||
2077 | kfree(input_buf); | 2130 | kfree(input_buf); |
2078 | free_output: | 2131 | free_output: |
2132 | kfree(a_public); | ||
2079 | kfree(output_buf); | 2133 | kfree(output_buf); |
2080 | free_req: | 2134 | free_req: |
2081 | kpp_request_free(req); | 2135 | kpp_request_free(req); |
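A note on the genkey branch added above: it turns the vector into a genuine two-party exchange instead of a comparison against fixed expected values. A hedged outline of that flow follows; the wait_async_op()/tcrypt_result completion plumbing and scatterlist setup from the hunk are omitted, so this is a sketch, not drop-in code.

/* Party A: a zero key_size in vec->secret asks the implementation to
 * generate the private key, then A's public key and shared secret follow. */
crypto_kpp_set_secret(tfm, vec->secret, vec->secret_size);
crypto_kpp_generate_public_key(req);		/* req->dst: A's public key */
/* ... point req->src at vec->b_public ... */
crypto_kpp_compute_shared_secret(req);		/* req->dst: A's shared secret */

/* Party B: use the supplied b_secret together with A's saved public key. */
crypto_kpp_set_secret(tfm, vec->b_secret, vec->b_secret_size);
/* ... point req->src at the saved a_public ... */
crypto_kpp_compute_shared_secret(req);		/* req->dst: B's shared secret */

/* The test passes when both sides derived the same shared secret. */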
@@ -2168,8 +2222,11 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, | |||
2168 | akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 2222 | akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
2169 | tcrypt_complete, &result); | 2223 | tcrypt_complete, &result); |
2170 | 2224 | ||
2171 | /* Run RSA encrypt - c = m^e mod n;*/ | 2225 | err = wait_async_op(&result, vecs->siggen_sigver_test ? |
2172 | err = wait_async_op(&result, crypto_akcipher_encrypt(req)); | 2226 | /* Run asymmetric signature generation */ |
2227 | crypto_akcipher_sign(req) : | ||
2228 | /* Run asymmetric encrypt */ | ||
2229 | crypto_akcipher_encrypt(req)); | ||
2173 | if (err) { | 2230 | if (err) { |
2174 | pr_err("alg: akcipher: encrypt test failed. err %d\n", err); | 2231 | pr_err("alg: akcipher: encrypt test failed. err %d\n", err); |
2175 | goto free_all; | 2232 | goto free_all; |
@@ -2207,8 +2264,11 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, | |||
2207 | init_completion(&result.completion); | 2264 | init_completion(&result.completion); |
2208 | akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max); | 2265 | akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max); |
2209 | 2266 | ||
2210 | /* Run RSA decrypt - m = c^d mod n;*/ | 2267 | err = wait_async_op(&result, vecs->siggen_sigver_test ? |
2211 | err = wait_async_op(&result, crypto_akcipher_decrypt(req)); | 2268 | /* Run asymmetric signature verification */ |
2269 | crypto_akcipher_verify(req) : | ||
2270 | /* Run asymmetric decrypt */ | ||
2271 | crypto_akcipher_decrypt(req)); | ||
2212 | if (err) { | 2272 | if (err) { |
2213 | pr_err("alg: akcipher: decrypt test failed. err %d\n", err); | 2273 | pr_err("alg: akcipher: decrypt test failed. err %d\n", err); |
2214 | goto free_all; | 2274 | goto free_all; |
@@ -2306,6 +2366,7 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
2306 | }, { | 2366 | }, { |
2307 | .alg = "authenc(hmac(sha1),cbc(aes))", | 2367 | .alg = "authenc(hmac(sha1),cbc(aes))", |
2308 | .test = alg_test_aead, | 2368 | .test = alg_test_aead, |
2369 | .fips_allowed = 1, | ||
2309 | .suite = { | 2370 | .suite = { |
2310 | .aead = { | 2371 | .aead = { |
2311 | .enc = __VECS(hmac_sha1_aes_cbc_enc_tv_temp) | 2372 | .enc = __VECS(hmac_sha1_aes_cbc_enc_tv_temp) |
@@ -3255,6 +3316,25 @@ static const struct alg_test_desc alg_test_descs[] = { | |||
3255 | } | 3316 | } |
3256 | } | 3317 | } |
3257 | }, { | 3318 | }, { |
3319 | .alg = "pkcs1pad(rsa,sha224)", | ||
3320 | .test = alg_test_null, | ||
3321 | .fips_allowed = 1, | ||
3322 | }, { | ||
3323 | .alg = "pkcs1pad(rsa,sha256)", | ||
3324 | .test = alg_test_akcipher, | ||
3325 | .fips_allowed = 1, | ||
3326 | .suite = { | ||
3327 | .akcipher = __VECS(pkcs1pad_rsa_tv_template) | ||
3328 | } | ||
3329 | }, { | ||
3330 | .alg = "pkcs1pad(rsa,sha384)", | ||
3331 | .test = alg_test_null, | ||
3332 | .fips_allowed = 1, | ||
3333 | }, { | ||
3334 | .alg = "pkcs1pad(rsa,sha512)", | ||
3335 | .test = alg_test_null, | ||
3336 | .fips_allowed = 1, | ||
3337 | }, { | ||
3258 | .alg = "poly1305", | 3338 | .alg = "poly1305", |
3259 | .test = alg_test_hash, | 3339 | .test = alg_test_hash, |
3260 | .suite = { | 3340 | .suite = { |
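A note on the new alg_test_descs entries: only "pkcs1pad(rsa,sha256)" carries real vectors; the sha224/384/512 variants use alg_test_null, which marks the name as known (and here FIPS-allowed) without running any vectors. A hedged usage sketch of what these entries enable, with error handling trimmed:

/* Instantiate the padded RSA template by name; being listed (and marked
 * fips_allowed) in alg_test_descs is what lets this succeed in FIPS mode. */
struct crypto_akcipher *tfm =
	crypto_alloc_akcipher("pkcs1pad(rsa,sha256)", 0, 0);

if (IS_ERR(tfm))
	return PTR_ERR(tfm);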
diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 429357339dcc..6ceb0e2758bb 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h | |||
@@ -133,17 +133,21 @@ struct akcipher_testvec { | |||
133 | unsigned int m_size; | 133 | unsigned int m_size; |
134 | unsigned int c_size; | 134 | unsigned int c_size; |
135 | bool public_key_vec; | 135 | bool public_key_vec; |
136 | bool siggen_sigver_test; | ||
136 | }; | 137 | }; |
137 | 138 | ||
138 | struct kpp_testvec { | 139 | struct kpp_testvec { |
139 | const unsigned char *secret; | 140 | const unsigned char *secret; |
141 | const unsigned char *b_secret; | ||
140 | const unsigned char *b_public; | 142 | const unsigned char *b_public; |
141 | const unsigned char *expected_a_public; | 143 | const unsigned char *expected_a_public; |
142 | const unsigned char *expected_ss; | 144 | const unsigned char *expected_ss; |
143 | unsigned short secret_size; | 145 | unsigned short secret_size; |
146 | unsigned short b_secret_size; | ||
144 | unsigned short b_public_size; | 147 | unsigned short b_public_size; |
145 | unsigned short expected_a_public_size; | 148 | unsigned short expected_a_public_size; |
146 | unsigned short expected_ss_size; | 149 | unsigned short expected_ss_size; |
150 | bool genkey; | ||
147 | }; | 151 | }; |
148 | 152 | ||
149 | static const char zeroed_string[48]; | 153 | static const char zeroed_string[48]; |
@@ -538,6 +542,101 @@ static const struct akcipher_testvec rsa_tv_template[] = { | |||
538 | } | 542 | } |
539 | }; | 543 | }; |
540 | 544 | ||
545 | /* | ||
546 | * PKCS#1 RSA test vectors. Obtained from CAVS testing. | ||
547 | */ | ||
548 | static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = { | ||
549 | { | ||
550 | .key = | ||
551 | "\x30\x82\x03\x1f\x02\x01\x10\x02\x82\x01\x01\x00\xd7\x1e\x77\x82" | ||
552 | "\x8c\x92\x31\xe7\x69\x02\xa2\xd5\x5c\x78\xde\xa2\x0c\x8f\xfe\x28" | ||
553 | "\x59\x31\xdf\x40\x9c\x60\x61\x06\xb9\x2f\x62\x40\x80\x76\xcb\x67" | ||
554 | "\x4a\xb5\x59\x56\x69\x17\x07\xfa\xf9\x4c\xbd\x6c\x37\x7a\x46\x7d" | ||
555 | "\x70\xa7\x67\x22\xb3\x4d\x7a\x94\xc3\xba\x4b\x7c\x4b\xa9\x32\x7c" | ||
556 | "\xb7\x38\x95\x45\x64\xa4\x05\xa8\x9f\x12\x7c\x4e\xc6\xc8\x2d\x40" | ||
557 | "\x06\x30\xf4\x60\xa6\x91\xbb\x9b\xca\x04\x79\x11\x13\x75\xf0\xae" | ||
558 | "\xd3\x51\x89\xc5\x74\xb9\xaa\x3f\xb6\x83\xe4\x78\x6b\xcd\xf9\x5c" | ||
559 | "\x4c\x85\xea\x52\x3b\x51\x93\xfc\x14\x6b\x33\x5d\x30\x70\xfa\x50" | ||
560 | "\x1b\x1b\x38\x81\x13\x8d\xf7\xa5\x0c\xc0\x8e\xf9\x63\x52\x18\x4e" | ||
561 | "\xa9\xf9\xf8\x5c\x5d\xcd\x7a\x0d\xd4\x8e\x7b\xee\x91\x7b\xad\x7d" | ||
562 | "\xb4\x92\xd5\xab\x16\x3b\x0a\x8a\xce\x8e\xde\x47\x1a\x17\x01\x86" | ||
563 | "\x7b\xab\x99\xf1\x4b\x0c\x3a\x0d\x82\x47\xc1\x91\x8c\xbb\x2e\x22" | ||
564 | "\x9e\x49\x63\x6e\x02\xc1\xc9\x3a\x9b\xa5\x22\x1b\x07\x95\xd6\x10" | ||
565 | "\x02\x50\xfd\xfd\xd1\x9b\xbe\xab\xc2\xc0\x74\xd7\xec\x00\xfb\x11" | ||
566 | "\x71\xcb\x7a\xdc\x81\x79\x9f\x86\x68\x46\x63\x82\x4d\xb7\xf1\xe6" | ||
567 | "\x16\x6f\x42\x63\xf4\x94\xa0\xca\x33\xcc\x75\x13\x02\x82\x01\x00" | ||
568 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | ||
569 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | ||
570 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | ||
571 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | ||
572 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | ||
573 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | ||
574 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | ||
575 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | ||
576 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | ||
577 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | ||
578 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | ||
579 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | ||
580 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | ||
581 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | ||
582 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | ||
583 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01" | ||
584 | "\x02\x82\x01\x00\x62\xb5\x60\x31\x4f\x3f\x66\x16\xc1\x60\xac\x47" | ||
585 | "\x2a\xff\x6b\x69\x00\x4a\xb2\x5c\xe1\x50\xb9\x18\x74\xa8\xe4\xdc" | ||
586 | "\xa8\xec\xcd\x30\xbb\xc1\xc6\xe3\xc6\xac\x20\x2a\x3e\x5e\x8b\x12" | ||
587 | "\xe6\x82\x08\x09\x38\x0b\xab\x7c\xb3\xcc\x9c\xce\x97\x67\xdd\xef" | ||
588 | "\x95\x40\x4e\x92\xe2\x44\xe9\x1d\xc1\x14\xfd\xa9\xb1\xdc\x71\x9c" | ||
589 | "\x46\x21\xbd\x58\x88\x6e\x22\x15\x56\xc1\xef\xe0\xc9\x8d\xe5\x80" | ||
590 | "\x3e\xda\x7e\x93\x0f\x52\xf6\xf5\xc1\x91\x90\x9e\x42\x49\x4f\x8d" | ||
591 | "\x9c\xba\x38\x83\xe9\x33\xc2\x50\x4f\xec\xc2\xf0\xa8\xb7\x6e\x28" | ||
592 | "\x25\x56\x6b\x62\x67\xfe\x08\xf1\x56\xe5\x6f\x0e\x99\xf1\xe5\x95" | ||
593 | "\x7b\xef\xeb\x0a\x2c\x92\x97\x57\x23\x33\x36\x07\xdd\xfb\xae\xf1" | ||
594 | "\xb1\xd8\x33\xb7\x96\x71\x42\x36\xc5\xa4\xa9\x19\x4b\x1b\x52\x4c" | ||
595 | "\x50\x69\x91\xf0\x0e\xfa\x80\x37\x4b\xb5\xd0\x2f\xb7\x44\x0d\xd4" | ||
596 | "\xf8\x39\x8d\xab\x71\x67\x59\x05\x88\x3d\xeb\x48\x48\x33\x88\x4e" | ||
597 | "\xfe\xf8\x27\x1b\xd6\x55\x60\x5e\x48\xb7\x6d\x9a\xa8\x37\xf9\x7a" | ||
598 | "\xde\x1b\xcd\x5d\x1a\x30\xd4\xe9\x9e\x5b\x3c\x15\xf8\x9c\x1f\xda" | ||
599 | "\xd1\x86\x48\x55\xce\x83\xee\x8e\x51\xc7\xde\x32\x12\x47\x7d\x46" | ||
600 | "\xb8\x35\xdf\x41\x02\x01\x30\x02\x01\x30\x02\x01\x30\x02\x01\x30" | ||
601 | "\x02\x01\x30", | ||
602 | .key_len = 804, | ||
603 | /* | ||
604 | * m is SHA256 hash of following message: | ||
605 | * "\x49\x41\xbe\x0a\x0c\xc9\xf6\x35\x51\xe4\x27\x56\x13\x71\x4b\xd0" | ||
606 | * "\x36\x92\x84\x89\x1b\xf8\x56\x4a\x72\x61\x14\x69\x4f\x5e\x98\xa5" | ||
607 | * "\x80\x5a\x37\x51\x1f\xd8\xf5\xb5\x63\xfc\xf4\xb1\xbb\x4d\x33\xa3" | ||
608 | * "\x1e\xb9\x75\x8b\x9c\xda\x7e\x6d\x3a\x77\x85\xf7\xfc\x4e\xe7\x64" | ||
609 | * "\x43\x10\x19\xa0\x59\xae\xe0\xad\x4b\xd3\xc4\x45\xf7\xb1\xc2\xc1" | ||
610 | * "\x65\x01\x41\x39\x5b\x45\x47\xed\x2b\x51\xed\xe3\xd0\x09\x10\xd2" | ||
611 | * "\x39\x6c\x4a\x3f\xe5\xd2\x20\xe6\xb0\x71\x7d\x5b\xed\x26\x60\xf1" | ||
612 | * "\xb4\x73\xd1\xdb\x7d\xc4\x19\x91\xee\xf6\x32\x76\xf2\x19\x7d\xb7" | ||
613 | */ | ||
614 | .m = | ||
615 | "\x3e\xc8\xa1\x26\x20\x54\x44\x52\x48\x0d\xe5\x66\xf3\xb3\xf5\x04" | ||
616 | "\xbe\x10\xa8\x48\x94\x22\x2d\xdd\xba\x7a\xb4\x76\x8d\x79\x98\x89", | ||
617 | .m_size = 32, | ||
618 | .c = | ||
619 | "\xc7\xa3\x98\xeb\x43\xd1\x08\xc2\x3d\x78\x45\x04\x70\xc9\x01\xee" | ||
620 | "\xf8\x85\x37\x7c\x0b\xf9\x19\x70\x5c\x45\x7b\x2f\x3a\x0b\xb7\x8b" | ||
621 | "\xc4\x0d\x7b\x3a\x64\x0b\x0f\xdb\x78\xa9\x0b\xfd\x8d\x82\xa4\x86" | ||
622 | "\x39\xbf\x21\xb8\x84\xc4\xce\x9f\xc2\xe8\xb6\x61\x46\x17\xb9\x4e" | ||
623 | "\x0b\x57\x05\xb4\x4f\xf9\x9c\x93\x2d\x9b\xd5\x48\x1d\x80\x12\xef" | ||
624 | "\x3a\x77\x7f\xbc\xb5\x8e\x2b\x6b\x7c\xfc\x9f\x8c\x9d\xa2\xc4\x85" | ||
625 | "\xb0\x87\xe9\x17\x9b\xb6\x23\x62\xd2\xa9\x9f\x57\xe8\xf7\x04\x45" | ||
626 | "\x24\x3a\x45\xeb\xeb\x6a\x08\x8e\xaf\xc8\xa0\x84\xbc\x5d\x13\x38" | ||
627 | "\xf5\x17\x8c\xa3\x96\x9b\xa9\x38\x8d\xf0\x35\xad\x32\x8a\x72\x5b" | ||
628 | "\xdf\x21\xab\x4b\x0e\xa8\x29\xbb\x61\x54\xbf\x05\xdb\x84\x84\xde" | ||
629 | "\xdd\x16\x36\x31\xda\xf3\x42\x6d\x7a\x90\x22\x9b\x11\x29\xa6\xf8" | ||
630 | "\x30\x61\xda\xd3\x8b\x54\x1e\x42\xd1\x47\x1d\x6f\xd1\xcd\x42\x0b" | ||
631 | "\xd1\xe4\x15\x85\x7e\x08\xd6\x59\x64\x4c\x01\x34\x91\x92\x26\xe8" | ||
632 | "\xb0\x25\x8c\xf8\xf4\xfa\x8b\xc9\x31\x33\x76\x72\xfb\x64\x92\x9f" | ||
633 | "\xda\x62\x8d\xe1\x2a\x71\x91\x43\x40\x61\x3c\x5a\xbe\x86\xfc\x5b" | ||
634 | "\xe6\xf9\xa9\x16\x31\x1f\xaf\x25\x6d\xc2\x4a\x23\x6e\x63\x02\xa2", | ||
635 | .c_size = 256, | ||
636 | .siggen_sigver_test = true, | ||
637 | } | ||
638 | }; | ||
639 | |||
541 | static const struct kpp_testvec dh_tv_template[] = { | 640 | static const struct kpp_testvec dh_tv_template[] = { |
542 | { | 641 | { |
543 | .secret = | 642 | .secret = |
@@ -840,6 +939,50 @@ static const struct kpp_testvec ecdh_tv_template[] = { | |||
840 | .b_public_size = 64, | 939 | .b_public_size = 64, |
841 | .expected_a_public_size = 64, | 940 | .expected_a_public_size = 64, |
842 | .expected_ss_size = 32 | 941 | .expected_ss_size = 32 |
942 | }, { | ||
943 | .secret = | ||
944 | #ifdef __LITTLE_ENDIAN | ||
945 | "\x02\x00" /* type */ | ||
946 | "\x08\x00" /* len */ | ||
947 | "\x02\x00" /* curve_id */ | ||
948 | "\x00\x00", /* key_size */ | ||
949 | #else | ||
950 | "\x00\x02" /* type */ | ||
951 | "\x00\x08" /* len */ | ||
952 | "\x00\x02" /* curve_id */ | ||
953 | "\x00\x00", /* key_size */ | ||
954 | #endif | ||
955 | .b_secret = | ||
956 | #ifdef __LITTLE_ENDIAN | ||
957 | "\x02\x00" /* type */ | ||
958 | "\x28\x00" /* len */ | ||
959 | "\x02\x00" /* curve_id */ | ||
960 | "\x20\x00" /* key_size */ | ||
961 | #else | ||
962 | "\x00\x02" /* type */ | ||
963 | "\x00\x28" /* len */ | ||
964 | "\x00\x02" /* curve_id */ | ||
965 | "\x00\x20" /* key_size */ | ||
966 | #endif | ||
967 | "\x24\xd1\x21\xeb\xe5\xcf\x2d\x83" | ||
968 | "\xf6\x62\x1b\x6e\x43\x84\x3a\xa3" | ||
969 | "\x8b\xe0\x86\xc3\x20\x19\xda\x92" | ||
970 | "\x50\x53\x03\xe1\xc0\xea\xb8\x82", | ||
971 | .b_public = | ||
972 | "\x1a\x7f\xeb\x52\x00\xbd\x3c\x31" | ||
973 | "\x7d\xb6\x70\xc1\x86\xa6\xc7\xc4" | ||
974 | "\x3b\xc5\x5f\x6c\x6f\x58\x3c\xf5" | ||
975 | "\xb6\x63\x82\x77\x33\x24\xa1\x5f" | ||
976 | "\x6a\xca\x43\x6f\xf7\x7e\xff\x02" | ||
977 | "\x37\x08\xcc\x40\x5e\x7a\xfd\x6a" | ||
978 | "\x6a\x02\x6e\x41\x87\x68\x38\x77" | ||
979 | "\xfa\xa9\x44\x43\x2d\xef\x09\xdf", | ||
980 | .secret_size = 8, | ||
981 | .b_secret_size = 40, | ||
982 | .b_public_size = 64, | ||
983 | .expected_a_public_size = 64, | ||
984 | .expected_ss_size = 32, | ||
985 | .genkey = true, | ||
843 | } | 986 | } |
844 | }; | 987 | }; |
845 | 988 | ||
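A note on the byte layout of .secret/.b_secret in the new vector above: it is the packed format produced by crypto_ecdh_encode_key(), a struct kpp_secret header (type, total length) followed by curve_id, key_size and the key bytes. Below is a hedged sketch of how a caller would build the party-A blob, whose zero key_size leaves private-key generation to the implementation; buffer handling is illustrative only.

#include <crypto/ecdh.h>
#include <crypto/kpp.h>

struct ecdh p = {
	.curve_id = ECC_CURVE_NIST_P256,	/* matches the 0x0002 curve_id above */
	.key	  = NULL,			/* no key material supplied ... */
	.key_size = 0,				/* ... so key_size encodes as 0 */
};
unsigned int len = crypto_ecdh_key_len(&p);
char *buf = kmalloc(len, GFP_KERNEL);		/* NULL check omitted here */

crypto_ecdh_encode_key(buf, len, &p);
crypto_kpp_set_secret(tfm, buf, len);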
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c index df8eb54fd5a3..8da7bcf54105 100644 --- a/drivers/char/hw_random/mtk-rng.c +++ b/drivers/char/hw_random/mtk-rng.c | |||
@@ -25,6 +25,10 @@ | |||
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/of.h> | 26 | #include <linux/of.h> |
27 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
28 | #include <linux/pm_runtime.h> | ||
29 | |||
30 | /* Runtime PM autosuspend timeout: */ | ||
31 | #define RNG_AUTOSUSPEND_TIMEOUT 100 | ||
28 | 32 | ||
29 | #define USEC_POLL 2 | 33 | #define USEC_POLL 2 |
30 | #define TIMEOUT_POLL 20 | 34 | #define TIMEOUT_POLL 20 |
@@ -90,6 +94,8 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) | |||
90 | struct mtk_rng *priv = to_mtk_rng(rng); | 94 | struct mtk_rng *priv = to_mtk_rng(rng); |
91 | int retval = 0; | 95 | int retval = 0; |
92 | 96 | ||
97 | pm_runtime_get_sync((struct device *)priv->rng.priv); | ||
98 | |||
93 | while (max >= sizeof(u32)) { | 99 | while (max >= sizeof(u32)) { |
94 | if (!mtk_rng_wait_ready(rng, wait)) | 100 | if (!mtk_rng_wait_ready(rng, wait)) |
95 | break; | 101 | break; |
@@ -100,6 +106,9 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) | |||
100 | max -= sizeof(u32); | 106 | max -= sizeof(u32); |
101 | } | 107 | } |
102 | 108 | ||
109 | pm_runtime_mark_last_busy((struct device *)priv->rng.priv); | ||
110 | pm_runtime_put_sync_autosuspend((struct device *)priv->rng.priv); | ||
111 | |||
103 | return retval || !wait ? retval : -EIO; | 112 | return retval || !wait ? retval : -EIO; |
104 | } | 113 | } |
105 | 114 | ||
@@ -120,9 +129,12 @@ static int mtk_rng_probe(struct platform_device *pdev) | |||
120 | return -ENOMEM; | 129 | return -ENOMEM; |
121 | 130 | ||
122 | priv->rng.name = pdev->name; | 131 | priv->rng.name = pdev->name; |
132 | #ifndef CONFIG_PM | ||
123 | priv->rng.init = mtk_rng_init; | 133 | priv->rng.init = mtk_rng_init; |
124 | priv->rng.cleanup = mtk_rng_cleanup; | 134 | priv->rng.cleanup = mtk_rng_cleanup; |
135 | #endif | ||
125 | priv->rng.read = mtk_rng_read; | 136 | priv->rng.read = mtk_rng_read; |
137 | priv->rng.priv = (unsigned long)&pdev->dev; | ||
126 | 138 | ||
127 | priv->clk = devm_clk_get(&pdev->dev, "rng"); | 139 | priv->clk = devm_clk_get(&pdev->dev, "rng"); |
128 | if (IS_ERR(priv->clk)) { | 140 | if (IS_ERR(priv->clk)) { |
@@ -142,11 +154,40 @@ static int mtk_rng_probe(struct platform_device *pdev) | |||
142 | return ret; | 154 | return ret; |
143 | } | 155 | } |
144 | 156 | ||
157 | dev_set_drvdata(&pdev->dev, priv); | ||
158 | pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT); | ||
159 | pm_runtime_use_autosuspend(&pdev->dev); | ||
160 | pm_runtime_enable(&pdev->dev); | ||
161 | |||
145 | dev_info(&pdev->dev, "registered RNG driver\n"); | 162 | dev_info(&pdev->dev, "registered RNG driver\n"); |
146 | 163 | ||
147 | return 0; | 164 | return 0; |
148 | } | 165 | } |
149 | 166 | ||
167 | #ifdef CONFIG_PM | ||
168 | static int mtk_rng_runtime_suspend(struct device *dev) | ||
169 | { | ||
170 | struct mtk_rng *priv = dev_get_drvdata(dev); | ||
171 | |||
172 | mtk_rng_cleanup(&priv->rng); | ||
173 | |||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | static int mtk_rng_runtime_resume(struct device *dev) | ||
178 | { | ||
179 | struct mtk_rng *priv = dev_get_drvdata(dev); | ||
180 | |||
181 | return mtk_rng_init(&priv->rng); | ||
182 | } | ||
183 | |||
184 | static UNIVERSAL_DEV_PM_OPS(mtk_rng_pm_ops, mtk_rng_runtime_suspend, | ||
185 | mtk_rng_runtime_resume, NULL); | ||
186 | #define MTK_RNG_PM_OPS (&mtk_rng_pm_ops) | ||
187 | #else /* CONFIG_PM */ | ||
188 | #define MTK_RNG_PM_OPS NULL | ||
189 | #endif /* CONFIG_PM */ | ||
190 | |||
150 | static const struct of_device_id mtk_rng_match[] = { | 191 | static const struct of_device_id mtk_rng_match[] = { |
151 | { .compatible = "mediatek,mt7623-rng" }, | 192 | { .compatible = "mediatek,mt7623-rng" }, |
152 | {}, | 193 | {}, |
@@ -157,6 +198,7 @@ static struct platform_driver mtk_rng_driver = { | |||
157 | .probe = mtk_rng_probe, | 198 | .probe = mtk_rng_probe, |
158 | .driver = { | 199 | .driver = { |
159 | .name = MTK_RNG_DEV, | 200 | .name = MTK_RNG_DEV, |
201 | .pm = MTK_RNG_PM_OPS, | ||
160 | .of_match_table = mtk_rng_match, | 202 | .of_match_table = mtk_rng_match, |
161 | }, | 203 | }, |
162 | }; | 204 | }; |
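A note on the mtk-rng changes as a whole: when runtime PM is available, the hwrng init/cleanup hooks are deliberately left unset and the same work is done from the runtime suspend/resume callbacks, while the read path wraps register access in the usual autosuspend idiom. A hedged restatement of that idiom, with dev standing for the device stashed in rng->priv:

pm_runtime_get_sync(dev);		/* power the block up if needed */
/* ... drain 32-bit words from the RNG ... */
pm_runtime_mark_last_busy(dev);
pm_runtime_put_sync_autosuspend(dev);	/* idle after RNG_AUTOSUSPEND_TIMEOUT ms */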
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c index 37a58d78aab3..38b719017186 100644 --- a/drivers/char/hw_random/omap3-rom-rng.c +++ b/drivers/char/hw_random/omap3-rom-rng.c | |||
@@ -53,7 +53,10 @@ static int omap3_rom_rng_get_random(void *buf, unsigned int count) | |||
53 | 53 | ||
54 | cancel_delayed_work_sync(&idle_work); | 54 | cancel_delayed_work_sync(&idle_work); |
55 | if (rng_idle) { | 55 | if (rng_idle) { |
56 | clk_prepare_enable(rng_clk); | 56 | r = clk_prepare_enable(rng_clk); |
57 | if (r) | ||
58 | return r; | ||
59 | |||
57 | r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT); | 60 | r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT); |
58 | if (r != 0) { | 61 | if (r != 0) { |
59 | clk_disable_unprepare(rng_clk); | 62 | clk_disable_unprepare(rng_clk); |
@@ -88,6 +91,8 @@ static struct hwrng omap3_rom_rng_ops = { | |||
88 | 91 | ||
89 | static int omap3_rom_rng_probe(struct platform_device *pdev) | 92 | static int omap3_rom_rng_probe(struct platform_device *pdev) |
90 | { | 93 | { |
94 | int ret = 0; | ||
95 | |||
91 | pr_info("initializing\n"); | 96 | pr_info("initializing\n"); |
92 | 97 | ||
93 | omap3_rom_rng_call = pdev->dev.platform_data; | 98 | omap3_rom_rng_call = pdev->dev.platform_data; |
@@ -104,7 +109,9 @@ static int omap3_rom_rng_probe(struct platform_device *pdev) | |||
104 | } | 109 | } |
105 | 110 | ||
106 | /* Leave the RNG in reset state. */ | 111 | /* Leave the RNG in reset state. */ |
107 | clk_prepare_enable(rng_clk); | 112 | ret = clk_prepare_enable(rng_clk); |
113 | if (ret) | ||
114 | return ret; | ||
108 | omap3_rom_rng_idle(0); | 115 | omap3_rom_rng_idle(0); |
109 | 116 | ||
110 | return hwrng_register(&omap3_rom_rng_ops); | 117 | return hwrng_register(&omap3_rom_rng_ops); |
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c index a0faa5f05deb..03ff5483d865 100644 --- a/drivers/char/hw_random/timeriomem-rng.c +++ b/drivers/char/hw_random/timeriomem-rng.c | |||
@@ -151,8 +151,15 @@ static int timeriomem_rng_probe(struct platform_device *pdev) | |||
151 | dev_err(&pdev->dev, "missing period\n"); | 151 | dev_err(&pdev->dev, "missing period\n"); |
152 | return -EINVAL; | 152 | return -EINVAL; |
153 | } | 153 | } |
154 | |||
155 | if (!of_property_read_u32(pdev->dev.of_node, | ||
156 | "quality", &i)) | ||
157 | priv->rng_ops.quality = i; | ||
158 | else | ||
159 | priv->rng_ops.quality = 0; | ||
154 | } else { | 160 | } else { |
155 | period = pdata->period; | 161 | period = pdata->period; |
162 | priv->rng_ops.quality = pdata->quality; | ||
156 | } | 163 | } |
157 | 164 | ||
158 | priv->period = ns_to_ktime(period * NSEC_PER_USEC); | 165 | priv->period = ns_to_ktime(period * NSEC_PER_USEC); |
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 9c7951bb05ac..193204dfbf3a 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
@@ -327,6 +327,15 @@ config HW_RANDOM_PPC4XX | |||
327 | This option provides the kernel-side support for the TRNG hardware | 327 | This option provides the kernel-side support for the TRNG hardware |
328 | found in the security function of some PowerPC 4xx SoCs. | 328 | found in the security function of some PowerPC 4xx SoCs. |
329 | 329 | ||
330 | config CRYPTO_DEV_OMAP | ||
331 | tristate "Support for OMAP crypto HW accelerators" | ||
332 | depends on ARCH_OMAP2PLUS | ||
333 | help | ||
334 | OMAP processors have various crypto HW accelerators. Select this if | ||
335 | you want to use the OMAP modules for any of the crypto algorithms. | ||
336 | |||
337 | if CRYPTO_DEV_OMAP | ||
338 | |||
330 | config CRYPTO_DEV_OMAP_SHAM | 339 | config CRYPTO_DEV_OMAP_SHAM |
331 | tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator" | 340 | tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator" |
332 | depends on ARCH_OMAP2PLUS | 341 | depends on ARCH_OMAP2PLUS |
@@ -348,6 +357,7 @@ config CRYPTO_DEV_OMAP_AES | |||
348 | select CRYPTO_CBC | 357 | select CRYPTO_CBC |
349 | select CRYPTO_ECB | 358 | select CRYPTO_ECB |
350 | select CRYPTO_CTR | 359 | select CRYPTO_CTR |
360 | select CRYPTO_AEAD | ||
351 | help | 361 | help |
352 | OMAP processors have AES module accelerator. Select this if you | 362 | OMAP processors have AES module accelerator. Select this if you |
353 | want to use the OMAP module for AES algorithms. | 363 | want to use the OMAP module for AES algorithms. |
@@ -364,6 +374,8 @@ config CRYPTO_DEV_OMAP_DES | |||
364 | the ECB and CBC modes of operation are supported by the driver. Also | 374 | the ECB and CBC modes of operation are supported by the driver. Also |
365 | accesses made on unaligned boundaries are supported. | 375 | accesses made on unaligned boundaries are supported. |
366 | 376 | ||
377 | endif # CRYPTO_DEV_OMAP | ||
378 | |||
367 | config CRYPTO_DEV_PICOXCELL | 379 | config CRYPTO_DEV_PICOXCELL |
368 | tristate "Support for picoXcell IPSEC and Layer2 crypto engines" | 380 | tristate "Support for picoXcell IPSEC and Layer2 crypto engines" |
369 | depends on (ARCH_PICOXCELL || COMPILE_TEST) && HAVE_CLK | 381 | depends on (ARCH_PICOXCELL || COMPILE_TEST) && HAVE_CLK |
@@ -542,6 +554,7 @@ config CRYPTO_DEV_MXS_DCP | |||
542 | 554 | ||
543 | source "drivers/crypto/qat/Kconfig" | 555 | source "drivers/crypto/qat/Kconfig" |
544 | source "drivers/crypto/cavium/cpt/Kconfig" | 556 | source "drivers/crypto/cavium/cpt/Kconfig" |
557 | source "drivers/crypto/cavium/nitrox/Kconfig" | ||
545 | 558 | ||
546 | config CRYPTO_DEV_CAVIUM_ZIP | 559 | config CRYPTO_DEV_CAVIUM_ZIP |
547 | tristate "Cavium ZIP driver" | 560 | tristate "Cavium ZIP driver" |
@@ -656,4 +669,21 @@ config CRYPTO_DEV_BCM_SPU | |||
656 | 669 | ||
657 | source "drivers/crypto/stm32/Kconfig" | 670 | source "drivers/crypto/stm32/Kconfig" |
658 | 671 | ||
672 | config CRYPTO_DEV_SAFEXCEL | ||
673 | tristate "Inside Secure's SafeXcel cryptographic engine driver" | ||
674 | depends on HAS_DMA && OF | ||
675 | depends on (ARM64 && ARCH_MVEBU) || (COMPILE_TEST && 64BIT) | ||
676 | select CRYPTO_AES | ||
677 | select CRYPTO_BLKCIPHER | ||
678 | select CRYPTO_HASH | ||
679 | select CRYPTO_HMAC | ||
680 | select CRYPTO_SHA1 | ||
681 | select CRYPTO_SHA256 | ||
682 | select CRYPTO_SHA512 | ||
683 | help | ||
684 | This driver interfaces with the SafeXcel EIP-197 cryptographic engine | ||
685 | designed by Inside Secure. Select this if you want to use CBC/ECB | ||
686 | chain mode, AES cipher mode and SHA1/SHA224/SHA256/SHA512 hash | ||
687 | algorithms. | ||
688 | |||
659 | endif # CRYPTO_HW | 689 | endif # CRYPTO_HW |
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 463f33592d93..2c555a3393b2 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile | |||
@@ -6,6 +6,7 @@ obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/ | |||
6 | obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/ | 6 | obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/ |
7 | obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/ | 7 | obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/ |
8 | obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/ | 8 | obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/ |
9 | obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/ | ||
9 | obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o | 10 | obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o |
10 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/ | 11 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/ |
11 | obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o | 12 | obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o |
@@ -20,7 +21,9 @@ obj-$(CONFIG_CRYPTO_DEV_MXC_SCC) += mxc-scc.o | |||
20 | obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o | 21 | obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o |
21 | n2_crypto-y := n2_core.o n2_asm.o | 22 | n2_crypto-y := n2_core.o n2_asm.o |
22 | obj-$(CONFIG_CRYPTO_DEV_NX) += nx/ | 23 | obj-$(CONFIG_CRYPTO_DEV_NX) += nx/ |
23 | obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o | 24 | obj-$(CONFIG_CRYPTO_DEV_OMAP) += omap-crypto.o |
25 | obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes-driver.o | ||
26 | omap-aes-driver-objs := omap-aes.o omap-aes-gcm.o | ||
24 | obj-$(CONFIG_CRYPTO_DEV_OMAP_DES) += omap-des.o | 27 | obj-$(CONFIG_CRYPTO_DEV_OMAP_DES) += omap-des.o |
25 | obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o | 28 | obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o |
26 | obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o | 29 | obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o |
@@ -39,3 +42,4 @@ obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/ | |||
39 | obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/ | 42 | obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/ |
40 | obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ | 43 | obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ |
41 | obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/ | 44 | obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/ |
45 | obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/ | ||
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index fdc83a2281ca..65dc78b91dea 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c | |||
@@ -1179,6 +1179,7 @@ static int crypto4xx_probe(struct platform_device *ofdev) | |||
1179 | dev_set_drvdata(dev, core_dev); | 1179 | dev_set_drvdata(dev, core_dev); |
1180 | core_dev->ofdev = ofdev; | 1180 | core_dev->ofdev = ofdev; |
1181 | core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL); | 1181 | core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL); |
1182 | rc = -ENOMEM; | ||
1182 | if (!core_dev->dev) | 1183 | if (!core_dev->dev) |
1183 | goto err_alloc_dev; | 1184 | goto err_alloc_dev; |
1184 | 1185 | ||
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index cc0d5b98006e..9cfd36c1bcb6 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <crypto/internal/aead.h> | 36 | #include <crypto/internal/aead.h> |
37 | #include <crypto/aes.h> | 37 | #include <crypto/aes.h> |
38 | #include <crypto/des.h> | 38 | #include <crypto/des.h> |
39 | #include <crypto/hmac.h> | ||
39 | #include <crypto/sha.h> | 40 | #include <crypto/sha.h> |
40 | #include <crypto/md5.h> | 41 | #include <crypto/md5.h> |
41 | #include <crypto/authenc.h> | 42 | #include <crypto/authenc.h> |
@@ -2510,8 +2511,8 @@ static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key, | |||
2510 | memcpy(ctx->opad, ctx->ipad, blocksize); | 2511 | memcpy(ctx->opad, ctx->ipad, blocksize); |
2511 | 2512 | ||
2512 | for (index = 0; index < blocksize; index++) { | 2513 | for (index = 0; index < blocksize; index++) { |
2513 | ctx->ipad[index] ^= 0x36; | 2514 | ctx->ipad[index] ^= HMAC_IPAD_VALUE; |
2514 | ctx->opad[index] ^= 0x5c; | 2515 | ctx->opad[index] ^= HMAC_OPAD_VALUE; |
2515 | } | 2516 | } |
2516 | 2517 | ||
2517 | flow_dump(" ipad: ", ctx->ipad, blocksize); | 2518 | flow_dump(" ipad: ", ctx->ipad, blocksize); |
@@ -2638,7 +2639,7 @@ static int aead_need_fallback(struct aead_request *req) | |||
2638 | (spu->spu_type == SPU_TYPE_SPUM) && | 2639 | (spu->spu_type == SPU_TYPE_SPUM) && |
2639 | (ctx->digestsize != 8) && (ctx->digestsize != 12) && | 2640 | (ctx->digestsize != 8) && (ctx->digestsize != 12) && |
2640 | (ctx->digestsize != 16)) { | 2641 | (ctx->digestsize != 16)) { |
2641 | flow_log("%s() AES CCM needs fallbck for digest size %d\n", | 2642 | flow_log("%s() AES CCM needs fallback for digest size %d\n", |
2642 | __func__, ctx->digestsize); | 2643 | __func__, ctx->digestsize); |
2643 | return 1; | 2644 | return 1; |
2644 | } | 2645 | } |
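A note on the ipad/opad hunk above: it only swaps the magic numbers for the named constants from <crypto/hmac.h>; the HMAC pad derivation is unchanged. A hedged sketch of that derivation follows, assuming the key has already been hashed or zero-padded to the block size as the surrounding driver code does.

#include <crypto/hmac.h>

/* The block-sized key is XORed with the two HMAC constants to form the
 * inner and outer pads. HMAC_IPAD_VALUE is 0x36 and HMAC_OPAD_VALUE is
 * 0x5c, matching the literals removed above. */
for (index = 0; index < blocksize; index++) {
	ipad[index] = key[index] ^ HMAC_IPAD_VALUE;
	opad[index] = key[index] ^ HMAC_OPAD_VALUE;
}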
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 398807d1b77e..fde399c88779 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
@@ -1187,8 +1187,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
1187 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | 1187 | struct crypto_aead *aead = crypto_aead_reqtfm(req); |
1188 | struct caam_ctx *ctx = crypto_aead_ctx(aead); | 1188 | struct caam_ctx *ctx = crypto_aead_ctx(aead); |
1189 | struct device *jrdev = ctx->jrdev; | 1189 | struct device *jrdev = ctx->jrdev; |
1190 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | 1190 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
1191 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | 1191 | GFP_KERNEL : GFP_ATOMIC; |
1192 | int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; | 1192 | int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; |
1193 | struct aead_edesc *edesc; | 1193 | struct aead_edesc *edesc; |
1194 | int sec4_sg_index, sec4_sg_len, sec4_sg_bytes; | 1194 | int sec4_sg_index, sec4_sg_len, sec4_sg_bytes; |
@@ -1475,8 +1475,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request | |||
1475 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); | 1475 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); |
1476 | struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); | 1476 | struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); |
1477 | struct device *jrdev = ctx->jrdev; | 1477 | struct device *jrdev = ctx->jrdev; |
1478 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | 1478 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
1479 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? | ||
1480 | GFP_KERNEL : GFP_ATOMIC; | 1479 | GFP_KERNEL : GFP_ATOMIC; |
1481 | int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; | 1480 | int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; |
1482 | struct ablkcipher_edesc *edesc; | 1481 | struct ablkcipher_edesc *edesc; |
@@ -1681,8 +1680,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( | |||
1681 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); | 1680 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); |
1682 | struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); | 1681 | struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); |
1683 | struct device *jrdev = ctx->jrdev; | 1682 | struct device *jrdev = ctx->jrdev; |
1684 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | 1683 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
1685 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? | ||
1686 | GFP_KERNEL : GFP_ATOMIC; | 1684 | GFP_KERNEL : GFP_ATOMIC; |
1687 | int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents; | 1685 | int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents; |
1688 | struct ablkcipher_edesc *edesc; | 1686 | struct ablkcipher_edesc *edesc; |
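A note on the gfp changes in this and the following caam hunks (caamalg_qi, caamhash and caampkc repeat the same conversion): the rule they converge on is that only CRYPTO_TFM_REQ_MAY_SLEEP permits a sleeping allocation. Reading between the lines, MAY_BACKLOG alone no longer selects GFP_KERNEL because a backlogged request can still be submitted from a context that must not sleep; that rationale is an interpretation, not something the hunks state. The resulting idiom:

gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
	      GFP_KERNEL : GFP_ATOMIC;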
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index ea0e5b8b9171..78c4c0485c58 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c | |||
@@ -555,8 +555,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, | |||
555 | struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), | 555 | struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), |
556 | typeof(*alg), aead); | 556 | typeof(*alg), aead); |
557 | struct device *qidev = ctx->qidev; | 557 | struct device *qidev = ctx->qidev; |
558 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | 558 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
559 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | 559 | GFP_KERNEL : GFP_ATOMIC; |
560 | int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; | 560 | int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; |
561 | struct aead_edesc *edesc; | 561 | struct aead_edesc *edesc; |
562 | dma_addr_t qm_sg_dma, iv_dma = 0; | 562 | dma_addr_t qm_sg_dma, iv_dma = 0; |
@@ -808,8 +808,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request | |||
808 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); | 808 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); |
809 | struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); | 809 | struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); |
810 | struct device *qidev = ctx->qidev; | 810 | struct device *qidev = ctx->qidev; |
811 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | 811 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
812 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? | ||
813 | GFP_KERNEL : GFP_ATOMIC; | 812 | GFP_KERNEL : GFP_ATOMIC; |
814 | int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; | 813 | int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; |
815 | struct ablkcipher_edesc *edesc; | 814 | struct ablkcipher_edesc *edesc; |
@@ -953,8 +952,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( | |||
953 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); | 952 | struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); |
954 | struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); | 953 | struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); |
955 | struct device *qidev = ctx->qidev; | 954 | struct device *qidev = ctx->qidev; |
956 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | 955 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
957 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? | ||
958 | GFP_KERNEL : GFP_ATOMIC; | 956 | GFP_KERNEL : GFP_ATOMIC; |
959 | int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents; | 957 | int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents; |
960 | struct ablkcipher_edesc *edesc; | 958 | struct ablkcipher_edesc *edesc; |
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index da4f94eab3da..7c44c90ad593 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
@@ -719,8 +719,8 @@ static int ahash_update_ctx(struct ahash_request *req) | |||
719 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 719 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
720 | struct caam_hash_state *state = ahash_request_ctx(req); | 720 | struct caam_hash_state *state = ahash_request_ctx(req); |
721 | struct device *jrdev = ctx->jrdev; | 721 | struct device *jrdev = ctx->jrdev; |
722 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | 722 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
723 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | 723 | GFP_KERNEL : GFP_ATOMIC; |
724 | u8 *buf = current_buf(state); | 724 | u8 *buf = current_buf(state); |
725 | int *buflen = current_buflen(state); | 725 | int *buflen = current_buflen(state); |
726 | u8 *next_buf = alt_buf(state); | 726 | u8 *next_buf = alt_buf(state); |
@@ -849,8 +849,8 @@ static int ahash_final_ctx(struct ahash_request *req) | |||
849 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 849 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
850 | struct caam_hash_state *state = ahash_request_ctx(req); | 850 | struct caam_hash_state *state = ahash_request_ctx(req); |
851 | struct device *jrdev = ctx->jrdev; | 851 | struct device *jrdev = ctx->jrdev; |
852 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | 852 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
853 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | 853 | GFP_KERNEL : GFP_ATOMIC; |
854 | int buflen = *current_buflen(state); | 854 | int buflen = *current_buflen(state); |
855 | u32 *desc; | 855 | u32 *desc; |
856 | int sec4_sg_bytes, sec4_sg_src_index; | 856 | int sec4_sg_bytes, sec4_sg_src_index; |
@@ -926,8 +926,8 @@ static int ahash_finup_ctx(struct ahash_request *req) | |||
926 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 926 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
927 | struct caam_hash_state *state = ahash_request_ctx(req); | 927 | struct caam_hash_state *state = ahash_request_ctx(req); |
928 | struct device *jrdev = ctx->jrdev; | 928 | struct device *jrdev = ctx->jrdev; |
929 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | 929 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
930 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | 930 | GFP_KERNEL : GFP_ATOMIC; |
931 | int buflen = *current_buflen(state); | 931 | int buflen = *current_buflen(state); |
932 | u32 *desc; | 932 | u32 *desc; |
933 | int sec4_sg_src_index; | 933 | int sec4_sg_src_index; |
@@ -1013,8 +1013,8 @@ static int ahash_digest(struct ahash_request *req) | |||
1013 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 1013 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
1014 | struct caam_hash_state *state = ahash_request_ctx(req); | 1014 | struct caam_hash_state *state = ahash_request_ctx(req); |
1015 | struct device *jrdev = ctx->jrdev; | 1015 | struct device *jrdev = ctx->jrdev; |
1016 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | 1016 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
1017 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | 1017 | GFP_KERNEL : GFP_ATOMIC; |
1018 | u32 *desc; | 1018 | u32 *desc; |
1019 | int digestsize = crypto_ahash_digestsize(ahash); | 1019 | int digestsize = crypto_ahash_digestsize(ahash); |
1020 | int src_nents, mapped_nents; | 1020 | int src_nents, mapped_nents; |
@@ -1093,8 +1093,8 @@ static int ahash_final_no_ctx(struct ahash_request *req) | |||
1093 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 1093 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
1094 | struct caam_hash_state *state = ahash_request_ctx(req); | 1094 | struct caam_hash_state *state = ahash_request_ctx(req); |
1095 | struct device *jrdev = ctx->jrdev; | 1095 | struct device *jrdev = ctx->jrdev; |
1096 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | 1096 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
1097 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | 1097 | GFP_KERNEL : GFP_ATOMIC; |
1098 | u8 *buf = current_buf(state); | 1098 | u8 *buf = current_buf(state); |
1099 | int buflen = *current_buflen(state); | 1099 | int buflen = *current_buflen(state); |
1100 | u32 *desc; | 1100 | u32 *desc; |
@@ -1154,8 +1154,8 @@ static int ahash_update_no_ctx(struct ahash_request *req) | |||
1154 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 1154 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
1155 | struct caam_hash_state *state = ahash_request_ctx(req); | 1155 | struct caam_hash_state *state = ahash_request_ctx(req); |
1156 | struct device *jrdev = ctx->jrdev; | 1156 | struct device *jrdev = ctx->jrdev; |
1157 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | 1157 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
1158 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | 1158 | GFP_KERNEL : GFP_ATOMIC; |
1159 | u8 *buf = current_buf(state); | 1159 | u8 *buf = current_buf(state); |
1160 | int *buflen = current_buflen(state); | 1160 | int *buflen = current_buflen(state); |
1161 | u8 *next_buf = alt_buf(state); | 1161 | u8 *next_buf = alt_buf(state); |
@@ -1280,8 +1280,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req) | |||
1280 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 1280 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
1281 | struct caam_hash_state *state = ahash_request_ctx(req); | 1281 | struct caam_hash_state *state = ahash_request_ctx(req); |
1282 | struct device *jrdev = ctx->jrdev; | 1282 | struct device *jrdev = ctx->jrdev; |
1283 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | 1283 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
1284 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | 1284 | GFP_KERNEL : GFP_ATOMIC; |
1285 | int buflen = *current_buflen(state); | 1285 | int buflen = *current_buflen(state); |
1286 | u32 *desc; | 1286 | u32 *desc; |
1287 | int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; | 1287 | int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; |
@@ -1370,8 +1370,8 @@ static int ahash_update_first(struct ahash_request *req) | |||
1370 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); | 1370 | struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); |
1371 | struct caam_hash_state *state = ahash_request_ctx(req); | 1371 | struct caam_hash_state *state = ahash_request_ctx(req); |
1372 | struct device *jrdev = ctx->jrdev; | 1372 | struct device *jrdev = ctx->jrdev; |
1373 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | 1373 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
1374 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | 1374 | GFP_KERNEL : GFP_ATOMIC; |
1375 | u8 *next_buf = alt_buf(state); | 1375 | u8 *next_buf = alt_buf(state); |
1376 | int *next_buflen = alt_buflen(state); | 1376 | int *next_buflen = alt_buflen(state); |
1377 | int to_hash; | 1377 | int to_hash; |
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 49cbdcba7883..7a897209f181 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c | |||
@@ -18,6 +18,10 @@ | |||
18 | #define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb)) | 18 | #define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb)) |
19 | #define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \ | 19 | #define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \ |
20 | sizeof(struct rsa_priv_f1_pdb)) | 20 | sizeof(struct rsa_priv_f1_pdb)) |
21 | #define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \ | ||
22 | sizeof(struct rsa_priv_f2_pdb)) | ||
23 | #define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \ | ||
24 | sizeof(struct rsa_priv_f3_pdb)) | ||
21 | 25 | ||
22 | static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc, | 26 | static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc, |
23 | struct akcipher_request *req) | 27 | struct akcipher_request *req) |
@@ -54,6 +58,42 @@ static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc, | |||
54 | dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); | 58 | dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); |
55 | } | 59 | } |
56 | 60 | ||
61 | static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc, | ||
62 | struct akcipher_request *req) | ||
63 | { | ||
64 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | ||
65 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | ||
66 | struct caam_rsa_key *key = &ctx->key; | ||
67 | struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2; | ||
68 | size_t p_sz = key->p_sz; | ||
69 | size_t q_sz = key->p_sz; | ||
70 | |||
71 | dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); | ||
72 | dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); | ||
73 | dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); | ||
74 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); | ||
75 | dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); | ||
76 | } | ||
77 | |||
78 | static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, | ||
79 | struct akcipher_request *req) | ||
80 | { | ||
81 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | ||
82 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | ||
83 | struct caam_rsa_key *key = &ctx->key; | ||
84 | struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3; | ||
85 | size_t p_sz = key->p_sz; | ||
86 | size_t q_sz = key->p_sz; | ||
87 | |||
88 | dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); | ||
89 | dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); | ||
90 | dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); | ||
91 | dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); | ||
92 | dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); | ||
93 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); | ||
94 | dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); | ||
95 | } | ||
96 | |||
57 | /* RSA Job Completion handler */ | 97 | /* RSA Job Completion handler */ |
58 | static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context) | 98 | static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context) |
59 | { | 99 | { |
@@ -90,6 +130,42 @@ static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err, | |||
90 | akcipher_request_complete(req, err); | 130 | akcipher_request_complete(req, err); |
91 | } | 131 | } |
92 | 132 | ||
133 | static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err, | ||
134 | void *context) | ||
135 | { | ||
136 | struct akcipher_request *req = context; | ||
137 | struct rsa_edesc *edesc; | ||
138 | |||
139 | if (err) | ||
140 | caam_jr_strstatus(dev, err); | ||
141 | |||
142 | edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); | ||
143 | |||
144 | rsa_priv_f2_unmap(dev, edesc, req); | ||
145 | rsa_io_unmap(dev, edesc, req); | ||
146 | kfree(edesc); | ||
147 | |||
148 | akcipher_request_complete(req, err); | ||
149 | } | ||
150 | |||
151 | static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err, | ||
152 | void *context) | ||
153 | { | ||
154 | struct akcipher_request *req = context; | ||
155 | struct rsa_edesc *edesc; | ||
156 | |||
157 | if (err) | ||
158 | caam_jr_strstatus(dev, err); | ||
159 | |||
160 | edesc = container_of(desc, struct rsa_edesc, hw_desc[0]); | ||
161 | |||
162 | rsa_priv_f3_unmap(dev, edesc, req); | ||
163 | rsa_io_unmap(dev, edesc, req); | ||
164 | kfree(edesc); | ||
165 | |||
166 | akcipher_request_complete(req, err); | ||
167 | } | ||
168 | |||
93 | static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, | 169 | static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, |
94 | size_t desclen) | 170 | size_t desclen) |
95 | { | 171 | { |
@@ -97,8 +173,8 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, | |||
97 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | 173 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); |
98 | struct device *dev = ctx->dev; | 174 | struct device *dev = ctx->dev; |
99 | struct rsa_edesc *edesc; | 175 | struct rsa_edesc *edesc; |
100 | gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | | 176 | gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? |
101 | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; | 177 | GFP_KERNEL : GFP_ATOMIC; |
102 | int sgc; | 178 | int sgc; |
103 | int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; | 179 | int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; |
104 | int src_nents, dst_nents; | 180 | int src_nents, dst_nents; |
@@ -258,6 +334,172 @@ static int set_rsa_priv_f1_pdb(struct akcipher_request *req, | |||
258 | return 0; | 334 | return 0; |
259 | } | 335 | } |
260 | 336 | ||
337 | static int set_rsa_priv_f2_pdb(struct akcipher_request *req, | ||
338 | struct rsa_edesc *edesc) | ||
339 | { | ||
340 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | ||
341 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | ||
342 | struct caam_rsa_key *key = &ctx->key; | ||
343 | struct device *dev = ctx->dev; | ||
344 | struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2; | ||
345 | int sec4_sg_index = 0; | ||
346 | size_t p_sz = key->p_sz; | ||
347 | size_t q_sz = key->q_sz; | ||
348 | |||
349 | pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE); | ||
350 | if (dma_mapping_error(dev, pdb->d_dma)) { | ||
351 | dev_err(dev, "Unable to map RSA private exponent memory\n"); | ||
352 | return -ENOMEM; | ||
353 | } | ||
354 | |||
355 | pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE); | ||
356 | if (dma_mapping_error(dev, pdb->p_dma)) { | ||
357 | dev_err(dev, "Unable to map RSA prime factor p memory\n"); | ||
358 | goto unmap_d; | ||
359 | } | ||
360 | |||
361 | pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE); | ||
362 | if (dma_mapping_error(dev, pdb->q_dma)) { | ||
363 | dev_err(dev, "Unable to map RSA prime factor q memory\n"); | ||
364 | goto unmap_p; | ||
365 | } | ||
366 | |||
367 | pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); | ||
368 | if (dma_mapping_error(dev, pdb->tmp1_dma)) { | ||
369 | dev_err(dev, "Unable to map RSA tmp1 memory\n"); | ||
370 | goto unmap_q; | ||
371 | } | ||
372 | |||
373 | pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); | ||
374 | if (dma_mapping_error(dev, pdb->tmp2_dma)) { | ||
375 | dev_err(dev, "Unable to map RSA tmp2 memory\n"); | ||
376 | goto unmap_tmp1; | ||
377 | } | ||
378 | |||
379 | if (edesc->src_nents > 1) { | ||
380 | pdb->sgf |= RSA_PRIV_PDB_SGF_G; | ||
381 | pdb->g_dma = edesc->sec4_sg_dma; | ||
382 | sec4_sg_index += edesc->src_nents; | ||
383 | } else { | ||
384 | pdb->g_dma = sg_dma_address(req->src); | ||
385 | } | ||
386 | |||
387 | if (edesc->dst_nents > 1) { | ||
388 | pdb->sgf |= RSA_PRIV_PDB_SGF_F; | ||
389 | pdb->f_dma = edesc->sec4_sg_dma + | ||
390 | sec4_sg_index * sizeof(struct sec4_sg_entry); | ||
391 | } else { | ||
392 | pdb->f_dma = sg_dma_address(req->dst); | ||
393 | } | ||
394 | |||
395 | pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz; | ||
396 | pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz; | ||
397 | |||
398 | return 0; | ||
399 | |||
400 | unmap_tmp1: | ||
401 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); | ||
402 | unmap_q: | ||
403 | dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); | ||
404 | unmap_p: | ||
405 | dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); | ||
406 | unmap_d: | ||
407 | dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); | ||
408 | |||
409 | return -ENOMEM; | ||
410 | } | ||
411 | |||
412 | static int set_rsa_priv_f3_pdb(struct akcipher_request *req, | ||
413 | struct rsa_edesc *edesc) | ||
414 | { | ||
415 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | ||
416 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | ||
417 | struct caam_rsa_key *key = &ctx->key; | ||
418 | struct device *dev = ctx->dev; | ||
419 | struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3; | ||
420 | int sec4_sg_index = 0; | ||
421 | size_t p_sz = key->p_sz; | ||
422 | size_t q_sz = key->q_sz; | ||
423 | |||
424 | pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE); | ||
425 | if (dma_mapping_error(dev, pdb->p_dma)) { | ||
426 | dev_err(dev, "Unable to map RSA prime factor p memory\n"); | ||
427 | return -ENOMEM; | ||
428 | } | ||
429 | |||
430 | pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE); | ||
431 | if (dma_mapping_error(dev, pdb->q_dma)) { | ||
432 | dev_err(dev, "Unable to map RSA prime factor q memory\n"); | ||
433 | goto unmap_p; | ||
434 | } | ||
435 | |||
436 | pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE); | ||
437 | if (dma_mapping_error(dev, pdb->dp_dma)) { | ||
438 | dev_err(dev, "Unable to map RSA exponent dp memory\n"); | ||
439 | goto unmap_q; | ||
440 | } | ||
441 | |||
442 | pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE); | ||
443 | if (dma_mapping_error(dev, pdb->dq_dma)) { | ||
444 | dev_err(dev, "Unable to map RSA exponent dq memory\n"); | ||
445 | goto unmap_dp; | ||
446 | } | ||
447 | |||
448 | pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE); | ||
449 | if (dma_mapping_error(dev, pdb->c_dma)) { | ||
450 | dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n"); | ||
451 | goto unmap_dq; | ||
452 | } | ||
453 | |||
454 | pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); | ||
455 | if (dma_mapping_error(dev, pdb->tmp1_dma)) { | ||
456 | dev_err(dev, "Unable to map RSA tmp1 memory\n"); | ||
457 | goto unmap_qinv; | ||
458 | } | ||
459 | |||
460 | pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); | ||
461 | if (dma_mapping_error(dev, pdb->tmp2_dma)) { | ||
462 | dev_err(dev, "Unable to map RSA tmp2 memory\n"); | ||
463 | goto unmap_tmp1; | ||
464 | } | ||
465 | |||
466 | if (edesc->src_nents > 1) { | ||
467 | pdb->sgf |= RSA_PRIV_PDB_SGF_G; | ||
468 | pdb->g_dma = edesc->sec4_sg_dma; | ||
469 | sec4_sg_index += edesc->src_nents; | ||
470 | } else { | ||
471 | pdb->g_dma = sg_dma_address(req->src); | ||
472 | } | ||
473 | |||
474 | if (edesc->dst_nents > 1) { | ||
475 | pdb->sgf |= RSA_PRIV_PDB_SGF_F; | ||
476 | pdb->f_dma = edesc->sec4_sg_dma + | ||
477 | sec4_sg_index * sizeof(struct sec4_sg_entry); | ||
478 | } else { | ||
479 | pdb->f_dma = sg_dma_address(req->dst); | ||
480 | } | ||
481 | |||
482 | pdb->sgf |= key->n_sz; | ||
483 | pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz; | ||
484 | |||
485 | return 0; | ||
486 | |||
487 | unmap_tmp1: | ||
488 | dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); | ||
489 | unmap_qinv: | ||
490 | dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); | ||
491 | unmap_dq: | ||
492 | dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); | ||
493 | unmap_dp: | ||
494 | dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); | ||
495 | unmap_q: | ||
496 | dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); | ||
497 | unmap_p: | ||
498 | dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); | ||
499 | |||
500 | return -ENOMEM; | ||
501 | } | ||
502 | |||
261 | static int caam_rsa_enc(struct akcipher_request *req) | 503 | static int caam_rsa_enc(struct akcipher_request *req) |
262 | { | 504 | { |
263 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | 505 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); |
@@ -301,24 +543,14 @@ init_fail: | |||
301 | return ret; | 543 | return ret; |
302 | } | 544 | } |
303 | 545 | ||
304 | static int caam_rsa_dec(struct akcipher_request *req) | 546 | static int caam_rsa_dec_priv_f1(struct akcipher_request *req) |
305 | { | 547 | { |
306 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | 548 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); |
307 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | 549 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); |
308 | struct caam_rsa_key *key = &ctx->key; | ||
309 | struct device *jrdev = ctx->dev; | 550 | struct device *jrdev = ctx->dev; |
310 | struct rsa_edesc *edesc; | 551 | struct rsa_edesc *edesc; |
311 | int ret; | 552 | int ret; |
312 | 553 | ||
313 | if (unlikely(!key->n || !key->d)) | ||
314 | return -EINVAL; | ||
315 | |||
316 | if (req->dst_len < key->n_sz) { | ||
317 | req->dst_len = key->n_sz; | ||
318 | dev_err(jrdev, "Output buffer length less than parameter n\n"); | ||
319 | return -EOVERFLOW; | ||
320 | } | ||
321 | |||
322 | /* Allocate extended descriptor */ | 554 | /* Allocate extended descriptor */ |
323 | edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN); | 555 | edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN); |
324 | if (IS_ERR(edesc)) | 556 | if (IS_ERR(edesc)) |
@@ -344,17 +576,147 @@ init_fail: | |||
344 | return ret; | 576 | return ret; |
345 | } | 577 | } |
346 | 578 | ||
579 | static int caam_rsa_dec_priv_f2(struct akcipher_request *req) | ||
580 | { | ||
581 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | ||
582 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | ||
583 | struct device *jrdev = ctx->dev; | ||
584 | struct rsa_edesc *edesc; | ||
585 | int ret; | ||
586 | |||
587 | /* Allocate extended descriptor */ | ||
588 | edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN); | ||
589 | if (IS_ERR(edesc)) | ||
590 | return PTR_ERR(edesc); | ||
591 | |||
592 | /* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */ | ||
593 | ret = set_rsa_priv_f2_pdb(req, edesc); | ||
594 | if (ret) | ||
595 | goto init_fail; | ||
596 | |||
597 | /* Initialize Job Descriptor */ | ||
598 | init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2); | ||
599 | |||
600 | ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req); | ||
601 | if (!ret) | ||
602 | return -EINPROGRESS; | ||
603 | |||
604 | rsa_priv_f2_unmap(jrdev, edesc, req); | ||
605 | |||
606 | init_fail: | ||
607 | rsa_io_unmap(jrdev, edesc, req); | ||
608 | kfree(edesc); | ||
609 | return ret; | ||
610 | } | ||
611 | |||
612 | static int caam_rsa_dec_priv_f3(struct akcipher_request *req) | ||
613 | { | ||
614 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | ||
615 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | ||
616 | struct device *jrdev = ctx->dev; | ||
617 | struct rsa_edesc *edesc; | ||
618 | int ret; | ||
619 | |||
620 | /* Allocate extended descriptor */ | ||
621 | edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN); | ||
622 | if (IS_ERR(edesc)) | ||
623 | return PTR_ERR(edesc); | ||
624 | |||
625 | /* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */ | ||
626 | ret = set_rsa_priv_f3_pdb(req, edesc); | ||
627 | if (ret) | ||
628 | goto init_fail; | ||
629 | |||
630 | /* Initialize Job Descriptor */ | ||
631 | init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3); | ||
632 | |||
633 | ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req); | ||
634 | if (!ret) | ||
635 | return -EINPROGRESS; | ||
636 | |||
637 | rsa_priv_f3_unmap(jrdev, edesc, req); | ||
638 | |||
639 | init_fail: | ||
640 | rsa_io_unmap(jrdev, edesc, req); | ||
641 | kfree(edesc); | ||
642 | return ret; | ||
643 | } | ||
644 | |||
645 | static int caam_rsa_dec(struct akcipher_request *req) | ||
646 | { | ||
647 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | ||
648 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | ||
649 | struct caam_rsa_key *key = &ctx->key; | ||
650 | int ret; | ||
651 | |||
652 | if (unlikely(!key->n || !key->d)) | ||
653 | return -EINVAL; | ||
654 | |||
655 | if (req->dst_len < key->n_sz) { | ||
656 | req->dst_len = key->n_sz; | ||
657 | dev_err(ctx->dev, "Output buffer length less than parameter n\n"); | ||
658 | return -EOVERFLOW; | ||
659 | } | ||
660 | |||
661 | if (key->priv_form == FORM3) | ||
662 | ret = caam_rsa_dec_priv_f3(req); | ||
663 | else if (key->priv_form == FORM2) | ||
664 | ret = caam_rsa_dec_priv_f2(req); | ||
665 | else | ||
666 | ret = caam_rsa_dec_priv_f1(req); | ||
667 | |||
668 | return ret; | ||
669 | } | ||
670 | |||
347 | static void caam_rsa_free_key(struct caam_rsa_key *key) | 671 | static void caam_rsa_free_key(struct caam_rsa_key *key) |
348 | { | 672 | { |
349 | kzfree(key->d); | 673 | kzfree(key->d); |
674 | kzfree(key->p); | ||
675 | kzfree(key->q); | ||
676 | kzfree(key->dp); | ||
677 | kzfree(key->dq); | ||
678 | kzfree(key->qinv); | ||
679 | kzfree(key->tmp1); | ||
680 | kzfree(key->tmp2); | ||
350 | kfree(key->e); | 681 | kfree(key->e); |
351 | kfree(key->n); | 682 | kfree(key->n); |
352 | key->d = NULL; | 683 | memset(key, 0, sizeof(*key)); |
353 | key->e = NULL; | 684 | } |
354 | key->n = NULL; | 685 | |
355 | key->d_sz = 0; | 686 | static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes) |
356 | key->e_sz = 0; | 687 | { |
357 | key->n_sz = 0; | 688 | while (!**ptr && *nbytes) { |
689 | (*ptr)++; | ||
690 | (*nbytes)--; | ||
691 | } | ||
692 | } | ||
693 | |||
694 | /** | ||
695 | * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members. | ||
696 | * dP, dQ and qInv may decode to fewer bytes than the corresponding p or q | ||
697 | * length, since BER encoding uses the minimum number of bytes needed to | ||
698 | * represent an integer. The decoded dP, dQ and qInv values therefore have to | ||
699 | * be zero-padded to the appropriate length. | ||
700 | * | ||
701 | * @ptr : pointer to {dP, dQ, qInv} CRT member | ||
702 | * @nbytes: length in bytes of {dP, dQ, qInv} CRT member | ||
703 | * @dstlen: length in bytes of corresponding p or q prime factor | ||
704 | */ | ||
705 | static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen) | ||
706 | { | ||
707 | u8 *dst; | ||
708 | |||
709 | caam_rsa_drop_leading_zeros(&ptr, &nbytes); | ||
710 | if (!nbytes) | ||
711 | return NULL; | ||
712 | |||
713 | dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL); | ||
714 | if (!dst) | ||
715 | return NULL; | ||
716 | |||
717 | memcpy(dst + (dstlen - nbytes), ptr, nbytes); | ||
718 | |||
719 | return dst; | ||
358 | } | 720 | } |
359 | 721 | ||
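For illustration of the padding rule described in the comment above: if p is 8 bytes long but dP happens to BER-decode to only 6 significant bytes, those 6 bytes must end up right-aligned in a zero-filled 8-byte buffer. The sketch below is a plain userspace analogue of caam_read_rsa_crt() (calloc() standing in for the driver's GFP_DMA kzalloc()), not driver code:

/* Userspace sketch: right-align a BER-decoded CRT member in a zero-filled
 * buffer of the corresponding prime factor's length. Illustration only. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned char *read_rsa_crt(const unsigned char *ptr, size_t nbytes,
				   size_t dstlen)
{
	unsigned char *dst;

	while (nbytes && !*ptr) {	/* drop leading zero bytes */
		ptr++;
		nbytes--;
	}
	if (!nbytes)
		return NULL;

	dst = calloc(1, dstlen);	/* zero-filled, like kzalloc() */
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);	/* right-align */
	return dst;
}

int main(void)
{
	/* dP decoded to 6 significant bytes, while p is 8 bytes long */
	const unsigned char dp[] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc };
	unsigned char *padded = read_rsa_crt(dp, sizeof(dp), 8);
	size_t i;

	for (i = 0; padded && i < 8; i++)
		printf("%02x ", padded[i]);	/* 00 00 12 34 56 78 9a bc */
	printf("\n");
	free(padded);
	return 0;
}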
360 | /** | 722 | /** |
@@ -370,10 +732,9 @@ static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes) | |||
370 | { | 732 | { |
371 | u8 *val; | 733 | u8 *val; |
372 | 734 | ||
373 | while (!*buf && *nbytes) { | 735 | caam_rsa_drop_leading_zeros(&buf, nbytes); |
374 | buf++; | 736 | if (!*nbytes) |
375 | (*nbytes)--; | 737 | return NULL; |
376 | } | ||
377 | 738 | ||
378 | val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL); | 739 | val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL); |
379 | if (!val) | 740 | if (!val) |
@@ -437,6 +798,64 @@ err: | |||
437 | return -ENOMEM; | 798 | return -ENOMEM; |
438 | } | 799 | } |
439 | 800 | ||
801 | static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx, | ||
802 | struct rsa_key *raw_key) | ||
803 | { | ||
804 | struct caam_rsa_key *rsa_key = &ctx->key; | ||
805 | size_t p_sz = raw_key->p_sz; | ||
806 | size_t q_sz = raw_key->q_sz; | ||
807 | |||
808 | rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz); | ||
809 | if (!rsa_key->p) | ||
810 | return; | ||
811 | rsa_key->p_sz = p_sz; | ||
812 | |||
813 | rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz); | ||
814 | if (!rsa_key->q) | ||
815 | goto free_p; | ||
816 | rsa_key->q_sz = q_sz; | ||
817 | |||
818 | rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL); | ||
819 | if (!rsa_key->tmp1) | ||
820 | goto free_q; | ||
821 | |||
822 | rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL); | ||
823 | if (!rsa_key->tmp2) | ||
824 | goto free_tmp1; | ||
825 | |||
826 | rsa_key->priv_form = FORM2; | ||
827 | |||
828 | rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz); | ||
829 | if (!rsa_key->dp) | ||
830 | goto free_tmp2; | ||
831 | |||
832 | rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz); | ||
833 | if (!rsa_key->dq) | ||
834 | goto free_dp; | ||
835 | |||
836 | rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz, | ||
837 | q_sz); | ||
838 | if (!rsa_key->qinv) | ||
839 | goto free_dq; | ||
840 | |||
841 | rsa_key->priv_form = FORM3; | ||
842 | |||
843 | return; | ||
844 | |||
845 | free_dq: | ||
846 | kzfree(rsa_key->dq); | ||
847 | free_dp: | ||
848 | kzfree(rsa_key->dp); | ||
849 | free_tmp2: | ||
850 | kzfree(rsa_key->tmp2); | ||
851 | free_tmp1: | ||
852 | kzfree(rsa_key->tmp1); | ||
853 | free_q: | ||
854 | kzfree(rsa_key->q); | ||
855 | free_p: | ||
856 | kzfree(rsa_key->p); | ||
857 | } | ||
858 | |||
440 | static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, | 859 | static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, |
441 | unsigned int keylen) | 860 | unsigned int keylen) |
442 | { | 861 | { |
@@ -483,6 +902,8 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, | |||
483 | memcpy(rsa_key->d, raw_key.d, raw_key.d_sz); | 902 | memcpy(rsa_key->d, raw_key.d, raw_key.d_sz); |
484 | memcpy(rsa_key->e, raw_key.e, raw_key.e_sz); | 903 | memcpy(rsa_key->e, raw_key.e, raw_key.e_sz); |
485 | 904 | ||
905 | caam_rsa_set_priv_key_form(ctx, &raw_key); | ||
906 | |||
486 | return 0; | 907 | return 0; |
487 | 908 | ||
488 | err: | 909 | err: |
@@ -490,12 +911,11 @@ err: | |||
490 | return -ENOMEM; | 911 | return -ENOMEM; |
491 | } | 912 | } |
492 | 913 | ||
493 | static int caam_rsa_max_size(struct crypto_akcipher *tfm) | 914 | static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm) |
494 | { | 915 | { |
495 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | 916 | struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); |
496 | struct caam_rsa_key *key = &ctx->key; | ||
497 | 917 | ||
498 | return (key->n) ? key->n_sz : -EINVAL; | 918 | return ctx->key.n_sz; |
499 | } | 919 | } |
500 | 920 | ||
501 | /* Per session pkc's driver context creation function */ | 921 | /* Per session pkc's driver context creation function */ |
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h index f595d159b112..87ab75e9df43 100644 --- a/drivers/crypto/caam/caampkc.h +++ b/drivers/crypto/caam/caampkc.h | |||
@@ -13,21 +13,75 @@ | |||
13 | #include "pdb.h" | 13 | #include "pdb.h" |
14 | 14 | ||
15 | /** | 15 | /** |
16 | * caam_priv_key_form - CAAM RSA private key representation | ||
17 | * A CAAM RSA private key may take one of three forms. | ||
18 | * | ||
19 | * 1. The first representation consists of the pair (n, d), where the | ||
20 | * components have the following meanings: | ||
21 | * n the RSA modulus | ||
22 | * d the RSA private exponent | ||
23 | * | ||
24 | * 2. The second representation consists of the triplet (p, q, d), where the | ||
25 | * components have the following meanings: | ||
26 | * p the first prime factor of the RSA modulus n | ||
27 | * q the second prime factor of the RSA modulus n | ||
28 | * d the RSA private exponent | ||
29 | * | ||
30 | * 3. The third representation consists of the quintuple (p, q, dP, dQ, qInv), | ||
31 | * where the components have the following meanings: | ||
32 | * p the first prime factor of the RSA modulus n | ||
33 | * q the second prime factor of the RSA modulus n | ||
34 | * dP        the first factor's CRT exponent | ||
35 | * dQ        the second factor's CRT exponent | ||
36 | * qInv the (first) CRT coefficient | ||
37 | * | ||
38 | * The benefit of using the third or the second key form is lower computational | ||
39 | * cost for the decryption and signature operations. | ||
40 | */ | ||
41 | enum caam_priv_key_form { | ||
42 | FORM1, | ||
43 | FORM2, | ||
44 | FORM3 | ||
45 | }; | ||
46 | |||
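The lower cost mentioned above comes from the Chinese Remainder Theorem: with form #3 the private-key operation can be done as two half-size exponentiations modulo p and q plus a cheap recombination, instead of one full-size exponentiation modulo n. The snippet below is a standalone toy sketch using the textbook values p = 61, q = 53; it is not CAAM code and only demonstrates that the form #1 and form #3 computations recover the same message:

/* Toy RSA example: FORM1 (n, d) vs FORM3 (p, q, dP, dQ, qInv) decryption.
 * Tiny textbook numbers, illustration only. */
#include <stdint.h>
#include <stdio.h>

static uint64_t modpow(uint64_t b, uint64_t e, uint64_t m)
{
	uint64_t r = 1;

	b %= m;
	while (e) {
		if (e & 1)
			r = r * b % m;
		b = b * b % m;
		e >>= 1;
	}
	return r;
}

int main(void)
{
	uint64_t p = 61, q = 53, n = p * q;	/* n = 3233 */
	uint64_t e = 17, d = 2753;		/* e * d == 1 (mod lcm(p-1, q-1)) */
	uint64_t dp = d % (p - 1), dq = d % (q - 1);
	uint64_t qinv = 38;			/* 53 * 38 == 1 (mod 61) */
	uint64_t msg = 65, c = modpow(msg, e, n);	/* "encrypt" */

	/* form #1: a single exponentiation modulo n */
	uint64_t m1 = modpow(c, d, n);

	/* form #3: two half-size exponentiations plus CRT recombination */
	uint64_t mp = modpow(c, dp, p);
	uint64_t mq = modpow(c, dq, q);
	uint64_t h = qinv * ((mp + p - mq % p) % p) % p;
	uint64_t m3 = mq + h * q;

	printf("form1=%llu form3=%llu\n",	/* both print 65 */
	       (unsigned long long)m1, (unsigned long long)m3);
	return 0;
}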
47 | /** | ||
16 | * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone. | 48 | * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone. |
17 | * @n : RSA modulus raw byte stream | 49 | * @n : RSA modulus raw byte stream |
18 | * @e : RSA public exponent raw byte stream | 50 | * @e : RSA public exponent raw byte stream |
19 | * @d : RSA private exponent raw byte stream | 51 | * @d : RSA private exponent raw byte stream |
52 | * @p : RSA prime factor p of RSA modulus n | ||
53 | * @q : RSA prime factor q of RSA modulus n | ||
54 | * @dp : RSA CRT exponent of p | ||
55 | * @dq : RSA CRT exponent of q | ||
56 | * @qinv : RSA CRT coefficient | ||
57 | * @tmp1 : CAAM uses this temporary buffer as an internal state buffer. | ||
58 | * It is assumed to be as long as p. | ||
59 | * @tmp2 : CAAM uses this temporary buffer as an internal state buffer. | ||
60 | * It is assumed to be as long as q. | ||
20 | * @n_sz : length in bytes of RSA modulus n | 61 | * @n_sz : length in bytes of RSA modulus n |
21 | * @e_sz : length in bytes of RSA public exponent | 62 | * @e_sz : length in bytes of RSA public exponent |
22 | * @d_sz : length in bytes of RSA private exponent | 63 | * @d_sz : length in bytes of RSA private exponent |
64 | * @p_sz : length in bytes of RSA prime factor p of RSA modulus n | ||
65 | * @q_sz : length in bytes of RSA prime factor q of RSA modulus n | ||
66 | * @priv_form : CAAM RSA private key representation | ||
23 | */ | 67 | */ |
24 | struct caam_rsa_key { | 68 | struct caam_rsa_key { |
25 | u8 *n; | 69 | u8 *n; |
26 | u8 *e; | 70 | u8 *e; |
27 | u8 *d; | 71 | u8 *d; |
72 | u8 *p; | ||
73 | u8 *q; | ||
74 | u8 *dp; | ||
75 | u8 *dq; | ||
76 | u8 *qinv; | ||
77 | u8 *tmp1; | ||
78 | u8 *tmp2; | ||
28 | size_t n_sz; | 79 | size_t n_sz; |
29 | size_t e_sz; | 80 | size_t e_sz; |
30 | size_t d_sz; | 81 | size_t d_sz; |
82 | size_t p_sz; | ||
83 | size_t q_sz; | ||
84 | enum caam_priv_key_form priv_form; | ||
31 | }; | 85 | }; |
32 | 86 | ||
33 | /** | 87 | /** |
@@ -59,6 +113,8 @@ struct rsa_edesc { | |||
59 | union { | 113 | union { |
60 | struct rsa_pub_pdb pub; | 114 | struct rsa_pub_pdb pub; |
61 | struct rsa_priv_f1_pdb priv_f1; | 115 | struct rsa_priv_f1_pdb priv_f1; |
116 | struct rsa_priv_f2_pdb priv_f2; | ||
117 | struct rsa_priv_f3_pdb priv_f3; | ||
62 | } pdb; | 118 | } pdb; |
63 | u32 hw_desc[]; | 119 | u32 hw_desc[]; |
64 | }; | 120 | }; |
@@ -66,5 +122,7 @@ struct rsa_edesc { | |||
66 | /* Descriptor construction primitives. */ | 122 | /* Descriptor construction primitives. */ |
67 | void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb); | 123 | void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb); |
68 | void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb); | 124 | void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb); |
125 | void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb); | ||
126 | void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb); | ||
69 | 127 | ||
70 | #endif | 128 | #endif |
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 27631000b9f8..1ccfb317d468 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
@@ -536,7 +536,7 @@ static int caam_jr_probe(struct platform_device *pdev) | |||
536 | return 0; | 536 | return 0; |
537 | } | 537 | } |
538 | 538 | ||
539 | static struct of_device_id caam_jr_match[] = { | 539 | static const struct of_device_id caam_jr_match[] = { |
540 | { | 540 | { |
541 | .compatible = "fsl,sec-v4.0-job-ring", | 541 | .compatible = "fsl,sec-v4.0-job-ring", |
542 | }, | 542 | }, |
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h index aaa00dd1c601..31e59963f4d2 100644 --- a/drivers/crypto/caam/pdb.h +++ b/drivers/crypto/caam/pdb.h | |||
@@ -483,6 +483,8 @@ struct dsa_verify_pdb { | |||
483 | #define RSA_PDB_E_MASK (0xFFF << RSA_PDB_E_SHIFT) | 483 | #define RSA_PDB_E_MASK (0xFFF << RSA_PDB_E_SHIFT) |
484 | #define RSA_PDB_D_SHIFT 12 | 484 | #define RSA_PDB_D_SHIFT 12 |
485 | #define RSA_PDB_D_MASK (0xFFF << RSA_PDB_D_SHIFT) | 485 | #define RSA_PDB_D_MASK (0xFFF << RSA_PDB_D_SHIFT) |
486 | #define RSA_PDB_Q_SHIFT 12 | ||
487 | #define RSA_PDB_Q_MASK (0xFFF << RSA_PDB_Q_SHIFT) | ||
486 | 488 | ||
487 | #define RSA_PDB_SGF_F (0x8 << RSA_PDB_SGF_SHIFT) | 489 | #define RSA_PDB_SGF_F (0x8 << RSA_PDB_SGF_SHIFT) |
488 | #define RSA_PDB_SGF_G (0x4 << RSA_PDB_SGF_SHIFT) | 490 | #define RSA_PDB_SGF_G (0x4 << RSA_PDB_SGF_SHIFT) |
@@ -490,6 +492,8 @@ struct dsa_verify_pdb { | |||
490 | #define RSA_PRIV_PDB_SGF_G (0x8 << RSA_PDB_SGF_SHIFT) | 492 | #define RSA_PRIV_PDB_SGF_G (0x8 << RSA_PDB_SGF_SHIFT) |
491 | 493 | ||
492 | #define RSA_PRIV_KEY_FRM_1 0 | 494 | #define RSA_PRIV_KEY_FRM_1 0 |
495 | #define RSA_PRIV_KEY_FRM_2 1 | ||
496 | #define RSA_PRIV_KEY_FRM_3 2 | ||
493 | 497 | ||
494 | /** | 498 | /** |
495 | * RSA Encrypt Protocol Data Block | 499 | * RSA Encrypt Protocol Data Block |
@@ -525,4 +529,62 @@ struct rsa_priv_f1_pdb { | |||
525 | dma_addr_t d_dma; | 529 | dma_addr_t d_dma; |
526 | } __packed; | 530 | } __packed; |
527 | 531 | ||
532 | /** | ||
533 | * RSA Decrypt PDB - Private Key Form #2 | ||
534 | * @sgf : scatter-gather field | ||
535 | * @g_dma : dma address of encrypted input data | ||
536 | * @f_dma : dma address of output data | ||
537 | * @d_dma : dma address of RSA private exponent | ||
538 | * @p_dma : dma address of RSA prime factor p of RSA modulus n | ||
539 | * @q_dma : dma address of RSA prime factor q of RSA modulus n | ||
540 | * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer | ||
541 | * as an internal state buffer. It is assumed to be as long as p. | ||
542 | * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer | ||
543 | * as an internal state buffer. It is assumed to be as long as q. | ||
544 | * @p_q_len : length in bytes of first two prime factors of the RSA modulus n | ||
545 | */ | ||
546 | struct rsa_priv_f2_pdb { | ||
547 | u32 sgf; | ||
548 | dma_addr_t g_dma; | ||
549 | dma_addr_t f_dma; | ||
550 | dma_addr_t d_dma; | ||
551 | dma_addr_t p_dma; | ||
552 | dma_addr_t q_dma; | ||
553 | dma_addr_t tmp1_dma; | ||
554 | dma_addr_t tmp2_dma; | ||
555 | u32 p_q_len; | ||
556 | } __packed; | ||
557 | |||
558 | /** | ||
559 | * RSA Decrypt PDB - Private Key Form #3 | ||
560 | * This is the RSA Chinese Remainder Theorem (CRT) form for two prime factors of | ||
561 | * the RSA modulus. | ||
562 | * @sgf : scatter-gather field | ||
563 | * @g_dma : dma address of encrypted input data | ||
564 | * @f_dma : dma address of output data | ||
565 | * @c_dma : dma address of RSA CRT coefficient | ||
566 | * @p_dma : dma address of RSA prime factor p of RSA modulus n | ||
567 | * @q_dma : dma address of RSA prime factor q of RSA modulus n | ||
568 | * @dp_dma : dma address of RSA CRT exponent of RSA prime factor p | ||
569 | * @dq_dma : dma address of RSA CRT exponent of RSA prime factor q | ||
570 | * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer | ||
571 | * as an internal state buffer. It is assumed to be as long as p. | ||
572 | * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer | ||
573 | * as an internal state buffer. It is assumed to be as long as q. | ||
574 | * @p_q_len : length in bytes of first two prime factors of the RSA modulus n | ||
575 | */ | ||
576 | struct rsa_priv_f3_pdb { | ||
577 | u32 sgf; | ||
578 | dma_addr_t g_dma; | ||
579 | dma_addr_t f_dma; | ||
580 | dma_addr_t c_dma; | ||
581 | dma_addr_t p_dma; | ||
582 | dma_addr_t q_dma; | ||
583 | dma_addr_t dp_dma; | ||
584 | dma_addr_t dq_dma; | ||
585 | dma_addr_t tmp1_dma; | ||
586 | dma_addr_t tmp2_dma; | ||
587 | u32 p_q_len; | ||
588 | } __packed; | ||
589 | |||
528 | #endif | 590 | #endif |
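Note that p_q_len in both private-key PDBs above is a packed word rather than a single length: caampkc.c fills it as (q_sz << RSA_PDB_Q_SHIFT) | p_sz, using the RSA_PDB_Q_SHIFT definition added in this patch (the sgf word packs sizes in the same style). A minimal illustration, assuming 1024-bit primes so that p_sz = q_sz = 128 bytes:

/* Illustration of the p_q_len packing used by the form #2/#3 PDBs. */
#include <stdio.h>

#define RSA_PDB_Q_SHIFT	12

int main(void)
{
	unsigned int p_sz = 128, q_sz = 128;	/* 1024-bit p and q */
	unsigned int p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	printf("p_q_len = 0x%08x\n", p_q_len);	/* prints 0x00080080 */
	return 0;
}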
diff --git a/drivers/crypto/caam/pkc_desc.c b/drivers/crypto/caam/pkc_desc.c index 4e4183e615ea..9e2ce6fe2e43 100644 --- a/drivers/crypto/caam/pkc_desc.c +++ b/drivers/crypto/caam/pkc_desc.c | |||
@@ -34,3 +34,39 @@ void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb) | |||
34 | append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY | | 34 | append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY | |
35 | RSA_PRIV_KEY_FRM_1); | 35 | RSA_PRIV_KEY_FRM_1); |
36 | } | 36 | } |
37 | |||
38 | /* Descriptor for RSA Private operation - Private Key Form #2 */ | ||
39 | void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb) | ||
40 | { | ||
41 | init_job_desc_pdb(desc, 0, sizeof(*pdb)); | ||
42 | append_cmd(desc, pdb->sgf); | ||
43 | append_ptr(desc, pdb->g_dma); | ||
44 | append_ptr(desc, pdb->f_dma); | ||
45 | append_ptr(desc, pdb->d_dma); | ||
46 | append_ptr(desc, pdb->p_dma); | ||
47 | append_ptr(desc, pdb->q_dma); | ||
48 | append_ptr(desc, pdb->tmp1_dma); | ||
49 | append_ptr(desc, pdb->tmp2_dma); | ||
50 | append_cmd(desc, pdb->p_q_len); | ||
51 | append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY | | ||
52 | RSA_PRIV_KEY_FRM_2); | ||
53 | } | ||
54 | |||
55 | /* Descriptor for RSA Private operation - Private Key Form #3 */ | ||
56 | void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb) | ||
57 | { | ||
58 | init_job_desc_pdb(desc, 0, sizeof(*pdb)); | ||
59 | append_cmd(desc, pdb->sgf); | ||
60 | append_ptr(desc, pdb->g_dma); | ||
61 | append_ptr(desc, pdb->f_dma); | ||
62 | append_ptr(desc, pdb->c_dma); | ||
63 | append_ptr(desc, pdb->p_dma); | ||
64 | append_ptr(desc, pdb->q_dma); | ||
65 | append_ptr(desc, pdb->dp_dma); | ||
66 | append_ptr(desc, pdb->dq_dma); | ||
67 | append_ptr(desc, pdb->tmp1_dma); | ||
68 | append_ptr(desc, pdb->tmp2_dma); | ||
69 | append_cmd(desc, pdb->p_q_len); | ||
70 | append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY | | ||
71 | RSA_PRIV_KEY_FRM_3); | ||
72 | } | ||
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c index cc853f913d4b..1b220f3ed017 100644 --- a/drivers/crypto/cavium/cpt/cptvf_algs.c +++ b/drivers/crypto/cavium/cpt/cptvf_algs.c | |||
@@ -98,7 +98,6 @@ static inline void update_output_data(struct cpt_request_info *req_info, | |||
98 | } | 98 | } |
99 | 99 | ||
100 | static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc, | 100 | static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc, |
101 | u32 cipher_type, u32 aes_key_type, | ||
102 | u32 *argcnt) | 101 | u32 *argcnt) |
103 | { | 102 | { |
104 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 103 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
@@ -124,11 +123,11 @@ static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc, | |||
124 | req_info->req.param1 = req->nbytes; /* Encryption Data length */ | 123 | req_info->req.param1 = req->nbytes; /* Encryption Data length */ |
125 | req_info->req.param2 = 0; /*Auth data length */ | 124 | req_info->req.param2 = 0; /*Auth data length */ |
126 | 125 | ||
127 | fctx->enc.enc_ctrl.e.enc_cipher = cipher_type; | 126 | fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type; |
128 | fctx->enc.enc_ctrl.e.aes_key = aes_key_type; | 127 | fctx->enc.enc_ctrl.e.aes_key = ctx->key_type; |
129 | fctx->enc.enc_ctrl.e.iv_source = FROM_DPTR; | 128 | fctx->enc.enc_ctrl.e.iv_source = FROM_DPTR; |
130 | 129 | ||
131 | if (cipher_type == AES_XTS) | 130 | if (ctx->cipher_type == AES_XTS) |
132 | memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2); | 131 | memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2); |
133 | else | 132 | else |
134 | memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len); | 133 | memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len); |
@@ -154,14 +153,13 @@ static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc, | |||
154 | } | 153 | } |
155 | 154 | ||
156 | static inline u32 create_input_list(struct ablkcipher_request *req, u32 enc, | 155 | static inline u32 create_input_list(struct ablkcipher_request *req, u32 enc, |
157 | u32 cipher_type, u32 aes_key_type, | ||
158 | u32 enc_iv_len) | 156 | u32 enc_iv_len) |
159 | { | 157 | { |
160 | struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req); | 158 | struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req); |
161 | struct cpt_request_info *req_info = &rctx->cpt_req; | 159 | struct cpt_request_info *req_info = &rctx->cpt_req; |
162 | u32 argcnt = 0; | 160 | u32 argcnt = 0; |
163 | 161 | ||
164 | create_ctx_hdr(req, enc, cipher_type, aes_key_type, &argcnt); | 162 | create_ctx_hdr(req, enc, &argcnt); |
165 | update_input_iv(req_info, req->info, enc_iv_len, &argcnt); | 163 | update_input_iv(req_info, req->info, enc_iv_len, &argcnt); |
166 | update_input_data(req_info, req->src, req->nbytes, &argcnt); | 164 | update_input_data(req_info, req->src, req->nbytes, &argcnt); |
167 | req_info->incnt = argcnt; | 165 | req_info->incnt = argcnt; |
@@ -177,7 +175,6 @@ static inline void store_cb_info(struct ablkcipher_request *req, | |||
177 | } | 175 | } |
178 | 176 | ||
179 | static inline void create_output_list(struct ablkcipher_request *req, | 177 | static inline void create_output_list(struct ablkcipher_request *req, |
180 | u32 cipher_type, | ||
181 | u32 enc_iv_len) | 178 | u32 enc_iv_len) |
182 | { | 179 | { |
183 | struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req); | 180 | struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req); |
@@ -197,12 +194,9 @@ static inline void create_output_list(struct ablkcipher_request *req, | |||
197 | req_info->outcnt = argcnt; | 194 | req_info->outcnt = argcnt; |
198 | } | 195 | } |
199 | 196 | ||
200 | static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc, | 197 | static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc) |
201 | u32 cipher_type) | ||
202 | { | 198 | { |
203 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 199 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
204 | struct cvm_enc_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
205 | u32 key_type = AES_128_BIT; | ||
206 | struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req); | 200 | struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req); |
207 | u32 enc_iv_len = crypto_ablkcipher_ivsize(tfm); | 201 | u32 enc_iv_len = crypto_ablkcipher_ivsize(tfm); |
208 | struct fc_context *fctx = &rctx->fctx; | 202 | struct fc_context *fctx = &rctx->fctx; |
@@ -210,36 +204,10 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc, | |||
210 | void *cdev = NULL; | 204 | void *cdev = NULL; |
211 | int status; | 205 | int status; |
212 | 206 | ||
213 | switch (ctx->key_len) { | ||
214 | case 16: | ||
215 | key_type = AES_128_BIT; | ||
216 | break; | ||
217 | case 24: | ||
218 | key_type = AES_192_BIT; | ||
219 | break; | ||
220 | case 32: | ||
221 | if (cipher_type == AES_XTS) | ||
222 | key_type = AES_128_BIT; | ||
223 | else | ||
224 | key_type = AES_256_BIT; | ||
225 | break; | ||
226 | case 64: | ||
227 | if (cipher_type == AES_XTS) | ||
228 | key_type = AES_256_BIT; | ||
229 | else | ||
230 | return -EINVAL; | ||
231 | break; | ||
232 | default: | ||
233 | return -EINVAL; | ||
234 | } | ||
235 | |||
236 | if (cipher_type == DES3_CBC) | ||
237 | key_type = 0; | ||
238 | |||
239 | memset(req_info, 0, sizeof(struct cpt_request_info)); | 207 | memset(req_info, 0, sizeof(struct cpt_request_info)); |
240 | memset(fctx, 0, sizeof(struct fc_context)); | 208 | memset(fctx, 0, sizeof(struct fc_context)); |
241 | create_input_list(req, enc, cipher_type, key_type, enc_iv_len); | 209 | create_input_list(req, enc, enc_iv_len); |
242 | create_output_list(req, cipher_type, enc_iv_len); | 210 | create_output_list(req, enc_iv_len); |
243 | store_cb_info(req, req_info); | 211 | store_cb_info(req, req_info); |
244 | cdev = dev_handle.cdev[smp_processor_id()]; | 212 | cdev = dev_handle.cdev[smp_processor_id()]; |
245 | status = cptvf_do_request(cdev, req_info); | 213 | status = cptvf_do_request(cdev, req_info); |
@@ -254,34 +222,14 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc, | |||
254 | return -EINPROGRESS; | 222 | return -EINPROGRESS; |
255 | } | 223 | } |
256 | 224 | ||
257 | int cvm_des3_encrypt_cbc(struct ablkcipher_request *req) | 225 | int cvm_encrypt(struct ablkcipher_request *req) |
258 | { | 226 | { |
259 | return cvm_enc_dec(req, true, DES3_CBC); | 227 | return cvm_enc_dec(req, true); |
260 | } | 228 | } |
261 | 229 | ||
262 | int cvm_des3_decrypt_cbc(struct ablkcipher_request *req) | 230 | int cvm_decrypt(struct ablkcipher_request *req) |
263 | { | 231 | { |
264 | return cvm_enc_dec(req, false, DES3_CBC); | 232 | return cvm_enc_dec(req, false); |
265 | } | ||
266 | |||
267 | int cvm_aes_encrypt_xts(struct ablkcipher_request *req) | ||
268 | { | ||
269 | return cvm_enc_dec(req, true, AES_XTS); | ||
270 | } | ||
271 | |||
272 | int cvm_aes_decrypt_xts(struct ablkcipher_request *req) | ||
273 | { | ||
274 | return cvm_enc_dec(req, false, AES_XTS); | ||
275 | } | ||
276 | |||
277 | int cvm_aes_encrypt_cbc(struct ablkcipher_request *req) | ||
278 | { | ||
279 | return cvm_enc_dec(req, true, AES_CBC); | ||
280 | } | ||
281 | |||
282 | int cvm_aes_decrypt_cbc(struct ablkcipher_request *req) | ||
283 | { | ||
284 | return cvm_enc_dec(req, false, AES_CBC); | ||
285 | } | 233 | } |
286 | 234 | ||
287 | int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | 235 | int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key, |
@@ -299,24 +247,93 @@ int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | |||
299 | ctx->key_len = keylen; | 247 | ctx->key_len = keylen; |
300 | memcpy(ctx->enc_key, key1, keylen / 2); | 248 | memcpy(ctx->enc_key, key1, keylen / 2); |
301 | memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2); | 249 | memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2); |
250 | ctx->cipher_type = AES_XTS; | ||
251 | switch (ctx->key_len) { | ||
252 | case 32: | ||
253 | ctx->key_type = AES_128_BIT; | ||
254 | break; | ||
255 | case 64: | ||
256 | ctx->key_type = AES_256_BIT; | ||
257 | break; | ||
258 | default: | ||
259 | return -EINVAL; | ||
260 | } | ||
302 | 261 | ||
303 | return 0; | 262 | return 0; |
304 | } | 263 | } |
305 | 264 | ||
306 | int cvm_enc_dec_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | 265 | static int cvm_validate_keylen(struct cvm_enc_ctx *ctx, u32 keylen) |
307 | u32 keylen) | 266 | { |
267 | if ((keylen == 16) || (keylen == 24) || (keylen == 32)) { | ||
268 | ctx->key_len = keylen; | ||
269 | switch (ctx->key_len) { | ||
270 | case 16: | ||
271 | ctx->key_type = AES_128_BIT; | ||
272 | break; | ||
273 | case 24: | ||
274 | ctx->key_type = AES_192_BIT; | ||
275 | break; | ||
276 | case 32: | ||
277 | ctx->key_type = AES_256_BIT; | ||
278 | break; | ||
279 | default: | ||
280 | return -EINVAL; | ||
281 | } | ||
282 | |||
283 | if (ctx->cipher_type == DES3_CBC) | ||
284 | ctx->key_type = 0; | ||
285 | |||
286 | return 0; | ||
287 | } | ||
288 | |||
289 | return -EINVAL; | ||
290 | } | ||
291 | |||
292 | static int cvm_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | ||
293 | u32 keylen, u8 cipher_type) | ||
308 | { | 294 | { |
309 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | 295 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); |
310 | struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm); | 296 | struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm); |
311 | 297 | ||
312 | if ((keylen == 16) || (keylen == 24) || (keylen == 32)) { | 298 | ctx->cipher_type = cipher_type; |
313 | ctx->key_len = keylen; | 299 | if (!cvm_validate_keylen(ctx, keylen)) { |
314 | memcpy(ctx->enc_key, key, keylen); | 300 | memcpy(ctx->enc_key, key, keylen); |
315 | return 0; | 301 | return 0; |
302 | } else { | ||
303 | crypto_ablkcipher_set_flags(cipher, | ||
304 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
305 | return -EINVAL; | ||
316 | } | 306 | } |
317 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | 307 | } |
318 | 308 | ||
319 | return -EINVAL; | 309 | static int cvm_cbc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, |
310 | u32 keylen) | ||
311 | { | ||
312 | return cvm_setkey(cipher, key, keylen, AES_CBC); | ||
313 | } | ||
314 | |||
315 | static int cvm_ecb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | ||
316 | u32 keylen) | ||
317 | { | ||
318 | return cvm_setkey(cipher, key, keylen, AES_ECB); | ||
319 | } | ||
320 | |||
321 | static int cvm_cfb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | ||
322 | u32 keylen) | ||
323 | { | ||
324 | return cvm_setkey(cipher, key, keylen, AES_CFB); | ||
325 | } | ||
326 | |||
327 | static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | ||
328 | u32 keylen) | ||
329 | { | ||
330 | return cvm_setkey(cipher, key, keylen, DES3_CBC); | ||
331 | } | ||
332 | |||
333 | static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, | ||
334 | u32 keylen) | ||
335 | { | ||
336 | return cvm_setkey(cipher, key, keylen, DES3_ECB); | ||
320 | } | 337 | } |
321 | 338 | ||
322 | int cvm_enc_dec_init(struct crypto_tfm *tfm) | 339 | int cvm_enc_dec_init(struct crypto_tfm *tfm) |
@@ -349,8 +366,8 @@ struct crypto_alg algs[] = { { | |||
349 | .min_keysize = 2 * AES_MIN_KEY_SIZE, | 366 | .min_keysize = 2 * AES_MIN_KEY_SIZE, |
350 | .max_keysize = 2 * AES_MAX_KEY_SIZE, | 367 | .max_keysize = 2 * AES_MAX_KEY_SIZE, |
351 | .setkey = cvm_xts_setkey, | 368 | .setkey = cvm_xts_setkey, |
352 | .encrypt = cvm_aes_encrypt_xts, | 369 | .encrypt = cvm_encrypt, |
353 | .decrypt = cvm_aes_decrypt_xts, | 370 | .decrypt = cvm_decrypt, |
354 | }, | 371 | }, |
355 | }, | 372 | }, |
356 | .cra_init = cvm_enc_dec_init, | 373 | .cra_init = cvm_enc_dec_init, |
@@ -369,9 +386,51 @@ struct crypto_alg algs[] = { { | |||
369 | .ivsize = AES_BLOCK_SIZE, | 386 | .ivsize = AES_BLOCK_SIZE, |
370 | .min_keysize = AES_MIN_KEY_SIZE, | 387 | .min_keysize = AES_MIN_KEY_SIZE, |
371 | .max_keysize = AES_MAX_KEY_SIZE, | 388 | .max_keysize = AES_MAX_KEY_SIZE, |
372 | .setkey = cvm_enc_dec_setkey, | 389 | .setkey = cvm_cbc_aes_setkey, |
373 | .encrypt = cvm_aes_encrypt_cbc, | 390 | .encrypt = cvm_encrypt, |
374 | .decrypt = cvm_aes_decrypt_cbc, | 391 | .decrypt = cvm_decrypt, |
392 | }, | ||
393 | }, | ||
394 | .cra_init = cvm_enc_dec_init, | ||
395 | .cra_module = THIS_MODULE, | ||
396 | }, { | ||
397 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
398 | .cra_blocksize = AES_BLOCK_SIZE, | ||
399 | .cra_ctxsize = sizeof(struct cvm_enc_ctx), | ||
400 | .cra_alignmask = 7, | ||
401 | .cra_priority = 4001, | ||
402 | .cra_name = "ecb(aes)", | ||
403 | .cra_driver_name = "cavium-ecb-aes", | ||
404 | .cra_type = &crypto_ablkcipher_type, | ||
405 | .cra_u = { | ||
406 | .ablkcipher = { | ||
407 | .ivsize = AES_BLOCK_SIZE, | ||
408 | .min_keysize = AES_MIN_KEY_SIZE, | ||
409 | .max_keysize = AES_MAX_KEY_SIZE, | ||
410 | .setkey = cvm_ecb_aes_setkey, | ||
411 | .encrypt = cvm_encrypt, | ||
412 | .decrypt = cvm_decrypt, | ||
413 | }, | ||
414 | }, | ||
415 | .cra_init = cvm_enc_dec_init, | ||
416 | .cra_module = THIS_MODULE, | ||
417 | }, { | ||
418 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
419 | .cra_blocksize = AES_BLOCK_SIZE, | ||
420 | .cra_ctxsize = sizeof(struct cvm_enc_ctx), | ||
421 | .cra_alignmask = 7, | ||
422 | .cra_priority = 4001, | ||
423 | .cra_name = "cfb(aes)", | ||
424 | .cra_driver_name = "cavium-cfb-aes", | ||
425 | .cra_type = &crypto_ablkcipher_type, | ||
426 | .cra_u = { | ||
427 | .ablkcipher = { | ||
428 | .ivsize = AES_BLOCK_SIZE, | ||
429 | .min_keysize = AES_MIN_KEY_SIZE, | ||
430 | .max_keysize = AES_MAX_KEY_SIZE, | ||
431 | .setkey = cvm_cfb_aes_setkey, | ||
432 | .encrypt = cvm_encrypt, | ||
433 | .decrypt = cvm_decrypt, | ||
375 | }, | 434 | }, |
376 | }, | 435 | }, |
377 | .cra_init = cvm_enc_dec_init, | 436 | .cra_init = cvm_enc_dec_init, |
@@ -390,9 +449,30 @@ struct crypto_alg algs[] = { { | |||
390 | .min_keysize = DES3_EDE_KEY_SIZE, | 449 | .min_keysize = DES3_EDE_KEY_SIZE, |
391 | .max_keysize = DES3_EDE_KEY_SIZE, | 450 | .max_keysize = DES3_EDE_KEY_SIZE, |
392 | .ivsize = DES_BLOCK_SIZE, | 451 | .ivsize = DES_BLOCK_SIZE, |
393 | .setkey = cvm_enc_dec_setkey, | 452 | .setkey = cvm_cbc_des3_setkey, |
394 | .encrypt = cvm_des3_encrypt_cbc, | 453 | .encrypt = cvm_encrypt, |
395 | .decrypt = cvm_des3_decrypt_cbc, | 454 | .decrypt = cvm_decrypt, |
455 | }, | ||
456 | }, | ||
457 | .cra_init = cvm_enc_dec_init, | ||
458 | .cra_module = THIS_MODULE, | ||
459 | }, { | ||
460 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | ||
461 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
462 | .cra_ctxsize = sizeof(struct cvm_des3_ctx), | ||
463 | .cra_alignmask = 7, | ||
464 | .cra_priority = 4001, | ||
465 | .cra_name = "ecb(des3_ede)", | ||
466 | .cra_driver_name = "cavium-ecb-des3_ede", | ||
467 | .cra_type = &crypto_ablkcipher_type, | ||
468 | .cra_u = { | ||
469 | .ablkcipher = { | ||
470 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
471 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
472 | .ivsize = DES_BLOCK_SIZE, | ||
473 | .setkey = cvm_ecb_des3_setkey, | ||
474 | .encrypt = cvm_encrypt, | ||
475 | .decrypt = cvm_decrypt, | ||
396 | }, | 476 | }, |
397 | }, | 477 | }, |
398 | .cra_init = cvm_enc_dec_init, | 478 | .cra_init = cvm_enc_dec_init, |
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.h b/drivers/crypto/cavium/cpt/cptvf_algs.h index a12050d11b0c..902f25751123 100644 --- a/drivers/crypto/cavium/cpt/cptvf_algs.h +++ b/drivers/crypto/cavium/cpt/cptvf_algs.h | |||
@@ -77,6 +77,11 @@ union encr_ctrl { | |||
77 | } e; | 77 | } e; |
78 | }; | 78 | }; |
79 | 79 | ||
80 | struct cvm_cipher { | ||
81 | const char *name; | ||
82 | u8 value; | ||
83 | }; | ||
84 | |||
80 | struct enc_context { | 85 | struct enc_context { |
81 | union encr_ctrl enc_ctrl; | 86 | union encr_ctrl enc_ctrl; |
82 | u8 encr_key[32]; | 87 | u8 encr_key[32]; |
@@ -96,6 +101,8 @@ struct fc_context { | |||
96 | struct cvm_enc_ctx { | 101 | struct cvm_enc_ctx { |
97 | u32 key_len; | 102 | u32 key_len; |
98 | u8 enc_key[MAX_KEY_SIZE]; | 103 | u8 enc_key[MAX_KEY_SIZE]; |
104 | u8 cipher_type:4; | ||
105 | u8 key_type:2; | ||
99 | }; | 106 | }; |
100 | 107 | ||
101 | struct cvm_des3_ctx { | 108 | struct cvm_des3_ctx { |
diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c index 6ffc740c7431..5c796ed55eba 100644 --- a/drivers/crypto/cavium/cpt/cptvf_main.c +++ b/drivers/crypto/cavium/cpt/cptvf_main.c | |||
@@ -525,7 +525,7 @@ static irqreturn_t cptvf_misc_intr_handler(int irq, void *cptvf_irq) | |||
525 | intr = cptvf_read_vf_misc_intr_status(cptvf); | 525 | intr = cptvf_read_vf_misc_intr_status(cptvf); |
526 | /*Check for MISC interrupt types*/ | 526 | /*Check for MISC interrupt types*/ |
527 | if (likely(intr & CPT_VF_INTR_MBOX_MASK)) { | 527 | if (likely(intr & CPT_VF_INTR_MBOX_MASK)) { |
528 | dev_err(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n", | 528 | dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n", |
529 | intr, cptvf->vfid); | 529 | intr, cptvf->vfid); |
530 | cptvf_handle_mbox_intr(cptvf); | 530 | cptvf_handle_mbox_intr(cptvf); |
531 | cptvf_clear_mbox_intr(cptvf); | 531 | cptvf_clear_mbox_intr(cptvf); |
diff --git a/drivers/crypto/cavium/nitrox/Kconfig b/drivers/crypto/cavium/nitrox/Kconfig new file mode 100644 index 000000000000..731e6a57218a --- /dev/null +++ b/drivers/crypto/cavium/nitrox/Kconfig | |||
@@ -0,0 +1,21 @@ | |||
1 | # | ||
2 | # Cavium NITROX Crypto Device configuration | ||
3 | # | ||
4 | config CRYPTO_DEV_NITROX | ||
5 | tristate | ||
6 | select CRYPTO_BLKCIPHER | ||
7 | select CRYPTO_AES | ||
8 | select CRYPTO_DES | ||
9 | select FW_LOADER | ||
10 | |||
11 | config CRYPTO_DEV_NITROX_CNN55XX | ||
12 | tristate "Support for Cavium CNN55XX driver" | ||
13 | depends on PCI_MSI && 64BIT | ||
14 | select CRYPTO_DEV_NITROX | ||
15 | default m | ||
16 | help | ||
17 | Support for Cavium NITROX family CNN55XX driver | ||
18 | for accelerating crypto workloads. | ||
19 | |||
20 | To compile this as a module, choose M here: the module | ||
21 | will be called n5pf. | ||
diff --git a/drivers/crypto/cavium/nitrox/Makefile b/drivers/crypto/cavium/nitrox/Makefile new file mode 100644 index 000000000000..5af2e4368267 --- /dev/null +++ b/drivers/crypto/cavium/nitrox/Makefile | |||
@@ -0,0 +1,8 @@ | |||
1 | obj-$(CONFIG_CRYPTO_DEV_NITROX_CNN55XX) += n5pf.o | ||
2 | |||
3 | n5pf-objs := nitrox_main.o \ | ||
4 | nitrox_isr.o \ | ||
5 | nitrox_lib.o \ | ||
6 | nitrox_hal.o \ | ||
7 | nitrox_reqmgr.o \ | ||
8 | nitrox_algs.o | ||
diff --git a/drivers/crypto/cavium/nitrox/nitrox_algs.c b/drivers/crypto/cavium/nitrox/nitrox_algs.c new file mode 100644 index 000000000000..ce330278ef8a --- /dev/null +++ b/drivers/crypto/cavium/nitrox/nitrox_algs.c | |||
@@ -0,0 +1,457 @@ | |||
1 | #include <linux/crypto.h> | ||
2 | #include <linux/kernel.h> | ||
3 | #include <linux/module.h> | ||
4 | #include <linux/printk.h> | ||
5 | |||
6 | #include <crypto/aes.h> | ||
7 | #include <crypto/skcipher.h> | ||
8 | #include <crypto/ctr.h> | ||
9 | #include <crypto/des.h> | ||
10 | #include <crypto/xts.h> | ||
11 | |||
12 | #include "nitrox_dev.h" | ||
13 | #include "nitrox_common.h" | ||
14 | #include "nitrox_req.h" | ||
15 | |||
16 | #define PRIO 4001 | ||
17 | |||
18 | struct nitrox_cipher { | ||
19 | const char *name; | ||
20 | enum flexi_cipher value; | ||
21 | }; | ||
22 | |||
23 | /** | ||
24 | * supported cipher list | ||
25 | */ | ||
26 | static const struct nitrox_cipher flexi_cipher_table[] = { | ||
27 | { "null", CIPHER_NULL }, | ||
28 | { "cbc(des3_ede)", CIPHER_3DES_CBC }, | ||
29 | { "ecb(des3_ede)", CIPHER_3DES_ECB }, | ||
30 | { "cbc(aes)", CIPHER_AES_CBC }, | ||
31 | { "ecb(aes)", CIPHER_AES_ECB }, | ||
32 | { "cfb(aes)", CIPHER_AES_CFB }, | ||
33 | { "rfc3686(ctr(aes))", CIPHER_AES_CTR }, | ||
34 | { "xts(aes)", CIPHER_AES_XTS }, | ||
35 | { "cts(cbc(aes))", CIPHER_AES_CBC_CTS }, | ||
36 | { NULL, CIPHER_INVALID } | ||
37 | }; | ||
38 | |||
39 | static enum flexi_cipher flexi_cipher_type(const char *name) | ||
40 | { | ||
41 | const struct nitrox_cipher *cipher = flexi_cipher_table; | ||
42 | |||
43 | while (cipher->name) { | ||
44 | if (!strcmp(cipher->name, name)) | ||
45 | break; | ||
46 | cipher++; | ||
47 | } | ||
48 | return cipher->value; | ||
49 | } | ||
50 | |||
51 | static int flexi_aes_keylen(int keylen) | ||
52 | { | ||
53 | int aes_keylen; | ||
54 | |||
55 | switch (keylen) { | ||
56 | case AES_KEYSIZE_128: | ||
57 | aes_keylen = 1; | ||
58 | break; | ||
59 | case AES_KEYSIZE_192: | ||
60 | aes_keylen = 2; | ||
61 | break; | ||
62 | case AES_KEYSIZE_256: | ||
63 | aes_keylen = 3; | ||
64 | break; | ||
65 | default: | ||
66 | aes_keylen = -EINVAL; | ||
67 | break; | ||
68 | } | ||
69 | return aes_keylen; | ||
70 | } | ||
71 | |||
72 | static int nitrox_skcipher_init(struct crypto_skcipher *tfm) | ||
73 | { | ||
74 | struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm); | ||
75 | void *fctx; | ||
76 | |||
77 | /* get the first device */ | ||
78 | nctx->ndev = nitrox_get_first_device(); | ||
79 | if (!nctx->ndev) | ||
80 | return -ENODEV; | ||
81 | |||
82 | /* allocate nitrox crypto context */ | ||
83 | fctx = crypto_alloc_context(nctx->ndev); | ||
84 | if (!fctx) { | ||
85 | nitrox_put_device(nctx->ndev); | ||
86 | return -ENOMEM; | ||
87 | } | ||
88 | nctx->u.ctx_handle = (uintptr_t)fctx; | ||
89 | crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) + | ||
90 | sizeof(struct nitrox_kcrypt_request)); | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | static void nitrox_skcipher_exit(struct crypto_skcipher *tfm) | ||
95 | { | ||
96 | struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm); | ||
97 | |||
98 | /* free the nitrox crypto context */ | ||
99 | if (nctx->u.ctx_handle) { | ||
100 | struct flexi_crypto_context *fctx = nctx->u.fctx; | ||
101 | |||
102 | memset(&fctx->crypto, 0, sizeof(struct crypto_keys)); | ||
103 | memset(&fctx->auth, 0, sizeof(struct auth_keys)); | ||
104 | crypto_free_context((void *)fctx); | ||
105 | } | ||
106 | nitrox_put_device(nctx->ndev); | ||
107 | |||
108 | nctx->u.ctx_handle = 0; | ||
109 | nctx->ndev = NULL; | ||
110 | } | ||
111 | |||
112 | static inline int nitrox_skcipher_setkey(struct crypto_skcipher *cipher, | ||
113 | int aes_keylen, const u8 *key, | ||
114 | unsigned int keylen) | ||
115 | { | ||
116 | struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); | ||
117 | struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); | ||
118 | struct flexi_crypto_context *fctx; | ||
119 | enum flexi_cipher cipher_type; | ||
120 | const char *name; | ||
121 | |||
122 | name = crypto_tfm_alg_name(tfm); | ||
123 | cipher_type = flexi_cipher_type(name); | ||
124 | if (unlikely(cipher_type == CIPHER_INVALID)) { | ||
125 | pr_err("unsupported cipher: %s\n", name); | ||
126 | return -EINVAL; | ||
127 | } | ||
128 | |||
129 | /* fill crypto context */ | ||
130 | fctx = nctx->u.fctx; | ||
131 | fctx->flags = 0; | ||
132 | fctx->w0.cipher_type = cipher_type; | ||
133 | fctx->w0.aes_keylen = aes_keylen; | ||
134 | fctx->w0.iv_source = IV_FROM_DPTR; | ||
135 | fctx->flags = cpu_to_be64(*(u64 *)&fctx->w0); | ||
136 | /* copy the key to context */ | ||
137 | memcpy(fctx->crypto.u.key, key, keylen); | ||
138 | |||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, | ||
143 | unsigned int keylen) | ||
144 | { | ||
145 | int aes_keylen; | ||
146 | |||
147 | aes_keylen = flexi_aes_keylen(keylen); | ||
148 | if (aes_keylen < 0) { | ||
149 | crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
150 | return -EINVAL; | ||
151 | } | ||
152 | return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); | ||
153 | } | ||
154 | |||
155 | static void nitrox_skcipher_callback(struct skcipher_request *skreq, | ||
156 | int err) | ||
157 | { | ||
158 | if (err) { | ||
159 | pr_err_ratelimited("request failed status 0x%0x\n", err); | ||
160 | err = -EINVAL; | ||
161 | } | ||
162 | skcipher_request_complete(skreq, err); | ||
163 | } | ||
164 | |||
165 | static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc) | ||
166 | { | ||
167 | struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq); | ||
168 | struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher); | ||
169 | struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); | ||
170 | int ivsize = crypto_skcipher_ivsize(cipher); | ||
171 | struct se_crypto_request *creq; | ||
172 | |||
173 | creq = &nkreq->creq; | ||
174 | creq->flags = skreq->base.flags; | ||
175 | creq->gfp = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? | ||
176 | GFP_KERNEL : GFP_ATOMIC; | ||
177 | |||
178 | /* fill the request */ | ||
179 | creq->ctrl.value = 0; | ||
180 | creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC; | ||
181 | creq->ctrl.s.arg = (enc ? ENCRYPT : DECRYPT); | ||
182 | /* param0: length of the data to be encrypted */ | ||
183 | creq->gph.param0 = cpu_to_be16(skreq->cryptlen); | ||
184 | creq->gph.param1 = 0; | ||
185 | /* param2: encryption data offset */ | ||
186 | creq->gph.param2 = cpu_to_be16(ivsize); | ||
187 | creq->gph.param3 = 0; | ||
188 | |||
189 | creq->ctx_handle = nctx->u.ctx_handle; | ||
190 | creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context); | ||
191 | |||
192 | /* copy the iv */ | ||
193 | memcpy(creq->iv, skreq->iv, ivsize); | ||
194 | creq->ivsize = ivsize; | ||
195 | creq->src = skreq->src; | ||
196 | creq->dst = skreq->dst; | ||
197 | |||
198 | nkreq->nctx = nctx; | ||
199 | nkreq->skreq = skreq; | ||
200 | |||
201 | /* send the crypto request */ | ||
202 | return nitrox_process_se_request(nctx->ndev, creq, | ||
203 | nitrox_skcipher_callback, skreq); | ||
204 | } | ||
205 | |||
206 | static int nitrox_aes_encrypt(struct skcipher_request *skreq) | ||
207 | { | ||
208 | return nitrox_skcipher_crypt(skreq, true); | ||
209 | } | ||
210 | |||
211 | static int nitrox_aes_decrypt(struct skcipher_request *skreq) | ||
212 | { | ||
213 | return nitrox_skcipher_crypt(skreq, false); | ||
214 | } | ||
215 | |||
216 | static int nitrox_3des_setkey(struct crypto_skcipher *cipher, | ||
217 | const u8 *key, unsigned int keylen) | ||
218 | { | ||
219 | if (keylen != DES3_EDE_KEY_SIZE) { | ||
220 | crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
221 | return -EINVAL; | ||
222 | } | ||
223 | |||
224 | return nitrox_skcipher_setkey(cipher, 0, key, keylen); | ||
225 | } | ||
226 | |||
227 | static int nitrox_3des_encrypt(struct skcipher_request *skreq) | ||
228 | { | ||
229 | return nitrox_skcipher_crypt(skreq, true); | ||
230 | } | ||
231 | |||
232 | static int nitrox_3des_decrypt(struct skcipher_request *skreq) | ||
233 | { | ||
234 | return nitrox_skcipher_crypt(skreq, false); | ||
235 | } | ||
236 | |||
237 | static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher, | ||
238 | const u8 *key, unsigned int keylen) | ||
239 | { | ||
240 | struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); | ||
241 | struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); | ||
242 | struct flexi_crypto_context *fctx; | ||
243 | int aes_keylen, ret; | ||
244 | |||
245 | ret = xts_check_key(tfm, key, keylen); | ||
246 | if (ret) | ||
247 | return ret; | ||
248 | |||
249 | keylen /= 2; | ||
250 | |||
251 | aes_keylen = flexi_aes_keylen(keylen); | ||
252 | if (aes_keylen < 0) { | ||
253 | crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
254 | return -EINVAL; | ||
255 | } | ||
256 | |||
257 | fctx = nctx->u.fctx; | ||
258 | /* copy KEY2 */ | ||
259 | memcpy(fctx->auth.u.key2, (key + keylen), keylen); | ||
260 | |||
261 | return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); | ||
262 | } | ||
263 | |||
264 | static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher, | ||
265 | const u8 *key, unsigned int keylen) | ||
266 | { | ||
267 | struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); | ||
268 | struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); | ||
269 | struct flexi_crypto_context *fctx; | ||
270 | int aes_keylen; | ||
271 | |||
272 | if (keylen < CTR_RFC3686_NONCE_SIZE) | ||
273 | return -EINVAL; | ||
274 | |||
275 | fctx = nctx->u.fctx; | ||
276 | |||
277 | memcpy(fctx->crypto.iv, key + (keylen - CTR_RFC3686_NONCE_SIZE), | ||
278 | CTR_RFC3686_NONCE_SIZE); | ||
279 | |||
280 | keylen -= CTR_RFC3686_NONCE_SIZE; | ||
281 | |||
282 | aes_keylen = flexi_aes_keylen(keylen); | ||
283 | if (aes_keylen < 0) { | ||
284 | crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
285 | return -EINVAL; | ||
286 | } | ||
287 | return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); | ||
288 | } | ||
289 | |||
290 | static struct skcipher_alg nitrox_skciphers[] = { { | ||
291 | .base = { | ||
292 | .cra_name = "cbc(aes)", | ||
293 | .cra_driver_name = "n5_cbc(aes)", | ||
294 | .cra_priority = PRIO, | ||
295 | .cra_flags = CRYPTO_ALG_ASYNC, | ||
296 | .cra_blocksize = AES_BLOCK_SIZE, | ||
297 | .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), | ||
298 | .cra_alignmask = 0, | ||
299 | .cra_module = THIS_MODULE, | ||
300 | }, | ||
301 | .min_keysize = AES_MIN_KEY_SIZE, | ||
302 | .max_keysize = AES_MAX_KEY_SIZE, | ||
303 | .ivsize = AES_BLOCK_SIZE, | ||
304 | .setkey = nitrox_aes_setkey, | ||
305 | .encrypt = nitrox_aes_encrypt, | ||
306 | .decrypt = nitrox_aes_decrypt, | ||
307 | .init = nitrox_skcipher_init, | ||
308 | .exit = nitrox_skcipher_exit, | ||
309 | }, { | ||
310 | .base = { | ||
311 | .cra_name = "ecb(aes)", | ||
312 | .cra_driver_name = "n5_ecb(aes)", | ||
313 | .cra_priority = PRIO, | ||
314 | .cra_flags = CRYPTO_ALG_ASYNC, | ||
315 | .cra_blocksize = AES_BLOCK_SIZE, | ||
316 | .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), | ||
317 | .cra_alignmask = 0, | ||
318 | .cra_module = THIS_MODULE, | ||
319 | }, | ||
320 | .min_keysize = AES_MIN_KEY_SIZE, | ||
321 | .max_keysize = AES_MAX_KEY_SIZE, | ||
322 | .ivsize = AES_BLOCK_SIZE, | ||
323 | .setkey = nitrox_aes_setkey, | ||
324 | .encrypt = nitrox_aes_encrypt, | ||
325 | .decrypt = nitrox_aes_decrypt, | ||
326 | .init = nitrox_skcipher_init, | ||
327 | .exit = nitrox_skcipher_exit, | ||
328 | }, { | ||
329 | .base = { | ||
330 | .cra_name = "cfb(aes)", | ||
331 | .cra_driver_name = "n5_cfb(aes)", | ||
332 | .cra_priority = PRIO, | ||
333 | .cra_flags = CRYPTO_ALG_ASYNC, | ||
334 | .cra_blocksize = AES_BLOCK_SIZE, | ||
335 | .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), | ||
336 | .cra_alignmask = 0, | ||
337 | .cra_module = THIS_MODULE, | ||
338 | }, | ||
339 | .min_keysize = AES_MIN_KEY_SIZE, | ||
340 | .max_keysize = AES_MAX_KEY_SIZE, | ||
341 | .ivsize = AES_BLOCK_SIZE, | ||
342 | .setkey = nitrox_aes_setkey, | ||
343 | .encrypt = nitrox_aes_encrypt, | ||
344 | .decrypt = nitrox_aes_decrypt, | ||
345 | .init = nitrox_skcipher_init, | ||
346 | .exit = nitrox_skcipher_exit, | ||
347 | }, { | ||
348 | .base = { | ||
349 | .cra_name = "xts(aes)", | ||
350 | .cra_driver_name = "n5_xts(aes)", | ||
351 | .cra_priority = PRIO, | ||
352 | .cra_flags = CRYPTO_ALG_ASYNC, | ||
353 | .cra_blocksize = AES_BLOCK_SIZE, | ||
354 | .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), | ||
355 | .cra_alignmask = 0, | ||
356 | .cra_module = THIS_MODULE, | ||
357 | }, | ||
358 | .min_keysize = 2 * AES_MIN_KEY_SIZE, | ||
359 | .max_keysize = 2 * AES_MAX_KEY_SIZE, | ||
360 | .ivsize = AES_BLOCK_SIZE, | ||
361 | .setkey = nitrox_aes_xts_setkey, | ||
362 | .encrypt = nitrox_aes_encrypt, | ||
363 | .decrypt = nitrox_aes_decrypt, | ||
364 | .init = nitrox_skcipher_init, | ||
365 | .exit = nitrox_skcipher_exit, | ||
366 | }, { | ||
367 | .base = { | ||
368 | .cra_name = "rfc3686(ctr(aes))", | ||
369 | .cra_driver_name = "n5_rfc3686(ctr(aes))", | ||
370 | .cra_priority = PRIO, | ||
371 | .cra_flags = CRYPTO_ALG_ASYNC, | ||
372 | .cra_blocksize = 1, | ||
373 | .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), | ||
374 | .cra_alignmask = 0, | ||
375 | .cra_module = THIS_MODULE, | ||
376 | }, | ||
377 | .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, | ||
378 | .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, | ||
379 | .ivsize = CTR_RFC3686_IV_SIZE, | ||
380 | .init = nitrox_skcipher_init, | ||
381 | .exit = nitrox_skcipher_exit, | ||
382 | .setkey = nitrox_aes_ctr_rfc3686_setkey, | ||
383 | .encrypt = nitrox_aes_encrypt, | ||
384 | .decrypt = nitrox_aes_decrypt, | ||
385 | }, { | ||
386 | .base = { | ||
387 | .cra_name = "cts(cbc(aes))", | ||
388 | .cra_driver_name = "n5_cts(cbc(aes))", | ||
389 | .cra_priority = PRIO, | ||
390 | .cra_flags = CRYPTO_ALG_ASYNC, | ||
391 | .cra_blocksize = AES_BLOCK_SIZE, | ||
392 | .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), | ||
393 | .cra_alignmask = 0, | ||
394 | .cra_type = &crypto_ablkcipher_type, | ||
395 | .cra_module = THIS_MODULE, | ||
396 | }, | ||
397 | .min_keysize = AES_MIN_KEY_SIZE, | ||
398 | .max_keysize = AES_MAX_KEY_SIZE, | ||
399 | .ivsize = AES_BLOCK_SIZE, | ||
400 | .setkey = nitrox_aes_setkey, | ||
401 | .encrypt = nitrox_aes_encrypt, | ||
402 | .decrypt = nitrox_aes_decrypt, | ||
403 | .init = nitrox_skcipher_init, | ||
404 | .exit = nitrox_skcipher_exit, | ||
405 | }, { | ||
406 | .base = { | ||
407 | .cra_name = "cbc(des3_ede)", | ||
408 | .cra_driver_name = "n5_cbc(des3_ede)", | ||
409 | .cra_priority = PRIO, | ||
410 | .cra_flags = CRYPTO_ALG_ASYNC, | ||
411 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
412 | .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), | ||
413 | .cra_alignmask = 0, | ||
414 | .cra_module = THIS_MODULE, | ||
415 | }, | ||
416 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
417 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
418 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
419 | .setkey = nitrox_3des_setkey, | ||
420 | .encrypt = nitrox_3des_encrypt, | ||
421 | .decrypt = nitrox_3des_decrypt, | ||
422 | .init = nitrox_skcipher_init, | ||
423 | .exit = nitrox_skcipher_exit, | ||
424 | }, { | ||
425 | .base = { | ||
426 | .cra_name = "ecb(des3_ede)", | ||
427 | .cra_driver_name = "n5_ecb(des3_ede)", | ||
428 | .cra_priority = PRIO, | ||
429 | .cra_flags = CRYPTO_ALG_ASYNC, | ||
430 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
431 | .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), | ||
432 | .cra_alignmask = 0, | ||
433 | .cra_module = THIS_MODULE, | ||
434 | }, | ||
435 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
436 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
437 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
438 | .setkey = nitrox_3des_setkey, | ||
439 | .encrypt = nitrox_3des_encrypt, | ||
440 | .decrypt = nitrox_3des_decrypt, | ||
441 | .init = nitrox_skcipher_init, | ||
442 | .exit = nitrox_skcipher_exit, | ||
443 | } | ||
444 | |||
445 | }; | ||
446 | |||
447 | int nitrox_crypto_register(void) | ||
448 | { | ||
449 | return crypto_register_skciphers(nitrox_skciphers, | ||
450 | ARRAY_SIZE(nitrox_skciphers)); | ||
451 | } | ||
452 | |||
453 | void nitrox_crypto_unregister(void) | ||
454 | { | ||
455 | crypto_unregister_skciphers(nitrox_skciphers, | ||
456 | ARRAY_SIZE(nitrox_skciphers)); | ||
457 | } | ||
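The table above exposes the NITROX queues under standard names such as "cbc(aes)" and "xts(aes)", with "n5_*" driver names at priority PRIO. A minimal sketch of how an in-kernel caller would reach one of them through the generic skcipher API follows; it is illustrative only, and the n5_test_* helpers are invented for the example rather than part of this driver. Whichever "cbc(aes)" implementation has the highest priority is selected at allocation time.

#include <crypto/skcipher.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct n5_test_result {				/* invented for this example */
	struct completion completion;
	int err;
};

static void n5_test_done(struct crypto_async_request *req, int err)
{
	struct n5_test_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

/* Encrypt @len bytes of @buf in place with AES-CBC; @len must be a
 * multiple of AES_BLOCK_SIZE and @buf must not live on the stack.
 */
static int n5_test_encrypt(u8 *buf, unsigned int len,
			   const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	struct n5_test_result res;
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_tfm;
	}

	init_completion(&res.completion);
	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      n5_test_done, &res);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* async completion mirrors nitrox_skcipher_callback() above */
	ret = crypto_skcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&res.completion);
		ret = res.err;
	}

	skcipher_request_free(req);
free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}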
diff --git a/drivers/crypto/cavium/nitrox/nitrox_common.h b/drivers/crypto/cavium/nitrox/nitrox_common.h new file mode 100644 index 000000000000..4888c7823a5f --- /dev/null +++ b/drivers/crypto/cavium/nitrox/nitrox_common.h | |||
@@ -0,0 +1,42 @@ | |||
1 | #ifndef __NITROX_COMMON_H | ||
2 | #define __NITROX_COMMON_H | ||
3 | |||
4 | #include "nitrox_dev.h" | ||
5 | #include "nitrox_req.h" | ||
6 | |||
7 | int nitrox_crypto_register(void); | ||
8 | void nitrox_crypto_unregister(void); | ||
9 | void *crypto_alloc_context(struct nitrox_device *ndev); | ||
10 | void crypto_free_context(void *ctx); | ||
11 | struct nitrox_device *nitrox_get_first_device(void); | ||
12 | void nitrox_put_device(struct nitrox_device *ndev); | ||
13 | |||
14 | void nitrox_pf_cleanup_isr(struct nitrox_device *ndev); | ||
15 | int nitrox_pf_init_isr(struct nitrox_device *ndev); | ||
16 | |||
17 | int nitrox_common_sw_init(struct nitrox_device *ndev); | ||
18 | void nitrox_common_sw_cleanup(struct nitrox_device *ndev); | ||
19 | |||
20 | void pkt_slc_resp_handler(unsigned long data); | ||
21 | int nitrox_process_se_request(struct nitrox_device *ndev, | ||
22 | struct se_crypto_request *req, | ||
23 | completion_t cb, | ||
24 | struct skcipher_request *skreq); | ||
25 | void backlog_qflush_work(struct work_struct *work); | ||
26 | |||
27 | void nitrox_config_emu_unit(struct nitrox_device *ndev); | ||
28 | void nitrox_config_pkt_input_rings(struct nitrox_device *ndev); | ||
29 | void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev); | ||
30 | void nitrox_config_vfmode(struct nitrox_device *ndev, int mode); | ||
31 | void nitrox_config_nps_unit(struct nitrox_device *ndev); | ||
32 | void nitrox_config_pom_unit(struct nitrox_device *ndev); | ||
33 | void nitrox_config_rand_unit(struct nitrox_device *ndev); | ||
34 | void nitrox_config_efl_unit(struct nitrox_device *ndev); | ||
35 | void nitrox_config_bmi_unit(struct nitrox_device *ndev); | ||
36 | void nitrox_config_bmo_unit(struct nitrox_device *ndev); | ||
37 | void nitrox_config_lbc_unit(struct nitrox_device *ndev); | ||
38 | void invalidate_lbc(struct nitrox_device *ndev); | ||
39 | void enable_pkt_input_ring(struct nitrox_device *ndev, int ring); | ||
40 | void enable_pkt_solicit_port(struct nitrox_device *ndev, int port); | ||
41 | |||
42 | #endif /* __NITROX_COMMON_H */ | ||
diff --git a/drivers/crypto/cavium/nitrox/nitrox_csr.h b/drivers/crypto/cavium/nitrox/nitrox_csr.h new file mode 100644 index 000000000000..30b04c4c6076 --- /dev/null +++ b/drivers/crypto/cavium/nitrox/nitrox_csr.h | |||
@@ -0,0 +1,1084 @@ | |||
1 | #ifndef __NITROX_CSR_H | ||
2 | #define __NITROX_CSR_H | ||
3 | |||
4 | #include <asm/byteorder.h> | ||
5 | #include <linux/types.h> | ||
6 | |||
7 | /* EMU clusters */ | ||
8 | #define NR_CLUSTERS 4 | ||
9 | #define AE_CORES_PER_CLUSTER 20 | ||
10 | #define SE_CORES_PER_CLUSTER 16 | ||
11 | |||
12 | /* BIST registers */ | ||
13 | #define EMU_BIST_STATUSX(_i) (0x1402700 + ((_i) * 0x40000)) | ||
14 | #define UCD_BIST_STATUS 0x12C0070 | ||
15 | #define NPS_CORE_BIST_REG 0x10000E8 | ||
16 | #define NPS_CORE_NPC_BIST_REG 0x1000128 | ||
17 | #define NPS_PKT_SLC_BIST_REG 0x1040088 | ||
18 | #define NPS_PKT_IN_BIST_REG 0x1040100 | ||
19 | #define POM_BIST_REG 0x11C0100 | ||
20 | #define BMI_BIST_REG 0x1140080 | ||
21 | #define EFL_CORE_BIST_REGX(_i) (0x1240100 + ((_i) * 0x400)) | ||
22 | #define EFL_TOP_BIST_STAT 0x1241090 | ||
23 | #define BMO_BIST_REG 0x1180080 | ||
24 | #define LBC_BIST_STATUS 0x1200020 | ||
25 | #define PEM_BIST_STATUSX(_i) (0x1080468 | ((_i) << 18)) | ||
26 | |||
27 | /* EMU registers */ | ||
28 | #define EMU_SE_ENABLEX(_i) (0x1400000 + ((_i) * 0x40000)) | ||
29 | #define EMU_AE_ENABLEX(_i) (0x1400008 + ((_i) * 0x40000)) | ||
30 | #define EMU_WD_INT_ENA_W1SX(_i) (0x1402318 + ((_i) * 0x40000)) | ||
31 | #define EMU_GE_INT_ENA_W1SX(_i) (0x1402518 + ((_i) * 0x40000)) | ||
32 | #define EMU_FUSE_MAPX(_i) (0x1402708 + ((_i) * 0x40000)) | ||
33 | |||
34 | /* UCD registers */ | ||
35 | #define UCD_UCODE_LOAD_BLOCK_NUM 0x12C0010 | ||
36 | #define UCD_UCODE_LOAD_IDX_DATAX(_i) (0x12C0018 + ((_i) * 0x20)) | ||
37 | #define UCD_SE_EID_UCODE_BLOCK_NUMX(_i) (0x12C0000 + ((_i) * 0x1000)) | ||
38 | |||
39 | /* NPS core registers */ | ||
40 | #define NPS_CORE_GBL_VFCFG 0x1000000 | ||
41 | #define NPS_CORE_CONTROL 0x1000008 | ||
42 | #define NPS_CORE_INT_ACTIVE 0x1000080 | ||
43 | #define NPS_CORE_INT 0x10000A0 | ||
44 | #define NPS_CORE_INT_ENA_W1S 0x10000B8 | ||
45 | #define NPS_STATS_PKT_DMA_RD_CNT 0x1000180 | ||
46 | #define NPS_STATS_PKT_DMA_WR_CNT 0x1000190 | ||
47 | |||
48 | /* NPS packet registers */ | ||
49 | #define NPS_PKT_INT 0x1040018 | ||
50 | #define NPS_PKT_IN_RERR_HI 0x1040108 | ||
51 | #define NPS_PKT_IN_RERR_HI_ENA_W1S 0x1040120 | ||
52 | #define NPS_PKT_IN_RERR_LO 0x1040128 | ||
53 | #define NPS_PKT_IN_RERR_LO_ENA_W1S 0x1040140 | ||
54 | #define NPS_PKT_IN_ERR_TYPE 0x1040148 | ||
55 | #define NPS_PKT_IN_ERR_TYPE_ENA_W1S 0x1040160 | ||
56 | #define NPS_PKT_IN_INSTR_CTLX(_i) (0x10060 + ((_i) * 0x40000)) | ||
57 | #define NPS_PKT_IN_INSTR_BADDRX(_i) (0x10068 + ((_i) * 0x40000)) | ||
58 | #define NPS_PKT_IN_INSTR_RSIZEX(_i) (0x10070 + ((_i) * 0x40000)) | ||
59 | #define NPS_PKT_IN_DONE_CNTSX(_i) (0x10080 + ((_i) * 0x40000)) | ||
60 | #define NPS_PKT_IN_INSTR_BAOFF_DBELLX(_i) (0x10078 + ((_i) * 0x40000)) | ||
61 | #define NPS_PKT_IN_INT_LEVELSX(_i) (0x10088 + ((_i) * 0x40000)) | ||
62 | |||
63 | #define NPS_PKT_SLC_RERR_HI 0x1040208 | ||
64 | #define NPS_PKT_SLC_RERR_HI_ENA_W1S 0x1040220 | ||
65 | #define NPS_PKT_SLC_RERR_LO 0x1040228 | ||
66 | #define NPS_PKT_SLC_RERR_LO_ENA_W1S 0x1040240 | ||
67 | #define NPS_PKT_SLC_ERR_TYPE 0x1040248 | ||
68 | #define NPS_PKT_SLC_ERR_TYPE_ENA_W1S 0x1040260 | ||
69 | #define NPS_PKT_SLC_CTLX(_i) (0x10000 + ((_i) * 0x40000)) | ||
70 | #define NPS_PKT_SLC_CNTSX(_i) (0x10008 + ((_i) * 0x40000)) | ||
71 | #define NPS_PKT_SLC_INT_LEVELSX(_i) (0x10010 + ((_i) * 0x40000)) | ||
72 | |||
73 | /* POM registers */ | ||
74 | #define POM_INT_ENA_W1S 0x11C0018 | ||
75 | #define POM_GRP_EXECMASKX(_i) (0x11C1100 | ((_i) * 8)) | ||
76 | #define POM_INT 0x11C0000 | ||
77 | #define POM_PERF_CTL 0x11CC400 | ||
78 | |||
79 | /* BMI registers */ | ||
80 | #define BMI_INT 0x1140000 | ||
81 | #define BMI_CTL 0x1140020 | ||
82 | #define BMI_INT_ENA_W1S 0x1140018 | ||
83 | #define BMI_NPS_PKT_CNT 0x1140070 | ||
84 | |||
85 | /* EFL registers */ | ||
86 | #define EFL_CORE_INT_ENA_W1SX(_i) (0x1240018 + ((_i) * 0x400)) | ||
87 | #define EFL_CORE_VF_ERR_INT0X(_i) (0x1240050 + ((_i) * 0x400)) | ||
88 | #define EFL_CORE_VF_ERR_INT0_ENA_W1SX(_i) (0x1240068 + ((_i) * 0x400)) | ||
89 | #define EFL_CORE_VF_ERR_INT1X(_i) (0x1240070 + ((_i) * 0x400)) | ||
90 | #define EFL_CORE_VF_ERR_INT1_ENA_W1SX(_i) (0x1240088 + ((_i) * 0x400)) | ||
91 | #define EFL_CORE_SE_ERR_INTX(_i) (0x12400A0 + ((_i) * 0x400)) | ||
92 | #define EFL_RNM_CTL_STATUS 0x1241800 | ||
93 | #define EFL_CORE_INTX(_i) (0x1240000 + ((_i) * 0x400)) | ||
94 | |||
95 | /* BMO registers */ | ||
96 | #define BMO_CTL2 0x1180028 | ||
97 | #define BMO_NPS_SLC_PKT_CNT 0x1180078 | ||
98 | |||
99 | /* LBC registers */ | ||
100 | #define LBC_INT 0x1200000 | ||
101 | #define LBC_INVAL_CTL 0x1201010 | ||
102 | #define LBC_PLM_VF1_64_INT 0x1202008 | ||
103 | #define LBC_INVAL_STATUS 0x1202010 | ||
104 | #define LBC_INT_ENA_W1S 0x1203000 | ||
105 | #define LBC_PLM_VF1_64_INT_ENA_W1S 0x1205008 | ||
106 | #define LBC_PLM_VF65_128_INT 0x1206008 | ||
107 | #define LBC_ELM_VF1_64_INT 0x1208000 | ||
108 | #define LBC_PLM_VF65_128_INT_ENA_W1S 0x1209008 | ||
109 | #define LBC_ELM_VF1_64_INT_ENA_W1S 0x120B000 | ||
110 | #define LBC_ELM_VF65_128_INT 0x120C000 | ||
111 | #define LBC_ELM_VF65_128_INT_ENA_W1S 0x120F000 | ||
112 | |||
113 | /* PEM registers */ | ||
114 | #define PEM0_INT 0x1080428 | ||
115 | |||
116 | /** | ||
117 | * struct emu_fuse_map - EMU Fuse Map Registers | ||
118 | * @ae_fuse: Fuse settings for AE 19..0 | ||
119 | * @se_fuse: Fuse settings for SE 15..0 | ||
120 | * | ||
121 | * A set bit indicates the unit is fuse disabled. | ||
122 | */ | ||
123 | union emu_fuse_map { | ||
124 | u64 value; | ||
125 | struct { | ||
126 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
127 | u64 valid : 1; | ||
128 | u64 raz_52_62 : 11; | ||
129 | u64 ae_fuse : 20; | ||
130 | u64 raz_16_31 : 16; | ||
131 | u64 se_fuse : 16; | ||
132 | #else | ||
133 | u64 se_fuse : 16; | ||
134 | u64 raz_16_31 : 16; | ||
135 | u64 ae_fuse : 20; | ||
136 | u64 raz_52_62 : 11; | ||
137 | u64 valid : 1; | ||
138 | #endif | ||
139 | } s; | ||
140 | }; | ||
141 | |||
142 | /** | ||
143 | * struct emu_se_enable - Symmetric Engine Enable Registers | ||
144 | * @enable: Individual enables for each of the cluster's | ||
145 | * 16 symmetric engines. | ||
146 | */ | ||
147 | union emu_se_enable { | ||
148 | u64 value; | ||
149 | struct { | ||
150 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
151 | u64 raz : 48; | ||
152 | u64 enable : 16; | ||
153 | #else | ||
154 | u64 enable : 16; | ||
155 | u64 raz : 48; | ||
156 | #endif | ||
157 | } s; | ||
158 | }; | ||
159 | |||
160 | /** | ||
161 | * struct emu_ae_enable - EMU Asymmetric engines. | ||
162 | * @enable: Individual enables for each of the cluster's | ||
163 | * 20 Asymmetric Engines. | ||
164 | */ | ||
165 | union emu_ae_enable { | ||
166 | u64 value; | ||
167 | struct { | ||
168 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
169 | u64 raz : 44; | ||
170 | u64 enable : 20; | ||
171 | #else | ||
172 | u64 enable : 20; | ||
173 | u64 raz : 44; | ||
174 | #endif | ||
175 | } s; | ||
176 | }; | ||
177 | |||
178 | /** | ||
179 | * struct emu_wd_int_ena_w1s - EMU Interrupt Enable Registers | ||
180 | * @ae_wd: Reads or sets enable for EMU(0..3)_WD_INT[AE_WD] | ||
181 | * @se_wd: Reads or sets enable for EMU(0..3)_WD_INT[SE_WD] | ||
182 | */ | ||
183 | union emu_wd_int_ena_w1s { | ||
184 | u64 value; | ||
185 | struct { | ||
186 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
187 | u64 raz2 : 12; | ||
188 | u64 ae_wd : 20; | ||
189 | u64 raz1 : 16; | ||
190 | u64 se_wd : 16; | ||
191 | #else | ||
192 | u64 se_wd : 16; | ||
193 | u64 raz1 : 16; | ||
194 | u64 ae_wd : 20; | ||
195 | u64 raz2 : 12; | ||
196 | #endif | ||
197 | } s; | ||
198 | }; | ||
199 | |||
200 | /** | ||
201 | * struct emu_ge_int_ena_w1s - EMU Interrupt Enable set registers | ||
202 | * @ae_ge: Reads or sets enable for EMU(0..3)_GE_INT[AE_GE] | ||
203 | * @se_ge: Reads or sets enable for EMU(0..3)_GE_INT[SE_GE] | ||
204 | */ | ||
205 | union emu_ge_int_ena_w1s { | ||
206 | u64 value; | ||
207 | struct { | ||
208 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
209 | u64 raz_52_63 : 12; | ||
210 | u64 ae_ge : 20; | ||
211 | u64 raz_16_31: 16; | ||
212 | u64 se_ge : 16; | ||
213 | #else | ||
214 | u64 se_ge : 16; | ||
215 | u64 raz_16_31: 16; | ||
216 | u64 ae_ge : 20; | ||
217 | u64 raz_52_63 : 12; | ||
218 | #endif | ||
219 | } s; | ||
220 | }; | ||
221 | |||
222 | /** | ||
223 | * struct nps_pkt_slc_ctl - Solicited Packet Out Control Registers | ||
224 | * @rh: Indicates whether to remove or include the response header | ||
225 | * 1 = Include, 0 = Remove | ||
226 | * @z: If set, 8 trailing 0x00 bytes will be added to the end of the | ||
227 | * outgoing packet. | ||
228 | * @enb: Enable for this port. | ||
229 | */ | ||
230 | union nps_pkt_slc_ctl { | ||
231 | u64 value; | ||
232 | struct { | ||
233 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
234 | u64 raz : 61; | ||
235 | u64 rh : 1; | ||
236 | u64 z : 1; | ||
237 | u64 enb : 1; | ||
238 | #else | ||
239 | u64 enb : 1; | ||
240 | u64 z : 1; | ||
241 | u64 rh : 1; | ||
242 | u64 raz : 61; | ||
243 | #endif | ||
244 | } s; | ||
245 | }; | ||
246 | |||
247 | /** | ||
248 | * struct nps_pkt_slc_cnts - Solicited Packet Out Count Registers | ||
249 | * @slc_int: Returns a 1 when: | ||
250 | * NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT], or | ||
251 | * NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SLC(i)_INT_LEVELS[TIMET]. | ||
252 | * To clear the bit, the CNTS register must be written to clear. | ||
253 | * @in_int: Returns a 1 when: | ||
254 | * NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT]. | ||
255 | * To clear the bit, the DONE_CNTS register must be written to clear. | ||
256 | * @mbox_int: Returns a 1 when: | ||
257 | * NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set. To clear the bit, | ||
258 | * write NPS_PKT_MBOX_PF_VF(i)_INT[INTR] with 1. | ||
259 | * @timer: Timer, incremented every 2048 coprocessor clock cycles | ||
260 | * when [CNT] is not zero. The hardware clears both [TIMER] and | ||
261 | * [INT] when [CNT] goes to 0. | ||
262 | * @cnt: Packet counter. Hardware adds to [CNT] as it sends packets out. | ||
263 | * On a write to this CSR, hardware subtracts the amount written to the | ||
264 | * [CNT] field from [CNT]. | ||
265 | */ | ||
266 | union nps_pkt_slc_cnts { | ||
267 | u64 value; | ||
268 | struct { | ||
269 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
270 | u64 slc_int : 1; | ||
271 | u64 uns_int : 1; | ||
272 | u64 in_int : 1; | ||
273 | u64 mbox_int : 1; | ||
274 | u64 resend : 1; | ||
275 | u64 raz : 5; | ||
276 | u64 timer : 22; | ||
277 | u64 cnt : 32; | ||
278 | #else | ||
279 | u64 cnt : 32; | ||
280 | u64 timer : 22; | ||
281 | u64 raz : 5; | ||
282 | u64 resend : 1; | ||
283 | u64 mbox_int : 1; | ||
284 | u64 in_int : 1; | ||
285 | u64 uns_int : 1; | ||
286 | u64 slc_int : 1; | ||
287 | #endif | ||
288 | } s; | ||
289 | }; | ||
290 | |||
291 | /** | ||
292 | * struct nps_pkt_slc_int_levels - Solicited Packet Out Interrupt Levels | ||
293 | * Registers. | ||
294 | * @bmode: Determines whether NPS_PKT_SLC_CNTS[CNT] is a byte or | ||
295 | * packet counter. | ||
296 | * @timet: Output port counter time interrupt threshold. | ||
297 | * @cnt: Output port counter interrupt threshold. | ||
298 | */ | ||
299 | union nps_pkt_slc_int_levels { | ||
300 | u64 value; | ||
301 | struct { | ||
302 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
303 | u64 bmode : 1; | ||
304 | u64 raz : 9; | ||
305 | u64 timet : 22; | ||
306 | u64 cnt : 32; | ||
307 | #else | ||
308 | u64 cnt : 32; | ||
309 | u64 timet : 22; | ||
310 | u64 raz : 9; | ||
311 | u64 bmode : 1; | ||
312 | #endif | ||
313 | } s; | ||
314 | }; | ||
315 | |||
316 | /** | ||
317 | * struct nps_pkt_int - NPS Packet Interrupt Register | ||
318 | * @in_err: Set when any NPS_PKT_IN_RERR_HI/LO bit and | ||
319 | * corresponding NPS_PKT_IN_RERR_*_ENA_* bit are both set. | ||
320 | * @uns_err: Set when any NPS_PKT_UNS_RERR_HI/LO bit and | ||
321 | * corresponding NPS_PKT_UNS_RERR_*_ENA_* bit are both set. | ||
322 | * @slc_err: Set when any NPS_PKT_SLC_RERR_HI/LO bit and | ||
323 | * corresponding NPS_PKT_SLC_RERR_*_ENA_* bit are both set. | ||
324 | */ | ||
325 | union nps_pkt_int { | ||
326 | u64 value; | ||
327 | struct { | ||
328 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
329 | u64 raz : 54; | ||
330 | u64 uns_wto : 1; | ||
331 | u64 in_err : 1; | ||
332 | u64 uns_err : 1; | ||
333 | u64 slc_err : 1; | ||
334 | u64 in_dbe : 1; | ||
335 | u64 in_sbe : 1; | ||
336 | u64 uns_dbe : 1; | ||
337 | u64 uns_sbe : 1; | ||
338 | u64 slc_dbe : 1; | ||
339 | u64 slc_sbe : 1; | ||
340 | #else | ||
341 | u64 slc_sbe : 1; | ||
342 | u64 slc_dbe : 1; | ||
343 | u64 uns_sbe : 1; | ||
344 | u64 uns_dbe : 1; | ||
345 | u64 in_sbe : 1; | ||
346 | u64 in_dbe : 1; | ||
347 | u64 slc_err : 1; | ||
348 | u64 uns_err : 1; | ||
349 | u64 in_err : 1; | ||
350 | u64 uns_wto : 1; | ||
351 | u64 raz : 54; | ||
352 | #endif | ||
353 | } s; | ||
354 | }; | ||
355 | |||
356 | /** | ||
357 | * struct nps_pkt_in_done_cnts - Input instruction ring counts registers | ||
358 | * @slc_int: Returns a 1 when: | ||
359 | * NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT], or | ||
360 | * NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SLC(i)_INT_LEVELS[TIMET] | ||
361 | * To clear the bit, the CNTS register must be | ||
362 | * written to clear the underlying condition | ||
363 | * @uns_int: Returns a 1 when: | ||
364 | * NPS_PKT_UNS(i)_CNTS[CNT] > NPS_PKT_UNS(i)_INT_LEVELS[CNT], or | ||
365 | * NPS_PKT_UNS(i)_CNTS[TIMER] > NPS_PKT_UNS(i)_INT_LEVELS[TIMET] | ||
366 | * To clear the bit, the CNTS register must be | ||
367 | * written to clear the underlying condition | ||
368 | * @in_int: Returns a 1 when: | ||
369 | * NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT] | ||
370 | * To clear the bit, the DONE_CNTS register | ||
371 | * must be written to clear the underlying condition | ||
372 | * @mbox_int: Returns a 1 when: | ||
373 | * NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set. | ||
374 | * To clear the bit, write NPS_PKT_MBOX_PF_VF(i)_INT[INTR] | ||
375 | * with 1. | ||
376 | * @resend: A write of 1 will resend an MSI-X interrupt message if any | ||
377 | * of the following conditions are true for this ring "i". | ||
378 | * NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT] | ||
379 | * NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SLC(i)_INT_LEVELS[TIMET] | ||
380 | * NPS_PKT_UNS(i)_CNTS[CNT] > NPS_PKT_UNS(i)_INT_LEVELS[CNT] | ||
381 | * NPS_PKT_UNS(i)_CNTS[TIMER] > NPS_PKT_UNS(i)_INT_LEVELS[TIMET] | ||
382 | * NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT] | ||
383 | * NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set | ||
384 | * @cnt: Packet counter. Hardware adds to [CNT] as it reads | ||
385 | * packets. On a write to this CSR, hardware subtracts the | ||
386 | * amount written to the [CNT] field from [CNT], which will | ||
387 | * clear PKT_IN(i)_INT_STATUS[INTR] if [CNT] becomes <= | ||
388 | * NPS_PKT_IN(i)_INT_LEVELS[CNT]. This register should be | ||
389 | * cleared before enabling a ring by reading the current | ||
390 | * value and writing it back. | ||
391 | */ | ||
392 | union nps_pkt_in_done_cnts { | ||
393 | u64 value; | ||
394 | struct { | ||
395 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
396 | u64 slc_int : 1; | ||
397 | u64 uns_int : 1; | ||
398 | u64 in_int : 1; | ||
399 | u64 mbox_int : 1; | ||
400 | u64 resend : 1; | ||
401 | u64 raz : 27; | ||
402 | u64 cnt : 32; | ||
403 | #else | ||
404 | u64 cnt : 32; | ||
405 | u64 raz : 27; | ||
406 | u64 resend : 1; | ||
407 | u64 mbox_int : 1; | ||
408 | u64 in_int : 1; | ||
409 | u64 uns_int : 1; | ||
410 | u64 slc_int : 1; | ||
411 | #endif | ||
412 | } s; | ||
413 | }; | ||
414 | |||
415 | /** | ||
416 | * struct nps_pkt_in_instr_ctl - Input Instruction Ring Control Registers. | ||
417 | * @is64b: If 1, the ring uses 64-byte instructions. If 0, the | ||
418 | * ring uses 32-byte instructions. | ||
419 | * @enb: Enable for the input ring. | ||
420 | */ | ||
421 | union nps_pkt_in_instr_ctl { | ||
422 | u64 value; | ||
423 | struct { | ||
424 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
425 | u64 raz : 62; | ||
426 | u64 is64b : 1; | ||
427 | u64 enb : 1; | ||
428 | #else | ||
429 | u64 enb : 1; | ||
430 | u64 is64b : 1; | ||
431 | u64 raz : 62; | ||
432 | #endif | ||
433 | } s; | ||
434 | }; | ||
435 | |||
436 | /** | ||
437 | * struct nps_pkt_in_instr_rsize - Input instruction ring size registers | ||
438 | * @rsize: Ring size (number of instructions) | ||
439 | */ | ||
440 | union nps_pkt_in_instr_rsize { | ||
441 | u64 value; | ||
442 | struct { | ||
443 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
444 | u64 raz : 32; | ||
445 | u64 rsize : 32; | ||
446 | #else | ||
447 | u64 rsize : 32; | ||
448 | u64 raz : 32; | ||
449 | #endif | ||
450 | } s; | ||
451 | }; | ||
452 | |||
453 | /** | ||
454 | * struct nps_pkt_in_instr_baoff_dbell - Input instruction ring | ||
455 | * base address offset and doorbell registers | ||
456 | * @aoff: Address offset. The offset from the NPS_PKT_IN_INSTR_BADDR | ||
457 | * where the next pointer is read. | ||
458 | * @dbell: Pointer list doorbell count. Write operations to this field | ||
459 | * increment the present value here. Read operations return the | ||
460 | * present value. | ||
461 | */ | ||
462 | union nps_pkt_in_instr_baoff_dbell { | ||
463 | u64 value; | ||
464 | struct { | ||
465 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
466 | u64 aoff : 32; | ||
467 | u64 dbell : 32; | ||
468 | #else | ||
469 | u64 dbell : 32; | ||
470 | u64 aoff : 32; | ||
471 | #endif | ||
472 | } s; | ||
473 | }; | ||
474 | |||
475 | /** | ||
476 | * struct nps_core_int_ena_w1s - NPS core interrupt enable set register | ||
477 | * @host_nps_wr_err: Reads or sets enable for | ||
478 | * NPS_CORE_INT[HOST_NPS_WR_ERR]. | ||
479 | * @npco_dma_malform: Reads or sets enable for | ||
480 | * NPS_CORE_INT[NPCO_DMA_MALFORM]. | ||
481 | * @exec_wr_timeout: Reads or sets enable for | ||
482 | * NPS_CORE_INT[EXEC_WR_TIMEOUT]. | ||
483 | * @host_wr_timeout: Reads or sets enable for | ||
484 | * NPS_CORE_INT[HOST_WR_TIMEOUT]. | ||
485 | * @host_wr_err: Reads or sets enable for | ||
486 | * NPS_CORE_INT[HOST_WR_ERR] | ||
487 | */ | ||
488 | union nps_core_int_ena_w1s { | ||
489 | u64 value; | ||
490 | struct { | ||
491 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
492 | u64 raz4 : 55; | ||
493 | u64 host_nps_wr_err : 1; | ||
494 | u64 npco_dma_malform : 1; | ||
495 | u64 exec_wr_timeout : 1; | ||
496 | u64 host_wr_timeout : 1; | ||
497 | u64 host_wr_err : 1; | ||
498 | u64 raz3 : 1; | ||
499 | u64 raz2 : 1; | ||
500 | u64 raz1 : 1; | ||
501 | u64 raz0 : 1; | ||
502 | #else | ||
503 | u64 raz0 : 1; | ||
504 | u64 raz1 : 1; | ||
505 | u64 raz2 : 1; | ||
506 | u64 raz3 : 1; | ||
507 | u64 host_wr_err : 1; | ||
508 | u64 host_wr_timeout : 1; | ||
509 | u64 exec_wr_timeout : 1; | ||
510 | u64 npco_dma_malform : 1; | ||
511 | u64 host_nps_wr_err : 1; | ||
512 | u64 raz4 : 55; | ||
513 | #endif | ||
514 | } s; | ||
515 | }; | ||
516 | |||
517 | /** | ||
518 | * struct nps_core_gbl_vfcfg - Global VF Configuration Register. | ||
519 | * @ilk_disable: When set, this bit indicates that the ILK interface has | ||
520 | * been disabled. | ||
521 | * @obaf: BMO allocation control | ||
522 | * 0 = allocate per queue | ||
523 | * 1 = allocate per VF | ||
524 | * @ibaf: BMI allocation control | ||
525 | * 0 = allocate per queue | ||
526 | * 1 = allocate per VF | ||
527 | * @zaf: ZIP allocation control | ||
528 | * 0 = allocate per queue | ||
529 | * 1 = allocate per VF | ||
530 | * @aeaf: AE allocation control | ||
531 | * 0 = allocate per queue | ||
532 | * 1 = allocate per VF | ||
533 | * @seaf: SE allocation control | ||
534 | * 0 = allocate per queue | ||
535 | * 1 = allocate per VF | ||
536 | * @cfg: VF/PF mode. | ||
537 | */ | ||
538 | union nps_core_gbl_vfcfg { | ||
539 | u64 value; | ||
540 | struct { | ||
541 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
542 | u64 raz :55; | ||
543 | u64 ilk_disable :1; | ||
544 | u64 obaf :1; | ||
545 | u64 ibaf :1; | ||
546 | u64 zaf :1; | ||
547 | u64 aeaf :1; | ||
548 | u64 seaf :1; | ||
549 | u64 cfg :3; | ||
550 | #else | ||
551 | u64 cfg :3; | ||
552 | u64 seaf :1; | ||
553 | u64 aeaf :1; | ||
554 | u64 zaf :1; | ||
555 | u64 ibaf :1; | ||
556 | u64 obaf :1; | ||
557 | u64 ilk_disable :1; | ||
558 | u64 raz :55; | ||
559 | #endif | ||
560 | } s; | ||
561 | }; | ||
562 | |||
563 | /** | ||
564 | * struct nps_core_int_active - NPS Core Interrupt Active Register | ||
565 | * @resend: Resend MSI-X interrupt if interrupts still need to be handled. | ||
566 | * Software can set this bit and then exit the ISR. | ||
567 | * @ocla: Set when any OCLA(0)_INT and corresponding OCLA(0)_INT_ENA_W1C | ||
568 | * bit are set | ||
569 | * @mbox: Set when any NPS_PKT_MBOX_INT_LO/HI and corresponding | ||
570 | * NPS_PKT_MBOX_INT_LO_ENA_W1C/HI_ENA_W1C bits are set | ||
571 | * @emu: bit i is set in [EMU] when any EMU(i)_INT bit is set | ||
572 | * @bmo: Set when any BMO_INT bit is set | ||
573 | * @bmi: Set when any BMI_INT bit is set or when any non-RO | ||
574 | * BMI_INT and corresponding BMI_INT_ENA_W1C bits are both set | ||
575 | * @aqm: Set when any AQM_INT bit is set | ||
576 | * @zqm: Set when any ZQM_INT bit is set | ||
577 | * @efl: Set when any EFL_INT RO bit is set or when any non-RO EFL_INT | ||
578 | * and corresponding EFL_INT_ENA_W1C bits are both set | ||
579 | * @ilk: Set when any ILK_INT bit is set | ||
580 | * @lbc: Set when any LBC_INT RO bit is set or when any non-RO LBC_INT | ||
581 | * and corresponding LBC_INT_ENA_W1C bits are both set | ||
582 | * @pem: Set when any PEM(0)_INT RO bit is set or when any non-RO | ||
583 | * PEM(0)_INT and corresponding PEM(0)_INT_ENA_W1C bit are both set | ||
584 | * @ucd: Set when any UCD_INT bit is set | ||
585 | * @zctl: Set when any ZIP_INT RO bit is set or when any non-RO ZIP_INT | ||
586 | * and corresponding ZIP_INT_ENA_W1C bits are both set | ||
587 | * @lbm: Set when any LBM_INT bit is set | ||
588 | * @nps_pkt: Set when any NPS_PKT_INT bit is set | ||
589 | * @nps_core: Set when any NPS_CORE_INT RO bit is set or when non-RO | ||
590 | * NPS_CORE_INT and corresponding NPS_CORE_INT_ENA_W1C bits are both set | ||
591 | */ | ||
592 | union nps_core_int_active { | ||
593 | u64 value; | ||
594 | struct { | ||
595 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
596 | u64 resend : 1; | ||
597 | u64 raz : 43; | ||
598 | u64 ocla : 1; | ||
599 | u64 mbox : 1; | ||
600 | u64 emu : 4; | ||
601 | u64 bmo : 1; | ||
602 | u64 bmi : 1; | ||
603 | u64 aqm : 1; | ||
604 | u64 zqm : 1; | ||
605 | u64 efl : 1; | ||
606 | u64 ilk : 1; | ||
607 | u64 lbc : 1; | ||
608 | u64 pem : 1; | ||
609 | u64 pom : 1; | ||
610 | u64 ucd : 1; | ||
611 | u64 zctl : 1; | ||
612 | u64 lbm : 1; | ||
613 | u64 nps_pkt : 1; | ||
614 | u64 nps_core : 1; | ||
615 | #else | ||
616 | u64 nps_core : 1; | ||
617 | u64 nps_pkt : 1; | ||
618 | u64 lbm : 1; | ||
619 | u64 zctl: 1; | ||
620 | u64 ucd : 1; | ||
621 | u64 pom : 1; | ||
622 | u64 pem : 1; | ||
623 | u64 lbc : 1; | ||
624 | u64 ilk : 1; | ||
625 | u64 efl : 1; | ||
626 | u64 zqm : 1; | ||
627 | u64 aqm : 1; | ||
628 | u64 bmi : 1; | ||
629 | u64 bmo : 1; | ||
630 | u64 emu : 4; | ||
631 | u64 mbox : 1; | ||
632 | u64 ocla : 1; | ||
633 | u64 raz : 43; | ||
634 | u64 resend : 1; | ||
635 | #endif | ||
636 | } s; | ||
637 | }; | ||
638 | |||
639 | /** | ||
640 | * struct efl_core_int - EFL Interrupt Registers | ||
641 | * @epci_decode_err: EPCI decoded a transaction that was unknown. | ||
642 | * This error should only occur when there is a microcode/SE error | ||
643 | * and should be considered fatal | ||
644 | * @ae_err: An AE uncorrectable error occurred. | ||
645 | * See EFL_CORE(0..3)_AE_ERR_INT | ||
646 | * @se_err: An SE uncorrectable error occurred. | ||
647 | * See EFL_CORE(0..3)_SE_ERR_INT | ||
648 | * @dbe: Double-bit error occurred in EFL | ||
649 | * @sbe: Single-bit error occurred in EFL | ||
650 | * @d_left: Asserted when new POM-Header-BMI-data is | ||
651 | * being sent to an Exec, and that Exec has not read all BMI | ||
652 | * data associated with the previous POM header | ||
653 | * @len_ovr: Asserted when an Exec-Read is issued that is more than | ||
654 | * 14 greater in length than the BMI data left to be read | ||
655 | */ | ||
656 | union efl_core_int { | ||
657 | u64 value; | ||
658 | struct { | ||
659 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
660 | u64 raz : 57; | ||
661 | u64 epci_decode_err : 1; | ||
662 | u64 ae_err : 1; | ||
663 | u64 se_err : 1; | ||
664 | u64 dbe : 1; | ||
665 | u64 sbe : 1; | ||
666 | u64 d_left : 1; | ||
667 | u64 len_ovr : 1; | ||
668 | #else | ||
669 | u64 len_ovr : 1; | ||
670 | u64 d_left : 1; | ||
671 | u64 sbe : 1; | ||
672 | u64 dbe : 1; | ||
673 | u64 se_err : 1; | ||
674 | u64 ae_err : 1; | ||
675 | u64 epci_decode_err : 1; | ||
676 | u64 raz : 57; | ||
677 | #endif | ||
678 | } s; | ||
679 | }; | ||
680 | |||
681 | /** | ||
682 | * struct efl_core_int_ena_w1s - EFL core interrupt enable set register | ||
683 | * @epci_decode_err: Reads or sets enable for | ||
684 | * EFL_CORE(0..3)_INT[EPCI_DECODE_ERR]. | ||
685 | * @d_left: Reads or sets enable for | ||
686 | * EFL_CORE(0..3)_INT[D_LEFT]. | ||
687 | * @len_ovr: Reads or sets enable for | ||
688 | * EFL_CORE(0..3)_INT[LEN_OVR]. | ||
689 | */ | ||
690 | union efl_core_int_ena_w1s { | ||
691 | u64 value; | ||
692 | struct { | ||
693 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
694 | u64 raz_7_63 : 57; | ||
695 | u64 epci_decode_err : 1; | ||
696 | u64 raz_2_5 : 4; | ||
697 | u64 d_left : 1; | ||
698 | u64 len_ovr : 1; | ||
699 | #else | ||
700 | u64 len_ovr : 1; | ||
701 | u64 d_left : 1; | ||
702 | u64 raz_2_5 : 4; | ||
703 | u64 epci_decode_err : 1; | ||
704 | u64 raz_7_63 : 57; | ||
705 | #endif | ||
706 | } s; | ||
707 | }; | ||
708 | |||
709 | /** | ||
710 | * struct efl_rnm_ctl_status - RNM Control and Status Register | ||
711 | * @ent_sel: Select input to RNM FIFO | ||
712 | * @exp_ent: Exported entropy enable for random number generator | ||
713 | * @rng_rst: Reset to RNG. Setting this bit to 1 cancels the generation | ||
714 | * of the current random number. | ||
715 | * @rnm_rst: Reset the RNM. Setting this bit to 1 clears all stored numbers | ||
716 | * in the random number memory. | ||
717 | * @rng_en: Enables the output of the RNG. | ||
718 | * @ent_en: Entropy enable for random number generator. | ||
719 | */ | ||
720 | union efl_rnm_ctl_status { | ||
721 | u64 value; | ||
722 | struct { | ||
723 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
724 | u64 raz_9_63 : 55; | ||
725 | u64 ent_sel : 4; | ||
726 | u64 exp_ent : 1; | ||
727 | u64 rng_rst : 1; | ||
728 | u64 rnm_rst : 1; | ||
729 | u64 rng_en : 1; | ||
730 | u64 ent_en : 1; | ||
731 | #else | ||
732 | u64 ent_en : 1; | ||
733 | u64 rng_en : 1; | ||
734 | u64 rnm_rst : 1; | ||
735 | u64 rng_rst : 1; | ||
736 | u64 exp_ent : 1; | ||
737 | u64 ent_sel : 4; | ||
738 | u64 raz_9_63 : 55; | ||
739 | #endif | ||
740 | } s; | ||
741 | }; | ||
742 | |||
743 | /** | ||
744 | * struct bmi_ctl - BMI control register | ||
745 | * @ilk_hdrq_thrsh: Maximum number of header queue locations | ||
746 | * that ILK packets may consume. When the threshold is | ||
747 | * exceeded ILK_XOFF is sent to the BMI_X2P_ARB. | ||
748 | * @nps_hdrq_thrsh: Maximum number of header queue locations | ||
749 | * that NPS packets may consume. When the threshold is | ||
750 | * exceeded NPS_XOFF is sent to the BMI_X2P_ARB. | ||
751 | * @totl_hdrq_thrsh: Maximum number of header queue locations | ||
752 | * that the sum of ILK and NPS packets may consume. | ||
753 | * @ilk_free_thrsh: Maximum number of buffers that ILK packet | ||
754 | * flows may consume before ILK_XOFF is sent to the BMI_X2P_ARB. | ||
755 | * @nps_free_thrsh: Maximum number of buffers that NPS packet | ||
756 | * flows may consume before NPS_XOFF is sent to the BMI_X2P_ARB. | ||
757 | * @totl_free_thrsh: Maximum number of buffers that both ILK and NPS | ||
758 | * packet flows may consume before both NPS_XOFF and ILK_XOFF | ||
759 | * are asserted to the BMI_X2P_ARB. | ||
760 | * @max_pkt_len: Maximum packet length, integral number of 256B | ||
761 | * buffers. | ||
762 | */ | ||
763 | union bmi_ctl { | ||
764 | u64 value; | ||
765 | struct { | ||
766 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
767 | u64 raz_56_63 : 8; | ||
768 | u64 ilk_hdrq_thrsh : 8; | ||
769 | u64 nps_hdrq_thrsh : 8; | ||
770 | u64 totl_hdrq_thrsh : 8; | ||
771 | u64 ilk_free_thrsh : 8; | ||
772 | u64 nps_free_thrsh : 8; | ||
773 | u64 totl_free_thrsh : 8; | ||
774 | u64 max_pkt_len : 8; | ||
775 | #else | ||
776 | u64 max_pkt_len : 8; | ||
777 | u64 totl_free_thrsh : 8; | ||
778 | u64 nps_free_thrsh : 8; | ||
779 | u64 ilk_free_thrsh : 8; | ||
780 | u64 totl_hdrq_thrsh : 8; | ||
781 | u64 nps_hdrq_thrsh : 8; | ||
782 | u64 ilk_hdrq_thrsh : 8; | ||
783 | u64 raz_56_63 : 8; | ||
784 | #endif | ||
785 | } s; | ||
786 | }; | ||
787 | |||
788 | /** | ||
789 | * struct bmi_int_ena_w1s - BMI interrupt enable set register | ||
790 | * @ilk_req_oflw: Reads or sets enable for | ||
791 | * BMI_INT[ILK_REQ_OFLW]. | ||
792 | * @nps_req_oflw: Reads or sets enable for | ||
793 | * BMI_INT[NPS_REQ_OFLW]. | ||
794 | * @fpf_undrrn: Reads or sets enable for | ||
795 | * BMI_INT[FPF_UNDRRN]. | ||
796 | * @eop_err_ilk: Reads or sets enable for | ||
797 | * BMI_INT[EOP_ERR_ILK]. | ||
798 | * @eop_err_nps: Reads or sets enable for | ||
799 | * BMI_INT[EOP_ERR_NPS]. | ||
800 | * @sop_err_ilk: Reads or sets enable for | ||
801 | * BMI_INT[SOP_ERR_ILK]. | ||
802 | * @sop_err_nps: Reads or sets enable for | ||
803 | * BMI_INT[SOP_ERR_NPS]. | ||
804 | * @pkt_rcv_err_ilk: Reads or sets enable for | ||
805 | * BMI_INT[PKT_RCV_ERR_ILK]. | ||
806 | * @pkt_rcv_err_nps: Reads or sets enable for | ||
807 | * BMI_INT[PKT_RCV_ERR_NPS]. | ||
808 | * @max_len_err_ilk: Reads or sets enable for | ||
809 | * BMI_INT[MAX_LEN_ERR_ILK]. | ||
810 | * @max_len_err_nps: Reads or sets enable for | ||
811 | * BMI_INT[MAX_LEN_ERR_NPS]. | ||
812 | */ | ||
813 | union bmi_int_ena_w1s { | ||
814 | u64 value; | ||
815 | struct { | ||
816 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
817 | u64 raz_13_63 : 51; | ||
818 | u64 ilk_req_oflw : 1; | ||
819 | u64 nps_req_oflw : 1; | ||
820 | u64 raz_10 : 1; | ||
821 | u64 raz_9 : 1; | ||
822 | u64 fpf_undrrn : 1; | ||
823 | u64 eop_err_ilk : 1; | ||
824 | u64 eop_err_nps : 1; | ||
825 | u64 sop_err_ilk : 1; | ||
826 | u64 sop_err_nps : 1; | ||
827 | u64 pkt_rcv_err_ilk : 1; | ||
828 | u64 pkt_rcv_err_nps : 1; | ||
829 | u64 max_len_err_ilk : 1; | ||
830 | u64 max_len_err_nps : 1; | ||
831 | #else | ||
832 | u64 max_len_err_nps : 1; | ||
833 | u64 max_len_err_ilk : 1; | ||
834 | u64 pkt_rcv_err_nps : 1; | ||
835 | u64 pkt_rcv_err_ilk : 1; | ||
836 | u64 sop_err_nps : 1; | ||
837 | u64 sop_err_ilk : 1; | ||
838 | u64 eop_err_nps : 1; | ||
839 | u64 eop_err_ilk : 1; | ||
840 | u64 fpf_undrrn : 1; | ||
841 | u64 raz_9 : 1; | ||
842 | u64 raz_10 : 1; | ||
843 | u64 nps_req_oflw : 1; | ||
844 | u64 ilk_req_oflw : 1; | ||
845 | u64 raz_13_63 : 51; | ||
846 | #endif | ||
847 | } s; | ||
848 | }; | ||
849 | |||
850 | /** | ||
851 | * struct bmo_ctl2 - BMO Control2 Register | ||
852 | * @arb_sel: Determines P2X Arbitration | ||
853 | * @ilk_buf_thrsh: Maximum number of buffers that the | ||
854 | * ILK packet flows may consume before ILK XOFF is | ||
855 | * asserted to the POM. | ||
856 | * @nps_slc_buf_thrsh: Maximum number of buffers that the | ||
857 | * NPS_SLC packet flow may consume before NPS_SLC XOFF is | ||
858 | * asserted to the POM. | ||
859 | * @nps_uns_buf_thrsh: Maximum number of buffers that the | ||
860 | * NPS_UNS packet flow may consume before NPS_UNS XOFF is | ||
861 | * asserted to the POM. | ||
862 | * @totl_buf_thrsh: Maximum number of buffers that ILK, NPS_UNS and | ||
863 | * NPS_SLC packet flows may consume before NPS_UNS XOFF, NPS_SLC XOFF and | ||
864 | * ILK XOFF are all asserted to the POM. | ||
865 | */ | ||
866 | union bmo_ctl2 { | ||
867 | u64 value; | ||
868 | struct { | ||
869 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
870 | u64 arb_sel : 1; | ||
871 | u64 raz_32_62 : 31; | ||
872 | u64 ilk_buf_thrsh : 8; | ||
873 | u64 nps_slc_buf_thrsh : 8; | ||
874 | u64 nps_uns_buf_thrsh : 8; | ||
875 | u64 totl_buf_thrsh : 8; | ||
876 | #else | ||
877 | u64 totl_buf_thrsh : 8; | ||
878 | u64 nps_uns_buf_thrsh : 8; | ||
879 | u64 nps_slc_buf_thrsh : 8; | ||
880 | u64 ilk_buf_thrsh : 8; | ||
881 | u64 raz_32_62 : 31; | ||
882 | u64 arb_sel : 1; | ||
883 | #endif | ||
884 | } s; | ||
885 | }; | ||
886 | |||
887 | /** | ||
888 | * struct pom_int_ena_w1s - POM interrupt enable set register | ||
889 | * @illegal_intf: Reads or sets enable for POM_INT[ILLEGAL_INTF]. | ||
890 | * @illegal_dport: Reads or sets enable for POM_INT[ILLEGAL_DPORT]. | ||
891 | */ | ||
892 | union pom_int_ena_w1s { | ||
893 | u64 value; | ||
894 | struct { | ||
895 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
896 | u64 raz2 : 60; | ||
897 | u64 illegal_intf : 1; | ||
898 | u64 illegal_dport : 1; | ||
899 | u64 raz1 : 1; | ||
900 | u64 raz0 : 1; | ||
901 | #else | ||
902 | u64 raz0 : 1; | ||
903 | u64 raz1 : 1; | ||
904 | u64 illegal_dport : 1; | ||
905 | u64 illegal_intf : 1; | ||
906 | u64 raz2 : 60; | ||
907 | #endif | ||
908 | } s; | ||
909 | }; | ||
910 | |||
911 | /** | ||
912 | * struct lbc_inval_ctl - LBC invalidation control register | ||
913 | * @wait_timer: Wait timer for wait state. [WAIT_TIMER] must | ||
914 | * always be written with its reset value. | ||
915 | * @cam_inval_start: Software should write [CAM_INVAL_START]=1 | ||
916 | * to initiate an LBC cache invalidation. After this, software | ||
917 | * should read LBC_INVAL_STATUS until LBC_INVAL_STATUS[DONE] is set. | ||
918 | * LBC hardware clears [CAM_INVAL_START] before software can | ||
919 | * observe LBC_INVAL_STATUS[DONE] to be set. | ||
920 | */ | ||
921 | union lbc_inval_ctl { | ||
922 | u64 value; | ||
923 | struct { | ||
924 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
925 | u64 raz2 : 48; | ||
926 | u64 wait_timer : 8; | ||
927 | u64 raz1 : 6; | ||
928 | u64 cam_inval_start : 1; | ||
929 | u64 raz0 : 1; | ||
930 | #else | ||
931 | u64 raz0 : 1; | ||
932 | u64 cam_inval_start : 1; | ||
933 | u64 raz1 : 6; | ||
934 | u64 wait_timer : 8; | ||
935 | u64 raz2 : 48; | ||
936 | #endif | ||
937 | } s; | ||
938 | }; | ||
939 | |||
940 | /** | ||
941 | * struct lbc_int_ena_w1s - LBC interrupt enable set register | ||
942 | * @cam_hard_err: Reads or sets enable for LBC_INT[CAM_HARD_ERR]. | ||
943 | * @cam_inval_abort: Reads or sets enable for LBC_INT[CAM_INVAL_ABORT]. | ||
944 | * @over_fetch_err: Reads or sets enable for LBC_INT[OVER_FETCH_ERR]. | ||
945 | * @cache_line_to_err: Reads or sets enable for | ||
946 | * LBC_INT[CACHE_LINE_TO_ERR]. | ||
947 | * @cam_soft_err: Reads or sets enable for | ||
948 | * LBC_INT[CAM_SOFT_ERR]. | ||
949 | * @dma_rd_err: Reads or sets enable for | ||
950 | * LBC_INT[DMA_RD_ERR]. | ||
951 | */ | ||
952 | union lbc_int_ena_w1s { | ||
953 | u64 value; | ||
954 | struct { | ||
955 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
956 | u64 raz_10_63 : 54; | ||
957 | u64 cam_hard_err : 1; | ||
958 | u64 cam_inval_abort : 1; | ||
959 | u64 over_fetch_err : 1; | ||
960 | u64 cache_line_to_err : 1; | ||
961 | u64 raz_2_5 : 4; | ||
962 | u64 cam_soft_err : 1; | ||
963 | u64 dma_rd_err : 1; | ||
964 | #else | ||
965 | u64 dma_rd_err : 1; | ||
966 | u64 cam_soft_err : 1; | ||
967 | u64 raz_2_5 : 4; | ||
968 | u64 cache_line_to_err : 1; | ||
969 | u64 over_fetch_err : 1; | ||
970 | u64 cam_inval_abort : 1; | ||
971 | u64 cam_hard_err : 1; | ||
972 | u64 raz_10_63 : 54; | ||
973 | #endif | ||
974 | } s; | ||
975 | }; | ||
976 | |||
977 | /** | ||
978 | * struct lbc_int - LBC interrupt summary register | ||
979 | * @cam_hard_err: indicates a fatal hardware error. | ||
980 | * It requires system reset. | ||
981 | * When [CAM_HARD_ERR] is set, LBC stops logging any new information in | ||
982 | * LBC_POM_MISS_INFO_LOG, | ||
983 | * LBC_POM_MISS_ADDR_LOG, | ||
984 | * LBC_EFL_MISS_INFO_LOG, and | ||
985 | * LBC_EFL_MISS_ADDR_LOG. | ||
986 | * Software should sample them. | ||
987 | * @cam_inval_abort: indicates a fatal hardware error. | ||
988 | * System reset is required. | ||
989 | * @over_fetch_err: indicates a fatal hardware error | ||
990 | * System reset is required | ||
991 | * @cache_line_to_err: is a debug feature. | ||
992 | * This timeout interrupt bit tells the software that | ||
993 | * a cacheline in LBC has non-zero usage and the context | ||
994 | * has not been used for greater than the | ||
995 | * LBC_TO_CNT[TO_CNT] time interval. | ||
996 | * @sbe: Memory SBE error. This is recoverable via ECC. | ||
997 | * See LBC_ECC_INT for more details. | ||
998 | * @dbe: Memory DBE error. This is fatal and requires a | ||
999 | * system reset. | ||
1000 | * @pref_dat_len_mismatch_err: Summary bit for context length | ||
1001 | * mismatch errors. | ||
1002 | * @rd_dat_len_mismatch_err: Summary bit for SE read data length | ||
1003 | * greater than data prefetch length errors. | ||
1004 | * @cam_soft_err: is recoverable. Software must complete a | ||
1005 | * LBC_INVAL_CTL[CAM_INVAL_START] invalidation sequence and | ||
1006 | * then clear [CAM_SOFT_ERR]. | ||
1007 | * @dma_rd_err: A context prefetch read of host memory returned with | ||
1008 | * a read error. | ||
1009 | */ | ||
1010 | union lbc_int { | ||
1011 | u64 value; | ||
1012 | struct { | ||
1013 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
1014 | u64 raz_10_63 : 54; | ||
1015 | u64 cam_hard_err : 1; | ||
1016 | u64 cam_inval_abort : 1; | ||
1017 | u64 over_fetch_err : 1; | ||
1018 | u64 cache_line_to_err : 1; | ||
1019 | u64 sbe : 1; | ||
1020 | u64 dbe : 1; | ||
1021 | u64 pref_dat_len_mismatch_err : 1; | ||
1022 | u64 rd_dat_len_mismatch_err : 1; | ||
1023 | u64 cam_soft_err : 1; | ||
1024 | u64 dma_rd_err : 1; | ||
1025 | #else | ||
1026 | u64 dma_rd_err : 1; | ||
1027 | u64 cam_soft_err : 1; | ||
1028 | u64 rd_dat_len_mismatch_err : 1; | ||
1029 | u64 pref_dat_len_mismatch_err : 1; | ||
1030 | u64 dbe : 1; | ||
1031 | u64 sbe : 1; | ||
1032 | u64 cache_line_to_err : 1; | ||
1033 | u64 over_fetch_err : 1; | ||
1034 | u64 cam_inval_abort : 1; | ||
1035 | u64 cam_hard_err : 1; | ||
1036 | u64 raz_10_63 : 54; | ||
1037 | #endif | ||
1038 | } s; | ||
1039 | }; | ||
1040 | |||
1041 | /** | ||
1042 | * struct lbc_inval_status: LBC Invalidation status register | ||
1043 | * @cam_clean_entry_complete_cnt: The number of entries that are | ||
1044 | * cleaned up successfully. | ||
1045 | * @cam_clean_entry_cnt: The number of entries that have the CAM | ||
1046 | * inval command issued. | ||
1047 | * @cam_inval_state: cam invalidation FSM state | ||
1048 | * @cam_inval_abort: cam invalidation abort | ||
1049 | * @cam_rst_rdy: lbc_cam reset ready | ||
1050 | * @done: LBC clears [DONE] when | ||
1051 | * LBC_INVAL_CTL[CAM_INVAL_START] is written with a one, | ||
1052 | * and sets [DONE] when it completes the invalidation | ||
1053 | * sequence. | ||
1054 | */ | ||
1055 | union lbc_inval_status { | ||
1056 | u64 value; | ||
1057 | struct { | ||
1058 | #if (defined(__BIG_ENDIAN_BITFIELD)) | ||
1059 | u64 raz3 : 23; | ||
1060 | u64 cam_clean_entry_complete_cnt : 9; | ||
1061 | u64 raz2 : 7; | ||
1062 | u64 cam_clean_entry_cnt : 9; | ||
1063 | u64 raz1 : 5; | ||
1064 | u64 cam_inval_state : 3; | ||
1065 | u64 raz0 : 5; | ||
1066 | u64 cam_inval_abort : 1; | ||
1067 | u64 cam_rst_rdy : 1; | ||
1068 | u64 done : 1; | ||
1069 | #else | ||
1070 | u64 done : 1; | ||
1071 | u64 cam_rst_rdy : 1; | ||
1072 | u64 cam_inval_abort : 1; | ||
1073 | u64 raz0 : 5; | ||
1074 | u64 cam_inval_state : 3; | ||
1075 | u64 raz1 : 5; | ||
1076 | u64 cam_clean_entry_cnt : 9; | ||
1077 | u64 raz2 : 7; | ||
1078 | u64 cam_clean_entry_complete_cnt : 9; | ||
1079 | u64 raz3 : 23; | ||
1080 | #endif | ||
1081 | } s; | ||
1082 | }; | ||
1083 | |||
1084 | #endif /* __NITROX_CSR_H */ | ||
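Each union above pairs the raw 64-bit register value with a named bitfield view, so CSR handling can be written as plain read-modify-write on a structure. As a small illustrative sketch (not part of this patch), decoding the per-cluster fuse map to count usable SE cores could look like the following; nitrox_read_csr() is the accessor defined in nitrox_dev.h below, and the helper name is made up for the example.

#include <linux/bitops.h>

#include "nitrox_dev.h"
#include "nitrox_csr.h"

/* Count the SE cores of one EMU cluster that are not fuse disabled.
 * A set bit in se_fuse means the corresponding core is fuse disabled,
 * and the map is only meaningful when the valid bit is set.
 */
static int nitrox_se_cores_in_cluster(struct nitrox_device *ndev, int cluster)
{
	union emu_fuse_map fuse;

	fuse.value = nitrox_read_csr(ndev, EMU_FUSE_MAPX(cluster));
	if (!fuse.s.valid)
		return 0;

	return SE_CORES_PER_CLUSTER - hweight32(fuse.s.se_fuse);
}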
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h new file mode 100644 index 000000000000..57858b04f165 --- /dev/null +++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h | |||
@@ -0,0 +1,179 @@ | |||
1 | #ifndef __NITROX_DEV_H | ||
2 | #define __NITROX_DEV_H | ||
3 | |||
4 | #include <linux/dma-mapping.h> | ||
5 | #include <linux/interrupt.h> | ||
6 | #include <linux/pci.h> | ||
7 | |||
8 | #define VERSION_LEN 32 | ||
9 | |||
10 | struct nitrox_cmdq { | ||
11 | /* command queue lock */ | ||
12 | spinlock_t cmdq_lock; | ||
13 | /* response list lock */ | ||
14 | spinlock_t response_lock; | ||
15 | /* backlog list lock */ | ||
16 | spinlock_t backlog_lock; | ||
17 | |||
18 | /* request submitted to chip, in progress */ | ||
19 | struct list_head response_head; | ||
20 | /* hw queue full, hold in backlog list */ | ||
21 | struct list_head backlog_head; | ||
22 | |||
23 | /* doorbell address */ | ||
24 | u8 __iomem *dbell_csr_addr; | ||
25 | /* base address of the queue */ | ||
26 | u8 *head; | ||
27 | |||
28 | struct nitrox_device *ndev; | ||
29 | /* flush pending backlog commands */ | ||
30 | struct work_struct backlog_qflush; | ||
31 | |||
32 | /* requests posted waiting for completion */ | ||
33 | atomic_t pending_count; | ||
34 | /* requests in backlog queues */ | ||
35 | atomic_t backlog_count; | ||
36 | |||
37 | /* command size 32B/64B */ | ||
38 | u8 instr_size; | ||
39 | u8 qno; | ||
40 | u32 qsize; | ||
41 | |||
42 | /* unaligned addresses */ | ||
43 | u8 *head_unaligned; | ||
44 | dma_addr_t dma_unaligned; | ||
45 | /* dma address of the base */ | ||
46 | dma_addr_t dma; | ||
47 | }; | ||
48 | |||
49 | struct nitrox_hw { | ||
50 | /* firmware version */ | ||
51 | char fw_name[VERSION_LEN]; | ||
52 | |||
53 | u16 vendor_id; | ||
54 | u16 device_id; | ||
55 | u8 revision_id; | ||
56 | |||
57 | /* CNN55XX cores */ | ||
58 | u8 se_cores; | ||
59 | u8 ae_cores; | ||
60 | u8 zip_cores; | ||
61 | }; | ||
62 | |||
63 | #define MAX_MSIX_VECTOR_NAME 20 | ||
64 | /** | ||
65 | * vectors for queues (64 AE, 64 SE and 64 ZIP) and | ||
66 | * error condition/mailbox. | ||
67 | */ | ||
68 | #define MAX_MSIX_VECTORS 192 | ||
69 | |||
70 | struct nitrox_msix { | ||
71 | struct msix_entry *entries; | ||
72 | char **names; | ||
73 | DECLARE_BITMAP(irqs, MAX_MSIX_VECTORS); | ||
74 | u32 nr_entries; | ||
75 | }; | ||
76 | |||
77 | struct bh_data { | ||
78 | /* slc port completion count address */ | ||
79 | u8 __iomem *completion_cnt_csr_addr; | ||
80 | |||
81 | struct nitrox_cmdq *cmdq; | ||
82 | struct tasklet_struct resp_handler; | ||
83 | }; | ||
84 | |||
85 | struct nitrox_bh { | ||
86 | struct bh_data *slc; | ||
87 | }; | ||
88 | |||
89 | /* NITROX-5 driver state */ | ||
90 | #define NITROX_UCODE_LOADED 0 | ||
91 | #define NITROX_READY 1 | ||
92 | |||
93 | /* command queue size */ | ||
94 | #define DEFAULT_CMD_QLEN 2048 | ||
95 | /* command timeout in milliseconds */ | ||
96 | #define CMD_TIMEOUT 2000 | ||
97 | |||
98 | #define DEV(ndev) ((struct device *)(&(ndev)->pdev->dev)) | ||
99 | #define PF_MODE 0 | ||
100 | |||
101 | #define NITROX_CSR_ADDR(ndev, offset) \ | ||
102 | ((ndev)->bar_addr + (offset)) | ||
103 | |||
104 | /** | ||
105 | * struct nitrox_device - NITROX Device Information. | ||
106 | * @list: pointer to linked list of devices | ||
107 | * @bar_addr: iomap address | ||
108 | * @pdev: PCI device information | ||
109 | * @status: NITROX status | ||
110 | * @timeout: Request timeout in jiffies | ||
111 | * @refcnt: Device usage count | ||
112 | * @idx: device index (0..N) | ||
113 | * @node: NUMA node id attached | ||
114 | * @qlen: Command queue length | ||
115 | * @nr_queues: Number of command queues | ||
116 | * @ctx_pool: DMA pool for crypto context | ||
117 | * @pkt_cmdqs: SE Command queues | ||
118 | * @msix: MSI-X information | ||
119 | * @bh: post processing work | ||
120 | * @hw: hardware information | ||
121 | * @debugfs_dir: debugfs directory | ||
122 | */ | ||
123 | struct nitrox_device { | ||
124 | struct list_head list; | ||
125 | |||
126 | u8 __iomem *bar_addr; | ||
127 | struct pci_dev *pdev; | ||
128 | |||
129 | unsigned long status; | ||
130 | unsigned long timeout; | ||
131 | refcount_t refcnt; | ||
132 | |||
133 | u8 idx; | ||
134 | int node; | ||
135 | u16 qlen; | ||
136 | u16 nr_queues; | ||
137 | |||
138 | struct dma_pool *ctx_pool; | ||
139 | struct nitrox_cmdq *pkt_cmdqs; | ||
140 | |||
141 | struct nitrox_msix msix; | ||
142 | struct nitrox_bh bh; | ||
143 | |||
144 | struct nitrox_hw hw; | ||
145 | #if IS_ENABLED(CONFIG_DEBUG_FS) | ||
146 | struct dentry *debugfs_dir; | ||
147 | #endif | ||
148 | }; | ||
149 | |||
150 | /** | ||
151 | * nitrox_read_csr - Read from device register | ||
152 | * @ndev: NITROX device | ||
153 | * @offset: offset of the register to read | ||
154 | * | ||
155 | * Returns: value read | ||
156 | */ | ||
157 | static inline u64 nitrox_read_csr(struct nitrox_device *ndev, u64 offset) | ||
158 | { | ||
159 | return readq(ndev->bar_addr + offset); | ||
160 | } | ||
161 | |||
162 | /** | ||
163 | * nitrox_write_csr - Write to device register | ||
164 | * @ndev: NITROX device | ||
165 | * @offset: offset of the register to write | ||
166 | * @value: value to write | ||
167 | */ | ||
168 | static inline void nitrox_write_csr(struct nitrox_device *ndev, u64 offset, | ||
169 | u64 value) | ||
170 | { | ||
171 | writeq(value, (ndev->bar_addr + offset)); | ||
172 | } | ||
173 | |||
174 | static inline int nitrox_ready(struct nitrox_device *ndev) | ||
175 | { | ||
176 | return test_bit(NITROX_READY, &ndev->status); | ||
177 | } | ||
178 | |||
179 | #endif /* __NITROX_DEV_H */ | ||
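The command queue keeps a pre-computed doorbell CSR address (dbell_csr_addr) so the submission fast path can notify the hardware without recomputing register offsets. A sketch of that step is shown below; it is illustrative only and the helper name is invented, but the pattern (barrier, then a 64-bit write of the number of newly posted instructions) follows the doorbell semantics documented for NPS_PKT_IN_INSTR_BAOFF_DBELL in nitrox_csr.h.

#include <linux/io.h>

#include "nitrox_dev.h"

/* Ring the input-ring doorbell after @count instructions have been
 * copied into @cmdq; the hardware adds the written value to the ring's
 * pending doorbell count.
 */
static void nitrox_ring_doorbell(struct nitrox_cmdq *cmdq, u32 count)
{
	/* make the queued instructions visible before the doorbell write */
	dma_wmb();
	writeq(count, cmdq->dbell_csr_addr);
}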
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c new file mode 100644 index 000000000000..f0655f82fa7d --- /dev/null +++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c | |||
@@ -0,0 +1,401 @@ | |||
1 | #include <linux/delay.h> | ||
2 | |||
3 | #include "nitrox_dev.h" | ||
4 | #include "nitrox_csr.h" | ||
5 | |||
6 | /** | ||
7 | * emu_enable_cores - Enable EMU cluster cores. | ||
8 | * @ndev: N5 device | ||
9 | */ | ||
10 | static void emu_enable_cores(struct nitrox_device *ndev) | ||
11 | { | ||
12 | union emu_se_enable emu_se; | ||
13 | union emu_ae_enable emu_ae; | ||
14 | int i; | ||
15 | |||
16 | /* AE cores 20 per cluster */ | ||
17 | emu_ae.value = 0; | ||
18 | emu_ae.s.enable = 0xfffff; | ||
19 | |||
20 | /* SE cores 16 per cluster */ | ||
21 | emu_se.value = 0; | ||
22 | emu_se.s.enable = 0xffff; | ||
23 | |||
24 | /* enable per cluster cores */ | ||
25 | for (i = 0; i < NR_CLUSTERS; i++) { | ||
26 | nitrox_write_csr(ndev, EMU_AE_ENABLEX(i), emu_ae.value); | ||
27 | nitrox_write_csr(ndev, EMU_SE_ENABLEX(i), emu_se.value); | ||
28 | } | ||
29 | } | ||
30 | |||
31 | /** | ||
32 | * nitrox_config_emu_unit - configure EMU unit. | ||
33 | * @ndev: N5 device | ||
34 | */ | ||
35 | void nitrox_config_emu_unit(struct nitrox_device *ndev) | ||
36 | { | ||
37 | union emu_wd_int_ena_w1s emu_wd_int; | ||
38 | union emu_ge_int_ena_w1s emu_ge_int; | ||
39 | u64 offset; | ||
40 | int i; | ||
41 | |||
42 | /* enable cores */ | ||
43 | emu_enable_cores(ndev); | ||
44 | |||
45 | /* enable general error and watchdog interrupts */ | ||
46 | emu_ge_int.value = 0; | ||
47 | emu_ge_int.s.se_ge = 0xffff; | ||
48 | emu_ge_int.s.ae_ge = 0xfffff; | ||
49 | emu_wd_int.value = 0; | ||
50 | emu_wd_int.s.se_wd = 1; | ||
51 | |||
52 | for (i = 0; i < NR_CLUSTERS; i++) { | ||
53 | offset = EMU_WD_INT_ENA_W1SX(i); | ||
54 | nitrox_write_csr(ndev, offset, emu_wd_int.value); | ||
55 | offset = EMU_GE_INT_ENA_W1SX(i); | ||
56 | nitrox_write_csr(ndev, offset, emu_ge_int.value); | ||
57 | } | ||
58 | } | ||
59 | |||
60 | static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring) | ||
61 | { | ||
62 | union nps_pkt_in_instr_ctl pkt_in_ctl; | ||
63 | union nps_pkt_in_instr_baoff_dbell pkt_in_dbell; | ||
64 | union nps_pkt_in_done_cnts pkt_in_cnts; | ||
65 | u64 offset; | ||
66 | |||
67 | offset = NPS_PKT_IN_INSTR_CTLX(ring); | ||
68 | /* disable the ring */ | ||
69 | pkt_in_ctl.value = nitrox_read_csr(ndev, offset); | ||
70 | pkt_in_ctl.s.enb = 0; | ||
71 | nitrox_write_csr(ndev, offset, pkt_in_ctl.value); | ||
72 | usleep_range(100, 150); | ||
73 | |||
74 | /* wait to clear [ENB] */ | ||
75 | do { | ||
76 | pkt_in_ctl.value = nitrox_read_csr(ndev, offset); | ||
77 | } while (pkt_in_ctl.s.enb); | ||
78 | |||
79 | /* clear off doorbell counts */ | ||
80 | offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(ring); | ||
81 | pkt_in_dbell.value = 0; | ||
82 | pkt_in_dbell.s.dbell = 0xffffffff; | ||
83 | nitrox_write_csr(ndev, offset, pkt_in_dbell.value); | ||
84 | |||
85 | /* clear done counts */ | ||
86 | offset = NPS_PKT_IN_DONE_CNTSX(ring); | ||
87 | pkt_in_cnts.value = nitrox_read_csr(ndev, offset); | ||
88 | nitrox_write_csr(ndev, offset, pkt_in_cnts.value); | ||
89 | usleep_range(50, 100); | ||
90 | } | ||
91 | |||
92 | void enable_pkt_input_ring(struct nitrox_device *ndev, int ring) | ||
93 | { | ||
94 | union nps_pkt_in_instr_ctl pkt_in_ctl; | ||
95 | u64 offset; | ||
96 | |||
97 | /* 64-byte instruction size */ | ||
98 | offset = NPS_PKT_IN_INSTR_CTLX(ring); | ||
99 | pkt_in_ctl.value = nitrox_read_csr(ndev, offset); | ||
100 | pkt_in_ctl.s.is64b = 1; | ||
101 | pkt_in_ctl.s.enb = 1; | ||
102 | nitrox_write_csr(ndev, offset, pkt_in_ctl.value); | ||
103 | |||
104 | /* wait for set [ENB] */ | ||
105 | do { | ||
106 | pkt_in_ctl.value = nitrox_read_csr(ndev, offset); | ||
107 | } while (!pkt_in_ctl.s.enb); | ||
108 | } | ||
109 | |||
110 | /** | ||
111 | * nitrox_config_pkt_input_rings - configure Packet Input Rings | ||
112 | * @ndev: N5 device | ||
113 | */ | ||
114 | void nitrox_config_pkt_input_rings(struct nitrox_device *ndev) | ||
115 | { | ||
116 | int i; | ||
117 | |||
118 | for (i = 0; i < ndev->nr_queues; i++) { | ||
119 | struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i]; | ||
120 | union nps_pkt_in_instr_rsize pkt_in_rsize; | ||
121 | u64 offset; | ||
122 | |||
123 | reset_pkt_input_ring(ndev, i); | ||
124 | |||
125 | /* configure ring base address 16-byte aligned, | ||
126 | * size and interrupt threshold. | ||
127 | */ | ||
128 | offset = NPS_PKT_IN_INSTR_BADDRX(i); | ||
129 | nitrox_write_csr(ndev, offset, cmdq->dma); | ||
130 | |||
131 | /* configure ring size */ | ||
132 | offset = NPS_PKT_IN_INSTR_RSIZEX(i); | ||
133 | pkt_in_rsize.value = 0; | ||
134 | pkt_in_rsize.s.rsize = ndev->qlen; | ||
135 | nitrox_write_csr(ndev, offset, pkt_in_rsize.value); | ||
136 | |||
137 | /* set high threshold for pkt input ring interrupts */ | ||
138 | offset = NPS_PKT_IN_INT_LEVELSX(i); | ||
139 | nitrox_write_csr(ndev, offset, 0xffffffff); | ||
140 | |||
141 | enable_pkt_input_ring(ndev, i); | ||
142 | } | ||
143 | } | ||
144 | |||
145 | static void reset_pkt_solicit_port(struct nitrox_device *ndev, int port) | ||
146 | { | ||
147 | union nps_pkt_slc_ctl pkt_slc_ctl; | ||
148 | union nps_pkt_slc_cnts pkt_slc_cnts; | ||
149 | u64 offset; | ||
150 | |||
151 | /* disable slc port */ | ||
152 | offset = NPS_PKT_SLC_CTLX(port); | ||
153 | pkt_slc_ctl.value = nitrox_read_csr(ndev, offset); | ||
154 | pkt_slc_ctl.s.enb = 0; | ||
155 | nitrox_write_csr(ndev, offset, pkt_slc_ctl.value); | ||
156 | usleep_range(100, 150); | ||
157 | |||
158 | /* wait to clear [ENB] */ | ||
159 | do { | ||
160 | pkt_slc_ctl.value = nitrox_read_csr(ndev, offset); | ||
161 | } while (pkt_slc_ctl.s.enb); | ||
162 | |||
163 | /* clear slc counters */ | ||
164 | offset = NPS_PKT_SLC_CNTSX(port); | ||
165 | pkt_slc_cnts.value = nitrox_read_csr(ndev, offset); | ||
166 | nitrox_write_csr(ndev, offset, pkt_slc_cnts.value); | ||
167 | usleep_range(50, 100); | ||
168 | } | ||
169 | |||
170 | void enable_pkt_solicit_port(struct nitrox_device *ndev, int port) | ||
171 | { | ||
172 | union nps_pkt_slc_ctl pkt_slc_ctl; | ||
173 | u64 offset; | ||
174 | |||
175 | offset = NPS_PKT_SLC_CTLX(port); | ||
176 | pkt_slc_ctl.value = 0; | ||
177 | pkt_slc_ctl.s.enb = 1; | ||
178 | |||
179 | /* | ||
180 | * 8 trailing 0x00 bytes will be added | ||
181 | * to the end of the outgoing packet. | ||
182 | */ | ||
183 | pkt_slc_ctl.s.z = 1; | ||
184 | /* enable response header */ | ||
185 | pkt_slc_ctl.s.rh = 1; | ||
186 | nitrox_write_csr(ndev, offset, pkt_slc_ctl.value); | ||
187 | |||
188 | /* wait to set [ENB] */ | ||
189 | do { | ||
190 | pkt_slc_ctl.value = nitrox_read_csr(ndev, offset); | ||
191 | } while (!pkt_slc_ctl.s.enb); | ||
192 | } | ||
193 | |||
194 | static void config_single_pkt_solicit_port(struct nitrox_device *ndev, | ||
195 | int port) | ||
196 | { | ||
197 | union nps_pkt_slc_int_levels pkt_slc_int; | ||
198 | u64 offset; | ||
199 | |||
200 | reset_pkt_solicit_port(ndev, port); | ||
201 | |||
202 | offset = NPS_PKT_SLC_INT_LEVELSX(port); | ||
203 | pkt_slc_int.value = 0; | ||
204 | /* time interrupt threshold */ | ||
205 | pkt_slc_int.s.timet = 0x3fffff; | ||
206 | nitrox_write_csr(ndev, offset, pkt_slc_int.value); | ||
207 | |||
208 | enable_pkt_solicit_port(ndev, port); | ||
209 | } | ||
210 | |||
211 | void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev) | ||
212 | { | ||
213 | int i; | ||
214 | |||
215 | for (i = 0; i < ndev->nr_queues; i++) | ||
216 | config_single_pkt_solicit_port(ndev, i); | ||
217 | } | ||
218 | |||
219 | /** | ||
220 | * enable_nps_interrupts - enable NPS interrupts | ||
221 | * @ndev: N5 device. | ||
222 | * | ||
223 | * This includes NPS core, packet in and slc interrupts. | ||
224 | */ | ||
225 | static void enable_nps_interrupts(struct nitrox_device *ndev) | ||
226 | { | ||
227 | union nps_core_int_ena_w1s core_int; | ||
228 | |||
229 | /* NPS core interrupts */ | ||
230 | core_int.value = 0; | ||
231 | core_int.s.host_wr_err = 1; | ||
232 | core_int.s.host_wr_timeout = 1; | ||
233 | core_int.s.exec_wr_timeout = 1; | ||
234 | core_int.s.npco_dma_malform = 1; | ||
235 | core_int.s.host_nps_wr_err = 1; | ||
236 | nitrox_write_csr(ndev, NPS_CORE_INT_ENA_W1S, core_int.value); | ||
237 | |||
238 | /* NPS packet in ring interrupts */ | ||
239 | nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL)); | ||
240 | nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL)); | ||
241 | nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL)); | ||
242 | /* NPS packet slc port interrupts */ | ||
243 | nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL)); | ||
244 | nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL)); | ||
245 | nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0ULL)); | ||
246 | } | ||
247 | |||
248 | void nitrox_config_nps_unit(struct nitrox_device *ndev) | ||
249 | { | ||
250 | union nps_core_gbl_vfcfg core_gbl_vfcfg; | ||
251 | |||
252 | /* endian control information */ | ||
253 | nitrox_write_csr(ndev, NPS_CORE_CONTROL, 1ULL); | ||
254 | |||
255 | /* disable ILK interface */ | ||
256 | core_gbl_vfcfg.value = 0; | ||
257 | core_gbl_vfcfg.s.ilk_disable = 1; | ||
258 | core_gbl_vfcfg.s.cfg = PF_MODE; | ||
259 | nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value); | ||
260 | /* config input and solicit ports */ | ||
261 | nitrox_config_pkt_input_rings(ndev); | ||
262 | nitrox_config_pkt_solicit_ports(ndev); | ||
263 | |||
264 | /* enable interrupts */ | ||
265 | enable_nps_interrupts(ndev); | ||
266 | } | ||
267 | |||
268 | void nitrox_config_pom_unit(struct nitrox_device *ndev) | ||
269 | { | ||
270 | union pom_int_ena_w1s pom_int; | ||
271 | int i; | ||
272 | |||
273 | /* enable pom interrupts */ | ||
274 | pom_int.value = 0; | ||
275 | pom_int.s.illegal_dport = 1; | ||
276 | nitrox_write_csr(ndev, POM_INT_ENA_W1S, pom_int.value); | ||
277 | |||
278 | /* enable perf counters */ | ||
279 | for (i = 0; i < ndev->hw.se_cores; i++) | ||
280 | nitrox_write_csr(ndev, POM_PERF_CTL, BIT_ULL(i)); | ||
281 | } | ||
282 | |||
283 | /** | ||
284 | * nitrox_config_rand_unit - enable N5 random number unit | ||
285 | * @ndev: N5 device | ||
286 | */ | ||
287 | void nitrox_config_rand_unit(struct nitrox_device *ndev) | ||
288 | { | ||
289 | union efl_rnm_ctl_status efl_rnm_ctl; | ||
290 | u64 offset; | ||
291 | |||
292 | offset = EFL_RNM_CTL_STATUS; | ||
293 | efl_rnm_ctl.value = nitrox_read_csr(ndev, offset); | ||
294 | efl_rnm_ctl.s.ent_en = 1; | ||
295 | efl_rnm_ctl.s.rng_en = 1; | ||
296 | nitrox_write_csr(ndev, offset, efl_rnm_ctl.value); | ||
297 | } | ||
298 | |||
299 | void nitrox_config_efl_unit(struct nitrox_device *ndev) | ||
300 | { | ||
301 | int i; | ||
302 | |||
303 | for (i = 0; i < NR_CLUSTERS; i++) { | ||
304 | union efl_core_int_ena_w1s efl_core_int; | ||
305 | u64 offset; | ||
306 | |||
307 | /* EFL core interrupts */ | ||
308 | offset = EFL_CORE_INT_ENA_W1SX(i); | ||
309 | efl_core_int.value = 0; | ||
310 | efl_core_int.s.len_ovr = 1; | ||
311 | efl_core_int.s.d_left = 1; | ||
312 | efl_core_int.s.epci_decode_err = 1; | ||
313 | nitrox_write_csr(ndev, offset, efl_core_int.value); | ||
314 | |||
315 | offset = EFL_CORE_VF_ERR_INT0_ENA_W1SX(i); | ||
316 | nitrox_write_csr(ndev, offset, (~0ULL)); | ||
317 | offset = EFL_CORE_VF_ERR_INT1_ENA_W1SX(i); | ||
318 | nitrox_write_csr(ndev, offset, (~0ULL)); | ||
319 | } | ||
320 | } | ||
321 | |||
322 | void nitrox_config_bmi_unit(struct nitrox_device *ndev) | ||
323 | { | ||
324 | union bmi_ctl bmi_ctl; | ||
325 | union bmi_int_ena_w1s bmi_int_ena; | ||
326 | u64 offset; | ||
327 | |||
328 | /* no threshold limits for PCIe */ | ||
329 | offset = BMI_CTL; | ||
330 | bmi_ctl.value = nitrox_read_csr(ndev, offset); | ||
331 | bmi_ctl.s.max_pkt_len = 0xff; | ||
332 | bmi_ctl.s.nps_free_thrsh = 0xff; | ||
333 | bmi_ctl.s.nps_hdrq_thrsh = 0x7a; | ||
334 | nitrox_write_csr(ndev, offset, bmi_ctl.value); | ||
335 | |||
336 | /* enable interrupts */ | ||
337 | offset = BMI_INT_ENA_W1S; | ||
338 | bmi_int_ena.value = 0; | ||
339 | bmi_int_ena.s.max_len_err_nps = 1; | ||
340 | bmi_int_ena.s.pkt_rcv_err_nps = 1; | ||
341 | bmi_int_ena.s.fpf_undrrn = 1; | ||
342 | nitrox_write_csr(ndev, offset, bmi_int_ena.value); | ||
343 | } | ||
344 | |||
345 | void nitrox_config_bmo_unit(struct nitrox_device *ndev) | ||
346 | { | ||
347 | union bmo_ctl2 bmo_ctl2; | ||
348 | u64 offset; | ||
349 | |||
350 | /* no threshold limits for PCIe */ | ||
351 | offset = BMO_CTL2; | ||
352 | bmo_ctl2.value = nitrox_read_csr(ndev, offset); | ||
353 | bmo_ctl2.s.nps_slc_buf_thrsh = 0xff; | ||
354 | nitrox_write_csr(ndev, offset, bmo_ctl2.value); | ||
355 | } | ||
356 | |||
357 | void invalidate_lbc(struct nitrox_device *ndev) | ||
358 | { | ||
359 | union lbc_inval_ctl lbc_ctl; | ||
360 | union lbc_inval_status lbc_stat; | ||
361 | u64 offset; | ||
362 | |||
363 | /* invalidate LBC */ | ||
364 | offset = LBC_INVAL_CTL; | ||
365 | lbc_ctl.value = nitrox_read_csr(ndev, offset); | ||
366 | lbc_ctl.s.cam_inval_start = 1; | ||
367 | nitrox_write_csr(ndev, offset, lbc_ctl.value); | ||
368 | |||
369 | offset = LBC_INVAL_STATUS; | ||
370 | |||
371 | do { | ||
372 | lbc_stat.value = nitrox_read_csr(ndev, offset); | ||
373 | } while (!lbc_stat.s.done); | ||
374 | } | ||
375 | |||
376 | void nitrox_config_lbc_unit(struct nitrox_device *ndev) | ||
377 | { | ||
378 | union lbc_int_ena_w1s lbc_int_ena; | ||
379 | u64 offset; | ||
380 | |||
381 | invalidate_lbc(ndev); | ||
382 | |||
383 | /* enable interrupts */ | ||
384 | offset = LBC_INT_ENA_W1S; | ||
385 | lbc_int_ena.value = 0; | ||
386 | lbc_int_ena.s.dma_rd_err = 1; | ||
387 | lbc_int_ena.s.over_fetch_err = 1; | ||
388 | lbc_int_ena.s.cam_inval_abort = 1; | ||
389 | lbc_int_ena.s.cam_hard_err = 1; | ||
390 | nitrox_write_csr(ndev, offset, lbc_int_ena.value); | ||
391 | |||
392 | offset = LBC_PLM_VF1_64_INT_ENA_W1S; | ||
393 | nitrox_write_csr(ndev, offset, (~0ULL)); | ||
394 | offset = LBC_PLM_VF65_128_INT_ENA_W1S; | ||
395 | nitrox_write_csr(ndev, offset, (~0ULL)); | ||
396 | |||
397 | offset = LBC_ELM_VF1_64_INT_ENA_W1S; | ||
398 | nitrox_write_csr(ndev, offset, (~0ULL)); | ||
399 | offset = LBC_ELM_VF65_128_INT_ENA_W1S; | ||
400 | nitrox_write_csr(ndev, offset, (~0ULL)); | ||
401 | } | ||
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c new file mode 100644 index 000000000000..71f934871a89 --- /dev/null +++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c | |||
@@ -0,0 +1,467 @@ | |||
1 | #include <linux/pci.h> | ||
2 | #include <linux/printk.h> | ||
3 | #include <linux/slab.h> | ||
4 | |||
5 | #include "nitrox_dev.h" | ||
6 | #include "nitrox_csr.h" | ||
7 | #include "nitrox_common.h" | ||
8 | |||
9 | #define NR_RING_VECTORS 3 | ||
10 | #define NPS_CORE_INT_ACTIVE_ENTRY 192 | ||
11 | |||
12 | /** | ||
13 | * nps_pkt_slc_isr - IRQ handler for NPS solicit port | ||
14 | * @irq: irq number | ||
15 | * @data: pointer to the ring's bottom half data (struct bh_data) | ||
16 | */ | ||
17 | static irqreturn_t nps_pkt_slc_isr(int irq, void *data) | ||
18 | { | ||
19 | struct bh_data *slc = data; | ||
20 | union nps_pkt_slc_cnts pkt_slc_cnts; | ||
21 | |||
22 | pkt_slc_cnts.value = readq(slc->completion_cnt_csr_addr); | ||
23 | /* New packet on SLC output port */ | ||
24 | if (pkt_slc_cnts.s.slc_int) | ||
25 | tasklet_hi_schedule(&slc->resp_handler); | ||
26 | |||
27 | return IRQ_HANDLED; | ||
28 | } | ||
29 | |||
30 | static void clear_nps_core_err_intr(struct nitrox_device *ndev) | ||
31 | { | ||
32 | u64 value; | ||
33 | |||
34 | /* Write 1 to clear */ | ||
35 | value = nitrox_read_csr(ndev, NPS_CORE_INT); | ||
36 | nitrox_write_csr(ndev, NPS_CORE_INT, value); | ||
37 | |||
38 | dev_err_ratelimited(DEV(ndev), "NSP_CORE_INT 0x%016llx\n", value); | ||
39 | } | ||
40 | |||
41 | static void clear_nps_pkt_err_intr(struct nitrox_device *ndev) | ||
42 | { | ||
43 | union nps_pkt_int pkt_int; | ||
44 | unsigned long value, offset; | ||
45 | int i; | ||
46 | |||
47 | pkt_int.value = nitrox_read_csr(ndev, NPS_PKT_INT); | ||
48 | dev_err_ratelimited(DEV(ndev), "NPS_PKT_INT 0x%016llx\n", | ||
49 | pkt_int.value); | ||
50 | |||
51 | if (pkt_int.s.slc_err) { | ||
52 | offset = NPS_PKT_SLC_ERR_TYPE; | ||
53 | value = nitrox_read_csr(ndev, offset); | ||
54 | nitrox_write_csr(ndev, offset, value); | ||
55 | dev_err_ratelimited(DEV(ndev), | ||
56 | "NPS_PKT_SLC_ERR_TYPE 0x%016lx\n", value); | ||
57 | |||
58 | offset = NPS_PKT_SLC_RERR_LO; | ||
59 | value = nitrox_read_csr(ndev, offset); | ||
60 | nitrox_write_csr(ndev, offset, value); | ||
61 | /* enable the solicit ports */ | ||
62 | for_each_set_bit(i, &value, BITS_PER_LONG) | ||
63 | enable_pkt_solicit_port(ndev, i); | ||
64 | |||
65 | dev_err_ratelimited(DEV(ndev), | ||
66 | "NPS_PKT_SLC_RERR_LO 0x%016lx\n", value); | ||
67 | |||
68 | offset = NPS_PKT_SLC_RERR_HI; | ||
69 | value = nitrox_read_csr(ndev, offset); | ||
70 | nitrox_write_csr(ndev, offset, value); | ||
71 | dev_err_ratelimited(DEV(ndev), | ||
72 | "NPS_PKT_SLC_RERR_HI 0x%016lx\n", value); | ||
73 | } | ||
74 | |||
75 | if (pkt_int.s.in_err) { | ||
76 | offset = NPS_PKT_IN_ERR_TYPE; | ||
77 | value = nitrox_read_csr(ndev, offset); | ||
78 | nitrox_write_csr(ndev, offset, value); | ||
79 | dev_err_ratelimited(DEV(ndev), | ||
80 | "NPS_PKT_IN_ERR_TYPE 0x%016lx\n", value); | ||
81 | offset = NPS_PKT_IN_RERR_LO; | ||
82 | value = nitrox_read_csr(ndev, offset); | ||
83 | nitrox_write_csr(ndev, offset, value); | ||
84 | /* enable the input ring */ | ||
85 | for_each_set_bit(i, &value, BITS_PER_LONG) | ||
86 | enable_pkt_input_ring(ndev, i); | ||
87 | |||
88 | dev_err_ratelimited(DEV(ndev), | ||
89 | "NPS_PKT_IN_RERR_LO 0x%016lx\n", value); | ||
90 | |||
91 | offset = NPS_PKT_IN_RERR_HI; | ||
92 | value = nitrox_read_csr(ndev, offset); | ||
93 | nitrox_write_csr(ndev, offset, value); | ||
94 | dev_err_ratelimited(DEV(ndev), | ||
95 | "NPS_PKT_IN_RERR_HI 0x%016lx\n", value); | ||
96 | } | ||
97 | } | ||
98 | |||
99 | static void clear_pom_err_intr(struct nitrox_device *ndev) | ||
100 | { | ||
101 | u64 value; | ||
102 | |||
103 | value = nitrox_read_csr(ndev, POM_INT); | ||
104 | nitrox_write_csr(ndev, POM_INT, value); | ||
105 | dev_err_ratelimited(DEV(ndev), "POM_INT 0x%016llx\n", value); | ||
106 | } | ||
107 | |||
108 | static void clear_pem_err_intr(struct nitrox_device *ndev) | ||
109 | { | ||
110 | u64 value; | ||
111 | |||
112 | value = nitrox_read_csr(ndev, PEM0_INT); | ||
113 | nitrox_write_csr(ndev, PEM0_INT, value); | ||
114 | dev_err_ratelimited(DEV(ndev), "PEM(0)_INT 0x%016llx\n", value); | ||
115 | } | ||
116 | |||
117 | static void clear_lbc_err_intr(struct nitrox_device *ndev) | ||
118 | { | ||
119 | union lbc_int lbc_int; | ||
120 | u64 value, offset; | ||
121 | int i; | ||
122 | |||
123 | lbc_int.value = nitrox_read_csr(ndev, LBC_INT); | ||
124 | dev_err_ratelimited(DEV(ndev), "LBC_INT 0x%016llx\n", lbc_int.value); | ||
125 | |||
126 | if (lbc_int.s.dma_rd_err) { | ||
127 | for (i = 0; i < NR_CLUSTERS; i++) { | ||
128 | offset = EFL_CORE_VF_ERR_INT0X(i); | ||
129 | value = nitrox_read_csr(ndev, offset); | ||
130 | nitrox_write_csr(ndev, offset, value); | ||
131 | offset = EFL_CORE_VF_ERR_INT1X(i); | ||
132 | value = nitrox_read_csr(ndev, offset); | ||
133 | nitrox_write_csr(ndev, offset, value); | ||
134 | } | ||
135 | } | ||
136 | |||
137 | if (lbc_int.s.cam_soft_err) { | ||
138 | dev_err_ratelimited(DEV(ndev), "CAM_SOFT_ERR, invalidating LBC\n"); | ||
139 | invalidate_lbc(ndev); | ||
140 | } | ||
141 | |||
142 | if (lbc_int.s.pref_dat_len_mismatch_err) { | ||
143 | offset = LBC_PLM_VF1_64_INT; | ||
144 | value = nitrox_read_csr(ndev, offset); | ||
145 | nitrox_write_csr(ndev, offset, value); | ||
146 | offset = LBC_PLM_VF65_128_INT; | ||
147 | value = nitrox_read_csr(ndev, offset); | ||
148 | nitrox_write_csr(ndev, offset, value); | ||
149 | } | ||
150 | |||
151 | if (lbc_int.s.rd_dat_len_mismatch_err) { | ||
152 | offset = LBC_ELM_VF1_64_INT; | ||
153 | value = nitrox_read_csr(ndev, offset); | ||
154 | nitrox_write_csr(ndev, offset, value); | ||
155 | offset = LBC_ELM_VF65_128_INT; | ||
156 | value = nitrox_read_csr(ndev, offset); | ||
157 | nitrox_write_csr(ndev, offset, value); | ||
158 | } | ||
159 | nitrox_write_csr(ndev, LBC_INT, lbc_int.value); | ||
160 | } | ||
161 | |||
162 | static void clear_efl_err_intr(struct nitrox_device *ndev) | ||
163 | { | ||
164 | int i; | ||
165 | |||
166 | for (i = 0; i < NR_CLUSTERS; i++) { | ||
167 | union efl_core_int core_int; | ||
168 | u64 value, offset; | ||
169 | |||
170 | offset = EFL_CORE_INTX(i); | ||
171 | core_int.value = nitrox_read_csr(ndev, offset); | ||
172 | nitrox_write_csr(ndev, offset, core_int.value); | ||
173 | dev_err_ratelimited(DEV(ndev), "ELF_CORE(%d)_INT 0x%016llx\n", | ||
174 | i, core_int.value); | ||
175 | if (core_int.s.se_err) { | ||
176 | offset = EFL_CORE_SE_ERR_INTX(i); | ||
177 | value = nitrox_read_csr(ndev, offset); | ||
178 | nitrox_write_csr(ndev, offset, value); | ||
179 | } | ||
180 | } | ||
181 | } | ||
182 | |||
183 | static void clear_bmi_err_intr(struct nitrox_device *ndev) | ||
184 | { | ||
185 | u64 value; | ||
186 | |||
187 | value = nitrox_read_csr(ndev, BMI_INT); | ||
188 | nitrox_write_csr(ndev, BMI_INT, value); | ||
189 | dev_err_ratelimited(DEV(ndev), "BMI_INT 0x%016llx\n", value); | ||
190 | } | ||
191 | |||
192 | /** | ||
193 | * clear_nps_core_int_active - clear NPS_CORE_INT_ACTIVE interrupts | ||
194 | * @ndev: NITROX device | ||
195 | */ | ||
196 | static void clear_nps_core_int_active(struct nitrox_device *ndev) | ||
197 | { | ||
198 | union nps_core_int_active core_int_active; | ||
199 | |||
200 | core_int_active.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE); | ||
201 | |||
202 | if (core_int_active.s.nps_core) | ||
203 | clear_nps_core_err_intr(ndev); | ||
204 | |||
205 | if (core_int_active.s.nps_pkt) | ||
206 | clear_nps_pkt_err_intr(ndev); | ||
207 | |||
208 | if (core_int_active.s.pom) | ||
209 | clear_pom_err_intr(ndev); | ||
210 | |||
211 | if (core_int_active.s.pem) | ||
212 | clear_pem_err_intr(ndev); | ||
213 | |||
214 | if (core_int_active.s.lbc) | ||
215 | clear_lbc_err_intr(ndev); | ||
216 | |||
217 | if (core_int_active.s.efl) | ||
218 | clear_efl_err_intr(ndev); | ||
219 | |||
220 | if (core_int_active.s.bmi) | ||
221 | clear_bmi_err_intr(ndev); | ||
222 | |||
223 | /* set resend so the ISR is invoked again if more work is pending */ | ||
224 | core_int_active.s.resend = 1; | ||
225 | nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int_active.value); | ||
226 | } | ||
227 | |||
228 | static irqreturn_t nps_core_int_isr(int irq, void *data) | ||
229 | { | ||
230 | struct nitrox_device *ndev = data; | ||
231 | |||
232 | clear_nps_core_int_active(ndev); | ||
233 | |||
234 | return IRQ_HANDLED; | ||
235 | } | ||
236 | |||
237 | static int nitrox_enable_msix(struct nitrox_device *ndev) | ||
238 | { | ||
239 | struct msix_entry *entries; | ||
240 | char **names; | ||
241 | int i, nr_entries, ret; | ||
242 | |||
243 | /* | ||
244 | * PF MSI-X vectors | ||
245 | * | ||
246 | * Entry 0: NPS PKT ring 0 | ||
247 | * Entry 1: AQMQ ring 0 | ||
248 | * Entry 2: ZQM ring 0 | ||
249 | * Entry 3: NPS PKT ring 1 | ||
250 | * Entry 4: AQMQ ring 1 | ||
251 | * Entry 5: ZQM ring 1 | ||
252 | * .... | ||
253 | * Entry 192: NPS_CORE_INT_ACTIVE | ||
254 | */ | ||
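/*
 * With NR_RING_VECTORS == 3, ring 'r' owns vector indices (r * 3) to
 * (r * 3) + 2 for its NPS PKT, AQMQ and ZQM interrupts; the extra last
 * entry maps to hardware entry 192 (NPS_CORE_INT_ACTIVE).
 */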
255 | nr_entries = (ndev->nr_queues * NR_RING_VECTORS) + 1; | ||
256 | entries = kzalloc_node(nr_entries * sizeof(struct msix_entry), | ||
257 | GFP_KERNEL, ndev->node); | ||
258 | if (!entries) | ||
259 | return -ENOMEM; | ||
260 | |||
261 | names = kcalloc(nr_entries, sizeof(char *), GFP_KERNEL); | ||
262 | if (!names) { | ||
263 | kfree(entries); | ||
264 | return -ENOMEM; | ||
265 | } | ||
266 | |||
267 | /* fill entries */ | ||
268 | for (i = 0; i < (nr_entries - 1); i++) | ||
269 | entries[i].entry = i; | ||
270 | |||
271 | entries[i].entry = NPS_CORE_INT_ACTIVE_ENTRY; | ||
272 | |||
273 | for (i = 0; i < nr_entries; i++) { | ||
274 | *(names + i) = kzalloc(MAX_MSIX_VECTOR_NAME, GFP_KERNEL); | ||
275 | if (!(*(names + i))) { | ||
276 | ret = -ENOMEM; | ||
277 | goto msix_fail; | ||
278 | } | ||
279 | } | ||
280 | ndev->msix.entries = entries; | ||
281 | ndev->msix.names = names; | ||
282 | ndev->msix.nr_entries = nr_entries; | ||
283 | |||
284 | ret = pci_enable_msix_exact(ndev->pdev, ndev->msix.entries, | ||
285 | ndev->msix.nr_entries); | ||
286 | if (ret) { | ||
287 | dev_err(&ndev->pdev->dev, "Failed to enable MSI-X IRQ(s) %d\n", | ||
288 | ret); | ||
289 | goto msix_fail; | ||
290 | } | ||
291 | return 0; | ||
292 | |||
293 | msix_fail: | ||
294 | for (i = 0; i < nr_entries; i++) | ||
295 | kfree(*(names + i)); | ||
296 | |||
297 | kfree(entries); | ||
298 | kfree(names); | ||
299 | return ret; | ||
300 | } | ||
301 | |||
302 | static void nitrox_cleanup_pkt_slc_bh(struct nitrox_device *ndev) | ||
303 | { | ||
304 | int i; | ||
305 | |||
306 | if (!ndev->bh.slc) | ||
307 | return; | ||
308 | |||
309 | for (i = 0; i < ndev->nr_queues; i++) { | ||
310 | struct bh_data *bh = &ndev->bh.slc[i]; | ||
311 | |||
312 | tasklet_disable(&bh->resp_handler); | ||
313 | tasklet_kill(&bh->resp_handler); | ||
314 | } | ||
315 | kfree(ndev->bh.slc); | ||
316 | ndev->bh.slc = NULL; | ||
317 | } | ||
318 | |||
319 | static int nitrox_setup_pkt_slc_bh(struct nitrox_device *ndev) | ||
320 | { | ||
321 | u32 size; | ||
322 | int i; | ||
323 | |||
324 | size = ndev->nr_queues * sizeof(struct bh_data); | ||
325 | ndev->bh.slc = kzalloc(size, GFP_KERNEL); | ||
326 | if (!ndev->bh.slc) | ||
327 | return -ENOMEM; | ||
328 | |||
329 | for (i = 0; i < ndev->nr_queues; i++) { | ||
330 | struct bh_data *bh = &ndev->bh.slc[i]; | ||
331 | u64 offset; | ||
332 | |||
333 | offset = NPS_PKT_SLC_CNTSX(i); | ||
334 | /* precalculate completion count CSR address */ | ||
335 | bh->completion_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset); | ||
336 | bh->cmdq = &ndev->pkt_cmdqs[i]; | ||
337 | |||
338 | tasklet_init(&bh->resp_handler, pkt_slc_resp_handler, | ||
339 | (unsigned long)bh); | ||
340 | } | ||
341 | |||
342 | return 0; | ||
343 | } | ||
344 | |||
345 | static int nitrox_request_irqs(struct nitrox_device *ndev) | ||
346 | { | ||
347 | struct pci_dev *pdev = ndev->pdev; | ||
348 | struct msix_entry *msix_ent = ndev->msix.entries; | ||
349 | int nr_ring_vectors, i = 0, ring, cpu, ret; | ||
350 | char *name; | ||
351 | |||
352 | /* | ||
353 | * PF MSI-X vectors | ||
354 | * | ||
355 | * Entry 0: NPS PKT ring 0 | ||
356 | * Entry 1: AQMQ ring 0 | ||
357 | * Entry 2: ZQM ring 0 | ||
358 | * Entry 3: NPS PKT ring 1 | ||
359 | * .... | ||
360 | * Entry 192: NPS_CORE_INT_ACTIVE | ||
361 | */ | ||
362 | nr_ring_vectors = ndev->nr_queues * NR_RING_VECTORS; | ||
363 | |||
364 | /* request irq for pkt ring/ports only */ | ||
365 | while (i < nr_ring_vectors) { | ||
366 | name = *(ndev->msix.names + i); | ||
367 | ring = (i / NR_RING_VECTORS); | ||
368 | snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-slc-ring%d", | ||
369 | ndev->idx, ring); | ||
370 | |||
371 | ret = request_irq(msix_ent[i].vector, nps_pkt_slc_isr, 0, | ||
372 | name, &ndev->bh.slc[ring]); | ||
373 | if (ret) { | ||
374 | dev_err(&pdev->dev, "failed to get irq %d for %s\n", | ||
375 | msix_ent[i].vector, name); | ||
376 | return ret; | ||
377 | } | ||
378 | cpu = ring % num_online_cpus(); | ||
379 | irq_set_affinity_hint(msix_ent[i].vector, get_cpu_mask(cpu)); | ||
380 | |||
381 | set_bit(i, ndev->msix.irqs); | ||
382 | i += NR_RING_VECTORS; | ||
383 | } | ||
384 | |||
385 | /* Request IRQ for NPS_CORE_INT_ACTIVE */ | ||
386 | name = *(ndev->msix.names + i); | ||
387 | snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-nps-core-int", ndev->idx); | ||
388 | ret = request_irq(msix_ent[i].vector, nps_core_int_isr, 0, name, ndev); | ||
389 | if (ret) { | ||
390 | dev_err(&pdev->dev, "failed to get irq %d for %s\n", | ||
391 | msix_ent[i].vector, name); | ||
392 | return ret; | ||
393 | } | ||
394 | set_bit(i, ndev->msix.irqs); | ||
395 | |||
396 | return 0; | ||
397 | } | ||
398 | |||
399 | static void nitrox_disable_msix(struct nitrox_device *ndev) | ||
400 | { | ||
401 | struct msix_entry *msix_ent = ndev->msix.entries; | ||
402 | char **names = ndev->msix.names; | ||
403 | int i = 0, ring, nr_ring_vectors; | ||
404 | |||
405 | nr_ring_vectors = ndev->msix.nr_entries - 1; | ||
406 | |||
407 | /* clear pkt ring irqs */ | ||
408 | while (i < nr_ring_vectors) { | ||
409 | if (test_and_clear_bit(i, ndev->msix.irqs)) { | ||
410 | ring = (i / NR_RING_VECTORS); | ||
411 | irq_set_affinity_hint(msix_ent[i].vector, NULL); | ||
412 | free_irq(msix_ent[i].vector, &ndev->bh.slc[ring]); | ||
413 | } | ||
414 | i += NR_RING_VECTORS; | ||
415 | } | ||
416 | irq_set_affinity_hint(msix_ent[i].vector, NULL); | ||
417 | free_irq(msix_ent[i].vector, ndev); | ||
418 | clear_bit(i, ndev->msix.irqs); | ||
419 | |||
420 | kfree(ndev->msix.entries); | ||
421 | for (i = 0; i < ndev->msix.nr_entries; i++) | ||
422 | kfree(*(names + i)); | ||
423 | |||
424 | kfree(names); | ||
425 | pci_disable_msix(ndev->pdev); | ||
426 | } | ||
427 | |||
428 | /** | ||
429 | * nitrox_pf_cleanup_isr - Cleanup PF MSI-X vectors and IRQs | ||
430 | * @ndev: NITROX device | ||
431 | */ | ||
432 | void nitrox_pf_cleanup_isr(struct nitrox_device *ndev) | ||
433 | { | ||
434 | nitrox_disable_msix(ndev); | ||
435 | nitrox_cleanup_pkt_slc_bh(ndev); | ||
436 | } | ||
437 | |||
438 | /** | ||
439 | * nitrox_pf_init_isr - Initialize PF MSI-X vectors and IRQs | ||
440 | * @ndev: NITROX device | ||
441 | * | ||
442 | * Return: 0 on success, a negative value on failure. | ||
443 | */ | ||
444 | int nitrox_pf_init_isr(struct nitrox_device *ndev) | ||
445 | { | ||
446 | int err; | ||
447 | |||
448 | err = nitrox_setup_pkt_slc_bh(ndev); | ||
449 | if (err) | ||
450 | return err; | ||
451 | |||
452 | err = nitrox_enable_msix(ndev); | ||
453 | if (err) | ||
454 | goto msix_fail; | ||
455 | |||
456 | err = nitrox_request_irqs(ndev); | ||
457 | if (err) | ||
458 | goto irq_fail; | ||
459 | |||
460 | return 0; | ||
461 | |||
462 | irq_fail: | ||
463 | nitrox_disable_msix(ndev); | ||
464 | msix_fail: | ||
465 | nitrox_cleanup_pkt_slc_bh(ndev); | ||
466 | return err; | ||
467 | } | ||
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c new file mode 100644 index 000000000000..b4a391adb9b6 --- /dev/null +++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c | |||
@@ -0,0 +1,210 @@ | |||
1 | #include <linux/cpumask.h> | ||
2 | #include <linux/dma-mapping.h> | ||
3 | #include <linux/dmapool.h> | ||
4 | #include <linux/delay.h> | ||
5 | #include <linux/gfp.h> | ||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/module.h> | ||
8 | #include <linux/pci_regs.h> | ||
9 | #include <linux/vmalloc.h> | ||
10 | #include <linux/pci.h> | ||
11 | |||
12 | #include "nitrox_dev.h" | ||
13 | #include "nitrox_common.h" | ||
14 | #include "nitrox_req.h" | ||
15 | #include "nitrox_csr.h" | ||
16 | |||
17 | #define CRYPTO_CTX_SIZE 256 | ||
18 | |||
19 | /* command queue alignments */ | ||
20 | #define PKT_IN_ALIGN 16 | ||
21 | |||
22 | static int cmdq_common_init(struct nitrox_cmdq *cmdq) | ||
23 | { | ||
24 | struct nitrox_device *ndev = cmdq->ndev; | ||
25 | u32 qsize; | ||
26 | |||
27 | qsize = (ndev->qlen) * cmdq->instr_size; | ||
28 | cmdq->head_unaligned = dma_zalloc_coherent(DEV(ndev), | ||
29 | (qsize + PKT_IN_ALIGN), | ||
30 | &cmdq->dma_unaligned, | ||
31 | GFP_KERNEL); | ||
32 | if (!cmdq->head_unaligned) | ||
33 | return -ENOMEM; | ||
34 | |||
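/*
 * The buffer is over-allocated by PKT_IN_ALIGN bytes so both the CPU
 * pointer and the DMA address below can be rounded up to the 16-byte
 * alignment the ring hardware expects, without overrunning the buffer.
 */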
35 | cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN); | ||
36 | cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN); | ||
37 | cmdq->qsize = (qsize + PKT_IN_ALIGN); | ||
38 | |||
39 | spin_lock_init(&cmdq->response_lock); | ||
40 | spin_lock_init(&cmdq->cmdq_lock); | ||
41 | spin_lock_init(&cmdq->backlog_lock); | ||
42 | |||
43 | INIT_LIST_HEAD(&cmdq->response_head); | ||
44 | INIT_LIST_HEAD(&cmdq->backlog_head); | ||
45 | INIT_WORK(&cmdq->backlog_qflush, backlog_qflush_work); | ||
46 | |||
47 | atomic_set(&cmdq->pending_count, 0); | ||
48 | atomic_set(&cmdq->backlog_count, 0); | ||
49 | return 0; | ||
50 | } | ||
51 | |||
52 | static void cmdq_common_cleanup(struct nitrox_cmdq *cmdq) | ||
53 | { | ||
54 | struct nitrox_device *ndev = cmdq->ndev; | ||
55 | |||
56 | cancel_work_sync(&cmdq->backlog_qflush); | ||
57 | |||
58 | dma_free_coherent(DEV(ndev), cmdq->qsize, | ||
59 | cmdq->head_unaligned, cmdq->dma_unaligned); | ||
60 | |||
61 | atomic_set(&cmdq->pending_count, 0); | ||
62 | atomic_set(&cmdq->backlog_count, 0); | ||
63 | |||
64 | cmdq->dbell_csr_addr = NULL; | ||
65 | cmdq->head = NULL; | ||
66 | cmdq->dma = 0; | ||
67 | cmdq->qsize = 0; | ||
68 | cmdq->instr_size = 0; | ||
69 | } | ||
70 | |||
71 | static void nitrox_cleanup_pkt_cmdqs(struct nitrox_device *ndev) | ||
72 | { | ||
73 | int i; | ||
74 | |||
75 | for (i = 0; i < ndev->nr_queues; i++) { | ||
76 | struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i]; | ||
77 | |||
78 | cmdq_common_cleanup(cmdq); | ||
79 | } | ||
80 | kfree(ndev->pkt_cmdqs); | ||
81 | ndev->pkt_cmdqs = NULL; | ||
82 | } | ||
83 | |||
84 | static int nitrox_init_pkt_cmdqs(struct nitrox_device *ndev) | ||
85 | { | ||
86 | int i, err, size; | ||
87 | |||
88 | size = ndev->nr_queues * sizeof(struct nitrox_cmdq); | ||
89 | ndev->pkt_cmdqs = kzalloc(size, GFP_KERNEL); | ||
90 | if (!ndev->pkt_cmdqs) | ||
91 | return -ENOMEM; | ||
92 | |||
93 | for (i = 0; i < ndev->nr_queues; i++) { | ||
94 | struct nitrox_cmdq *cmdq; | ||
95 | u64 offset; | ||
96 | |||
97 | cmdq = &ndev->pkt_cmdqs[i]; | ||
98 | cmdq->ndev = ndev; | ||
99 | cmdq->qno = i; | ||
100 | cmdq->instr_size = sizeof(struct nps_pkt_instr); | ||
101 | |||
102 | offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i); | ||
103 | /* SE ring doorbell address for this queue */ | ||
104 | cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset); | ||
105 | |||
106 | err = cmdq_common_init(cmdq); | ||
107 | if (err) | ||
108 | goto pkt_cmdq_fail; | ||
109 | } | ||
110 | return 0; | ||
111 | |||
112 | pkt_cmdq_fail: | ||
113 | nitrox_cleanup_pkt_cmdqs(ndev); | ||
114 | return err; | ||
115 | } | ||
116 | |||
117 | static int create_crypto_dma_pool(struct nitrox_device *ndev) | ||
118 | { | ||
119 | size_t size; | ||
120 | |||
121 | /* Crypto context pool, 16 byte aligned */ | ||
122 | size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr); | ||
123 | ndev->ctx_pool = dma_pool_create("crypto-context", | ||
124 | DEV(ndev), size, 16, 0); | ||
125 | if (!ndev->ctx_pool) | ||
126 | return -ENOMEM; | ||
127 | |||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | static void destroy_crypto_dma_pool(struct nitrox_device *ndev) | ||
132 | { | ||
133 | if (!ndev->ctx_pool) | ||
134 | return; | ||
135 | |||
136 | dma_pool_destroy(ndev->ctx_pool); | ||
137 | ndev->ctx_pool = NULL; | ||
138 | } | ||
139 | |||
140 | /** | ||
141 | * crypto_alloc_context - Allocate crypto context from pool | ||
142 | * @ndev: NITROX Device | ||
143 | */ | ||
144 | void *crypto_alloc_context(struct nitrox_device *ndev) | ||
145 | { | ||
146 | struct ctx_hdr *ctx; | ||
147 | void *vaddr; | ||
148 | dma_addr_t dma; | ||
149 | |||
150 | vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_ATOMIC | __GFP_ZERO), &dma); | ||
151 | if (!vaddr) | ||
152 | return NULL; | ||
153 | |||
154 | /* fill metadata */ | ||
155 | ctx = vaddr; | ||
156 | ctx->pool = ndev->ctx_pool; | ||
157 | ctx->dma = dma; | ||
158 | ctx->ctx_dma = dma + sizeof(struct ctx_hdr); | ||
159 | |||
160 | return ((u8 *)vaddr + sizeof(struct ctx_hdr)); | ||
161 | } | ||
162 | |||
163 | /** | ||
164 | * crypto_free_context - Free crypto context to pool | ||
165 | * @ctx: context to free | ||
166 | */ | ||
167 | void crypto_free_context(void *ctx) | ||
168 | { | ||
169 | struct ctx_hdr *ctxp; | ||
170 | |||
171 | if (!ctx) | ||
172 | return; | ||
173 | |||
174 | ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr)); | ||
175 | dma_pool_free(ctxp->pool, ctxp, ctxp->dma); | ||
176 | } | ||
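/*
 * Minimal usage sketch for the two helpers above (illustrative only;
 * the real callers are the algorithm implementations registered via
 * nitrox_crypto_register()):
 *
 *	void *ctx = crypto_alloc_context(ndev);
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	... program the 256-byte SE context area pointed to by ctx ...
 *	crypto_free_context(ctx);
 */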
177 | |||
178 | /** | ||
179 | * nitrox_common_sw_init - allocate software resources. | ||
180 | * @ndev: NITROX device | ||
181 | * | ||
182 | * Allocates crypto context pools and command queues etc. | ||
183 | * | ||
184 | * Return: 0 on success, or a negative error code on error. | ||
185 | */ | ||
186 | int nitrox_common_sw_init(struct nitrox_device *ndev) | ||
187 | { | ||
188 | int err = 0; | ||
189 | |||
190 | /* per device crypto context pool */ | ||
191 | err = create_crypto_dma_pool(ndev); | ||
192 | if (err) | ||
193 | return err; | ||
194 | |||
195 | err = nitrox_init_pkt_cmdqs(ndev); | ||
196 | if (err) | ||
197 | destroy_crypto_dma_pool(ndev); | ||
198 | |||
199 | return err; | ||
200 | } | ||
201 | |||
202 | /** | ||
203 | * nitrox_common_sw_cleanup - free software resources. | ||
204 | * @ndev: NITROX device | ||
205 | */ | ||
206 | void nitrox_common_sw_cleanup(struct nitrox_device *ndev) | ||
207 | { | ||
208 | nitrox_cleanup_pkt_cmdqs(ndev); | ||
209 | destroy_crypto_dma_pool(ndev); | ||
210 | } | ||
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c new file mode 100644 index 000000000000..ae44a464cd2d --- /dev/null +++ b/drivers/crypto/cavium/nitrox/nitrox_main.c | |||
@@ -0,0 +1,640 @@ | |||
1 | #include <linux/aer.h> | ||
2 | #include <linux/delay.h> | ||
3 | #include <linux/debugfs.h> | ||
4 | #include <linux/firmware.h> | ||
5 | #include <linux/list.h> | ||
6 | #include <linux/module.h> | ||
7 | #include <linux/mutex.h> | ||
8 | #include <linux/pci.h> | ||
9 | #include <linux/pci_ids.h> | ||
10 | |||
11 | #include "nitrox_dev.h" | ||
12 | #include "nitrox_common.h" | ||
13 | #include "nitrox_csr.h" | ||
14 | |||
15 | #define CNN55XX_DEV_ID 0x12 | ||
16 | #define MAX_PF_QUEUES 64 | ||
17 | #define UCODE_HLEN 48 | ||
18 | #define SE_GROUP 0 | ||
19 | |||
20 | #define DRIVER_VERSION "1.0" | ||
21 | /* SE microcode */ | ||
22 | #define SE_FW "cnn55xx_se.fw" | ||
23 | |||
24 | static const char nitrox_driver_name[] = "CNN55XX"; | ||
25 | |||
26 | static LIST_HEAD(ndevlist); | ||
27 | static DEFINE_MUTEX(devlist_lock); | ||
28 | static unsigned int num_devices; | ||
29 | |||
30 | /** | ||
31 | * nitrox_pci_tbl - PCI Device ID Table | ||
32 | */ | ||
33 | static const struct pci_device_id nitrox_pci_tbl[] = { | ||
34 | {PCI_VDEVICE(CAVIUM, CNN55XX_DEV_ID), 0}, | ||
35 | /* required last entry */ | ||
36 | {0, } | ||
37 | }; | ||
38 | MODULE_DEVICE_TABLE(pci, nitrox_pci_tbl); | ||
39 | |||
40 | static unsigned int qlen = DEFAULT_CMD_QLEN; | ||
41 | module_param(qlen, uint, 0644); | ||
42 | MODULE_PARM_DESC(qlen, "Command queue length - default 2048"); | ||
43 | |||
44 | /** | ||
45 | * struct ucode - Firmware Header | ||
46 | * @id: microcode ID | ||
47 | * @version: firmware version | ||
48 | * @code_size: code section size | ||
49 | * @raz: alignment | ||
50 | * @code: code section | ||
51 | */ | ||
52 | struct ucode { | ||
53 | u8 id; | ||
54 | char version[VERSION_LEN - 1]; | ||
55 | __be32 code_size; | ||
56 | u8 raz[12]; | ||
57 | u64 code[0]; | ||
58 | }; | ||
59 | |||
60 | /** | ||
61 | * write_to_ucd_unit - Write Firmware to NITROX UCD unit | ||
62 | */ | ||
63 | static void write_to_ucd_unit(struct nitrox_device *ndev, | ||
64 | struct ucode *ucode) | ||
65 | { | ||
66 | u32 code_size = be32_to_cpu(ucode->code_size) * 2; | ||
67 | u64 offset, data; | ||
68 | int i = 0; | ||
69 | |||
70 | /* | ||
71 | * UCD structure | ||
72 | * | ||
73 | * ------------- | ||
74 | * | BLK 7 | | ||
75 | * ------------- | ||
76 | * | BLK 6 | | ||
77 | * ------------- | ||
78 | * | ... | | ||
79 | * ------------- | ||
80 | * | BLK 0 | | ||
81 | * ------------- | ||
82 | * Total of 8 blocks, each size 32KB | ||
83 | */ | ||
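/* the complete image below is loaded into block 0, 64 bits per CSR write */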
84 | |||
85 | /* set the block number */ | ||
86 | offset = UCD_UCODE_LOAD_BLOCK_NUM; | ||
87 | nitrox_write_csr(ndev, offset, 0); | ||
88 | |||
89 | code_size = roundup(code_size, 8); | ||
90 | while (code_size) { | ||
91 | data = ucode->code[i]; | ||
92 | /* write 8 bytes at a time */ | ||
93 | offset = UCD_UCODE_LOAD_IDX_DATAX(i); | ||
94 | nitrox_write_csr(ndev, offset, data); | ||
95 | code_size -= 8; | ||
96 | i++; | ||
97 | } | ||
98 | |||
99 | /* put all SE cores in group 0 */ | ||
100 | offset = POM_GRP_EXECMASKX(SE_GROUP); | ||
101 | nitrox_write_csr(ndev, offset, (~0ULL)); | ||
102 | |||
103 | for (i = 0; i < ndev->hw.se_cores; i++) { | ||
104 | /* | ||
105 | * write block number and firmware length | ||
106 | * bit:<2:0> block number | ||
107 | * bit:3 is set SE uses 32KB microcode | ||
108 | * bit:3 is clear SE uses 64KB microcode | ||
109 | */ | ||
110 | offset = UCD_SE_EID_UCODE_BLOCK_NUMX(i); | ||
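/* 0x8: block number 0 with bit 3 set (SE uses 32KB microcode) */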
111 | nitrox_write_csr(ndev, offset, 0x8); | ||
112 | } | ||
113 | usleep_range(300, 400); | ||
114 | } | ||
115 | |||
116 | static int nitrox_load_fw(struct nitrox_device *ndev, const char *fw_name) | ||
117 | { | ||
118 | const struct firmware *fw; | ||
119 | struct ucode *ucode; | ||
120 | int ret; | ||
121 | |||
122 | dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name); | ||
123 | |||
124 | ret = request_firmware(&fw, fw_name, DEV(ndev)); | ||
125 | if (ret < 0) { | ||
126 | dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name); | ||
127 | return ret; | ||
128 | } | ||
129 | |||
130 | ucode = (struct ucode *)fw->data; | ||
131 | /* copy the firmware version */ | ||
132 | memcpy(ndev->hw.fw_name, ucode->version, (VERSION_LEN - 2)); | ||
133 | ndev->hw.fw_name[VERSION_LEN - 1] = '\0'; | ||
134 | |||
135 | write_to_ucd_unit(ndev, ucode); | ||
136 | release_firmware(fw); | ||
137 | |||
138 | set_bit(NITROX_UCODE_LOADED, &ndev->status); | ||
139 | /* barrier to sync with other cpus */ | ||
140 | smp_mb__after_atomic(); | ||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | /** | ||
145 | * nitrox_add_to_devlist - add NITROX device to global device list | ||
146 | * @ndev: NITROX device | ||
147 | */ | ||
148 | static int nitrox_add_to_devlist(struct nitrox_device *ndev) | ||
149 | { | ||
150 | struct nitrox_device *dev; | ||
151 | int ret = 0; | ||
152 | |||
153 | INIT_LIST_HEAD(&ndev->list); | ||
154 | refcount_set(&ndev->refcnt, 1); | ||
155 | |||
156 | mutex_lock(&devlist_lock); | ||
157 | list_for_each_entry(dev, &ndevlist, list) { | ||
158 | if (dev == ndev) { | ||
159 | ret = -EEXIST; | ||
160 | goto unlock; | ||
161 | } | ||
162 | } | ||
163 | ndev->idx = num_devices++; | ||
164 | list_add_tail(&ndev->list, &ndevlist); | ||
165 | unlock: | ||
166 | mutex_unlock(&devlist_lock); | ||
167 | return ret; | ||
168 | } | ||
169 | |||
170 | /** | ||
171 | * nitrox_remove_from_devlist - remove NITROX device from | ||
172 | * global device list | ||
173 | * @ndev: NITROX device | ||
174 | */ | ||
175 | static void nitrox_remove_from_devlist(struct nitrox_device *ndev) | ||
176 | { | ||
177 | mutex_lock(&devlist_lock); | ||
178 | list_del(&ndev->list); | ||
179 | num_devices--; | ||
180 | mutex_unlock(&devlist_lock); | ||
181 | } | ||
182 | |||
183 | struct nitrox_device *nitrox_get_first_device(void) | ||
184 | { | ||
185 | struct nitrox_device *ndev = NULL; | ||
186 | |||
187 | mutex_lock(&devlist_lock); | ||
188 | list_for_each_entry(ndev, &ndevlist, list) { | ||
189 | if (nitrox_ready(ndev)) | ||
190 | break; | ||
191 | } | ||
192 | mutex_unlock(&devlist_lock); | ||
193 | if (!ndev) | ||
194 | return NULL; | ||
195 | |||
196 | refcount_inc(&ndev->refcnt); | ||
197 | /* barrier to sync with other cpus */ | ||
198 | smp_mb__after_atomic(); | ||
199 | return ndev; | ||
200 | } | ||
201 | |||
202 | void nitrox_put_device(struct nitrox_device *ndev) | ||
203 | { | ||
204 | if (!ndev) | ||
205 | return; | ||
206 | |||
207 | refcount_dec(&ndev->refcnt); | ||
208 | /* barrier to sync with other cpus */ | ||
209 | smp_mb__after_atomic(); | ||
210 | } | ||
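/*
 * Hedged usage sketch showing how the two helpers above pair up for a
 * consumer (illustrative only; the real consumers live in the crypto
 * algorithm code):
 *
 *	struct nitrox_device *ndev = nitrox_get_first_device();
 *
 *	if (!ndev)
 *		return -ENODEV;
 *	... queue requests to ndev ...
 *	nitrox_put_device(ndev);
 */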
211 | |||
212 | static int nitrox_reset_device(struct pci_dev *pdev) | ||
213 | { | ||
214 | int pos = 0; | ||
215 | |||
216 | pos = pci_save_state(pdev); | ||
217 | if (pos) { | ||
218 | dev_err(&pdev->dev, "Failed to save pci state\n"); | ||
219 | return -ENOMEM; | ||
220 | } | ||
221 | |||
222 | pos = pci_pcie_cap(pdev); | ||
223 | if (!pos) | ||
224 | return -ENOTTY; | ||
225 | |||
226 | if (!pci_wait_for_pending_transaction(pdev)) | ||
227 | dev_err(&pdev->dev, "waiting for pending transaction\n"); | ||
228 | |||
229 | pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); | ||
230 | msleep(100); | ||
231 | pci_restore_state(pdev); | ||
232 | |||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | static int nitrox_pf_sw_init(struct nitrox_device *ndev) | ||
237 | { | ||
238 | int err; | ||
239 | |||
240 | err = nitrox_common_sw_init(ndev); | ||
241 | if (err) | ||
242 | return err; | ||
243 | |||
244 | err = nitrox_pf_init_isr(ndev); | ||
245 | if (err) | ||
246 | nitrox_common_sw_cleanup(ndev); | ||
247 | |||
248 | return err; | ||
249 | } | ||
250 | |||
251 | static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev) | ||
252 | { | ||
253 | nitrox_pf_cleanup_isr(ndev); | ||
254 | nitrox_common_sw_cleanup(ndev); | ||
255 | } | ||
256 | |||
257 | /** | ||
258 | * nitrox_bist_check - Check NITROX BIST registers status | ||
259 | * @ndev: NITROX device | ||
260 | */ | ||
261 | static int nitrox_bist_check(struct nitrox_device *ndev) | ||
262 | { | ||
263 | u64 value = 0; | ||
264 | int i; | ||
265 | |||
266 | for (i = 0; i < NR_CLUSTERS; i++) { | ||
267 | value += nitrox_read_csr(ndev, EMU_BIST_STATUSX(i)); | ||
268 | value += nitrox_read_csr(ndev, EFL_CORE_BIST_REGX(i)); | ||
269 | } | ||
270 | value += nitrox_read_csr(ndev, UCD_BIST_STATUS); | ||
271 | value += nitrox_read_csr(ndev, NPS_CORE_BIST_REG); | ||
272 | value += nitrox_read_csr(ndev, NPS_CORE_NPC_BIST_REG); | ||
273 | value += nitrox_read_csr(ndev, NPS_PKT_SLC_BIST_REG); | ||
274 | value += nitrox_read_csr(ndev, NPS_PKT_IN_BIST_REG); | ||
275 | value += nitrox_read_csr(ndev, POM_BIST_REG); | ||
276 | value += nitrox_read_csr(ndev, BMI_BIST_REG); | ||
277 | value += nitrox_read_csr(ndev, EFL_TOP_BIST_STAT); | ||
278 | value += nitrox_read_csr(ndev, BMO_BIST_REG); | ||
279 | value += nitrox_read_csr(ndev, LBC_BIST_STATUS); | ||
280 | value += nitrox_read_csr(ndev, PEM_BIST_STATUSX(0)); | ||
281 | if (value) | ||
282 | return -EIO; | ||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | static void nitrox_get_hwinfo(struct nitrox_device *ndev) | ||
287 | { | ||
288 | union emu_fuse_map emu_fuse; | ||
289 | u64 offset; | ||
290 | int i; | ||
291 | |||
292 | for (i = 0; i < NR_CLUSTERS; i++) { | ||
293 | u8 dead_cores; | ||
294 | |||
295 | offset = EMU_FUSE_MAPX(i); | ||
296 | emu_fuse.value = nitrox_read_csr(ndev, offset); | ||
297 | if (emu_fuse.s.valid) { | ||
298 | dead_cores = hweight32(emu_fuse.s.ae_fuse); | ||
299 | ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores; | ||
300 | dead_cores = hweight16(emu_fuse.s.se_fuse); | ||
301 | ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores; | ||
302 | } | ||
303 | } | ||
304 | } | ||
305 | |||
306 | static int nitrox_pf_hw_init(struct nitrox_device *ndev) | ||
307 | { | ||
308 | int err; | ||
309 | |||
310 | err = nitrox_bist_check(ndev); | ||
311 | if (err) { | ||
312 | dev_err(&ndev->pdev->dev, "BIST check failed\n"); | ||
313 | return err; | ||
314 | } | ||
315 | /* get cores information */ | ||
316 | nitrox_get_hwinfo(ndev); | ||
317 | |||
318 | nitrox_config_nps_unit(ndev); | ||
319 | nitrox_config_pom_unit(ndev); | ||
320 | nitrox_config_efl_unit(ndev); | ||
321 | /* configure IO units */ | ||
322 | nitrox_config_bmi_unit(ndev); | ||
323 | nitrox_config_bmo_unit(ndev); | ||
324 | /* configure Local Buffer Cache */ | ||
325 | nitrox_config_lbc_unit(ndev); | ||
326 | nitrox_config_rand_unit(ndev); | ||
327 | |||
328 | /* load firmware on SE cores */ | ||
329 | err = nitrox_load_fw(ndev, SE_FW); | ||
330 | if (err) | ||
331 | return err; | ||
332 | |||
333 | nitrox_config_emu_unit(ndev); | ||
334 | |||
335 | return 0; | ||
336 | } | ||
337 | |||
338 | #if IS_ENABLED(CONFIG_DEBUG_FS) | ||
339 | static int registers_show(struct seq_file *s, void *v) | ||
340 | { | ||
341 | struct nitrox_device *ndev = s->private; | ||
342 | u64 offset; | ||
343 | |||
344 | /* NPS DMA stats */ | ||
345 | offset = NPS_STATS_PKT_DMA_RD_CNT; | ||
346 | seq_printf(s, "NPS_STATS_PKT_DMA_RD_CNT 0x%016llx\n", | ||
347 | nitrox_read_csr(ndev, offset)); | ||
348 | offset = NPS_STATS_PKT_DMA_WR_CNT; | ||
349 | seq_printf(s, "NPS_STATS_PKT_DMA_WR_CNT 0x%016llx\n", | ||
350 | nitrox_read_csr(ndev, offset)); | ||
351 | |||
352 | /* BMI/BMO stats */ | ||
353 | offset = BMI_NPS_PKT_CNT; | ||
354 | seq_printf(s, "BMI_NPS_PKT_CNT 0x%016llx\n", | ||
355 | nitrox_read_csr(ndev, offset)); | ||
356 | offset = BMO_NPS_SLC_PKT_CNT; | ||
357 | seq_printf(s, "BMO_NPS_PKT_CNT 0x%016llx\n", | ||
358 | nitrox_read_csr(ndev, offset)); | ||
359 | |||
360 | return 0; | ||
361 | } | ||
362 | |||
363 | static int registers_open(struct inode *inode, struct file *file) | ||
364 | { | ||
365 | return single_open(file, registers_show, inode->i_private); | ||
366 | } | ||
367 | |||
368 | static const struct file_operations register_fops = { | ||
369 | .owner = THIS_MODULE, | ||
370 | .open = registers_open, | ||
371 | .read = seq_read, | ||
372 | .llseek = seq_lseek, | ||
373 | .release = single_release, | ||
374 | }; | ||
375 | |||
376 | static int firmware_show(struct seq_file *s, void *v) | ||
377 | { | ||
378 | struct nitrox_device *ndev = s->private; | ||
379 | |||
380 | seq_printf(s, "Version: %s\n", ndev->hw.fw_name); | ||
381 | return 0; | ||
382 | } | ||
383 | |||
384 | static int firmware_open(struct inode *inode, struct file *file) | ||
385 | { | ||
386 | return single_open(file, firmware_show, inode->i_private); | ||
387 | } | ||
388 | |||
389 | static const struct file_operations firmware_fops = { | ||
390 | .owner = THIS_MODULE, | ||
391 | .open = firmware_open, | ||
392 | .read = seq_read, | ||
393 | .llseek = seq_lseek, | ||
394 | .release = single_release, | ||
395 | }; | ||
396 | |||
397 | static int nitrox_show(struct seq_file *s, void *v) | ||
398 | { | ||
399 | struct nitrox_device *ndev = s->private; | ||
400 | |||
401 | seq_printf(s, "NITROX-5 [idx: %d]\n", ndev->idx); | ||
402 | seq_printf(s, " Revision ID: 0x%0x\n", ndev->hw.revision_id); | ||
403 | seq_printf(s, " Cores [AE: %u SE: %u]\n", | ||
404 | ndev->hw.ae_cores, ndev->hw.se_cores); | ||
405 | seq_printf(s, " Number of Queues: %u\n", ndev->nr_queues); | ||
406 | seq_printf(s, " Queue length: %u\n", ndev->qlen); | ||
407 | seq_printf(s, " Node: %u\n", ndev->node); | ||
408 | |||
409 | return 0; | ||
410 | } | ||
411 | |||
412 | static int nitrox_open(struct inode *inode, struct file *file) | ||
413 | { | ||
414 | return single_open(file, nitrox_show, inode->i_private); | ||
415 | } | ||
416 | |||
417 | static const struct file_operations nitrox_fops = { | ||
418 | .owner = THIS_MODULE, | ||
419 | .open = nitrox_open, | ||
420 | .read = seq_read, | ||
421 | .llseek = seq_lseek, | ||
422 | .release = single_release, | ||
423 | }; | ||
424 | |||
425 | static void nitrox_debugfs_exit(struct nitrox_device *ndev) | ||
426 | { | ||
427 | debugfs_remove_recursive(ndev->debugfs_dir); | ||
428 | ndev->debugfs_dir = NULL; | ||
429 | } | ||
430 | |||
431 | static int nitrox_debugfs_init(struct nitrox_device *ndev) | ||
432 | { | ||
433 | struct dentry *dir, *f; | ||
434 | |||
435 | dir = debugfs_create_dir(KBUILD_MODNAME, NULL); | ||
436 | if (!dir) | ||
437 | return -ENOMEM; | ||
438 | |||
439 | ndev->debugfs_dir = dir; | ||
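/*
 * The read-only files created below appear under
 * <debugfs mount>/<KBUILD_MODNAME>/{counters,firmware,nitrox}.
 */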
440 | f = debugfs_create_file("counters", 0400, dir, ndev, ®ister_fops); | ||
441 | if (!f) | ||
442 | goto err; | ||
443 | f = debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops); | ||
444 | if (!f) | ||
445 | goto err; | ||
446 | f = debugfs_create_file("nitrox", 0400, dir, ndev, &nitrox_fops); | ||
447 | if (!f) | ||
448 | goto err; | ||
449 | |||
450 | return 0; | ||
451 | |||
452 | err: | ||
453 | nitrox_debugfs_exit(ndev); | ||
454 | return -ENODEV; | ||
455 | } | ||
456 | #else | ||
457 | static int nitrox_debugfs_init(struct nitrox_device *ndev) | ||
458 | { | ||
459 | return 0; | ||
460 | } | ||
461 | |||
462 | static void nitrox_debugfs_exit(struct nitrox_device *ndev) | ||
463 | { | ||
464 | } | ||
465 | #endif | ||
466 | |||
467 | /** | ||
468 | * nitrox_probe - NITROX Initialization function. | ||
469 | * @pdev: PCI device information struct | ||
470 | * @id: entry in nitrox_pci_tbl | ||
471 | * | ||
472 | * Return: 0, if the driver is bound to the device, or | ||
473 | * a negative error if there is failure. | ||
474 | */ | ||
475 | static int nitrox_probe(struct pci_dev *pdev, | ||
476 | const struct pci_device_id *id) | ||
477 | { | ||
478 | struct nitrox_device *ndev; | ||
479 | int err; | ||
480 | |||
481 | dev_info_once(&pdev->dev, "%s driver version %s\n", | ||
482 | nitrox_driver_name, DRIVER_VERSION); | ||
483 | |||
484 | err = pci_enable_device_mem(pdev); | ||
485 | if (err) | ||
486 | return err; | ||
487 | |||
488 | /* do FLR */ | ||
489 | err = nitrox_reset_device(pdev); | ||
490 | if (err) { | ||
491 | dev_err(&pdev->dev, "FLR failed\n"); | ||
492 | pci_disable_device(pdev); | ||
493 | return err; | ||
494 | } | ||
495 | |||
496 | if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { | ||
497 | dev_dbg(&pdev->dev, "DMA to 64-BIT address\n"); | ||
498 | } else { | ||
499 | err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); | ||
500 | if (err) { | ||
501 | dev_err(&pdev->dev, "DMA configuration failed\n"); | ||
502 | pci_disable_device(pdev); | ||
503 | return err; | ||
504 | } | ||
505 | } | ||
506 | |||
507 | err = pci_request_mem_regions(pdev, nitrox_driver_name); | ||
508 | if (err) { | ||
509 | pci_disable_device(pdev); | ||
510 | return err; | ||
511 | } | ||
512 | pci_set_master(pdev); | ||
513 | |||
514 | ndev = kzalloc(sizeof(*ndev), GFP_KERNEL); | ||
515 | if (!ndev) | ||
516 | goto ndev_fail; | ||
517 | |||
518 | pci_set_drvdata(pdev, ndev); | ||
519 | ndev->pdev = pdev; | ||
520 | |||
521 | /* add to device list */ | ||
522 | nitrox_add_to_devlist(ndev); | ||
523 | |||
524 | ndev->hw.vendor_id = pdev->vendor; | ||
525 | ndev->hw.device_id = pdev->device; | ||
526 | ndev->hw.revision_id = pdev->revision; | ||
527 | /* command timeout in jiffies */ | ||
528 | ndev->timeout = msecs_to_jiffies(CMD_TIMEOUT); | ||
529 | ndev->node = dev_to_node(&pdev->dev); | ||
530 | if (ndev->node == NUMA_NO_NODE) | ||
531 | ndev->node = 0; | ||
532 | |||
533 | ndev->bar_addr = ioremap(pci_resource_start(pdev, 0), | ||
534 | pci_resource_len(pdev, 0)); | ||
535 | if (!ndev->bar_addr) { | ||
536 | err = -EIO; | ||
537 | goto ioremap_err; | ||
538 | } | ||
539 | /* allocate command queues based on CPUs, max queues are 64 */ | ||
540 | ndev->nr_queues = min_t(u32, MAX_PF_QUEUES, num_online_cpus()); | ||
541 | ndev->qlen = qlen; | ||
542 | |||
543 | err = nitrox_pf_sw_init(ndev); | ||
544 | if (err) | ||
545 | goto ioremap_err; | ||
546 | |||
547 | err = nitrox_pf_hw_init(ndev); | ||
548 | if (err) | ||
549 | goto pf_hw_fail; | ||
550 | |||
551 | err = nitrox_debugfs_init(ndev); | ||
552 | if (err) | ||
553 | goto pf_hw_fail; | ||
554 | |||
555 | set_bit(NITROX_READY, &ndev->status); | ||
556 | /* barrier to sync with other cpus */ | ||
557 | smp_mb__after_atomic(); | ||
558 | |||
559 | err = nitrox_crypto_register(); | ||
560 | if (err) | ||
561 | goto crypto_fail; | ||
562 | |||
563 | return 0; | ||
564 | |||
565 | crypto_fail: | ||
566 | nitrox_debugfs_exit(ndev); | ||
567 | clear_bit(NITROX_READY, &ndev->status); | ||
568 | /* barrier to sync with other cpus */ | ||
569 | smp_mb__after_atomic(); | ||
570 | pf_hw_fail: | ||
571 | nitrox_pf_sw_cleanup(ndev); | ||
572 | ioremap_err: | ||
573 | nitrox_remove_from_devlist(ndev); | ||
574 | kfree(ndev); | ||
575 | pci_set_drvdata(pdev, NULL); | ||
576 | ndev_fail: | ||
577 | pci_release_mem_regions(pdev); | ||
578 | pci_disable_device(pdev); | ||
579 | return err; | ||
580 | } | ||
581 | |||
582 | /** | ||
583 | * nitrox_remove - Unbind the driver from the device. | ||
584 | * @pdev: PCI device information struct | ||
585 | */ | ||
586 | static void nitrox_remove(struct pci_dev *pdev) | ||
587 | { | ||
588 | struct nitrox_device *ndev = pci_get_drvdata(pdev); | ||
589 | |||
590 | if (!ndev) | ||
591 | return; | ||
592 | |||
593 | if (!refcount_dec_and_test(&ndev->refcnt)) { | ||
594 | dev_err(DEV(ndev), "Device refcnt not zero (%d)\n", | ||
595 | refcount_read(&ndev->refcnt)); | ||
596 | return; | ||
597 | } | ||
598 | |||
599 | dev_info(DEV(ndev), "Removing Device %x:%x\n", | ||
600 | ndev->hw.vendor_id, ndev->hw.device_id); | ||
601 | |||
602 | clear_bit(NITROX_READY, &ndev->status); | ||
603 | /* barrier to sync with other cpus */ | ||
604 | smp_mb__after_atomic(); | ||
605 | |||
606 | nitrox_remove_from_devlist(ndev); | ||
607 | nitrox_crypto_unregister(); | ||
608 | nitrox_debugfs_exit(ndev); | ||
609 | nitrox_pf_sw_cleanup(ndev); | ||
610 | |||
611 | iounmap(ndev->bar_addr); | ||
612 | kfree(ndev); | ||
613 | |||
614 | pci_set_drvdata(pdev, NULL); | ||
615 | pci_release_mem_regions(pdev); | ||
616 | pci_disable_device(pdev); | ||
617 | } | ||
618 | |||
619 | static void nitrox_shutdown(struct pci_dev *pdev) | ||
620 | { | ||
621 | pci_set_drvdata(pdev, NULL); | ||
622 | pci_release_mem_regions(pdev); | ||
623 | pci_disable_device(pdev); | ||
624 | } | ||
625 | |||
626 | static struct pci_driver nitrox_driver = { | ||
627 | .name = nitrox_driver_name, | ||
628 | .id_table = nitrox_pci_tbl, | ||
629 | .probe = nitrox_probe, | ||
630 | .remove = nitrox_remove, | ||
631 | .shutdown = nitrox_shutdown, | ||
632 | }; | ||
633 | |||
634 | module_pci_driver(nitrox_driver); | ||
635 | |||
636 | MODULE_AUTHOR("Srikanth Jampala <Jampala.Srikanth@cavium.com>"); | ||
637 | MODULE_DESCRIPTION("Cavium CNN55XX PF Driver " DRIVER_VERSION); | ||
638 | MODULE_LICENSE("GPL"); | ||
639 | MODULE_VERSION(DRIVER_VERSION); | ||
640 | MODULE_FIRMWARE(SE_FW); | ||
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h new file mode 100644 index 000000000000..74f4c20dc87d --- /dev/null +++ b/drivers/crypto/cavium/nitrox/nitrox_req.h | |||
@@ -0,0 +1,445 @@ | |||
1 | #ifndef __NITROX_REQ_H | ||
2 | #define __NITROX_REQ_H | ||
3 | |||
4 | #include <linux/dma-mapping.h> | ||
5 | #include <crypto/aes.h> | ||
6 | |||
7 | #include "nitrox_dev.h" | ||
8 | |||
9 | /** | ||
10 | * struct gphdr - General purpose Header | ||
11 | * @param0: first parameter. | ||
12 | * @param1: second parameter. | ||
13 | * @param2: third parameter. | ||
14 | * @param3: fourth parameter. | ||
15 | * | ||
16 | * The params carry the IV and encrypt/decrypt data offsets. | ||
17 | */ | ||
18 | struct gphdr { | ||
19 | __be16 param0; | ||
20 | __be16 param1; | ||
21 | __be16 param2; | ||
22 | __be16 param3; | ||
23 | }; | ||
24 | |||
25 | /** | ||
26 | * struct se_req_ctrl - SE request information. | ||
27 | * @arg: Minor number of the opcode | ||
28 | * @ctxc: Context control. | ||
29 | * @unca: Uncertainty enabled. | ||
30 | * @info: Additional information for SE cores. | ||
31 | * @ctxl: Context length in bytes. | ||
32 | * @uddl: User defined data length | ||
33 | */ | ||
34 | union se_req_ctrl { | ||
35 | u64 value; | ||
36 | struct { | ||
37 | u64 raz : 22; | ||
38 | u64 arg : 8; | ||
39 | u64 ctxc : 2; | ||
40 | u64 unca : 1; | ||
41 | u64 info : 3; | ||
42 | u64 unc : 8; | ||
43 | u64 ctxl : 12; | ||
44 | u64 uddl : 8; | ||
45 | } s; | ||
46 | }; | ||
47 | |||
48 | struct nitrox_sglist { | ||
49 | u16 len; | ||
50 | u16 raz0; | ||
51 | u32 raz1; | ||
52 | dma_addr_t dma; | ||
53 | }; | ||
54 | |||
55 | #define MAX_IV_LEN 16 | ||
56 | |||
57 | /** | ||
58 | * struct se_crypto_request - SE crypto request structure. | ||
59 | * @opcode: Request opcode (enc/dec) | ||
60 | * @flags: flags from crypto subsystem | ||
61 | * @ctx_handle: Crypto context handle. | ||
62 | * @gph: GP Header | ||
63 | * @ctrl: Request Information. | ||
64 | * @src: Input scatterlist | ||
65 | * @dst: Output scatterlist | ||
66 | */ | ||
67 | struct se_crypto_request { | ||
68 | u8 opcode; | ||
69 | gfp_t gfp; | ||
70 | u32 flags; | ||
71 | u64 ctx_handle; | ||
72 | |||
73 | struct gphdr gph; | ||
74 | union se_req_ctrl ctrl; | ||
75 | |||
76 | u8 iv[MAX_IV_LEN]; | ||
77 | u16 ivsize; | ||
78 | |||
79 | struct scatterlist *src; | ||
80 | struct scatterlist *dst; | ||
81 | }; | ||
82 | |||
83 | /* Crypto opcodes */ | ||
84 | #define FLEXI_CRYPTO_ENCRYPT_HMAC 0x33 | ||
85 | #define ENCRYPT 0 | ||
86 | #define DECRYPT 1 | ||
87 | |||
88 | /* IV from context */ | ||
89 | #define IV_FROM_CTX 0 | ||
90 | /* IV from Input data */ | ||
91 | #define IV_FROM_DPTR 1 | ||
92 | |||
93 | /** | ||
94 | * enum flexi_cipher - cipher opcodes for the SE firmware | ||
95 | */ | ||
96 | enum flexi_cipher { | ||
97 | CIPHER_NULL = 0, | ||
98 | CIPHER_3DES_CBC, | ||
99 | CIPHER_3DES_ECB, | ||
100 | CIPHER_AES_CBC, | ||
101 | CIPHER_AES_ECB, | ||
102 | CIPHER_AES_CFB, | ||
103 | CIPHER_AES_CTR, | ||
104 | CIPHER_AES_GCM, | ||
105 | CIPHER_AES_XTS, | ||
106 | CIPHER_AES_CCM, | ||
107 | CIPHER_AES_CBC_CTS, | ||
108 | CIPHER_AES_ECB_CTS, | ||
109 | CIPHER_INVALID | ||
110 | }; | ||
111 | |||
112 | /** | ||
113 | * struct crypto_keys - Crypto keys | ||
114 | * @key: Encryption key or KEY1 for AES-XTS | ||
115 | * @iv: Encryption IV or Tweak for AES-XTS | ||
116 | */ | ||
117 | struct crypto_keys { | ||
118 | union { | ||
119 | u8 key[AES_MAX_KEY_SIZE]; | ||
120 | u8 key1[AES_MAX_KEY_SIZE]; | ||
121 | } u; | ||
122 | u8 iv[AES_BLOCK_SIZE]; | ||
123 | }; | ||
124 | |||
125 | /** | ||
126 | * struct auth_keys - Authentication keys | ||
127 | * @ipad: IPAD or KEY2 for AES-XTS | ||
128 | * @opad: OPAD or AUTH KEY if auth_input_type = 1 | ||
129 | */ | ||
130 | struct auth_keys { | ||
131 | union { | ||
132 | u8 ipad[64]; | ||
133 | u8 key2[64]; | ||
134 | } u; | ||
135 | u8 opad[64]; | ||
136 | }; | ||
137 | |||
138 | /** | ||
139 | * struct flexi_crypto_context - Crypto context | ||
140 | * @cipher_type: Encryption cipher type | ||
141 | * @aes_keylen: AES key length | ||
142 | * @iv_source: Encryption IV source | ||
143 | * @hash_type: Authentication type | ||
144 | * @auth_input_type: Authentication input type | ||
145 | * 1 - Authentication IV and KEY, microcode calculates OPAD/IPAD | ||
146 | * 0 - Authentication OPAD/IPAD | ||
147 | * @mac_len: mac length | ||
148 | * @crypto: Crypto keys | ||
149 | * @auth: Authentication keys | ||
150 | */ | ||
151 | struct flexi_crypto_context { | ||
152 | union { | ||
153 | __be64 flags; | ||
154 | struct { | ||
155 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
156 | u64 cipher_type : 4; | ||
157 | u64 reserved_59 : 1; | ||
158 | u64 aes_keylen : 2; | ||
159 | u64 iv_source : 1; | ||
160 | u64 hash_type : 4; | ||
161 | u64 reserved_49_51 : 3; | ||
162 | u64 auth_input_type: 1; | ||
163 | u64 mac_len : 8; | ||
164 | u64 reserved_0_39 : 40; | ||
165 | #else | ||
166 | u64 reserved_0_39 : 40; | ||
167 | u64 mac_len : 8; | ||
168 | u64 auth_input_type: 1; | ||
169 | u64 reserved_49_51 : 3; | ||
170 | u64 hash_type : 4; | ||
171 | u64 iv_source : 1; | ||
172 | u64 aes_keylen : 2; | ||
173 | u64 reserved_59 : 1; | ||
174 | u64 cipher_type : 4; | ||
175 | #endif | ||
176 | } w0; | ||
177 | }; | ||
178 | |||
179 | struct crypto_keys crypto; | ||
180 | struct auth_keys auth; | ||
181 | }; | ||
182 | |||
183 | struct nitrox_crypto_ctx { | ||
184 | struct nitrox_device *ndev; | ||
185 | union { | ||
186 | u64 ctx_handle; | ||
187 | struct flexi_crypto_context *fctx; | ||
188 | } u; | ||
189 | }; | ||
190 | |||
191 | struct nitrox_kcrypt_request { | ||
192 | struct se_crypto_request creq; | ||
193 | struct nitrox_crypto_ctx *nctx; | ||
194 | struct skcipher_request *skreq; | ||
195 | }; | ||
196 | |||
197 | /** | ||
198 | * struct pkt_instr_hdr - Packet Instruction Header | ||
199 | * @g: Gather used | ||
200 | * When [G] is set and [GSZ] != 0, the instruction is an | ||
201 | * indirect gather instruction. | ||
202 | * When [G] is set and [GSZ] = 0, the instruction is a | ||
203 | * direct gather instruction. | ||
204 | * @gsz: Number of pointers in the indirect gather list | ||
205 | * @ihi: When set hardware duplicates the 1st 8 bytes of pkt_instr_hdr | ||
206 | * and adds them to the packet after the pkt_instr_hdr but before any UDD | ||
207 | * @ssz: Not used by the input hardware. But can become slc_store_int[SSZ] | ||
208 | * when [IHI] is set. | ||
209 | * @fsz: The number of front data bytes directly included in the | ||
210 | * PCIe instruction. | ||
211 | * @tlen: The length of the input packet in bytes, including: | ||
212 | * - 16B pkt_hdr | ||
213 | * - Inline context bytes if any, | ||
214 | * - UDD if any, | ||
215 | * - packet payload bytes | ||
216 | */ | ||
217 | union pkt_instr_hdr { | ||
218 | u64 value; | ||
219 | struct { | ||
220 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
221 | u64 raz_48_63 : 16; | ||
222 | u64 g : 1; | ||
223 | u64 gsz : 7; | ||
224 | u64 ihi : 1; | ||
225 | u64 ssz : 7; | ||
226 | u64 raz_30_31 : 2; | ||
227 | u64 fsz : 6; | ||
228 | u64 raz_16_23 : 8; | ||
229 | u64 tlen : 16; | ||
230 | #else | ||
231 | u64 tlen : 16; | ||
232 | u64 raz_16_23 : 8; | ||
233 | u64 fsz : 6; | ||
234 | u64 raz_30_31 : 2; | ||
235 | u64 ssz : 7; | ||
236 | u64 ihi : 1; | ||
237 | u64 gsz : 7; | ||
238 | u64 g : 1; | ||
239 | u64 raz_48_63 : 16; | ||
240 | #endif | ||
241 | } s; | ||
242 | }; | ||
243 | |||
244 | /** | ||
245 | * struct pkt_hdr - Packet Input Header | ||
246 | * @opcode: Request opcode (Major) | ||
247 | * @arg: Request opcode (Minor) | ||
248 | * @ctxc: Context control. | ||
249 | * @unca: When set [UNC] is the uncertainty count for an input packet. | ||
250 | * The hardware uses uncertainty counts to predict | ||
251 | * output buffer use and avoid deadlock. | ||
252 | * @info: Not used by input hardware. Available for use | ||
253 | * during SE processing. | ||
254 | * @destport: The expected destination port/ring/channel for the packet. | ||
255 | * @unc: Uncertainty count for an input packet. | ||
256 | * @grp: SE group that will process the input packet. | ||
257 | * @ctxl: Context Length in 64-bit words. | ||
258 | * @uddl: User-defined data (UDD) length in bytes. | ||
259 | * @ctxp: Context pointer. CTXP<63,2:0> must be zero in all cases. | ||
260 | */ | ||
261 | union pkt_hdr { | ||
262 | u64 value[2]; | ||
263 | struct { | ||
264 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
265 | u64 opcode : 8; | ||
266 | u64 arg : 8; | ||
267 | u64 ctxc : 2; | ||
268 | u64 unca : 1; | ||
269 | u64 raz_44 : 1; | ||
270 | u64 info : 3; | ||
271 | u64 destport : 9; | ||
272 | u64 unc : 8; | ||
273 | u64 raz_19_23 : 5; | ||
274 | u64 grp : 3; | ||
275 | u64 raz_15 : 1; | ||
276 | u64 ctxl : 7; | ||
277 | u64 uddl : 8; | ||
278 | #else | ||
279 | u64 uddl : 8; | ||
280 | u64 ctxl : 7; | ||
281 | u64 raz_15 : 1; | ||
282 | u64 grp : 3; | ||
283 | u64 raz_19_23 : 5; | ||
284 | u64 unc : 8; | ||
285 | u64 destport : 9; | ||
286 | u64 info : 3; | ||
287 | u64 raz_44 : 1; | ||
288 | u64 unca : 1; | ||
289 | u64 ctxc : 2; | ||
290 | u64 arg : 8; | ||
291 | u64 opcode : 8; | ||
292 | #endif | ||
293 | __be64 ctxp; | ||
294 | } s; | ||
295 | }; | ||
296 | |||
297 | /** | ||
298 | * struct slc_store_info - Solicited Packet Output Store Information. | ||
299 | * @ssz: The number of scatterlist pointers for the solicited output port | ||
300 | * packet. | ||
301 | * @rptr: The result pointer for the solicited output port packet. | ||
302 | * If [SSZ]=0, [RPTR] must point directly to a buffer on the remote | ||
303 | * host that is large enough to hold the entire output packet. | ||
304 | * If [SSZ]!=0, [RPTR] must point to an array of ([SSZ]+3)/4 | ||
305 | * sglist components at [RPTR] on the remote host. | ||
306 | */ | ||
307 | union slc_store_info { | ||
308 | u64 value[2]; | ||
309 | struct { | ||
310 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
311 | u64 raz_39_63 : 25; | ||
312 | u64 ssz : 7; | ||
313 | u64 raz_0_31 : 32; | ||
314 | #else | ||
315 | u64 raz_0_31 : 32; | ||
316 | u64 ssz : 7; | ||
317 | u64 raz_39_63 : 25; | ||
318 | #endif | ||
319 | __be64 rptr; | ||
320 | } s; | ||
321 | }; | ||
322 | |||
323 | /** | ||
324 | * struct nps_pkt_instr - NPS Packet Instruction of SE cores. | ||
325 | * @dptr0: Input pointer to a buffer on the remote host. | ||
326 | * @ih: Packet Instruction Header (8 bytes) | ||
327 | * @irh: Packet Input Header (16 bytes) | ||
328 | * @slc: Solicited Packet Output Store Information (16 bytes) | ||
329 | * @fdata: Front data | ||
330 | * | ||
331 | * 64-Byte Instruction Format | ||
332 | */ | ||
333 | struct nps_pkt_instr { | ||
334 | __be64 dptr0; | ||
335 | union pkt_instr_hdr ih; | ||
336 | union pkt_hdr irh; | ||
337 | union slc_store_info slc; | ||
338 | u64 fdata[2]; | ||
339 | }; | ||
340 | |||
341 | /** | ||
342 | * struct ctx_hdr - Bookkeeping data about the crypto context | ||
343 | * @pool: Pool used to allocate crypto context | ||
344 | * @dma: Base DMA address of the crypto context | ||
345 | * @ctx_dma: Actual usable crypto context for NITROX | ||
346 | */ | ||
347 | struct ctx_hdr { | ||
348 | struct dma_pool *pool; | ||
349 | dma_addr_t dma; | ||
350 | dma_addr_t ctx_dma; | ||
351 | }; | ||
352 | |||
353 | /* | ||
354 | * struct sglist_component - SG list component format | ||
355 | * @len0: The number of bytes at [PTR0] on the remote host. | ||
356 | * @len1: The number of bytes at [PTR1] on the remote host. | ||
357 | * @len2: The number of bytes at [PTR2] on the remote host. | ||
358 | * @len3: The number of bytes at [PTR3] on the remote host. | ||
359 | * @dma0: First pointer to a buffer on the remote host. | ||
360 | * @dma1: Second pointer to a buffer on the remote host. | ||
361 | * @dma2: Third pointer to a buffer on the remote host. | ||
362 | * @dma3: Fourth pointer to a buffer on the remote host. | ||
363 | */ | ||
364 | struct nitrox_sgcomp { | ||
365 | __be16 len[4]; | ||
366 | __be64 dma[4]; | ||
367 | }; | ||
368 | |||
369 | /* | ||
370 | * struct nitrox_sgtable - SG list information | ||
371 | * @map_bufs_cnt: Number of buffers mapped | ||
372 | * @nr_sgcomp: Number of sglist components | ||
373 | * @total_bytes: Total bytes in sglist. | ||
374 | * @len: Total sglist components length. | ||
375 | * @dma: DMA address of sglist component. | ||
376 | * @dir: DMA direction. | ||
377 | * @buf: crypto request buffer. | ||
378 | * @sglist: SG list of input/output buffers. | ||
379 | * @sgcomp: sglist component for NITROX. | ||
380 | */ | ||
381 | struct nitrox_sgtable { | ||
382 | u8 map_bufs_cnt; | ||
383 | u8 nr_sgcomp; | ||
384 | u16 total_bytes; | ||
385 | u32 len; | ||
386 | dma_addr_t dma; | ||
387 | enum dma_data_direction dir; | ||
388 | |||
389 | struct scatterlist *buf; | ||
390 | struct nitrox_sglist *sglist; | ||
391 | struct nitrox_sgcomp *sgcomp; | ||
392 | }; | ||
393 | |||
394 | /* Response Header Length */ | ||
395 | #define ORH_HLEN 8 | ||
396 | /* Completion bytes Length */ | ||
397 | #define COMP_HLEN 8 | ||
398 | |||
399 | struct resp_hdr { | ||
400 | u64 orh; | ||
401 | dma_addr_t orh_dma; | ||
402 | u64 completion; | ||
403 | dma_addr_t completion_dma; | ||
404 | }; | ||
405 | |||
406 | typedef void (*completion_t)(struct skcipher_request *skreq, int err); | ||
407 | |||
408 | /** | ||
409 | * struct nitrox_softreq - Represents the NITROX request. | ||
410 | * @response: response list entry | ||
411 | * @backlog: Backlog list entry | ||
412 | * @ndev: Device used to submit the request | ||
413 | * @cmdq: Command queue for submission | ||
414 | * @resp: Response headers | ||
415 | * @instr: 64B instruction | ||
416 | * @in: SG table for input | ||
417 | * @out: SG table for output | ||
418 | * @tstamp: Request submitted time in jiffies | ||
419 | * @callback: callback after request completion/timeout | ||
420 | * @cb_arg: callback argument | ||
421 | */ | ||
422 | struct nitrox_softreq { | ||
423 | struct list_head response; | ||
424 | struct list_head backlog; | ||
425 | |||
426 | u32 flags; | ||
427 | gfp_t gfp; | ||
428 | atomic_t status; | ||
429 | bool inplace; | ||
430 | |||
431 | struct nitrox_device *ndev; | ||
432 | struct nitrox_cmdq *cmdq; | ||
433 | |||
434 | struct nps_pkt_instr instr; | ||
435 | struct resp_hdr resp; | ||
436 | struct nitrox_sgtable in; | ||
437 | struct nitrox_sgtable out; | ||
438 | |||
439 | unsigned long tstamp; | ||
440 | |||
441 | completion_t callback; | ||
442 | struct skcipher_request *skreq; | ||
443 | }; | ||
444 | |||
445 | #endif /* __NITROX_REQ_H */ | ||
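
The request structure defined above is what the algorithm glue code fills in before handing a request to the request manager. Below is a minimal, hypothetical sketch (not taken from the driver) of how a caller might populate a struct se_crypto_request for an encrypt operation; the helper name and the concrete control values are assumptions, and the gphdr offset encoding is device-specific and therefore left zeroed.

/*
 * Illustrative sketch only: shows which se_crypto_request fields the
 * caller is expected to set, based on the definitions in nitrox_req.h.
 * The helper name and the concrete values are hypothetical.
 */
#include <crypto/internal/skcipher.h>
#include "nitrox_req.h"

static void example_fill_se_request(struct se_crypto_request *creq,
				    struct skcipher_request *skreq,
				    u64 ctx_handle, unsigned int ivsize)
{
	creq->flags = skreq->base.flags;
	creq->gfp = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC;

	creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
	creq->ctrl.value = 0;
	creq->ctrl.s.arg = ENCRYPT;	/* minor opcode */
	creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context); /* bytes */

	/* The IV travels with the input data (DPTR), not the context. */
	memcpy(creq->iv, skreq->iv, ivsize);
	creq->ivsize = ivsize;

	/*
	 * param0..param3 carry the IV and enc/dec data offsets; the exact
	 * encoding is device specific, so they are left zeroed here.
	 */
	memset(&creq->gph, 0, sizeof(creq->gph));

	creq->ctx_handle = ctx_handle;
	creq->src = skreq->src;
	creq->dst = skreq->dst;
}
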
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c new file mode 100644 index 000000000000..4bb4377c5ac0 --- /dev/null +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | |||
@@ -0,0 +1,735 @@ | |||
1 | #include <linux/gfp.h> | ||
2 | #include <linux/workqueue.h> | ||
3 | #include <crypto/internal/skcipher.h> | ||
4 | |||
5 | #include "nitrox_dev.h" | ||
6 | #include "nitrox_req.h" | ||
7 | #include "nitrox_csr.h" | ||
8 | #include "nitrox_req.h" | ||
9 | |||
10 | /* SLC_STORE_INFO */ | ||
11 | #define MIN_UDD_LEN 16 | ||
12 | /* PKT_IN_HDR + SLC_STORE_INFO */ | ||
13 | #define FDATA_SIZE 32 | ||
14 | /* Base destination port for the solicited requests */ | ||
15 | #define SOLICIT_BASE_DPORT 256 | ||
16 | #define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL | ||
17 | |||
18 | #define REQ_NOT_POSTED 1 | ||
19 | #define REQ_BACKLOG 2 | ||
20 | #define REQ_POSTED 3 | ||
21 | |||
22 | /** | ||
23 | * Response codes from SE microcode | ||
24 | * 0x00 - Success | ||
25 | * Completion with no error | ||
26 | * 0x43 - ERR_GC_DATA_LEN_INVALID | ||
27 | * Invalid Data length if Encryption Data length is | ||
28 | * less than 16 bytes for AES-XTS and AES-CTS. | ||
29 | * 0x45 - ERR_GC_CTX_LEN_INVALID | ||
30 | * Invalid context length: CTXL != 23 words. | ||
31 | * 0x4F - ERR_GC_DOCSIS_CIPHER_INVALID | ||
32 | * DOCSIS support is enabled with other than | ||
33 | * AES/DES-CBC mode encryption. | ||
34 | * 0x50 - ERR_GC_DOCSIS_OFFSET_INVALID | ||
35 | * Authentication offset is other than 0 with | ||
36 | * Encryption IV source = 0. | ||
37 | * Authentication offset is other than 8 (DES)/16 (AES) | ||
38 | * with Encryption IV source = 1 | ||
39 | * 0x51 - ERR_GC_CRC32_INVALID_SELECTION | ||
40 | * CRC32 is enabled for other than DOCSIS encryption. | ||
41 | * 0x52 - ERR_GC_AES_CCM_FLAG_INVALID | ||
42 | * Invalid flag options in AES-CCM IV. | ||
43 | */ | ||
44 | |||
45 | /** | ||
46 | * softreq_unmap_sgbufs - unmap and free the request SG buffers. | ||
47 | * @sr: Request structure | ||
48 | * | ||
49 | */ | ||
50 | static void softreq_unmap_sgbufs(struct nitrox_softreq *sr) | ||
51 | { | ||
52 | struct nitrox_device *ndev = sr->ndev; | ||
53 | struct device *dev = DEV(ndev); | ||
54 | struct nitrox_sglist *sglist; | ||
55 | |||
56 | /* unmap in sgbuf */ | ||
57 | sglist = sr->in.sglist; | ||
58 | if (!sglist) | ||
59 | goto out_unmap; | ||
60 | |||
61 | /* unmap iv */ | ||
62 | dma_unmap_single(dev, sglist->dma, sglist->len, DMA_BIDIRECTIONAL); | ||
63 | /* unmap src sglist */ | ||
64 | dma_unmap_sg(dev, sr->in.buf, (sr->in.map_bufs_cnt - 1), sr->in.dir); | ||
65 | /* unmap gather component */ | ||
66 | dma_unmap_single(dev, sr->in.dma, sr->in.len, DMA_TO_DEVICE); | ||
67 | kfree(sr->in.sglist); | ||
68 | kfree(sr->in.sgcomp); | ||
69 | sr->in.sglist = NULL; | ||
70 | sr->in.buf = NULL; | ||
71 | sr->in.map_bufs_cnt = 0; | ||
72 | |||
73 | out_unmap: | ||
74 | /* unmap out sgbuf */ | ||
75 | sglist = sr->out.sglist; | ||
76 | if (!sglist) | ||
77 | return; | ||
78 | |||
79 | /* unmap orh */ | ||
80 | dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir); | ||
81 | |||
82 | /* unmap dst sglist */ | ||
83 | if (!sr->inplace) { | ||
84 | dma_unmap_sg(dev, sr->out.buf, (sr->out.map_bufs_cnt - 3), | ||
85 | sr->out.dir); | ||
86 | } | ||
87 | /* unmap completion */ | ||
88 | dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir); | ||
89 | |||
90 | /* unmap scatter component */ | ||
91 | dma_unmap_single(dev, sr->out.dma, sr->out.len, DMA_TO_DEVICE); | ||
92 | kfree(sr->out.sglist); | ||
93 | kfree(sr->out.sgcomp); | ||
94 | sr->out.sglist = NULL; | ||
95 | sr->out.buf = NULL; | ||
96 | sr->out.map_bufs_cnt = 0; | ||
97 | } | ||
98 | |||
99 | static void softreq_destroy(struct nitrox_softreq *sr) | ||
100 | { | ||
101 | softreq_unmap_sgbufs(sr); | ||
102 | kfree(sr); | ||
103 | } | ||
104 | |||
105 | /** | ||
106 | * create_sg_component - create SG components for N5 device. | ||
107 | * @sr: Request structure | ||
108 | * @sgtbl: SG table | ||
109 | * @nr_comp: total number of components required | ||
110 | * | ||
111 | * Component structure | ||
112 | * | ||
113 | * 63 48 47 32 31 16 15 0 | ||
114 | * -------------------------------------- | ||
115 | * | LEN0 | LEN1 | LEN2 | LEN3 | | ||
116 | * |------------------------------------- | ||
117 | * | PTR0 | | ||
118 | * -------------------------------------- | ||
119 | * | PTR1 | | ||
120 | * -------------------------------------- | ||
121 | * | PTR2 | | ||
122 | * -------------------------------------- | ||
123 | * | PTR3 | | ||
124 | * -------------------------------------- | ||
125 | * | ||
126 | * Returns 0 if success or a negative errno code on error. | ||
127 | */ | ||
128 | static int create_sg_component(struct nitrox_softreq *sr, | ||
129 | struct nitrox_sgtable *sgtbl, int map_nents) | ||
130 | { | ||
131 | struct nitrox_device *ndev = sr->ndev; | ||
132 | struct nitrox_sgcomp *sgcomp; | ||
133 | struct nitrox_sglist *sglist; | ||
134 | dma_addr_t dma; | ||
135 | size_t sz_comp; | ||
136 | int i, j, nr_sgcomp; | ||
137 | |||
138 | nr_sgcomp = roundup(map_nents, 4) / 4; | ||
139 | |||
140 | /* each component holds 4 dma pointers */ | ||
141 | sz_comp = nr_sgcomp * sizeof(*sgcomp); | ||
142 | sgcomp = kzalloc(sz_comp, sr->gfp); | ||
143 | if (!sgcomp) | ||
144 | return -ENOMEM; | ||
145 | |||
146 | sgtbl->sgcomp = sgcomp; | ||
147 | sgtbl->nr_sgcomp = nr_sgcomp; | ||
148 | |||
149 | sglist = sgtbl->sglist; | ||
150 | /* populate device sg component */ | ||
151 | for (i = 0; i < nr_sgcomp; i++) { | ||
152 | for (j = 0; j < 4; j++) { | ||
153 | sgcomp->len[j] = cpu_to_be16(sglist->len); | ||
154 | sgcomp->dma[j] = cpu_to_be64(sglist->dma); | ||
155 | sglist++; | ||
156 | } | ||
157 | sgcomp++; | ||
158 | } | ||
159 | /* map the device sg component */ | ||
160 | dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE); | ||
161 | if (dma_mapping_error(DEV(ndev), dma)) { | ||
162 | kfree(sgtbl->sgcomp); | ||
163 | sgtbl->sgcomp = NULL; | ||
164 | return -ENOMEM; | ||
165 | } | ||
166 | |||
167 | sgtbl->dma = dma; | ||
168 | sgtbl->len = sz_comp; | ||
169 | |||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | /** | ||
174 | * dma_map_inbufs - DMA map the input sglist and create the sglist | ||
175 | * component for the N5 device. | ||
176 | * @sr: Request structure | ||
177 | * @req: Crypto request structure | ||
178 | * | ||
179 | * Returns 0 if successful or a negative errno code on error. | ||
180 | */ | ||
181 | static int dma_map_inbufs(struct nitrox_softreq *sr, | ||
182 | struct se_crypto_request *req) | ||
183 | { | ||
184 | struct device *dev = DEV(sr->ndev); | ||
185 | struct scatterlist *sg = req->src; | ||
186 | struct nitrox_sglist *glist; | ||
187 | int i, nents, ret = 0; | ||
188 | dma_addr_t dma; | ||
189 | size_t sz; | ||
190 | |||
191 | nents = sg_nents(req->src); | ||
192 | |||
193 | /* create gather list entries for IV and src */ | ||
194 | sz = roundup((1 + nents), 4) * sizeof(*glist); | ||
195 | glist = kzalloc(sz, sr->gfp); | ||
196 | if (!glist) | ||
197 | return -ENOMEM; | ||
198 | |||
199 | sr->in.sglist = glist; | ||
200 | /* map IV */ | ||
201 | dma = dma_map_single(dev, &req->iv, req->ivsize, DMA_BIDIRECTIONAL); | ||
202 | if (dma_mapping_error(dev, dma)) { | ||
203 | ret = -EINVAL; | ||
204 | goto iv_map_err; | ||
205 | } | ||
206 | |||
207 | sr->in.dir = (req->src == req->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; | ||
208 | /* map src entries */ | ||
209 | nents = dma_map_sg(dev, req->src, nents, sr->in.dir); | ||
210 | if (!nents) { | ||
211 | ret = -EINVAL; | ||
212 | goto src_map_err; | ||
213 | } | ||
214 | sr->in.buf = req->src; | ||
215 | |||
216 | /* store the mappings */ | ||
217 | glist->len = req->ivsize; | ||
218 | glist->dma = dma; | ||
219 | glist++; | ||
220 | sr->in.total_bytes += req->ivsize; | ||
221 | |||
222 | for_each_sg(req->src, sg, nents, i) { | ||
223 | glist->len = sg_dma_len(sg); | ||
224 | glist->dma = sg_dma_address(sg); | ||
225 | sr->in.total_bytes += glist->len; | ||
226 | glist++; | ||
227 | } | ||
228 | /* roundup map count to align with entries in sg component */ | ||
229 | sr->in.map_bufs_cnt = (1 + nents); | ||
230 | |||
231 | /* create NITROX gather component */ | ||
232 | ret = create_sg_component(sr, &sr->in, sr->in.map_bufs_cnt); | ||
233 | if (ret) | ||
234 | goto incomp_err; | ||
235 | |||
236 | return 0; | ||
237 | |||
238 | incomp_err: | ||
239 | dma_unmap_sg(dev, req->src, nents, sr->in.dir); | ||
240 | sr->in.map_bufs_cnt = 0; | ||
241 | src_map_err: | ||
242 | dma_unmap_single(dev, dma, req->ivsize, DMA_BIDIRECTIONAL); | ||
243 | iv_map_err: | ||
244 | kfree(sr->in.sglist); | ||
245 | sr->in.sglist = NULL; | ||
246 | return ret; | ||
247 | } | ||
248 | |||
249 | static int dma_map_outbufs(struct nitrox_softreq *sr, | ||
250 | struct se_crypto_request *req) | ||
251 | { | ||
252 | struct device *dev = DEV(sr->ndev); | ||
253 | struct nitrox_sglist *glist = sr->in.sglist; | ||
254 | struct nitrox_sglist *slist; | ||
255 | struct scatterlist *sg; | ||
256 | int i, nents, map_bufs_cnt, ret = 0; | ||
257 | size_t sz; | ||
258 | |||
259 | nents = sg_nents(req->dst); | ||
260 | |||
261 | /* create scatter list ORH, IV, dst entries and Completion header */ | ||
262 | sz = roundup((3 + nents), 4) * sizeof(*slist); | ||
263 | slist = kzalloc(sz, sr->gfp); | ||
264 | if (!slist) | ||
265 | return -ENOMEM; | ||
266 | |||
267 | sr->out.sglist = slist; | ||
268 | sr->out.dir = DMA_BIDIRECTIONAL; | ||
269 | /* map ORH */ | ||
270 | sr->resp.orh_dma = dma_map_single(dev, &sr->resp.orh, ORH_HLEN, | ||
271 | sr->out.dir); | ||
272 | if (dma_mapping_error(dev, sr->resp.orh_dma)) { | ||
273 | ret = -EINVAL; | ||
274 | goto orh_map_err; | ||
275 | } | ||
276 | |||
277 | /* map completion */ | ||
278 | sr->resp.completion_dma = dma_map_single(dev, &sr->resp.completion, | ||
279 | COMP_HLEN, sr->out.dir); | ||
280 | if (dma_mapping_error(dev, sr->resp.completion_dma)) { | ||
281 | ret = -EINVAL; | ||
282 | goto compl_map_err; | ||
283 | } | ||
284 | |||
285 | sr->inplace = (req->src == req->dst) ? true : false; | ||
286 | /* out-of-place */ | ||
287 | if (!sr->inplace) { | ||
288 | nents = dma_map_sg(dev, req->dst, nents, sr->out.dir); | ||
289 | if (!nents) { | ||
290 | ret = -EINVAL; | ||
291 | goto dst_map_err; | ||
292 | } | ||
293 | } | ||
294 | sr->out.buf = req->dst; | ||
295 | |||
296 | /* store the mappings */ | ||
297 | /* orh */ | ||
298 | slist->len = ORH_HLEN; | ||
299 | slist->dma = sr->resp.orh_dma; | ||
300 | slist++; | ||
301 | |||
302 | /* copy the glist mappings */ | ||
303 | if (sr->inplace) { | ||
304 | nents = sr->in.map_bufs_cnt - 1; | ||
305 | map_bufs_cnt = sr->in.map_bufs_cnt; | ||
306 | while (map_bufs_cnt--) { | ||
307 | slist->len = glist->len; | ||
308 | slist->dma = glist->dma; | ||
309 | slist++; | ||
310 | glist++; | ||
311 | } | ||
312 | } else { | ||
313 | /* copy iv mapping */ | ||
314 | slist->len = glist->len; | ||
315 | slist->dma = glist->dma; | ||
316 | slist++; | ||
317 | /* copy remaining maps */ | ||
318 | for_each_sg(req->dst, sg, nents, i) { | ||
319 | slist->len = sg_dma_len(sg); | ||
320 | slist->dma = sg_dma_address(sg); | ||
321 | slist++; | ||
322 | } | ||
323 | } | ||
324 | |||
325 | /* completion */ | ||
326 | slist->len = COMP_HLEN; | ||
327 | slist->dma = sr->resp.completion_dma; | ||
328 | |||
329 | sr->out.map_bufs_cnt = (3 + nents); | ||
330 | |||
331 | ret = create_sg_component(sr, &sr->out, sr->out.map_bufs_cnt); | ||
332 | if (ret) | ||
333 | goto outcomp_map_err; | ||
334 | |||
335 | return 0; | ||
336 | |||
337 | outcomp_map_err: | ||
338 | if (!sr->inplace) | ||
339 | dma_unmap_sg(dev, req->dst, nents, sr->out.dir); | ||
340 | sr->out.map_bufs_cnt = 0; | ||
341 | sr->out.buf = NULL; | ||
342 | dst_map_err: | ||
343 | dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir); | ||
344 | sr->resp.completion_dma = 0; | ||
345 | compl_map_err: | ||
346 | dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir); | ||
347 | sr->resp.orh_dma = 0; | ||
348 | orh_map_err: | ||
349 | kfree(sr->out.sglist); | ||
350 | sr->out.sglist = NULL; | ||
351 | return ret; | ||
352 | } | ||
353 | |||
354 | static inline int softreq_map_iobuf(struct nitrox_softreq *sr, | ||
355 | struct se_crypto_request *creq) | ||
356 | { | ||
357 | int ret; | ||
358 | |||
359 | ret = dma_map_inbufs(sr, creq); | ||
360 | if (ret) | ||
361 | return ret; | ||
362 | |||
363 | ret = dma_map_outbufs(sr, creq); | ||
364 | if (ret) | ||
365 | softreq_unmap_sgbufs(sr); | ||
366 | |||
367 | return ret; | ||
368 | } | ||
369 | |||
370 | static inline void backlog_list_add(struct nitrox_softreq *sr, | ||
371 | struct nitrox_cmdq *cmdq) | ||
372 | { | ||
373 | INIT_LIST_HEAD(&sr->backlog); | ||
374 | |||
375 | spin_lock_bh(&cmdq->backlog_lock); | ||
376 | list_add_tail(&sr->backlog, &cmdq->backlog_head); | ||
377 | atomic_inc(&cmdq->backlog_count); | ||
378 | atomic_set(&sr->status, REQ_BACKLOG); | ||
379 | spin_unlock_bh(&cmdq->backlog_lock); | ||
380 | } | ||
381 | |||
382 | static inline void response_list_add(struct nitrox_softreq *sr, | ||
383 | struct nitrox_cmdq *cmdq) | ||
384 | { | ||
385 | INIT_LIST_HEAD(&sr->response); | ||
386 | |||
387 | spin_lock_bh(&cmdq->response_lock); | ||
388 | list_add_tail(&sr->response, &cmdq->response_head); | ||
389 | spin_unlock_bh(&cmdq->response_lock); | ||
390 | } | ||
391 | |||
392 | static inline void response_list_del(struct nitrox_softreq *sr, | ||
393 | struct nitrox_cmdq *cmdq) | ||
394 | { | ||
395 | spin_lock_bh(&cmdq->response_lock); | ||
396 | list_del(&sr->response); | ||
397 | spin_unlock_bh(&cmdq->response_lock); | ||
398 | } | ||
399 | |||
400 | static struct nitrox_softreq * | ||
401 | get_first_response_entry(struct nitrox_cmdq *cmdq) | ||
402 | { | ||
403 | return list_first_entry_or_null(&cmdq->response_head, | ||
404 | struct nitrox_softreq, response); | ||
405 | } | ||
406 | |||
407 | static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen) | ||
408 | { | ||
409 | if (atomic_inc_return(&cmdq->pending_count) > qlen) { | ||
410 | atomic_dec(&cmdq->pending_count); | ||
411 | /* sync with other cpus */ | ||
412 | smp_mb__after_atomic(); | ||
413 | return true; | ||
414 | } | ||
415 | return false; | ||
416 | } | ||
417 | |||
418 | /** | ||
419 | * post_se_instr - Post SE instruction to Packet Input ring | ||
421 | * @sr: Request structure | ||
422 | * @cmdq: Command queue structure | ||
423 | * | ||
424 | * Copies the 64B instruction into the ring and rings the doorbell. | ||
424 | */ | ||
425 | static void post_se_instr(struct nitrox_softreq *sr, | ||
426 | struct nitrox_cmdq *cmdq) | ||
427 | { | ||
428 | struct nitrox_device *ndev = sr->ndev; | ||
429 | union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell; | ||
430 | u64 offset; | ||
431 | u8 *ent; | ||
432 | |||
433 | spin_lock_bh(&cmdq->cmdq_lock); | ||
434 | |||
435 | /* get the next write offset */ | ||
436 | offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno); | ||
437 | pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset); | ||
438 | /* copy the instruction */ | ||
439 | ent = cmdq->head + pkt_in_baoff_dbell.s.aoff; | ||
440 | memcpy(ent, &sr->instr, cmdq->instr_size); | ||
441 | /* flush the command queue updates */ | ||
442 | dma_wmb(); | ||
443 | |||
444 | sr->tstamp = jiffies; | ||
445 | atomic_set(&sr->status, REQ_POSTED); | ||
446 | response_list_add(sr, cmdq); | ||
447 | |||
448 | /* Ring doorbell with count 1 */ | ||
449 | writeq(1, cmdq->dbell_csr_addr); | ||
450 | /* orders the doorbell rings */ | ||
451 | mmiowb(); | ||
452 | |||
453 | spin_unlock_bh(&cmdq->cmdq_lock); | ||
454 | } | ||
455 | |||
456 | static int post_backlog_cmds(struct nitrox_cmdq *cmdq) | ||
457 | { | ||
458 | struct nitrox_device *ndev = cmdq->ndev; | ||
459 | struct nitrox_softreq *sr, *tmp; | ||
460 | int ret = 0; | ||
461 | |||
462 | spin_lock_bh(&cmdq->backlog_lock); | ||
463 | |||
464 | list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { | ||
465 | struct skcipher_request *skreq; | ||
466 | |||
467 | /* submit until space available */ | ||
468 | if (unlikely(cmdq_full(cmdq, ndev->qlen))) { | ||
469 | ret = -EBUSY; | ||
470 | break; | ||
471 | } | ||
472 | /* delete from backlog list */ | ||
473 | list_del(&sr->backlog); | ||
474 | atomic_dec(&cmdq->backlog_count); | ||
475 | /* sync with other cpus */ | ||
476 | smp_mb__after_atomic(); | ||
477 | |||
478 | skreq = sr->skreq; | ||
479 | /* post the command */ | ||
480 | post_se_instr(sr, cmdq); | ||
481 | |||
482 | /* backlog requests are posted, wakeup with -EINPROGRESS */ | ||
483 | skcipher_request_complete(skreq, -EINPROGRESS); | ||
484 | } | ||
485 | spin_unlock_bh(&cmdq->backlog_lock); | ||
486 | |||
487 | return ret; | ||
488 | } | ||
489 | |||
490 | static int nitrox_enqueue_request(struct nitrox_softreq *sr) | ||
491 | { | ||
492 | struct nitrox_cmdq *cmdq = sr->cmdq; | ||
493 | struct nitrox_device *ndev = sr->ndev; | ||
494 | int ret = -EBUSY; | ||
495 | |||
496 | if (unlikely(cmdq_full(cmdq, ndev->qlen))) { | ||
497 | if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) | ||
498 | return -EAGAIN; | ||
499 | |||
500 | backlog_list_add(sr, cmdq); | ||
501 | } else { | ||
502 | ret = post_backlog_cmds(cmdq); | ||
503 | if (ret) { | ||
504 | backlog_list_add(sr, cmdq); | ||
505 | return ret; | ||
506 | } | ||
507 | post_se_instr(sr, cmdq); | ||
508 | ret = -EINPROGRESS; | ||
509 | } | ||
510 | return ret; | ||
511 | } | ||
512 | |||
513 | /** | ||
514 | * nitrox_process_se_request - Send request to SE core | ||
515 | * @ndev: NITROX device | ||
516 | * @req: Crypto request | ||
517 | * | ||
518 | * Returns -EINPROGRESS once queued, -EBUSY if backlogged, or another negative error code. | ||
519 | */ | ||
520 | int nitrox_process_se_request(struct nitrox_device *ndev, | ||
521 | struct se_crypto_request *req, | ||
522 | completion_t callback, | ||
523 | struct skcipher_request *skreq) | ||
524 | { | ||
525 | struct nitrox_softreq *sr; | ||
526 | dma_addr_t ctx_handle = 0; | ||
527 | int qno, ret = 0; | ||
528 | |||
529 | if (!nitrox_ready(ndev)) | ||
530 | return -ENODEV; | ||
531 | |||
532 | sr = kzalloc(sizeof(*sr), req->gfp); | ||
533 | if (!sr) | ||
534 | return -ENOMEM; | ||
535 | |||
536 | sr->ndev = ndev; | ||
537 | sr->flags = req->flags; | ||
538 | sr->gfp = req->gfp; | ||
539 | sr->callback = callback; | ||
540 | sr->skreq = skreq; | ||
541 | |||
542 | atomic_set(&sr->status, REQ_NOT_POSTED); | ||
543 | |||
544 | WRITE_ONCE(sr->resp.orh, PENDING_SIG); | ||
545 | WRITE_ONCE(sr->resp.completion, PENDING_SIG); | ||
546 | |||
547 | ret = softreq_map_iobuf(sr, req); | ||
548 | if (ret) { | ||
549 | kfree(sr); | ||
550 | return ret; | ||
551 | } | ||
552 | |||
553 | /* get the context handle */ | ||
554 | if (req->ctx_handle) { | ||
555 | struct ctx_hdr *hdr; | ||
556 | u8 *ctx_ptr; | ||
557 | |||
558 | ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle; | ||
559 | hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr)); | ||
560 | ctx_handle = hdr->ctx_dma; | ||
561 | } | ||
562 | |||
563 | /* select the queue */ | ||
564 | qno = smp_processor_id() % ndev->nr_queues; | ||
565 | |||
566 | sr->cmdq = &ndev->pkt_cmdqs[qno]; | ||
567 | |||
568 | /* | ||
569 | * 64-Byte Instruction Format | ||
570 | * | ||
571 | * ---------------------- | ||
572 | * | DPTR0 | 8 bytes | ||
573 | * ---------------------- | ||
574 | * | PKT_IN_INSTR_HDR | 8 bytes | ||
575 | * ---------------------- | ||
576 | * | PKT_IN_HDR | 16 bytes | ||
577 | * ---------------------- | ||
578 | * | SLC_INFO | 16 bytes | ||
579 | * ---------------------- | ||
580 | * | Front data | 16 bytes | ||
581 | * ---------------------- | ||
582 | */ | ||
583 | |||
584 | /* fill the packet instruction */ | ||
585 | /* word 0 */ | ||
586 | sr->instr.dptr0 = cpu_to_be64(sr->in.dma); | ||
587 | |||
588 | /* word 1 */ | ||
589 | sr->instr.ih.value = 0; | ||
590 | sr->instr.ih.s.g = 1; | ||
591 | sr->instr.ih.s.gsz = sr->in.map_bufs_cnt; | ||
592 | sr->instr.ih.s.ssz = sr->out.map_bufs_cnt; | ||
593 | sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr); | ||
594 | sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes; | ||
595 | sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value); | ||
596 | |||
597 | /* word 2 */ | ||
598 | sr->instr.irh.value[0] = 0; | ||
599 | sr->instr.irh.s.uddl = MIN_UDD_LEN; | ||
600 | /* context length in 64-bit words */ | ||
601 | sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8); | ||
602 | /* offset from solicit base port 256 */ | ||
603 | sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno; | ||
604 | sr->instr.irh.s.ctxc = req->ctrl.s.ctxc; | ||
605 | sr->instr.irh.s.arg = req->ctrl.s.arg; | ||
606 | sr->instr.irh.s.opcode = req->opcode; | ||
607 | sr->instr.irh.value[0] = cpu_to_be64(sr->instr.irh.value[0]); | ||
608 | |||
609 | /* word 3 */ | ||
610 | sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle); | ||
611 | |||
612 | /* word 4 */ | ||
613 | sr->instr.slc.value[0] = 0; | ||
614 | sr->instr.slc.s.ssz = sr->out.map_bufs_cnt; | ||
615 | sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]); | ||
616 | |||
617 | /* word 5 */ | ||
618 | sr->instr.slc.s.rptr = cpu_to_be64(sr->out.dma); | ||
619 | |||
620 | /* | ||
621 | * No endian conversion for front data; | ||
622 | * it goes into the payload as-is. | ||
623 | * Put the GP header in the front data. | ||
624 | */ | ||
625 | sr->instr.fdata[0] = *((u64 *)&req->gph); | ||
626 | sr->instr.fdata[1] = 0; | ||
627 | /* flush the soft_req changes before posting the cmd */ | ||
628 | wmb(); | ||
629 | |||
630 | ret = nitrox_enqueue_request(sr); | ||
631 | if (ret == -EAGAIN) | ||
632 | goto send_fail; | ||
633 | |||
634 | return ret; | ||
635 | |||
636 | send_fail: | ||
637 | softreq_destroy(sr); | ||
638 | return ret; | ||
639 | } | ||
640 | |||
641 | static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout) | ||
642 | { | ||
643 | return time_after_eq(jiffies, (tstamp + timeout)); | ||
644 | } | ||
645 | |||
646 | void backlog_qflush_work(struct work_struct *work) | ||
647 | { | ||
648 | struct nitrox_cmdq *cmdq; | ||
649 | |||
650 | cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush); | ||
651 | post_backlog_cmds(cmdq); | ||
652 | } | ||
653 | |||
654 | /** | ||
655 | * process_response_list - process completed requests | ||
656 | * @cmdq: Command queue structure | ||
657 | * | ||
658 | * Completes requests whose ORH/completion words have been updated | ||
659 | * by the SE cores, or that have timed out. | ||
660 | */ | ||
661 | static void process_response_list(struct nitrox_cmdq *cmdq) | ||
662 | { | ||
663 | struct nitrox_device *ndev = cmdq->ndev; | ||
664 | struct nitrox_softreq *sr; | ||
665 | struct skcipher_request *skreq; | ||
666 | completion_t callback; | ||
667 | int req_completed = 0, err = 0, budget; | ||
668 | |||
669 | /* check all pending requests */ | ||
670 | budget = atomic_read(&cmdq->pending_count); | ||
671 | |||
672 | while (req_completed < budget) { | ||
673 | sr = get_first_response_entry(cmdq); | ||
674 | if (!sr) | ||
675 | break; | ||
676 | |||
677 | if (atomic_read(&sr->status) != REQ_POSTED) | ||
678 | break; | ||
679 | |||
680 | /* check orh and completion bytes updates */ | ||
681 | if (READ_ONCE(sr->resp.orh) == READ_ONCE(sr->resp.completion)) { | ||
682 | /* request not completed, check for timeout */ | ||
683 | if (!cmd_timeout(sr->tstamp, ndev->timeout)) | ||
684 | break; | ||
685 | dev_err_ratelimited(DEV(ndev), | ||
686 | "Request timeout, orh 0x%016llx\n", | ||
687 | READ_ONCE(sr->resp.orh)); | ||
688 | } | ||
689 | atomic_dec(&cmdq->pending_count); | ||
690 | /* sync with other cpus */ | ||
691 | smp_mb__after_atomic(); | ||
692 | /* remove from response list */ | ||
693 | response_list_del(sr, cmdq); | ||
694 | |||
695 | callback = sr->callback; | ||
696 | skreq = sr->skreq; | ||
697 | |||
698 | /* ORH error code */ | ||
699 | err = READ_ONCE(sr->resp.orh) & 0xff; | ||
700 | softreq_destroy(sr); | ||
701 | |||
702 | if (callback) | ||
703 | callback(skreq, err); | ||
704 | |||
705 | req_completed++; | ||
706 | } | ||
707 | } | ||
708 | |||
709 | /** | ||
710 | * pkt_slc_resp_handler - post processing of SE responses | ||
711 | */ | ||
712 | void pkt_slc_resp_handler(unsigned long data) | ||
713 | { | ||
714 | struct bh_data *bh = (void *)(uintptr_t)(data); | ||
715 | struct nitrox_cmdq *cmdq = bh->cmdq; | ||
716 | union nps_pkt_slc_cnts pkt_slc_cnts; | ||
717 | |||
718 | /* read completion count */ | ||
719 | pkt_slc_cnts.value = readq(bh->completion_cnt_csr_addr); | ||
720 | /* resend the interrupt if more work to do */ | ||
721 | pkt_slc_cnts.s.resend = 1; | ||
722 | |||
723 | process_response_list(cmdq); | ||
724 | |||
725 | /* | ||
726 | * clear the interrupt with resend bit enabled, | ||
727 | * MSI-X interrupt generates if Completion count > Threshold | ||
728 | */ | ||
729 | writeq(pkt_slc_cnts.value, bh->completion_cnt_csr_addr); | ||
730 | /* order the writes */ | ||
731 | mmiowb(); | ||
732 | |||
733 | if (atomic_read(&cmdq->backlog_count)) | ||
734 | schedule_work(&cmdq->backlog_qflush); | ||
735 | } | ||
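
process_response_list() above only passes the raw low byte of the ORH to the completion callback; the comment block near the top of this file documents what those firmware codes mean. A hedged sketch of how a caller could translate them into errno values follows (the helper name and the errno choices are assumptions, not part of the driver):

/*
 * Hypothetical helper: map the SE microcode completion codes documented
 * at the top of nitrox_reqmgr.c onto errno values. The driver itself
 * only propagates the raw low ORH byte to the callback.
 */
#include <linux/errno.h>
#include <linux/types.h>

static int example_orh_to_errno(u8 orh_code)
{
	switch (orh_code) {
	case 0x00:		/* success */
		return 0;
	case 0x43:		/* ERR_GC_DATA_LEN_INVALID */
	case 0x45:		/* ERR_GC_CTX_LEN_INVALID */
	case 0x4f:		/* ERR_GC_DOCSIS_CIPHER_INVALID */
	case 0x50:		/* ERR_GC_DOCSIS_OFFSET_INVALID */
	case 0x51:		/* ERR_GC_CRC32_INVALID_SELECTION */
	case 0x52:		/* ERR_GC_AES_CCM_FLAG_INVALID */
		return -EINVAL;	/* request parameters were invalid */
	default:
		return -EIO;	/* unexpected hardware/firmware error */
	}
}
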
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 60919a3ec53b..59493fd3a751 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile | |||
@@ -4,7 +4,8 @@ ccp-objs := ccp-dev.o \ | |||
4 | ccp-dev-v3.o \ | 4 | ccp-dev-v3.o \ |
5 | ccp-dev-v5.o \ | 5 | ccp-dev-v5.o \ |
6 | ccp-platform.o \ | 6 | ccp-platform.o \ |
7 | ccp-dmaengine.o | 7 | ccp-dmaengine.o \ |
8 | ccp-debugfs.o | ||
8 | ccp-$(CONFIG_PCI) += ccp-pci.o | 9 | ccp-$(CONFIG_PCI) += ccp-pci.o |
9 | 10 | ||
10 | obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o | 11 | obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o |
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index 6b46eea94932..ce97b3868f4a 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/crypto.h> | 18 | #include <linux/crypto.h> |
19 | #include <crypto/algapi.h> | 19 | #include <crypto/algapi.h> |
20 | #include <crypto/hash.h> | 20 | #include <crypto/hash.h> |
21 | #include <crypto/hmac.h> | ||
21 | #include <crypto/internal/hash.h> | 22 | #include <crypto/internal/hash.h> |
22 | #include <crypto/sha.h> | 23 | #include <crypto/sha.h> |
23 | #include <crypto/scatterwalk.h> | 24 | #include <crypto/scatterwalk.h> |
@@ -308,8 +309,8 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
308 | } | 309 | } |
309 | 310 | ||
310 | for (i = 0; i < block_size; i++) { | 311 | for (i = 0; i < block_size; i++) { |
311 | ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ 0x36; | 312 | ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ HMAC_IPAD_VALUE; |
312 | ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c; | 313 | ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ HMAC_OPAD_VALUE; |
313 | } | 314 | } |
314 | 315 | ||
315 | sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size); | 316 | sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size); |
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c new file mode 100644 index 000000000000..3cd6c83754e0 --- /dev/null +++ b/drivers/crypto/ccp/ccp-debugfs.c | |||
@@ -0,0 +1,344 @@ | |||
1 | /* | ||
2 | * AMD Cryptographic Coprocessor (CCP) driver | ||
3 | * | ||
4 | * Copyright (C) 2017 Advanced Micro Devices, Inc. | ||
5 | * | ||
6 | * Author: Gary R Hook <gary.hook@amd.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/debugfs.h> | ||
14 | #include <linux/ccp.h> | ||
15 | |||
16 | #include "ccp-dev.h" | ||
17 | |||
18 | /* DebugFS helpers */ | ||
19 | #define OBUFP (obuf + oboff) | ||
20 | #define OBUFLEN 512 | ||
21 | #define OBUFSPC (OBUFLEN - oboff) | ||
22 | #define OSCNPRINTF(fmt, ...) \ | ||
23 | scnprintf(OBUFP, OBUFSPC, fmt, ## __VA_ARGS__) | ||
24 | |||
25 | #define BUFLEN 63 | ||
26 | |||
27 | #define RI_VERSION_NUM 0x0000003F | ||
28 | #define RI_AES_PRESENT 0x00000040 | ||
29 | #define RI_3DES_PRESENT 0x00000080 | ||
30 | #define RI_SHA_PRESENT 0x00000100 | ||
31 | #define RI_RSA_PRESENT 0x00000200 | ||
32 | #define RI_ECC_PRESENT 0x00000400 | ||
33 | #define RI_ZDE_PRESENT 0x00000800 | ||
34 | #define RI_ZCE_PRESENT 0x00001000 | ||
35 | #define RI_TRNG_PRESENT 0x00002000 | ||
36 | #define RI_ELFC_PRESENT 0x00004000 | ||
37 | #define RI_ELFC_SHIFT 14 | ||
38 | #define RI_NUM_VQM 0x00078000 | ||
39 | #define RI_NVQM_SHIFT 15 | ||
40 | #define RI_NVQM(r) (((r) * RI_NUM_VQM) >> RI_NVQM_SHIFT) | ||
41 | #define RI_LSB_ENTRIES 0x0FF80000 | ||
42 | #define RI_NLSB_SHIFT 19 | ||
43 | #define RI_NLSB(r) (((r) * RI_LSB_ENTRIES) >> RI_NLSB_SHIFT) | ||
44 | |||
45 | static ssize_t ccp5_debugfs_info_read(struct file *filp, char __user *ubuf, | ||
46 | size_t count, loff_t *offp) | ||
47 | { | ||
48 | struct ccp_device *ccp = filp->private_data; | ||
49 | unsigned int oboff = 0; | ||
50 | unsigned int regval; | ||
51 | ssize_t ret; | ||
52 | char *obuf; | ||
53 | |||
54 | if (!ccp) | ||
55 | return 0; | ||
56 | |||
57 | obuf = kmalloc(OBUFLEN, GFP_KERNEL); | ||
58 | if (!obuf) | ||
59 | return -ENOMEM; | ||
60 | |||
61 | oboff += OSCNPRINTF("Device name: %s\n", ccp->name); | ||
62 | oboff += OSCNPRINTF(" RNG name: %s\n", ccp->rngname); | ||
63 | oboff += OSCNPRINTF(" # Queues: %d\n", ccp->cmd_q_count); | ||
64 | oboff += OSCNPRINTF(" # Cmds: %d\n", ccp->cmd_count); | ||
65 | |||
66 | regval = ioread32(ccp->io_regs + CMD5_PSP_CCP_VERSION); | ||
67 | oboff += OSCNPRINTF(" Version: %d\n", regval & RI_VERSION_NUM); | ||
68 | oboff += OSCNPRINTF(" Engines:"); | ||
69 | if (regval & RI_AES_PRESENT) | ||
70 | oboff += OSCNPRINTF(" AES"); | ||
71 | if (regval & RI_3DES_PRESENT) | ||
72 | oboff += OSCNPRINTF(" 3DES"); | ||
73 | if (regval & RI_SHA_PRESENT) | ||
74 | oboff += OSCNPRINTF(" SHA"); | ||
75 | if (regval & RI_RSA_PRESENT) | ||
76 | oboff += OSCNPRINTF(" RSA"); | ||
77 | if (regval & RI_ECC_PRESENT) | ||
78 | oboff += OSCNPRINTF(" ECC"); | ||
79 | if (regval & RI_ZDE_PRESENT) | ||
80 | oboff += OSCNPRINTF(" ZDE"); | ||
81 | if (regval & RI_ZCE_PRESENT) | ||
82 | oboff += OSCNPRINTF(" ZCE"); | ||
83 | if (regval & RI_TRNG_PRESENT) | ||
84 | oboff += OSCNPRINTF(" TRNG"); | ||
85 | oboff += OSCNPRINTF("\n"); | ||
86 | oboff += OSCNPRINTF(" Queues: %d\n", | ||
87 | (regval & RI_NUM_VQM) >> RI_NVQM_SHIFT); | ||
88 | oboff += OSCNPRINTF("LSB Entries: %d\n", | ||
89 | (regval & RI_LSB_ENTRIES) >> RI_NLSB_SHIFT); | ||
90 | |||
91 | ret = simple_read_from_buffer(ubuf, count, offp, obuf, oboff); | ||
92 | kfree(obuf); | ||
93 | |||
94 | return ret; | ||
95 | } | ||
96 | |||
97 | /* Return a formatted buffer containing the current | ||
98 | * statistics across all queues for a CCP. | ||
99 | */ | ||
100 | static ssize_t ccp5_debugfs_stats_read(struct file *filp, char __user *ubuf, | ||
101 | size_t count, loff_t *offp) | ||
102 | { | ||
103 | struct ccp_device *ccp = filp->private_data; | ||
104 | unsigned long total_xts_aes_ops = 0; | ||
105 | unsigned long total_3des_ops = 0; | ||
106 | unsigned long total_aes_ops = 0; | ||
107 | unsigned long total_sha_ops = 0; | ||
108 | unsigned long total_rsa_ops = 0; | ||
109 | unsigned long total_ecc_ops = 0; | ||
110 | unsigned long total_pt_ops = 0; | ||
111 | unsigned long total_ops = 0; | ||
112 | unsigned int oboff = 0; | ||
113 | ssize_t ret = 0; | ||
114 | unsigned int i; | ||
115 | char *obuf; | ||
116 | |||
117 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
118 | struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; | ||
119 | |||
120 | total_ops += cmd_q->total_ops; | ||
121 | total_aes_ops += cmd_q->total_aes_ops; | ||
122 | total_xts_aes_ops += cmd_q->total_xts_aes_ops; | ||
123 | total_3des_ops += cmd_q->total_3des_ops; | ||
124 | total_sha_ops += cmd_q->total_sha_ops; | ||
125 | total_rsa_ops += cmd_q->total_rsa_ops; | ||
126 | total_pt_ops += cmd_q->total_pt_ops; | ||
127 | total_ecc_ops += cmd_q->total_ecc_ops; | ||
128 | } | ||
129 | |||
130 | obuf = kmalloc(OBUFLEN, GFP_KERNEL); | ||
131 | if (!obuf) | ||
132 | return -ENOMEM; | ||
133 | |||
134 | oboff += OSCNPRINTF("Total Interrupts Handled: %ld\n", | ||
135 | ccp->total_interrupts); | ||
136 | oboff += OSCNPRINTF(" Total Operations: %ld\n", | ||
137 | total_ops); | ||
138 | oboff += OSCNPRINTF(" AES: %ld\n", | ||
139 | total_aes_ops); | ||
140 | oboff += OSCNPRINTF(" XTS AES: %ld\n", | ||
141 | total_xts_aes_ops); | ||
143 | oboff += OSCNPRINTF(" 3DES: %ld\n", | ||
143 | total_3des_ops); | ||
144 | oboff += OSCNPRINTF(" SHA: %ld\n", | ||
145 | total_sha_ops); | ||
146 | oboff += OSCNPRINTF(" RSA: %ld\n", | ||
147 | total_rsa_ops); | ||
148 | oboff += OSCNPRINTF(" Pass-Thru: %ld\n", | ||
149 | total_pt_ops); | ||
150 | oboff += OSCNPRINTF(" ECC: %ld\n", | ||
151 | total_ecc_ops); | ||
152 | |||
153 | ret = simple_read_from_buffer(ubuf, count, offp, obuf, oboff); | ||
154 | kfree(obuf); | ||
155 | |||
156 | return ret; | ||
157 | } | ||
158 | |||
159 | /* Reset the counters in a queue | ||
160 | */ | ||
161 | static void ccp5_debugfs_reset_queue_stats(struct ccp_cmd_queue *cmd_q) | ||
162 | { | ||
163 | cmd_q->total_ops = 0L; | ||
164 | cmd_q->total_aes_ops = 0L; | ||
165 | cmd_q->total_xts_aes_ops = 0L; | ||
166 | cmd_q->total_3des_ops = 0L; | ||
167 | cmd_q->total_sha_ops = 0L; | ||
168 | cmd_q->total_rsa_ops = 0L; | ||
169 | cmd_q->total_pt_ops = 0L; | ||
170 | cmd_q->total_ecc_ops = 0L; | ||
171 | } | ||
172 | |||
173 | /* A write to the stats file resets the queue counters | ||
174 | * across all queues of the device; the written value | ||
175 | * itself is ignored. | ||
176 | */ | ||
177 | static ssize_t ccp5_debugfs_stats_write(struct file *filp, | ||
178 | const char __user *ubuf, | ||
179 | size_t count, loff_t *offp) | ||
180 | { | ||
181 | struct ccp_device *ccp = filp->private_data; | ||
182 | int i; | ||
183 | |||
184 | for (i = 0; i < ccp->cmd_q_count; i++) | ||
185 | ccp5_debugfs_reset_queue_stats(&ccp->cmd_q[i]); | ||
186 | ccp->total_interrupts = 0L; | ||
187 | |||
188 | return count; | ||
189 | } | ||
190 | |||
191 | /* Return a formatted buffer containing the current information | ||
192 | * for that queue | ||
193 | */ | ||
194 | static ssize_t ccp5_debugfs_queue_read(struct file *filp, char __user *ubuf, | ||
195 | size_t count, loff_t *offp) | ||
196 | { | ||
197 | struct ccp_cmd_queue *cmd_q = filp->private_data; | ||
198 | unsigned int oboff = 0; | ||
199 | unsigned int regval; | ||
200 | ssize_t ret; | ||
201 | char *obuf; | ||
202 | |||
203 | if (!cmd_q) | ||
204 | return 0; | ||
205 | |||
206 | obuf = kmalloc(OBUFLEN, GFP_KERNEL); | ||
207 | if (!obuf) | ||
208 | return -ENOMEM; | ||
209 | |||
210 | oboff += OSCNPRINTF(" Total Queue Operations: %ld\n", | ||
211 | cmd_q->total_ops); | ||
212 | oboff += OSCNPRINTF(" AES: %ld\n", | ||
213 | cmd_q->total_aes_ops); | ||
214 | oboff += OSCNPRINTF(" XTS AES: %ld\n", | ||
215 | cmd_q->total_xts_aes_ops); | ||
217 | oboff += OSCNPRINTF(" 3DES: %ld\n", | ||
217 | cmd_q->total_3des_ops); | ||
218 | oboff += OSCNPRINTF(" SHA: %ld\n", | ||
219 | cmd_q->total_sha_ops); | ||
220 | oboff += OSCNPRINTF(" RSA: %ld\n", | ||
221 | cmd_q->total_rsa_ops); | ||
222 | oboff += OSCNPRINTF(" Pass-Thru: %ld\n", | ||
223 | cmd_q->total_pt_ops); | ||
224 | oboff += OSCNPRINTF(" ECC: %ld\n", | ||
225 | cmd_q->total_ecc_ops); | ||
226 | |||
227 | regval = ioread32(cmd_q->reg_int_enable); | ||
228 | oboff += OSCNPRINTF(" Enabled Interrupts:"); | ||
229 | if (regval & INT_EMPTY_QUEUE) | ||
230 | oboff += OSCNPRINTF(" EMPTY"); | ||
231 | if (regval & INT_QUEUE_STOPPED) | ||
232 | oboff += OSCNPRINTF(" STOPPED"); | ||
233 | if (regval & INT_ERROR) | ||
234 | oboff += OSCNPRINTF(" ERROR"); | ||
235 | if (regval & INT_COMPLETION) | ||
236 | oboff += OSCNPRINTF(" COMPLETION"); | ||
237 | oboff += OSCNPRINTF("\n"); | ||
238 | |||
239 | ret = simple_read_from_buffer(ubuf, count, offp, obuf, oboff); | ||
240 | kfree(obuf); | ||
241 | |||
242 | return ret; | ||
243 | } | ||
244 | |||
245 | /* A write to a queue's stats file resets that queue's | ||
246 | * counters to zero; the written value is ignored. | ||
247 | */ | ||
248 | static ssize_t ccp5_debugfs_queue_write(struct file *filp, | ||
249 | const char __user *ubuf, | ||
250 | size_t count, loff_t *offp) | ||
251 | { | ||
252 | struct ccp_cmd_queue *cmd_q = filp->private_data; | ||
253 | |||
254 | ccp5_debugfs_reset_queue_stats(cmd_q); | ||
255 | |||
256 | return count; | ||
257 | } | ||
258 | |||
259 | static const struct file_operations ccp_debugfs_info_ops = { | ||
260 | .owner = THIS_MODULE, | ||
261 | .open = simple_open, | ||
262 | .read = ccp5_debugfs_info_read, | ||
263 | .write = NULL, | ||
264 | }; | ||
265 | |||
266 | static const struct file_operations ccp_debugfs_queue_ops = { | ||
267 | .owner = THIS_MODULE, | ||
268 | .open = simple_open, | ||
269 | .read = ccp5_debugfs_queue_read, | ||
270 | .write = ccp5_debugfs_queue_write, | ||
271 | }; | ||
272 | |||
273 | static const struct file_operations ccp_debugfs_stats_ops = { | ||
274 | .owner = THIS_MODULE, | ||
275 | .open = simple_open, | ||
276 | .read = ccp5_debugfs_stats_read, | ||
277 | .write = ccp5_debugfs_stats_write, | ||
278 | }; | ||
279 | |||
280 | static struct dentry *ccp_debugfs_dir; | ||
281 | static DEFINE_RWLOCK(ccp_debugfs_lock); | ||
282 | |||
283 | #define MAX_NAME_LEN 20 | ||
284 | |||
285 | void ccp5_debugfs_setup(struct ccp_device *ccp) | ||
286 | { | ||
287 | struct ccp_cmd_queue *cmd_q; | ||
288 | char name[MAX_NAME_LEN + 1]; | ||
289 | struct dentry *debugfs_info; | ||
290 | struct dentry *debugfs_stats; | ||
291 | struct dentry *debugfs_q_instance; | ||
292 | struct dentry *debugfs_q_stats; | ||
293 | unsigned long flags; | ||
294 | int i; | ||
295 | |||
296 | if (!debugfs_initialized()) | ||
297 | return; | ||
298 | |||
299 | write_lock_irqsave(&ccp_debugfs_lock, flags); | ||
300 | if (!ccp_debugfs_dir) | ||
301 | ccp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); | ||
302 | write_unlock_irqrestore(&ccp_debugfs_lock, flags); | ||
303 | if (!ccp_debugfs_dir) | ||
304 | return; | ||
305 | |||
306 | ccp->debugfs_instance = debugfs_create_dir(ccp->name, ccp_debugfs_dir); | ||
307 | if (!ccp->debugfs_instance) | ||
308 | return; | ||
309 | |||
310 | debugfs_info = debugfs_create_file("info", 0400, | ||
311 | ccp->debugfs_instance, ccp, | ||
312 | &ccp_debugfs_info_ops); | ||
313 | if (!debugfs_info) | ||
314 | return; | ||
315 | |||
316 | debugfs_stats = debugfs_create_file("stats", 0600, | ||
317 | ccp->debugfs_instance, ccp, | ||
318 | &ccp_debugfs_stats_ops); | ||
319 | if (!debugfs_stats) | ||
320 | return; | ||
321 | |||
322 | for (i = 0; i < ccp->cmd_q_count; i++) { | ||
323 | cmd_q = &ccp->cmd_q[i]; | ||
324 | |||
325 | snprintf(name, MAX_NAME_LEN - 1, "q%d", cmd_q->id); | ||
326 | |||
327 | debugfs_q_instance = | ||
328 | debugfs_create_dir(name, ccp->debugfs_instance); | ||
329 | if (!debugfs_q_instance) | ||
330 | return; | ||
331 | |||
332 | debugfs_q_stats = | ||
333 | debugfs_create_file("stats", 0600, | ||
334 | debugfs_q_instance, cmd_q, | ||
335 | &ccp_debugfs_queue_ops); | ||
336 | if (!debugfs_q_stats) | ||
337 | return; | ||
338 | } | ||
339 | } | ||
340 | |||
341 | void ccp5_debugfs_destroy(void) | ||
342 | { | ||
343 | debugfs_remove_recursive(ccp_debugfs_dir); | ||
344 | } | ||
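
The layout created by ccp5_debugfs_setup() is a KBUILD_MODNAME directory (typically "ccp") under debugfs, one sub-directory per device, a read-only info file, a read/write stats file, and one q<N>/stats file per queue; any write to a stats file resets its counters. A small userspace sketch that reads and then resets one queue's counters, assuming debugfs is mounted at /sys/kernel/debug and a device directory named ccp-0 (the real name comes from ccp->name):

/*
 * Userspace sketch: read the per-queue counters and reset them by
 * writing to the stats file. The device directory name ("ccp-0") is
 * an assumption; use whatever appears under /sys/kernel/debug/ccp/.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *stats = "/sys/kernel/debug/ccp/ccp-0/q0/stats";
	char buf[1024];
	ssize_t n;
	int fd;

	fd = open(stats, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	n = read(fd, buf, sizeof(buf) - 1);	/* formatted counter dump */
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}

	write(fd, "0\n", 2);			/* any write resets the counters */
	close(fd);
	return 0;
}
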
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index ccbe32d5dd1c..b10d2d2075cb 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/pci.h> | 15 | #include <linux/pci.h> |
16 | #include <linux/kthread.h> | 16 | #include <linux/kthread.h> |
17 | #include <linux/debugfs.h> | ||
17 | #include <linux/dma-mapping.h> | 18 | #include <linux/dma-mapping.h> |
18 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
19 | #include <linux/compiler.h> | 20 | #include <linux/compiler.h> |
@@ -231,6 +232,8 @@ static int ccp5_do_cmd(struct ccp5_desc *desc, | |||
231 | int i; | 232 | int i; |
232 | int ret = 0; | 233 | int ret = 0; |
233 | 234 | ||
235 | cmd_q->total_ops++; | ||
236 | |||
234 | if (CCP5_CMD_SOC(desc)) { | 237 | if (CCP5_CMD_SOC(desc)) { |
235 | CCP5_CMD_IOC(desc) = 1; | 238 | CCP5_CMD_IOC(desc) = 1; |
236 | CCP5_CMD_SOC(desc) = 0; | 239 | CCP5_CMD_SOC(desc) = 0; |
@@ -282,6 +285,8 @@ static int ccp5_perform_aes(struct ccp_op *op) | |||
282 | union ccp_function function; | 285 | union ccp_function function; |
283 | u32 key_addr = op->sb_key * LSB_ITEM_SIZE; | 286 | u32 key_addr = op->sb_key * LSB_ITEM_SIZE; |
284 | 287 | ||
288 | op->cmd_q->total_aes_ops++; | ||
289 | |||
285 | /* Zero out all the fields of the command desc */ | 290 | /* Zero out all the fields of the command desc */ |
286 | memset(&desc, 0, Q_DESC_SIZE); | 291 | memset(&desc, 0, Q_DESC_SIZE); |
287 | 292 | ||
@@ -325,6 +330,8 @@ static int ccp5_perform_xts_aes(struct ccp_op *op) | |||
325 | union ccp_function function; | 330 | union ccp_function function; |
326 | u32 key_addr = op->sb_key * LSB_ITEM_SIZE; | 331 | u32 key_addr = op->sb_key * LSB_ITEM_SIZE; |
327 | 332 | ||
333 | op->cmd_q->total_xts_aes_ops++; | ||
334 | |||
328 | /* Zero out all the fields of the command desc */ | 335 | /* Zero out all the fields of the command desc */ |
329 | memset(&desc, 0, Q_DESC_SIZE); | 336 | memset(&desc, 0, Q_DESC_SIZE); |
330 | 337 | ||
@@ -364,6 +371,8 @@ static int ccp5_perform_sha(struct ccp_op *op) | |||
364 | struct ccp5_desc desc; | 371 | struct ccp5_desc desc; |
365 | union ccp_function function; | 372 | union ccp_function function; |
366 | 373 | ||
374 | op->cmd_q->total_sha_ops++; | ||
375 | |||
367 | /* Zero out all the fields of the command desc */ | 376 | /* Zero out all the fields of the command desc */ |
368 | memset(&desc, 0, Q_DESC_SIZE); | 377 | memset(&desc, 0, Q_DESC_SIZE); |
369 | 378 | ||
@@ -404,6 +413,8 @@ static int ccp5_perform_des3(struct ccp_op *op) | |||
404 | union ccp_function function; | 413 | union ccp_function function; |
405 | u32 key_addr = op->sb_key * LSB_ITEM_SIZE; | 414 | u32 key_addr = op->sb_key * LSB_ITEM_SIZE; |
406 | 415 | ||
416 | op->cmd_q->total_3des_ops++; | ||
417 | |||
407 | /* Zero out all the fields of the command desc */ | 418 | /* Zero out all the fields of the command desc */ |
408 | memset(&desc, 0, sizeof(struct ccp5_desc)); | 419 | memset(&desc, 0, sizeof(struct ccp5_desc)); |
409 | 420 | ||
@@ -444,6 +455,8 @@ static int ccp5_perform_rsa(struct ccp_op *op) | |||
444 | struct ccp5_desc desc; | 455 | struct ccp5_desc desc; |
445 | union ccp_function function; | 456 | union ccp_function function; |
446 | 457 | ||
458 | op->cmd_q->total_rsa_ops++; | ||
459 | |||
447 | /* Zero out all the fields of the command desc */ | 460 | /* Zero out all the fields of the command desc */ |
448 | memset(&desc, 0, Q_DESC_SIZE); | 461 | memset(&desc, 0, Q_DESC_SIZE); |
449 | 462 | ||
@@ -487,6 +500,8 @@ static int ccp5_perform_passthru(struct ccp_op *op) | |||
487 | struct ccp_dma_info *daddr = &op->dst.u.dma; | 500 | struct ccp_dma_info *daddr = &op->dst.u.dma; |
488 | 501 | ||
489 | 502 | ||
503 | op->cmd_q->total_pt_ops++; | ||
504 | |||
490 | memset(&desc, 0, Q_DESC_SIZE); | 505 | memset(&desc, 0, Q_DESC_SIZE); |
491 | 506 | ||
492 | CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU; | 507 | CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU; |
@@ -543,6 +558,8 @@ static int ccp5_perform_ecc(struct ccp_op *op) | |||
543 | struct ccp5_desc desc; | 558 | struct ccp5_desc desc; |
544 | union ccp_function function; | 559 | union ccp_function function; |
545 | 560 | ||
561 | op->cmd_q->total_ecc_ops++; | ||
562 | |||
546 | /* Zero out all the fields of the command desc */ | 563 | /* Zero out all the fields of the command desc */ |
547 | memset(&desc, 0, Q_DESC_SIZE); | 564 | memset(&desc, 0, Q_DESC_SIZE); |
548 | 565 | ||
@@ -592,7 +609,6 @@ static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) | |||
592 | return queues ? 0 : -EINVAL; | 609 | return queues ? 0 : -EINVAL; |
593 | } | 610 | } |
594 | 611 | ||
595 | |||
596 | static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp, | 612 | static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp, |
597 | int lsb_cnt, int n_lsbs, | 613 | int lsb_cnt, int n_lsbs, |
598 | unsigned long *lsb_pub) | 614 | unsigned long *lsb_pub) |
@@ -757,6 +773,7 @@ static irqreturn_t ccp5_irq_handler(int irq, void *data) | |||
757 | struct ccp_device *ccp = dev_get_drvdata(dev); | 773 | struct ccp_device *ccp = dev_get_drvdata(dev); |
758 | 774 | ||
759 | ccp5_disable_queue_interrupts(ccp); | 775 | ccp5_disable_queue_interrupts(ccp); |
776 | ccp->total_interrupts++; | ||
760 | if (ccp->use_tasklet) | 777 | if (ccp->use_tasklet) |
761 | tasklet_schedule(&ccp->irq_tasklet); | 778 | tasklet_schedule(&ccp->irq_tasklet); |
762 | else | 779 | else |
@@ -956,6 +973,9 @@ static int ccp5_init(struct ccp_device *ccp) | |||
956 | if (ret) | 973 | if (ret) |
957 | goto e_hwrng; | 974 | goto e_hwrng; |
958 | 975 | ||
976 | /* Set up debugfs entries */ | ||
977 | ccp5_debugfs_setup(ccp); | ||
978 | |||
959 | return 0; | 979 | return 0; |
960 | 980 | ||
961 | e_hwrng: | 981 | e_hwrng: |
@@ -992,6 +1012,12 @@ static void ccp5_destroy(struct ccp_device *ccp) | |||
992 | /* Remove this device from the list of available units first */ | 1012 | /* Remove this device from the list of available units first */ |
993 | ccp_del_device(ccp); | 1013 | ccp_del_device(ccp); |
994 | 1014 | ||
1015 | /* We're in the process of tearing down the entire driver; | ||
1016 | * when all the devices are gone, clean up debugfs | ||
1017 | */ | ||
1018 | if (ccp_present()) | ||
1019 | ccp5_debugfs_destroy(); | ||
1020 | |||
995 | /* Disable and clear interrupts */ | 1021 | /* Disable and clear interrupts */ |
996 | ccp5_disable_queue_interrupts(ccp); | 1022 | ccp5_disable_queue_interrupts(ccp); |
997 | for (i = 0; i < ccp->cmd_q_count; i++) { | 1023 | for (i = 0; i < ccp->cmd_q_count; i++) { |
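Reading note on the guard above: ccp_present() returns 0 while at least one CCP device is still registered and -ENODEV once the list is empty, and ccp_del_device() has already removed this device a few lines earlier, so the condition is true only while the last device is being torn down and the shared debugfs tree is removed exactly once. That behaviour comes from the existing helper in ccp-dev.c, not from this patch.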
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 92d1c6959f08..2506b5025700 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c | |||
@@ -31,8 +31,9 @@ | |||
31 | #include "ccp-dev.h" | 31 | #include "ccp-dev.h" |
32 | 32 | ||
33 | MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>"); | 33 | MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>"); |
34 | MODULE_AUTHOR("Gary R Hook <gary.hook@amd.com>"); | ||
34 | MODULE_LICENSE("GPL"); | 35 | MODULE_LICENSE("GPL"); |
35 | MODULE_VERSION("1.0.0"); | 36 | MODULE_VERSION("1.1.0"); |
36 | MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver"); | 37 | MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver"); |
37 | 38 | ||
38 | struct ccp_tasklet_data { | 39 | struct ccp_tasklet_data { |
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 0cb09d0feeaf..a70154ac7405 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h | |||
@@ -70,6 +70,7 @@ | |||
70 | #define LSB_PUBLIC_MASK_HI_OFFSET 0x1C | 70 | #define LSB_PUBLIC_MASK_HI_OFFSET 0x1C |
71 | #define LSB_PRIVATE_MASK_LO_OFFSET 0x20 | 71 | #define LSB_PRIVATE_MASK_LO_OFFSET 0x20 |
72 | #define LSB_PRIVATE_MASK_HI_OFFSET 0x24 | 72 | #define LSB_PRIVATE_MASK_HI_OFFSET 0x24 |
73 | #define CMD5_PSP_CCP_VERSION 0x100 | ||
73 | 74 | ||
74 | #define CMD5_Q_CONTROL_BASE 0x0000 | 75 | #define CMD5_Q_CONTROL_BASE 0x0000 |
75 | #define CMD5_Q_TAIL_LO_BASE 0x0004 | 76 | #define CMD5_Q_TAIL_LO_BASE 0x0004 |
@@ -322,6 +323,16 @@ struct ccp_cmd_queue { | |||
322 | /* Interrupt wait queue */ | 323 | /* Interrupt wait queue */ |
323 | wait_queue_head_t int_queue; | 324 | wait_queue_head_t int_queue; |
324 | unsigned int int_rcvd; | 325 | unsigned int int_rcvd; |
326 | |||
327 | /* Per-queue Statistics */ | ||
328 | unsigned long total_ops; | ||
329 | unsigned long total_aes_ops; | ||
330 | unsigned long total_xts_aes_ops; | ||
331 | unsigned long total_3des_ops; | ||
332 | unsigned long total_sha_ops; | ||
333 | unsigned long total_rsa_ops; | ||
334 | unsigned long total_pt_ops; | ||
335 | unsigned long total_ecc_ops; | ||
325 | } ____cacheline_aligned; | 336 | } ____cacheline_aligned; |
326 | 337 | ||
327 | struct ccp_device { | 338 | struct ccp_device { |
@@ -419,6 +430,12 @@ struct ccp_device { | |||
419 | 430 | ||
420 | /* DMA caching attribute support */ | 431 | /* DMA caching attribute support */ |
421 | unsigned int axcache; | 432 | unsigned int axcache; |
433 | |||
434 | /* Device Statistics */ | ||
435 | unsigned long total_interrupts; | ||
436 | |||
437 | /* DebugFS info */ | ||
438 | struct dentry *debugfs_instance; | ||
422 | }; | 439 | }; |
423 | 440 | ||
424 | enum ccp_memtype { | 441 | enum ccp_memtype { |
@@ -632,6 +649,9 @@ void ccp_unregister_rng(struct ccp_device *ccp); | |||
632 | int ccp_dmaengine_register(struct ccp_device *ccp); | 649 | int ccp_dmaengine_register(struct ccp_device *ccp); |
633 | void ccp_dmaengine_unregister(struct ccp_device *ccp); | 650 | void ccp_dmaengine_unregister(struct ccp_device *ccp); |
634 | 651 | ||
652 | void ccp5_debugfs_setup(struct ccp_device *ccp); | ||
653 | void ccp5_debugfs_destroy(void); | ||
654 | |||
635 | /* Structure for computation functions that are device-specific */ | 655 | /* Structure for computation functions that are device-specific */ |
636 | struct ccp_actions { | 656 | struct ccp_actions { |
637 | int (*aes)(struct ccp_op *); | 657 | int (*aes)(struct ccp_op *); |
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c index 351f28d8c336..e26969e601ad 100644 --- a/drivers/crypto/ccp/ccp-platform.c +++ b/drivers/crypto/ccp/ccp-platform.c | |||
@@ -44,7 +44,7 @@ static struct ccp_vdata *ccp_get_of_version(struct platform_device *pdev) | |||
44 | if (match && match->data) | 44 | if (match && match->data) |
45 | return (struct ccp_vdata *)match->data; | 45 | return (struct ccp_vdata *)match->data; |
46 | #endif | 46 | #endif |
47 | return 0; | 47 | return NULL; |
48 | } | 48 | } |
49 | 49 | ||
50 | static struct ccp_vdata *ccp_get_acpi_version(struct platform_device *pdev) | 50 | static struct ccp_vdata *ccp_get_acpi_version(struct platform_device *pdev) |
@@ -56,7 +56,7 @@ static struct ccp_vdata *ccp_get_acpi_version(struct platform_device *pdev) | |||
56 | if (match && match->driver_data) | 56 | if (match && match->driver_data) |
57 | return (struct ccp_vdata *)match->driver_data; | 57 | return (struct ccp_vdata *)match->driver_data; |
58 | #endif | 58 | #endif |
59 | return 0; | 59 | return NULL; |
60 | } | 60 | } |
61 | 61 | ||
62 | static int ccp_get_irq(struct ccp_device *ccp) | 62 | static int ccp_get_irq(struct ccp_device *ccp) |
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index f00e0d8bd039..aa4e5b88483d 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c | |||
@@ -55,6 +55,8 @@ | |||
55 | #include <crypto/hash.h> | 55 | #include <crypto/hash.h> |
56 | #include <crypto/sha.h> | 56 | #include <crypto/sha.h> |
57 | #include <crypto/authenc.h> | 57 | #include <crypto/authenc.h> |
58 | #include <crypto/ctr.h> | ||
59 | #include <crypto/gf128mul.h> | ||
58 | #include <crypto/internal/aead.h> | 60 | #include <crypto/internal/aead.h> |
59 | #include <crypto/null.h> | 61 | #include <crypto/null.h> |
60 | #include <crypto/internal/skcipher.h> | 62 | #include <crypto/internal/skcipher.h> |
@@ -126,13 +128,13 @@ static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err) | |||
126 | fw6_pld = (struct cpl_fw6_pld *)input; | 128 | fw6_pld = (struct cpl_fw6_pld *)input; |
127 | if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) || | 129 | if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) || |
128 | (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) { | 130 | (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) { |
129 | cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize); | 131 | cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize); |
130 | } else { | 132 | } else { |
131 | 133 | ||
132 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp, | 134 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp, |
133 | authsize, req->assoclen + | 135 | authsize, req->assoclen + |
134 | req->cryptlen - authsize); | 136 | req->cryptlen - authsize); |
135 | cmp = memcmp(temp, (fw6_pld + 1), authsize); | 137 | cmp = crypto_memneq(temp, (fw6_pld + 1), authsize); |
136 | } | 138 | } |
137 | if (cmp) | 139 | if (cmp) |
138 | *err = -EBADMSG; | 140 | *err = -EBADMSG; |
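The memcmp() to crypto_memneq() change above is the standard hardening for authentication-tag checks: crypto_memneq() examines the full length in constant time, so the comparison's timing no longer reveals how many leading tag bytes matched, which is what makes byte-at-a-time tag forgery against a plain memcmp() practical.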
@@ -151,12 +153,12 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, | |||
151 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | 153 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); |
152 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | 154 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
153 | struct chcr_req_ctx ctx_req; | 155 | struct chcr_req_ctx ctx_req; |
154 | struct cpl_fw6_pld *fw6_pld; | ||
155 | unsigned int digestsize, updated_digestsize; | 156 | unsigned int digestsize, updated_digestsize; |
157 | struct adapter *adap = padap(ctx->dev); | ||
156 | 158 | ||
157 | switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { | 159 | switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { |
158 | case CRYPTO_ALG_TYPE_AEAD: | 160 | case CRYPTO_ALG_TYPE_AEAD: |
159 | ctx_req.req.aead_req = (struct aead_request *)req; | 161 | ctx_req.req.aead_req = aead_request_cast(req); |
160 | ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req); | 162 | ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req); |
161 | dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst, | 163 | dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst, |
162 | ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE); | 164 | ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE); |
@@ -164,32 +166,23 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, | |||
164 | kfree_skb(ctx_req.ctx.reqctx->skb); | 166 | kfree_skb(ctx_req.ctx.reqctx->skb); |
165 | ctx_req.ctx.reqctx->skb = NULL; | 167 | ctx_req.ctx.reqctx->skb = NULL; |
166 | } | 168 | } |
169 | free_new_sg(ctx_req.ctx.reqctx->newdstsg); | ||
170 | ctx_req.ctx.reqctx->newdstsg = NULL; | ||
167 | if (ctx_req.ctx.reqctx->verify == VERIFY_SW) { | 171 | if (ctx_req.ctx.reqctx->verify == VERIFY_SW) { |
168 | chcr_verify_tag(ctx_req.req.aead_req, input, | 172 | chcr_verify_tag(ctx_req.req.aead_req, input, |
169 | &err); | 173 | &err); |
170 | ctx_req.ctx.reqctx->verify = VERIFY_HW; | 174 | ctx_req.ctx.reqctx->verify = VERIFY_HW; |
171 | } | 175 | } |
176 | ctx_req.req.aead_req->base.complete(req, err); | ||
172 | break; | 177 | break; |
173 | 178 | ||
174 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | 179 | case CRYPTO_ALG_TYPE_ABLKCIPHER: |
175 | ctx_req.req.ablk_req = (struct ablkcipher_request *)req; | 180 | err = chcr_handle_cipher_resp(ablkcipher_request_cast(req), |
176 | ctx_req.ctx.ablk_ctx = | 181 | input, err); |
177 | ablkcipher_request_ctx(ctx_req.req.ablk_req); | ||
178 | if (!err) { | ||
179 | fw6_pld = (struct cpl_fw6_pld *)input; | ||
180 | memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2], | ||
181 | AES_BLOCK_SIZE); | ||
182 | } | ||
183 | dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst, | ||
184 | ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE); | ||
185 | if (ctx_req.ctx.ablk_ctx->skb) { | ||
186 | kfree_skb(ctx_req.ctx.ablk_ctx->skb); | ||
187 | ctx_req.ctx.ablk_ctx->skb = NULL; | ||
188 | } | ||
189 | break; | 182 | break; |
190 | 183 | ||
191 | case CRYPTO_ALG_TYPE_AHASH: | 184 | case CRYPTO_ALG_TYPE_AHASH: |
192 | ctx_req.req.ahash_req = (struct ahash_request *)req; | 185 | ctx_req.req.ahash_req = ahash_request_cast(req); |
193 | ctx_req.ctx.ahash_ctx = | 186 | ctx_req.ctx.ahash_ctx = |
194 | ahash_request_ctx(ctx_req.req.ahash_req); | 187 | ahash_request_ctx(ctx_req.req.ahash_req); |
195 | digestsize = | 188 | digestsize = |
@@ -214,8 +207,10 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, | |||
214 | sizeof(struct cpl_fw6_pld), | 207 | sizeof(struct cpl_fw6_pld), |
215 | updated_digestsize); | 208 | updated_digestsize); |
216 | } | 209 | } |
210 | ctx_req.req.ahash_req->base.complete(req, err); | ||
217 | break; | 211 | break; |
218 | } | 212 | } |
213 | atomic_inc(&adap->chcr_stats.complete); | ||
219 | return err; | 214 | return err; |
220 | } | 215 | } |
221 | 216 | ||
@@ -392,7 +387,7 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl, | |||
392 | struct phys_sge_parm *sg_param) | 387 | struct phys_sge_parm *sg_param) |
393 | { | 388 | { |
394 | struct phys_sge_pairs *to; | 389 | struct phys_sge_pairs *to; |
395 | int out_buf_size = sg_param->obsize; | 390 | unsigned int len = 0, left_size = sg_param->obsize; |
396 | unsigned int nents = sg_param->nents, i, j = 0; | 391 | unsigned int nents = sg_param->nents, i, j = 0; |
397 | 392 | ||
398 | phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL) | 393 | phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL) |
@@ -409,20 +404,15 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl, | |||
409 | phys_cpl->rss_hdr_int.hash_val = 0; | 404 | phys_cpl->rss_hdr_int.hash_val = 0; |
410 | to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl + | 405 | to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl + |
411 | sizeof(struct cpl_rx_phys_dsgl)); | 406 | sizeof(struct cpl_rx_phys_dsgl)); |
412 | 407 | for (i = 0; nents && left_size; to++) { | |
413 | for (i = 0; nents; to++) { | 408 | for (j = 0; j < 8 && nents && left_size; j++, nents--) { |
414 | for (j = 0; j < 8 && nents; j++, nents--) { | 409 | len = min(left_size, sg_dma_len(sg)); |
415 | out_buf_size -= sg_dma_len(sg); | 410 | to->len[j] = htons(len); |
416 | to->len[j] = htons(sg_dma_len(sg)); | ||
417 | to->addr[j] = cpu_to_be64(sg_dma_address(sg)); | 411 | to->addr[j] = cpu_to_be64(sg_dma_address(sg)); |
412 | left_size -= len; | ||
418 | sg = sg_next(sg); | 413 | sg = sg_next(sg); |
419 | } | 414 | } |
420 | } | 415 | } |
421 | if (out_buf_size) { | ||
422 | j--; | ||
423 | to--; | ||
424 | to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size)); | ||
425 | } | ||
426 | } | 416 | } |
427 | 417 | ||
428 | static inline int map_writesg_phys_cpl(struct device *dev, | 418 | static inline int map_writesg_phys_cpl(struct device *dev, |
@@ -431,7 +421,7 @@ static inline int map_writesg_phys_cpl(struct device *dev, | |||
431 | struct phys_sge_parm *sg_param) | 421 | struct phys_sge_parm *sg_param) |
432 | { | 422 | { |
433 | if (!sg || !sg_param->nents) | 423 | if (!sg || !sg_param->nents) |
434 | return 0; | 424 | return -EINVAL; |
435 | 425 | ||
436 | sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE); | 426 | sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE); |
437 | if (sg_param->nents == 0) { | 427 | if (sg_param->nents == 0) { |
@@ -498,6 +488,24 @@ write_sg_to_skb(struct sk_buff *skb, unsigned int *frags, | |||
498 | } | 488 | } |
499 | } | 489 | } |
500 | 490 | ||
491 | static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx) | ||
492 | { | ||
493 | struct adapter *adap = netdev2adap(dev); | ||
494 | struct sge_uld_txq_info *txq_info = | ||
495 | adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; | ||
496 | struct sge_uld_txq *txq; | ||
497 | int ret = 0; | ||
498 | |||
499 | local_bh_disable(); | ||
500 | txq = &txq_info->uldtxq[idx]; | ||
501 | spin_lock(&txq->sendq.lock); | ||
502 | if (txq->full) | ||
503 | ret = -1; | ||
504 | spin_unlock(&txq->sendq.lock); | ||
505 | local_bh_enable(); | ||
506 | return ret; | ||
507 | } | ||
508 | |||
501 | static int generate_copy_rrkey(struct ablk_ctx *ablkctx, | 509 | static int generate_copy_rrkey(struct ablk_ctx *ablkctx, |
502 | struct _key_ctx *key_ctx) | 510 | struct _key_ctx *key_ctx) |
503 | { | 511 | { |
@@ -512,13 +520,67 @@ static int generate_copy_rrkey(struct ablk_ctx *ablkctx, | |||
512 | } | 520 | } |
513 | return 0; | 521 | return 0; |
514 | } | 522 | } |
523 | static int chcr_sg_ent_in_wr(struct scatterlist *src, | ||
524 | struct scatterlist *dst, | ||
525 | unsigned int minsg, | ||
526 | unsigned int space, | ||
527 | short int *sent, | ||
528 | short int *dent) | ||
529 | { | ||
530 | int srclen = 0, dstlen = 0; | ||
531 | int srcsg = minsg, dstsg = 0; | ||
532 | |||
533 | *sent = 0; | ||
534 | *dent = 0; | ||
535 | while (src && dst && ((srcsg + 1) <= MAX_SKB_FRAGS) && | ||
536 | space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) { | ||
537 | srclen += src->length; | ||
538 | srcsg++; | ||
539 | while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) && | ||
540 | space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) { | ||
541 | if (srclen <= dstlen) | ||
542 | break; | ||
543 | dstlen += dst->length; | ||
544 | dst = sg_next(dst); | ||
545 | dstsg++; | ||
546 | } | ||
547 | src = sg_next(src); | ||
548 | } | ||
549 | *sent = srcsg - minsg; | ||
550 | *dent = dstsg; | ||
551 | return min(srclen, dstlen); | ||
552 | } | ||
553 | |||
554 | static int chcr_cipher_fallback(struct crypto_skcipher *cipher, | ||
555 | u32 flags, | ||
556 | struct scatterlist *src, | ||
557 | struct scatterlist *dst, | ||
558 | unsigned int nbytes, | ||
559 | u8 *iv, | ||
560 | unsigned short op_type) | ||
561 | { | ||
562 | int err; | ||
515 | 563 | ||
564 | SKCIPHER_REQUEST_ON_STACK(subreq, cipher); | ||
565 | skcipher_request_set_tfm(subreq, cipher); | ||
566 | skcipher_request_set_callback(subreq, flags, NULL, NULL); | ||
567 | skcipher_request_set_crypt(subreq, src, dst, | ||
568 | nbytes, iv); | ||
569 | |||
570 | err = op_type ? crypto_skcipher_decrypt(subreq) : | ||
571 | crypto_skcipher_encrypt(subreq); | ||
572 | skcipher_request_zero(subreq); | ||
573 | |||
574 | return err; | ||
575 | |||
576 | } | ||
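A note on the fallback helper above: chcr_cra_init() later in this patch allocates ablkctx->sw_cipher with CRYPTO_ALG_ASYNC set in the mask but not in the type, so the fallback tfm is a synchronous skcipher; the SKCIPHER_REQUEST_ON_STACK request therefore completes before chcr_cipher_fallback() returns and never needs a completion callback. This is inferred from the allocation flags shown later in this diff rather than stated in it.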
516 | static inline void create_wreq(struct chcr_context *ctx, | 577 | static inline void create_wreq(struct chcr_context *ctx, |
517 | struct chcr_wr *chcr_req, | 578 | struct chcr_wr *chcr_req, |
518 | void *req, struct sk_buff *skb, | 579 | void *req, struct sk_buff *skb, |
519 | int kctx_len, int hash_sz, | 580 | int kctx_len, int hash_sz, |
520 | int is_iv, | 581 | int is_iv, |
521 | unsigned int sc_len) | 582 | unsigned int sc_len, |
583 | unsigned int lcb) | ||
522 | { | 584 | { |
523 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | 585 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
524 | int iv_loc = IV_DSGL; | 586 | int iv_loc = IV_DSGL; |
@@ -543,7 +605,8 @@ static inline void create_wreq(struct chcr_context *ctx, | |||
543 | chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req); | 605 | chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req); |
544 | chcr_req->wreq.rx_chid_to_rx_q_id = | 606 | chcr_req->wreq.rx_chid_to_rx_q_id = |
545 | FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid, | 607 | FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid, |
546 | is_iv ? iv_loc : IV_NOP, ctx->tx_qidx); | 608 | is_iv ? iv_loc : IV_NOP, !!lcb, |
609 | ctx->tx_qidx); | ||
547 | 610 | ||
548 | chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id, | 611 | chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id, |
549 | qid); | 612 | qid); |
@@ -563,69 +626,62 @@ static inline void create_wreq(struct chcr_context *ctx, | |||
563 | * @qid: ingress qid where response of this WR should be received. | 626 | * @qid: ingress qid where response of this WR should be received. |
564 | * @op_type: encryption or decryption | 627 | * @op_type: encryption or decryption |
565 | */ | 628 | */ |
566 | static struct sk_buff | 629 | static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) |
567 | *create_cipher_wr(struct ablkcipher_request *req, | ||
568 | unsigned short qid, | ||
569 | unsigned short op_type) | ||
570 | { | 630 | { |
571 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 631 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req); |
572 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | 632 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); |
573 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | 633 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
574 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | 634 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); |
575 | struct sk_buff *skb = NULL; | 635 | struct sk_buff *skb = NULL; |
576 | struct chcr_wr *chcr_req; | 636 | struct chcr_wr *chcr_req; |
577 | struct cpl_rx_phys_dsgl *phys_cpl; | 637 | struct cpl_rx_phys_dsgl *phys_cpl; |
578 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | 638 | struct chcr_blkcipher_req_ctx *reqctx = |
639 | ablkcipher_request_ctx(wrparam->req); | ||
579 | struct phys_sge_parm sg_param; | 640 | struct phys_sge_parm sg_param; |
580 | unsigned int frags = 0, transhdr_len, phys_dsgl; | 641 | unsigned int frags = 0, transhdr_len, phys_dsgl; |
581 | unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len; | 642 | int error; |
582 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | 643 | unsigned int ivsize = AES_BLOCK_SIZE, kctx_len; |
583 | GFP_ATOMIC; | 644 | gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? |
584 | 645 | GFP_KERNEL : GFP_ATOMIC; | |
585 | if (!req->info) | 646 | struct adapter *adap = padap(ctx->dev); |
586 | return ERR_PTR(-EINVAL); | ||
587 | reqctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes); | ||
588 | if (reqctx->dst_nents <= 0) { | ||
589 | pr_err("AES:Invalid Destination sg lists\n"); | ||
590 | return ERR_PTR(-EINVAL); | ||
591 | } | ||
592 | if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) || | ||
593 | (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) { | ||
594 | pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n", | ||
595 | ablkctx->enckey_len, req->nbytes, ivsize); | ||
596 | return ERR_PTR(-EINVAL); | ||
597 | } | ||
598 | 647 | ||
599 | phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents); | 648 | phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents); |
600 | 649 | ||
601 | kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16); | 650 | kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16); |
602 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); | 651 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); |
603 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); | 652 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); |
604 | if (!skb) | 653 | if (!skb) { |
605 | return ERR_PTR(-ENOMEM); | 654 | error = -ENOMEM; |
655 | goto err; | ||
656 | } | ||
606 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); | 657 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); |
607 | chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len); | 658 | chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len); |
608 | memset(chcr_req, 0, transhdr_len); | 659 | memset(chcr_req, 0, transhdr_len); |
609 | chcr_req->sec_cpl.op_ivinsrtofst = | 660 | chcr_req->sec_cpl.op_ivinsrtofst = |
610 | FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1); | 661 | FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1); |
611 | 662 | ||
612 | chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes); | 663 | chcr_req->sec_cpl.pldlen = htonl(ivsize + wrparam->bytes); |
613 | chcr_req->sec_cpl.aadstart_cipherstop_hi = | 664 | chcr_req->sec_cpl.aadstart_cipherstop_hi = |
614 | FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0); | 665 | FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0); |
615 | 666 | ||
616 | chcr_req->sec_cpl.cipherstop_lo_authinsert = | 667 | chcr_req->sec_cpl.cipherstop_lo_authinsert = |
617 | FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0); | 668 | FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0); |
618 | chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0, | 669 | chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0, |
619 | ablkctx->ciph_mode, | 670 | ablkctx->ciph_mode, |
620 | 0, 0, ivsize >> 1); | 671 | 0, 0, ivsize >> 1); |
621 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0, | 672 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0, |
622 | 0, 1, phys_dsgl); | 673 | 0, 1, phys_dsgl); |
623 | 674 | ||
624 | chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr; | 675 | chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr; |
625 | if (op_type == CHCR_DECRYPT_OP) { | 676 | if ((reqctx->op == CHCR_DECRYPT_OP) && |
677 | (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == | ||
678 | CRYPTO_ALG_SUB_TYPE_CTR)) && | ||
679 | (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == | ||
680 | CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) { | ||
626 | generate_copy_rrkey(ablkctx, &chcr_req->key_ctx); | 681 | generate_copy_rrkey(ablkctx, &chcr_req->key_ctx); |
627 | } else { | 682 | } else { |
628 | if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) { | 683 | if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) || |
684 | (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) { | ||
629 | memcpy(chcr_req->key_ctx.key, ablkctx->key, | 685 | memcpy(chcr_req->key_ctx.key, ablkctx->key, |
630 | ablkctx->enckey_len); | 686 | ablkctx->enckey_len); |
631 | } else { | 687 | } else { |
@@ -640,45 +696,80 @@ static struct sk_buff | |||
640 | } | 696 | } |
641 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); | 697 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
642 | sg_param.nents = reqctx->dst_nents; | 698 | sg_param.nents = reqctx->dst_nents; |
643 | sg_param.obsize = req->nbytes; | 699 | sg_param.obsize = wrparam->bytes; |
644 | sg_param.qid = qid; | 700 | sg_param.qid = wrparam->qid; |
645 | sg_param.align = 1; | 701 | error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, |
646 | if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst, | 702 | reqctx->dst, &sg_param); |
647 | &sg_param)) | 703 | if (error) |
648 | goto map_fail1; | 704 | goto map_fail1; |
649 | 705 | ||
650 | skb_set_transport_header(skb, transhdr_len); | 706 | skb_set_transport_header(skb, transhdr_len); |
651 | memcpy(reqctx->iv, req->info, ivsize); | ||
652 | write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize); | 707 | write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize); |
653 | write_sg_to_skb(skb, &frags, req->src, req->nbytes); | 708 | write_sg_to_skb(skb, &frags, wrparam->srcsg, wrparam->bytes); |
654 | create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1, | 709 | atomic_inc(&adap->chcr_stats.cipher_rqst); |
655 | sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl); | 710 | create_wreq(ctx, chcr_req, &(wrparam->req->base), skb, kctx_len, 0, 1, |
711 | sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl, | ||
712 | ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC); | ||
656 | reqctx->skb = skb; | 713 | reqctx->skb = skb; |
657 | skb_get(skb); | 714 | skb_get(skb); |
658 | return skb; | 715 | return skb; |
659 | map_fail1: | 716 | map_fail1: |
660 | kfree_skb(skb); | 717 | kfree_skb(skb); |
661 | return ERR_PTR(-ENOMEM); | 718 | err: |
719 | return ERR_PTR(error); | ||
662 | } | 720 | } |
663 | 721 | ||
664 | static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | 722 | static inline int chcr_keyctx_ck_size(unsigned int keylen) |
723 | { | ||
724 | int ck_size = 0; | ||
725 | |||
726 | if (keylen == AES_KEYSIZE_128) | ||
727 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | ||
728 | else if (keylen == AES_KEYSIZE_192) | ||
729 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; | ||
730 | else if (keylen == AES_KEYSIZE_256) | ||
731 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; | ||
732 | else | ||
733 | ck_size = 0; | ||
734 | |||
735 | return ck_size; | ||
736 | } | ||
737 | static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher, | ||
738 | const u8 *key, | ||
739 | unsigned int keylen) | ||
740 | { | ||
741 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
742 | struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher); | ||
743 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | ||
744 | int err = 0; | ||
745 | |||
746 | crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK); | ||
747 | crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags & | ||
748 | CRYPTO_TFM_REQ_MASK); | ||
749 | err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen); | ||
750 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; | ||
751 | tfm->crt_flags |= | ||
752 | crypto_skcipher_get_flags(ablkctx->sw_cipher) & | ||
753 | CRYPTO_TFM_RES_MASK; | ||
754 | return err; | ||
755 | } | ||
756 | |||
757 | static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher, | ||
758 | const u8 *key, | ||
665 | unsigned int keylen) | 759 | unsigned int keylen) |
666 | { | 760 | { |
667 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | 761 | struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher); |
668 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | 762 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); |
669 | unsigned int ck_size, context_size; | 763 | unsigned int ck_size, context_size; |
670 | u16 alignment = 0; | 764 | u16 alignment = 0; |
765 | int err; | ||
671 | 766 | ||
672 | if (keylen == AES_KEYSIZE_128) { | 767 | err = chcr_cipher_fallback_setkey(cipher, key, keylen); |
673 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | 768 | if (err) |
674 | } else if (keylen == AES_KEYSIZE_192) { | ||
675 | alignment = 8; | ||
676 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; | ||
677 | } else if (keylen == AES_KEYSIZE_256) { | ||
678 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; | ||
679 | } else { | ||
680 | goto badkey_err; | 769 | goto badkey_err; |
681 | } | 770 | |
771 | ck_size = chcr_keyctx_ck_size(keylen); | ||
772 | alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0; | ||
682 | memcpy(ablkctx->key, key, keylen); | 773 | memcpy(ablkctx->key, key, keylen); |
683 | ablkctx->enckey_len = keylen; | 774 | ablkctx->enckey_len = keylen; |
684 | get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3); | 775 | get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3); |
@@ -690,35 +781,398 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | |||
690 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC; | 781 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC; |
691 | return 0; | 782 | return 0; |
692 | badkey_err: | 783 | badkey_err: |
693 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | 784 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); |
694 | ablkctx->enckey_len = 0; | 785 | ablkctx->enckey_len = 0; |
695 | return -EINVAL; | 786 | |
787 | return err; | ||
696 | } | 788 | } |
697 | 789 | ||
698 | static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx) | 790 | static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher, |
791 | const u8 *key, | ||
792 | unsigned int keylen) | ||
699 | { | 793 | { |
700 | struct adapter *adap = netdev2adap(dev); | 794 | struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher); |
701 | struct sge_uld_txq_info *txq_info = | 795 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); |
702 | adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; | 796 | unsigned int ck_size, context_size; |
703 | struct sge_uld_txq *txq; | 797 | u16 alignment = 0; |
798 | int err; | ||
799 | |||
800 | err = chcr_cipher_fallback_setkey(cipher, key, keylen); | ||
801 | if (err) | ||
802 | goto badkey_err; | ||
803 | ck_size = chcr_keyctx_ck_size(keylen); | ||
804 | alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0; | ||
805 | memcpy(ablkctx->key, key, keylen); | ||
806 | ablkctx->enckey_len = keylen; | ||
807 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + | ||
808 | keylen + alignment) >> 4; | ||
809 | |||
810 | ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, | ||
811 | 0, 0, context_size); | ||
812 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR; | ||
813 | |||
814 | return 0; | ||
815 | badkey_err: | ||
816 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
817 | ablkctx->enckey_len = 0; | ||
818 | |||
819 | return err; | ||
820 | } | ||
821 | |||
822 | static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher, | ||
823 | const u8 *key, | ||
824 | unsigned int keylen) | ||
825 | { | ||
826 | struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher); | ||
827 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | ||
828 | unsigned int ck_size, context_size; | ||
829 | u16 alignment = 0; | ||
830 | int err; | ||
831 | |||
832 | if (keylen < CTR_RFC3686_NONCE_SIZE) | ||
833 | return -EINVAL; | ||
834 | memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE), | ||
835 | CTR_RFC3686_NONCE_SIZE); | ||
836 | |||
837 | keylen -= CTR_RFC3686_NONCE_SIZE; | ||
838 | err = chcr_cipher_fallback_setkey(cipher, key, keylen); | ||
839 | if (err) | ||
840 | goto badkey_err; | ||
841 | |||
842 | ck_size = chcr_keyctx_ck_size(keylen); | ||
843 | alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0; | ||
844 | memcpy(ablkctx->key, key, keylen); | ||
845 | ablkctx->enckey_len = keylen; | ||
846 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + | ||
847 | keylen + alignment) >> 4; | ||
848 | |||
849 | ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, | ||
850 | 0, 0, context_size); | ||
851 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR; | ||
852 | |||
853 | return 0; | ||
854 | badkey_err: | ||
855 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
856 | ablkctx->enckey_len = 0; | ||
857 | |||
858 | return err; | ||
859 | } | ||
860 | static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add) | ||
861 | { | ||
862 | unsigned int size = AES_BLOCK_SIZE; | ||
863 | __be32 *b = (__be32 *)(dstiv + size); | ||
864 | u32 c, prev; | ||
865 | |||
866 | memcpy(dstiv, srciv, AES_BLOCK_SIZE); | ||
867 | for (; size >= 4; size -= 4) { | ||
868 | prev = be32_to_cpu(*--b); | ||
869 | c = prev + add; | ||
870 | *b = cpu_to_be32(c); | ||
871 | if (prev < c) | ||
872 | break; | ||
873 | add = 1; | ||
874 | } | ||
875 | |||
876 | } | ||
877 | |||
878 | static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes) | ||
879 | { | ||
880 | __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE); | ||
881 | u64 c; | ||
882 | u32 temp = be32_to_cpu(*--b); | ||
883 | |||
884 | temp = ~temp; | ||
885 | c = (u64)temp + 1; // Number of blocks that can be processed without overflow | ||
886 | if ((bytes / AES_BLOCK_SIZE) > c) | ||
887 | bytes = c * AES_BLOCK_SIZE; | ||
888 | return bytes; | ||
889 | } | ||
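/* Worked example for adjust_ctr_overflow(), derived from the code above:
 * if the low 32-bit word of the counter block is 0xfffffffe, then
 * temp = ~0xfffffffe = 1 and c = 2, so at most two more AES blocks can be
 * produced before that word wraps; a larger request is clamped to
 * 2 * AES_BLOCK_SIZE = 32 bytes here and the remainder is carried into a
 * follow-up work request with a recomputed IV.
 */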
890 | |||
891 | static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv) | ||
892 | { | ||
893 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
894 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | ||
895 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | ||
896 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | ||
897 | struct crypto_cipher *cipher; | ||
898 | int ret, i; | ||
899 | u8 *key; | ||
900 | unsigned int keylen; | ||
901 | |||
902 | cipher = crypto_alloc_cipher("aes-generic", 0, 0); | ||
903 | memcpy(iv, req->info, AES_BLOCK_SIZE); | ||
904 | |||
905 | if (IS_ERR(cipher)) { | ||
906 | ret = -ENOMEM; | ||
907 | goto out; | ||
908 | } | ||
909 | keylen = ablkctx->enckey_len / 2; | ||
910 | key = ablkctx->key + keylen; | ||
911 | ret = crypto_cipher_setkey(cipher, key, keylen); | ||
912 | if (ret) | ||
913 | goto out1; | ||
914 | |||
915 | crypto_cipher_encrypt_one(cipher, iv, iv); | ||
916 | for (i = 0; i < (reqctx->processed / AES_BLOCK_SIZE); i++) | ||
917 | gf128mul_x_ble((le128 *)iv, (le128 *)iv); | ||
918 | |||
919 | crypto_cipher_decrypt_one(cipher, iv, iv); | ||
920 | out1: | ||
921 | crypto_free_cipher(cipher); | ||
922 | out: | ||
923 | return ret; | ||
924 | } | ||
925 | |||
926 | static int chcr_update_cipher_iv(struct ablkcipher_request *req, | ||
927 | struct cpl_fw6_pld *fw6_pld, u8 *iv) | ||
928 | { | ||
929 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
930 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | ||
931 | int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)); | ||
704 | int ret = 0; | 932 | int ret = 0; |
705 | 933 | ||
706 | local_bh_disable(); | 934 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) |
707 | txq = &txq_info->uldtxq[idx]; | 935 | ctr_add_iv(iv, req->info, (reqctx->processed / |
708 | spin_lock(&txq->sendq.lock); | 936 | AES_BLOCK_SIZE)); |
709 | if (txq->full) | 937 | else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) |
710 | ret = -1; | 938 | *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE + |
711 | spin_unlock(&txq->sendq.lock); | 939 | CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed / |
712 | local_bh_enable(); | 940 | AES_BLOCK_SIZE) + 1); |
941 | else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) | ||
942 | ret = chcr_update_tweak(req, iv); | ||
943 | else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { | ||
944 | if (reqctx->op) | ||
945 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv, | ||
946 | 16, | ||
947 | reqctx->processed - AES_BLOCK_SIZE); | ||
948 | else | ||
949 | memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE); | ||
950 | } | ||
951 | |||
713 | return ret; | 952 | return ret; |
953 | |||
714 | } | 954 | } |
715 | 955 | ||
716 | static int chcr_aes_encrypt(struct ablkcipher_request *req) | 956 | /* We need a separate function for the final IV because in rfc3686 the initial |
957 | * counter starts from 1 and the IV buffer is only 8 bytes, which remains | ||
958 | * constant for subsequent update requests. | ||
959 | */ | ||
960 | |||
961 | static int chcr_final_cipher_iv(struct ablkcipher_request *req, | ||
962 | struct cpl_fw6_pld *fw6_pld, u8 *iv) | ||
963 | { | ||
964 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
965 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | ||
966 | int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)); | ||
967 | int ret = 0; | ||
968 | |||
969 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) | ||
970 | ctr_add_iv(iv, req->info, (reqctx->processed / | ||
971 | AES_BLOCK_SIZE)); | ||
972 | else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) | ||
973 | ret = chcr_update_tweak(req, iv); | ||
974 | else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { | ||
975 | if (reqctx->op) | ||
976 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv, | ||
977 | 16, | ||
978 | reqctx->processed - AES_BLOCK_SIZE); | ||
979 | else | ||
980 | memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE); | ||
981 | |||
982 | } | ||
983 | return ret; | ||
984 | |||
985 | } | ||
986 | |||
987 | |||
988 | static int chcr_handle_cipher_resp(struct ablkcipher_request *req, | ||
989 | unsigned char *input, int err) | ||
717 | { | 990 | { |
718 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 991 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
719 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | 992 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); |
720 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | 993 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
994 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | ||
721 | struct sk_buff *skb; | 995 | struct sk_buff *skb; |
996 | struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input; | ||
997 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | ||
998 | struct cipher_wr_param wrparam; | ||
999 | int bytes; | ||
1000 | |||
1001 | dma_unmap_sg(&u_ctx->lldi.pdev->dev, reqctx->dst, reqctx->dst_nents, | ||
1002 | DMA_FROM_DEVICE); | ||
1003 | |||
1004 | if (reqctx->skb) { | ||
1005 | kfree_skb(reqctx->skb); | ||
1006 | reqctx->skb = NULL; | ||
1007 | } | ||
1008 | if (err) | ||
1009 | goto complete; | ||
1010 | |||
1011 | if (req->nbytes == reqctx->processed) { | ||
1012 | err = chcr_final_cipher_iv(req, fw6_pld, req->info); | ||
1013 | goto complete; | ||
1014 | } | ||
1015 | |||
1016 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | ||
1017 | ctx->tx_qidx))) { | ||
1018 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { | ||
1019 | err = -EBUSY; | ||
1020 | goto complete; | ||
1021 | } | ||
1022 | |||
1023 | } | ||
1024 | wrparam.srcsg = scatterwalk_ffwd(reqctx->srcffwd, req->src, | ||
1025 | reqctx->processed); | ||
1026 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, reqctx->dstsg, | ||
1027 | reqctx->processed); | ||
1028 | if (!wrparam.srcsg || !reqctx->dst) { | ||
1029 | pr_err("Input sg list length less that nbytes\n"); | ||
1030 | err = -EINVAL; | ||
1031 | goto complete; | ||
1032 | } | ||
1033 | bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dst, 1, | ||
1034 | SPACE_LEFT(ablkctx->enckey_len), | ||
1035 | &wrparam.snent, &reqctx->dst_nents); | ||
1036 | if ((bytes + reqctx->processed) >= req->nbytes) | ||
1037 | bytes = req->nbytes - reqctx->processed; | ||
1038 | else | ||
1039 | bytes = ROUND_16(bytes); | ||
1040 | err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv); | ||
1041 | if (err) | ||
1042 | goto complete; | ||
1043 | |||
1044 | if (unlikely(bytes == 0)) { | ||
1045 | err = chcr_cipher_fallback(ablkctx->sw_cipher, | ||
1046 | req->base.flags, | ||
1047 | wrparam.srcsg, | ||
1048 | reqctx->dst, | ||
1049 | req->nbytes - reqctx->processed, | ||
1050 | reqctx->iv, | ||
1051 | reqctx->op); | ||
1052 | goto complete; | ||
1053 | } | ||
1054 | |||
1055 | if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == | ||
1056 | CRYPTO_ALG_SUB_TYPE_CTR) | ||
1057 | bytes = adjust_ctr_overflow(reqctx->iv, bytes); | ||
1058 | reqctx->processed += bytes; | ||
1059 | wrparam.qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx]; | ||
1060 | wrparam.req = req; | ||
1061 | wrparam.bytes = bytes; | ||
1062 | skb = create_cipher_wr(&wrparam); | ||
1063 | if (IS_ERR(skb)) { | ||
1064 | pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); | ||
1065 | err = PTR_ERR(skb); | ||
1066 | goto complete; | ||
1067 | } | ||
1068 | skb->dev = u_ctx->lldi.ports[0]; | ||
1069 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); | ||
1070 | chcr_send_wr(skb); | ||
1071 | return 0; | ||
1072 | complete: | ||
1073 | free_new_sg(reqctx->newdstsg); | ||
1074 | reqctx->newdstsg = NULL; | ||
1075 | req->base.complete(&req->base, err); | ||
1076 | return err; | ||
1077 | } | ||
1078 | |||
1079 | static int process_cipher(struct ablkcipher_request *req, | ||
1080 | unsigned short qid, | ||
1081 | struct sk_buff **skb, | ||
1082 | unsigned short op_type) | ||
1083 | { | ||
1084 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
1085 | unsigned int ivsize = crypto_ablkcipher_ivsize(tfm); | ||
1086 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | ||
1087 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | ||
1088 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | ||
1089 | struct cipher_wr_param wrparam; | ||
1090 | int bytes, nents, err = -EINVAL; | ||
1091 | |||
1092 | reqctx->newdstsg = NULL; | ||
1093 | reqctx->processed = 0; | ||
1094 | if (!req->info) | ||
1095 | goto error; | ||
1096 | if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) || | ||
1097 | (req->nbytes == 0) || | ||
1098 | (req->nbytes % crypto_ablkcipher_blocksize(tfm))) { | ||
1099 | pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n", | ||
1100 | ablkctx->enckey_len, req->nbytes, ivsize); | ||
1101 | goto error; | ||
1102 | } | ||
1103 | wrparam.srcsg = req->src; | ||
1104 | if (is_newsg(req->dst, &nents)) { | ||
1105 | reqctx->newdstsg = alloc_new_sg(req->dst, nents); | ||
1106 | if (IS_ERR(reqctx->newdstsg)) | ||
1107 | return PTR_ERR(reqctx->newdstsg); | ||
1108 | reqctx->dstsg = reqctx->newdstsg; | ||
1109 | } else { | ||
1110 | reqctx->dstsg = req->dst; | ||
1111 | } | ||
1112 | bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dstsg, MIN_CIPHER_SG, | ||
1113 | SPACE_LEFT(ablkctx->enckey_len), | ||
1114 | &wrparam.snent, | ||
1115 | &reqctx->dst_nents); | ||
1116 | if ((bytes + reqctx->processed) >= req->nbytes) | ||
1117 | bytes = req->nbytes - reqctx->processed; | ||
1118 | else | ||
1119 | bytes = ROUND_16(bytes); | ||
1120 | if (unlikely(bytes > req->nbytes)) | ||
1121 | bytes = req->nbytes; | ||
1122 | if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == | ||
1123 | CRYPTO_ALG_SUB_TYPE_CTR) { | ||
1124 | bytes = adjust_ctr_overflow(req->info, bytes); | ||
1125 | } | ||
1126 | if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == | ||
1127 | CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) { | ||
1128 | memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE); | ||
1129 | memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info, | ||
1130 | CTR_RFC3686_IV_SIZE); | ||
1131 | |||
1132 | /* initialize counter portion of counter block */ | ||
1133 | *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE + | ||
1134 | CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); | ||
1135 | |||
1136 | } else { | ||
1137 | |||
1138 | memcpy(reqctx->iv, req->info, ivsize); | ||
1139 | } | ||
1140 | if (unlikely(bytes == 0)) { | ||
1141 | err = chcr_cipher_fallback(ablkctx->sw_cipher, | ||
1142 | req->base.flags, | ||
1143 | req->src, | ||
1144 | req->dst, | ||
1145 | req->nbytes, | ||
1146 | req->info, | ||
1147 | op_type); | ||
1148 | goto error; | ||
1149 | } | ||
1150 | reqctx->processed = bytes; | ||
1151 | reqctx->dst = reqctx->dstsg; | ||
1152 | reqctx->op = op_type; | ||
1153 | wrparam.qid = qid; | ||
1154 | wrparam.req = req; | ||
1155 | wrparam.bytes = bytes; | ||
1156 | *skb = create_cipher_wr(&wrparam); | ||
1157 | if (IS_ERR(*skb)) { | ||
1158 | err = PTR_ERR(*skb); | ||
1159 | goto error; | ||
1160 | } | ||
1161 | |||
1162 | return 0; | ||
1163 | error: | ||
1164 | free_new_sg(reqctx->newdstsg); | ||
1165 | reqctx->newdstsg = NULL; | ||
1166 | return err; | ||
1167 | } | ||
1168 | |||
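Taken together, process_cipher() above and chcr_handle_cipher_resp() earlier implement chunked processing: each work request carries only as many source/destination bytes as chcr_sg_ent_in_wr() says will fit (adjusted to a 16-byte multiple and, for CTR, clamped before the 32-bit counter word can wrap), the completion handler recomputes the IV for the bytes already processed and resubmits the remainder, and if not even one block fits the leftover is handed wholesale to the software skcipher fallback allocated at init time. This is a summary of the control flow visible in the diff.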
1169 | static int chcr_aes_encrypt(struct ablkcipher_request *req) | ||
1170 | { | ||
1171 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
1172 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | ||
1173 | struct sk_buff *skb = NULL; | ||
1174 | int err; | ||
1175 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | ||
722 | 1176 | ||
723 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | 1177 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
724 | ctx->tx_qidx))) { | 1178 | ctx->tx_qidx))) { |
@@ -726,12 +1180,10 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req) | |||
726 | return -EBUSY; | 1180 | return -EBUSY; |
727 | } | 1181 | } |
728 | 1182 | ||
729 | skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], | 1183 | err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb, |
730 | CHCR_ENCRYPT_OP); | 1184 | CHCR_ENCRYPT_OP); |
731 | if (IS_ERR(skb)) { | 1185 | if (err || !skb) |
732 | pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); | 1186 | return err; |
733 | return PTR_ERR(skb); | ||
734 | } | ||
735 | skb->dev = u_ctx->lldi.ports[0]; | 1187 | skb->dev = u_ctx->lldi.ports[0]; |
736 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); | 1188 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); |
737 | chcr_send_wr(skb); | 1189 | chcr_send_wr(skb); |
@@ -743,7 +1195,8 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req) | |||
743 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 1195 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
744 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | 1196 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); |
745 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | 1197 | struct uld_ctx *u_ctx = ULD_CTX(ctx); |
746 | struct sk_buff *skb; | 1198 | struct sk_buff *skb = NULL; |
1199 | int err; | ||
747 | 1200 | ||
748 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | 1201 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
749 | ctx->tx_qidx))) { | 1202 | ctx->tx_qidx))) { |
@@ -751,12 +1204,10 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req) | |||
751 | return -EBUSY; | 1204 | return -EBUSY; |
752 | } | 1205 | } |
753 | 1206 | ||
754 | skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], | 1207 | err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb, |
755 | CHCR_DECRYPT_OP); | 1208 | CHCR_DECRYPT_OP); |
756 | if (IS_ERR(skb)) { | 1209 | if (err || !skb) |
757 | pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); | 1210 | return err; |
758 | return PTR_ERR(skb); | ||
759 | } | ||
760 | skb->dev = u_ctx->lldi.ports[0]; | 1211 | skb->dev = u_ctx->lldi.ports[0]; |
761 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); | 1212 | set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); |
762 | chcr_send_wr(skb); | 1213 | chcr_send_wr(skb); |
@@ -765,7 +1216,7 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req) | |||
765 | 1216 | ||
766 | static int chcr_device_init(struct chcr_context *ctx) | 1217 | static int chcr_device_init(struct chcr_context *ctx) |
767 | { | 1218 | { |
768 | struct uld_ctx *u_ctx; | 1219 | struct uld_ctx *u_ctx = NULL; |
769 | struct adapter *adap; | 1220 | struct adapter *adap; |
770 | unsigned int id; | 1221 | unsigned int id; |
771 | int txq_perchan, txq_idx, ntxq; | 1222 | int txq_perchan, txq_idx, ntxq; |
@@ -773,12 +1224,12 @@ static int chcr_device_init(struct chcr_context *ctx) | |||
773 | 1224 | ||
774 | id = smp_processor_id(); | 1225 | id = smp_processor_id(); |
775 | if (!ctx->dev) { | 1226 | if (!ctx->dev) { |
776 | err = assign_chcr_device(&ctx->dev); | 1227 | u_ctx = assign_chcr_device(); |
777 | if (err) { | 1228 | if (!u_ctx) { |
778 | pr_err("chcr device assignment fails\n"); | 1229 | pr_err("chcr device assignment fails\n"); |
779 | goto out; | 1230 | goto out; |
780 | } | 1231 | } |
781 | u_ctx = ULD_CTX(ctx); | 1232 | ctx->dev = u_ctx->dev; |
782 | adap = padap(ctx->dev); | 1233 | adap = padap(ctx->dev); |
783 | ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq, | 1234 | ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq, |
784 | adap->vres.ncrypto_fc); | 1235 | adap->vres.ncrypto_fc); |
@@ -801,10 +1252,48 @@ out: | |||
801 | 1252 | ||
802 | static int chcr_cra_init(struct crypto_tfm *tfm) | 1253 | static int chcr_cra_init(struct crypto_tfm *tfm) |
803 | { | 1254 | { |
1255 | struct crypto_alg *alg = tfm->__crt_alg; | ||
1256 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | ||
1257 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | ||
1258 | |||
1259 | ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0, | ||
1260 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); | ||
1261 | if (IS_ERR(ablkctx->sw_cipher)) { | ||
1262 | pr_err("failed to allocate fallback for %s\n", alg->cra_name); | ||
1263 | return PTR_ERR(ablkctx->sw_cipher); | ||
1264 | } | ||
1265 | tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx); | ||
1266 | return chcr_device_init(crypto_tfm_ctx(tfm)); | ||
1267 | } | ||
1268 | |||
1269 | static int chcr_rfc3686_init(struct crypto_tfm *tfm) | ||
1270 | { | ||
1271 | struct crypto_alg *alg = tfm->__crt_alg; | ||
1272 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | ||
1273 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | ||
1274 | |||
1275 | /*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes)) | ||
1276 | * cannot be used as fallback in chcr_handle_cipher_response | ||
1277 | */ | ||
1278 | ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0, | ||
1279 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); | ||
1280 | if (IS_ERR(ablkctx->sw_cipher)) { | ||
1281 | pr_err("failed to allocate fallback for %s\n", alg->cra_name); | ||
1282 | return PTR_ERR(ablkctx->sw_cipher); | ||
1283 | } | ||
804 | tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx); | 1284 | tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx); |
805 | return chcr_device_init(crypto_tfm_ctx(tfm)); | 1285 | return chcr_device_init(crypto_tfm_ctx(tfm)); |
806 | } | 1286 | } |
807 | 1287 | ||
1288 | |||
1289 | static void chcr_cra_exit(struct crypto_tfm *tfm) | ||
1290 | { | ||
1291 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | ||
1292 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | ||
1293 | |||
1294 | crypto_free_skcipher(ablkctx->sw_cipher); | ||
1295 | } | ||
1296 | |||
808 | static int get_alg_config(struct algo_param *params, | 1297 | static int get_alg_config(struct algo_param *params, |
809 | unsigned int auth_size) | 1298 | unsigned int auth_size) |
810 | { | 1299 | { |
@@ -865,6 +1354,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req, | |||
865 | u8 hash_size_in_response = 0; | 1354 | u8 hash_size_in_response = 0; |
866 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | 1355 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
867 | GFP_ATOMIC; | 1356 | GFP_ATOMIC; |
1357 | struct adapter *adap = padap(ctx->dev); | ||
868 | 1358 | ||
869 | iopad_alignment = KEYCTX_ALIGN_PAD(digestsize); | 1359 | iopad_alignment = KEYCTX_ALIGN_PAD(digestsize); |
870 | kctx_len = param->alg_prm.result_size + iopad_alignment; | 1360 | kctx_len = param->alg_prm.result_size + iopad_alignment; |
@@ -921,9 +1411,9 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req, | |||
921 | param->bfr_len); | 1411 | param->bfr_len); |
922 | if (param->sg_len != 0) | 1412 | if (param->sg_len != 0) |
923 | write_sg_to_skb(skb, &frags, req->src, param->sg_len); | 1413 | write_sg_to_skb(skb, &frags, req->src, param->sg_len); |
924 | 1414 | atomic_inc(&adap->chcr_stats.digest_rqst); | |
925 | create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0, | 1415 | create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, |
926 | DUMMY_BYTES); | 1416 | hash_size_in_response, 0, DUMMY_BYTES, 0); |
927 | req_ctx->skb = skb; | 1417 | req_ctx->skb = skb; |
928 | skb_get(skb); | 1418 | skb_get(skb); |
929 | return skb; | 1419 | return skb; |
@@ -1226,21 +1716,17 @@ out: | |||
1226 | return err; | 1716 | return err; |
1227 | } | 1717 | } |
1228 | 1718 | ||
1229 | static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | 1719 | static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key, |
1230 | unsigned int key_len) | 1720 | unsigned int key_len) |
1231 | { | 1721 | { |
1232 | struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); | 1722 | struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher); |
1233 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | 1723 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); |
1234 | unsigned short context_size = 0; | 1724 | unsigned short context_size = 0; |
1725 | int err; | ||
1235 | 1726 | ||
1236 | if ((key_len != (AES_KEYSIZE_128 << 1)) && | 1727 | err = chcr_cipher_fallback_setkey(cipher, key, key_len); |
1237 | (key_len != (AES_KEYSIZE_256 << 1))) { | 1728 | if (err) |
1238 | crypto_tfm_set_flags((struct crypto_tfm *)tfm, | 1729 | goto badkey_err; |
1239 | CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
1240 | ablkctx->enckey_len = 0; | ||
1241 | return -EINVAL; | ||
1242 | |||
1243 | } | ||
1244 | 1730 | ||
1245 | memcpy(ablkctx->key, key, key_len); | 1731 | memcpy(ablkctx->key, key, key_len); |
1246 | ablkctx->enckey_len = key_len; | 1732 | ablkctx->enckey_len = key_len; |
@@ -1254,6 +1740,11 @@ static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | |||
1254 | 0, context_size); | 1740 | 0, context_size); |
1255 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS; | 1741 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS; |
1256 | return 0; | 1742 | return 0; |
1743 | badkey_err: | ||
1744 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
1745 | ablkctx->enckey_len = 0; | ||
1746 | |||
1747 | return err; | ||
1257 | } | 1748 | } |
1258 | 1749 | ||
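chcr_aes_xts_setkey() no longer open-codes the XTS key-length check; it first keys the software fallback via chcr_cipher_fallback_setkey() and only then copies the key into the context, so an invalid key is rejected by the fallback and reported through the badkey_err path. The helper itself is not shown in this hunk, so the body below is only an assumed shape of such a function, mirroring the analogous AEAD flag handling later in this patch:

/* Assumed shape of a fallback-setkey helper; not the driver's code. */
static int example_fallback_setkey(struct crypto_ablkcipher *cipher,
                                   const u8 *key, unsigned int keylen)
{
        struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

        /* Mirror the caller's request flags onto the fallback, then key it. */
        crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(ablkctx->sw_cipher,
                                  crypto_ablkcipher_get_flags(cipher) &
                                  CRYPTO_TFM_REQ_MASK);
        return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}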
1259 | static int chcr_sha_init(struct ahash_request *areq) | 1750 | static int chcr_sha_init(struct ahash_request *areq) |
@@ -1330,6 +1821,63 @@ static void chcr_hmac_cra_exit(struct crypto_tfm *tfm) | |||
1330 | } | 1821 | } |
1331 | } | 1822 | } |
1332 | 1823 | ||
1824 | static int is_newsg(struct scatterlist *sgl, unsigned int *newents) | ||
1825 | { | ||
1826 | int nents = 0; | ||
1827 | int ret = 0; | ||
1828 | |||
1829 | while (sgl) { | ||
1830 | if (sgl->length > CHCR_SG_SIZE) | ||
1831 | ret = 1; | ||
1832 | nents += DIV_ROUND_UP(sgl->length, CHCR_SG_SIZE); | ||
1833 | sgl = sg_next(sgl); | ||
1834 | } | ||
1835 | *newents = nents; | ||
1836 | return ret; | ||
1837 | } | ||
1838 | |||
1839 | static inline void free_new_sg(struct scatterlist *sgl) | ||
1840 | { | ||
1841 | kfree(sgl); | ||
1842 | } | ||
1843 | |||
1844 | static struct scatterlist *alloc_new_sg(struct scatterlist *sgl, | ||
1845 | unsigned int nents) | ||
1846 | { | ||
1847 | struct scatterlist *newsg, *sg; | ||
1848 | int i, len, processed = 0; | ||
1849 | struct page *spage; | ||
1850 | int offset; | ||
1851 | |||
1852 | newsg = kmalloc_array(nents, sizeof(struct scatterlist), GFP_KERNEL); | ||
1853 | if (!newsg) | ||
1854 | return ERR_PTR(-ENOMEM); | ||
1855 | sg = newsg; | ||
1856 | sg_init_table(sg, nents); | ||
1857 | offset = sgl->offset; | ||
1858 | spage = sg_page(sgl); | ||
1859 | for (i = 0; i < nents; i++) { | ||
1860 | len = min_t(u32, sgl->length - processed, CHCR_SG_SIZE); | ||
1861 | sg_set_page(sg, spage, len, offset); | ||
1862 | processed += len; | ||
1863 | offset += len; | ||
1864 | if (offset >= PAGE_SIZE) { | ||
1865 | offset = offset % PAGE_SIZE; | ||
1866 | spage++; | ||
1867 | } | ||
1868 | if (processed == sgl->length) { | ||
1869 | processed = 0; | ||
1870 | sgl = sg_next(sgl); | ||
1871 | if (!sgl) | ||
1872 | break; | ||
1873 | spage = sg_page(sgl); | ||
1874 | offset = sgl->offset; | ||
1875 | } | ||
1876 | sg = sg_next(sg); | ||
1877 | } | ||
1878 | return newsg; | ||
1879 | } | ||
1880 | |||
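is_newsg() reports whether any entry in a scatterlist exceeds CHCR_SG_SIZE (2048 bytes, per the chcr_crypto.h hunk below) and how many entries a finer-grained copy would need; alloc_new_sg() then builds that list and free_new_sg() releases it. The AEAD paths further down use them roughly as in this condensed, illustrative sketch (example_maybe_split is a hypothetical wrapper, not driver code):

/* Illustrative only: decide whether the destination list must be rebuilt
 * with no entry larger than CHCR_SG_SIZE. The caller owns the rebuilt
 * list and must free_new_sg() it on every exit path. */
static struct scatterlist *example_maybe_split(struct scatterlist *dst)
{
        unsigned int nents;

        if (!is_newsg(dst, &nents))
                return NULL;                    /* original list is fine */
        return alloc_new_sg(dst, nents);        /* check with IS_ERR() */
}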
1333 | static int chcr_copy_assoc(struct aead_request *req, | 1881 | static int chcr_copy_assoc(struct aead_request *req, |
1334 | struct chcr_aead_ctx *ctx) | 1882 | struct chcr_aead_ctx *ctx) |
1335 | { | 1883 | { |
@@ -1392,16 +1940,20 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, | |||
1392 | struct scatterlist *src; | 1940 | struct scatterlist *src; |
1393 | unsigned int frags = 0, transhdr_len; | 1941 | unsigned int frags = 0, transhdr_len; |
1394 | unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0; | 1942 | unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0; |
1395 | unsigned int kctx_len = 0; | 1943 | unsigned int kctx_len = 0, nents; |
1396 | unsigned short stop_offset = 0; | 1944 | unsigned short stop_offset = 0; |
1397 | unsigned int assoclen = req->assoclen; | 1945 | unsigned int assoclen = req->assoclen; |
1398 | unsigned int authsize = crypto_aead_authsize(tfm); | 1946 | unsigned int authsize = crypto_aead_authsize(tfm); |
1399 | int err = -EINVAL, src_nent; | 1947 | int error = -EINVAL, src_nent; |
1400 | int null = 0; | 1948 | int null = 0; |
1401 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | 1949 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
1402 | GFP_ATOMIC; | 1950 | GFP_ATOMIC; |
1951 | struct adapter *adap = padap(ctx->dev); | ||
1403 | 1952 | ||
1404 | if (aeadctx->enckey_len == 0 || (req->cryptlen == 0)) | 1953 | reqctx->newdstsg = NULL; |
1954 | dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize : | ||
1955 | authsize); | ||
1956 | if (aeadctx->enckey_len == 0 || (req->cryptlen <= 0)) | ||
1405 | goto err; | 1957 | goto err; |
1406 | 1958 | ||
1407 | if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) | 1959 | if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) |
@@ -1410,14 +1962,24 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, | |||
1410 | if (src_nent < 0) | 1962 | if (src_nent < 0) |
1411 | goto err; | 1963 | goto err; |
1412 | src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); | 1964 | src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); |
1413 | reqctx->dst = src; | ||
1414 | 1965 | ||
1415 | if (req->src != req->dst) { | 1966 | if (req->src != req->dst) { |
1416 | err = chcr_copy_assoc(req, aeadctx); | 1967 | error = chcr_copy_assoc(req, aeadctx); |
1417 | if (err) | 1968 | if (error) |
1418 | return ERR_PTR(err); | 1969 | return ERR_PTR(error); |
1419 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst, | 1970 | } |
1420 | req->assoclen); | 1971 | if (dst_size && is_newsg(req->dst, &nents)) { |
1972 | reqctx->newdstsg = alloc_new_sg(req->dst, nents); | ||
1973 | if (IS_ERR(reqctx->newdstsg)) | ||
1974 | return ERR_CAST(reqctx->newdstsg); | ||
1975 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, | ||
1976 | reqctx->newdstsg, req->assoclen); | ||
1977 | } else { | ||
1978 | if (req->src == req->dst) | ||
1979 | reqctx->dst = src; | ||
1980 | else | ||
1981 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, | ||
1982 | req->dst, req->assoclen); | ||
1421 | } | 1983 | } |
1422 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) { | 1984 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) { |
1423 | null = 1; | 1985 | null = 1; |
@@ -1427,6 +1989,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, | |||
1427 | (op_type ? -authsize : authsize)); | 1989 | (op_type ? -authsize : authsize)); |
1428 | if (reqctx->dst_nents < 0) { | 1990 | if (reqctx->dst_nents < 0) { |
1429 | pr_err("AUTHENC:Invalid Destination sg entries\n"); | 1991 | pr_err("AUTHENC:Invalid Destination sg entries\n"); |
1992 | error = -EINVAL; | ||
1430 | goto err; | 1993 | goto err; |
1431 | } | 1994 | } |
1432 | dst_size = get_space_for_phys_dsgl(reqctx->dst_nents); | 1995 | dst_size = get_space_for_phys_dsgl(reqctx->dst_nents); |
@@ -1437,11 +2000,16 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, | |||
1437 | T6_MAX_AAD_SIZE, | 2000 | T6_MAX_AAD_SIZE, |
1438 | transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8), | 2001 | transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8), |
1439 | op_type)) { | 2002 | op_type)) { |
2003 | atomic_inc(&adap->chcr_stats.fallback); | ||
2004 | free_new_sg(reqctx->newdstsg); | ||
2005 | reqctx->newdstsg = NULL; | ||
1440 | return ERR_PTR(chcr_aead_fallback(req, op_type)); | 2006 | return ERR_PTR(chcr_aead_fallback(req, op_type)); |
1441 | } | 2007 | } |
1442 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); | 2008 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); |
1443 | if (!skb) | 2009 | if (!skb) { |
2010 | error = -ENOMEM; | ||
1444 | goto err; | 2011 | goto err; |
2012 | } | ||
1445 | 2013 | ||
1446 | /* LLD is going to write the sge hdr. */ | 2014 | /* LLD is going to write the sge hdr. */ |
1447 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); | 2015 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); |
@@ -1493,9 +2061,9 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, | |||
1493 | sg_param.nents = reqctx->dst_nents; | 2061 | sg_param.nents = reqctx->dst_nents; |
1494 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); | 2062 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); |
1495 | sg_param.qid = qid; | 2063 | sg_param.qid = qid; |
1496 | sg_param.align = 0; | 2064 | error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, |
1497 | if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst, | 2065 | reqctx->dst, &sg_param); |
1498 | &sg_param)) | 2066 | if (error) |
1499 | goto dstmap_fail; | 2067 | goto dstmap_fail; |
1500 | 2068 | ||
1501 | skb_set_transport_header(skb, transhdr_len); | 2069 | skb_set_transport_header(skb, transhdr_len); |
@@ -1507,8 +2075,9 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, | |||
1507 | } | 2075 | } |
1508 | write_buffer_to_skb(skb, &frags, req->iv, ivsize); | 2076 | write_buffer_to_skb(skb, &frags, req->iv, ivsize); |
1509 | write_sg_to_skb(skb, &frags, src, req->cryptlen); | 2077 | write_sg_to_skb(skb, &frags, src, req->cryptlen); |
1510 | create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1, | 2078 | atomic_inc(&adap->chcr_stats.cipher_rqst); |
1511 | sizeof(struct cpl_rx_phys_dsgl) + dst_size); | 2079 | create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1, |
2080 | sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0); | ||
1512 | reqctx->skb = skb; | 2081 | reqctx->skb = skb; |
1513 | skb_get(skb); | 2082 | skb_get(skb); |
1514 | 2083 | ||
@@ -1517,7 +2086,9 @@ dstmap_fail: | |||
1517 | /* ivmap_fail: */ | 2086 | /* ivmap_fail: */ |
1518 | kfree_skb(skb); | 2087 | kfree_skb(skb); |
1519 | err: | 2088 | err: |
1520 | return ERR_PTR(-EINVAL); | 2089 | free_new_sg(reqctx->newdstsg); |
2090 | reqctx->newdstsg = NULL; | ||
2091 | return ERR_PTR(error); | ||
1521 | } | 2092 | } |
1522 | 2093 | ||
1523 | static int set_msg_len(u8 *block, unsigned int msglen, int csize) | 2094 | static int set_msg_len(u8 *block, unsigned int msglen, int csize) |
@@ -1724,14 +2295,17 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, | |||
1724 | struct phys_sge_parm sg_param; | 2295 | struct phys_sge_parm sg_param; |
1725 | struct scatterlist *src; | 2296 | struct scatterlist *src; |
1726 | unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE; | 2297 | unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE; |
1727 | unsigned int dst_size = 0, kctx_len; | 2298 | unsigned int dst_size = 0, kctx_len, nents; |
1728 | unsigned int sub_type; | 2299 | unsigned int sub_type; |
1729 | unsigned int authsize = crypto_aead_authsize(tfm); | 2300 | unsigned int authsize = crypto_aead_authsize(tfm); |
1730 | int err = -EINVAL, src_nent; | 2301 | int error = -EINVAL, src_nent; |
1731 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | 2302 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
1732 | GFP_ATOMIC; | 2303 | GFP_ATOMIC; |
2304 | struct adapter *adap = padap(ctx->dev); | ||
1733 | 2305 | ||
1734 | 2306 | dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize : | |
2307 | authsize); | ||
2308 | reqctx->newdstsg = NULL; | ||
1735 | if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) | 2309 | if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) |
1736 | goto err; | 2310 | goto err; |
1737 | src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen); | 2311 | src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen); |
@@ -1740,26 +2314,35 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, | |||
1740 | 2314 | ||
1741 | sub_type = get_aead_subtype(tfm); | 2315 | sub_type = get_aead_subtype(tfm); |
1742 | src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); | 2316 | src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); |
1743 | reqctx->dst = src; | ||
1744 | |||
1745 | if (req->src != req->dst) { | 2317 | if (req->src != req->dst) { |
1746 | err = chcr_copy_assoc(req, aeadctx); | 2318 | error = chcr_copy_assoc(req, aeadctx); |
1747 | if (err) { | 2319 | if (error) { |
1748 | pr_err("AAD copy to destination buffer fails\n"); | 2320 | pr_err("AAD copy to destination buffer fails\n"); |
1749 | return ERR_PTR(err); | 2321 | return ERR_PTR(error); |
1750 | } | 2322 | } |
1751 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst, | 2323 | } |
1752 | req->assoclen); | 2324 | if (dst_size && is_newsg(req->dst, &nents)) { |
2325 | reqctx->newdstsg = alloc_new_sg(req->dst, nents); | ||
2326 | if (IS_ERR(reqctx->newdstsg)) | ||
2327 | return ERR_CAST(reqctx->newdstsg); | ||
2328 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, | ||
2329 | reqctx->newdstsg, req->assoclen); | ||
2330 | } else { | ||
2331 | if (req->src == req->dst) | ||
2332 | reqctx->dst = src; | ||
2333 | else | ||
2334 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, | ||
2335 | req->dst, req->assoclen); | ||
1753 | } | 2336 | } |
1754 | reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + | 2337 | reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + |
1755 | (op_type ? -authsize : authsize)); | 2338 | (op_type ? -authsize : authsize)); |
1756 | if (reqctx->dst_nents < 0) { | 2339 | if (reqctx->dst_nents < 0) { |
1757 | pr_err("CCM:Invalid Destination sg entries\n"); | 2340 | pr_err("CCM:Invalid Destination sg entries\n"); |
2341 | error = -EINVAL; | ||
1758 | goto err; | 2342 | goto err; |
1759 | } | 2343 | } |
1760 | 2344 | error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type); | |
1761 | 2345 | if (error) | |
1762 | if (aead_ccm_validate_input(op_type, req, aeadctx, sub_type)) | ||
1763 | goto err; | 2346 | goto err; |
1764 | 2347 | ||
1765 | dst_size = get_space_for_phys_dsgl(reqctx->dst_nents); | 2348 | dst_size = get_space_for_phys_dsgl(reqctx->dst_nents); |
@@ -1769,13 +2352,18 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, | |||
1769 | T6_MAX_AAD_SIZE - 18, | 2352 | T6_MAX_AAD_SIZE - 18, |
1770 | transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8), | 2353 | transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8), |
1771 | op_type)) { | 2354 | op_type)) { |
2355 | atomic_inc(&adap->chcr_stats.fallback); | ||
2356 | free_new_sg(reqctx->newdstsg); | ||
2357 | reqctx->newdstsg = NULL; | ||
1772 | return ERR_PTR(chcr_aead_fallback(req, op_type)); | 2358 | return ERR_PTR(chcr_aead_fallback(req, op_type)); |
1773 | } | 2359 | } |
1774 | 2360 | ||
1775 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); | 2361 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); |
1776 | 2362 | ||
1777 | if (!skb) | 2363 | if (!skb) { |
2364 | error = -ENOMEM; | ||
1778 | goto err; | 2365 | goto err; |
2366 | } | ||
1779 | 2367 | ||
1780 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); | 2368 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); |
1781 | 2369 | ||
@@ -1790,29 +2378,32 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, | |||
1790 | 16), aeadctx->key, aeadctx->enckey_len); | 2378 | 16), aeadctx->key, aeadctx->enckey_len); |
1791 | 2379 | ||
1792 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); | 2380 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
1793 | if (ccm_format_packet(req, aeadctx, sub_type, op_type)) | 2381 | error = ccm_format_packet(req, aeadctx, sub_type, op_type); |
2382 | if (error) | ||
1794 | goto dstmap_fail; | 2383 | goto dstmap_fail; |
1795 | 2384 | ||
1796 | sg_param.nents = reqctx->dst_nents; | 2385 | sg_param.nents = reqctx->dst_nents; |
1797 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); | 2386 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); |
1798 | sg_param.qid = qid; | 2387 | sg_param.qid = qid; |
1799 | sg_param.align = 0; | 2388 | error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, |
1800 | if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst, | 2389 | reqctx->dst, &sg_param); |
1801 | &sg_param)) | 2390 | if (error) |
1802 | goto dstmap_fail; | 2391 | goto dstmap_fail; |
1803 | 2392 | ||
1804 | skb_set_transport_header(skb, transhdr_len); | 2393 | skb_set_transport_header(skb, transhdr_len); |
1805 | frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx); | 2394 | frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx); |
1806 | create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1, | 2395 | atomic_inc(&adap->chcr_stats.aead_rqst); |
1807 | sizeof(struct cpl_rx_phys_dsgl) + dst_size); | 2396 | create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, 0, 1, |
2397 | sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0); | ||
1808 | reqctx->skb = skb; | 2398 | reqctx->skb = skb; |
1809 | skb_get(skb); | 2399 | skb_get(skb); |
1810 | return skb; | 2400 | return skb; |
1811 | dstmap_fail: | 2401 | dstmap_fail: |
1812 | kfree_skb(skb); | 2402 | kfree_skb(skb); |
1813 | skb = NULL; | ||
1814 | err: | 2403 | err: |
1815 | return ERR_PTR(-EINVAL); | 2404 | free_new_sg(reqctx->newdstsg); |
2405 | reqctx->newdstsg = NULL; | ||
2406 | return ERR_PTR(error); | ||
1816 | } | 2407 | } |
1817 | 2408 | ||
1818 | static struct sk_buff *create_gcm_wr(struct aead_request *req, | 2409 | static struct sk_buff *create_gcm_wr(struct aead_request *req, |
@@ -1832,45 +2423,53 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, | |||
1832 | struct scatterlist *src; | 2423 | struct scatterlist *src; |
1833 | unsigned int frags = 0, transhdr_len; | 2424 | unsigned int frags = 0, transhdr_len; |
1834 | unsigned int ivsize = AES_BLOCK_SIZE; | 2425 | unsigned int ivsize = AES_BLOCK_SIZE; |
1835 | unsigned int dst_size = 0, kctx_len; | 2426 | unsigned int dst_size = 0, kctx_len, nents, assoclen = req->assoclen; |
1836 | unsigned char tag_offset = 0; | 2427 | unsigned char tag_offset = 0; |
1837 | unsigned int crypt_len = 0; | ||
1838 | unsigned int authsize = crypto_aead_authsize(tfm); | 2428 | unsigned int authsize = crypto_aead_authsize(tfm); |
1839 | int err = -EINVAL, src_nent; | 2429 | int error = -EINVAL, src_nent; |
1840 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | 2430 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
1841 | GFP_ATOMIC; | 2431 | GFP_ATOMIC; |
2432 | struct adapter *adap = padap(ctx->dev); | ||
1842 | 2433 | ||
2434 | reqctx->newdstsg = NULL; | ||
2435 | dst_size = assoclen + req->cryptlen + (op_type ? -authsize : | ||
2436 | authsize); | ||
1843 | /* validate key size */ | 2437 | /* validate key size */ |
1844 | if (aeadctx->enckey_len == 0) | 2438 | if (aeadctx->enckey_len == 0) |
1845 | goto err; | 2439 | goto err; |
1846 | 2440 | ||
1847 | if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) | 2441 | if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) |
1848 | goto err; | 2442 | goto err; |
1849 | src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen); | 2443 | src_nent = sg_nents_for_len(req->src, assoclen + req->cryptlen); |
1850 | if (src_nent < 0) | 2444 | if (src_nent < 0) |
1851 | goto err; | 2445 | goto err; |
1852 | 2446 | ||
1853 | src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); | 2447 | src = scatterwalk_ffwd(reqctx->srcffwd, req->src, assoclen); |
1854 | reqctx->dst = src; | ||
1855 | if (req->src != req->dst) { | 2448 | if (req->src != req->dst) { |
1856 | err = chcr_copy_assoc(req, aeadctx); | 2449 | error = chcr_copy_assoc(req, aeadctx); |
1857 | if (err) | 2450 | if (error) |
1858 | return ERR_PTR(err); | 2451 | return ERR_PTR(error); |
1859 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst, | 2452 | } |
1860 | req->assoclen); | 2453 | |
2454 | if (dst_size && is_newsg(req->dst, &nents)) { | ||
2455 | reqctx->newdstsg = alloc_new_sg(req->dst, nents); | ||
2456 | if (IS_ERR(reqctx->newdstsg)) | ||
2457 | return ERR_CAST(reqctx->newdstsg); | ||
2458 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, | ||
2459 | reqctx->newdstsg, assoclen); | ||
2460 | } else { | ||
2461 | if (req->src == req->dst) | ||
2462 | reqctx->dst = src; | ||
2463 | else | ||
2464 | reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, | ||
2465 | req->dst, assoclen); | ||
1861 | } | 2466 | } |
1862 | 2467 | ||
1863 | if (!req->cryptlen) | ||
1864 | /* null-payload is not supported in the hardware. | ||
1865 | * software is sending block size | ||
1866 | */ | ||
1867 | crypt_len = AES_BLOCK_SIZE; | ||
1868 | else | ||
1869 | crypt_len = req->cryptlen; | ||
1870 | reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + | 2468 | reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + |
1871 | (op_type ? -authsize : authsize)); | 2469 | (op_type ? -authsize : authsize)); |
1872 | if (reqctx->dst_nents < 0) { | 2470 | if (reqctx->dst_nents < 0) { |
1873 | pr_err("GCM:Invalid Destination sg entries\n"); | 2471 | pr_err("GCM:Invalid Destination sg entries\n"); |
2472 | error = -EINVAL; | ||
1874 | goto err; | 2473 | goto err; |
1875 | } | 2474 | } |
1876 | 2475 | ||
@@ -1883,11 +2482,16 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, | |||
1883 | T6_MAX_AAD_SIZE, | 2482 | T6_MAX_AAD_SIZE, |
1884 | transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8), | 2483 | transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8), |
1885 | op_type)) { | 2484 | op_type)) { |
2485 | atomic_inc(&adap->chcr_stats.fallback); | ||
2486 | free_new_sg(reqctx->newdstsg); | ||
2487 | reqctx->newdstsg = NULL; | ||
1886 | return ERR_PTR(chcr_aead_fallback(req, op_type)); | 2488 | return ERR_PTR(chcr_aead_fallback(req, op_type)); |
1887 | } | 2489 | } |
1888 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); | 2490 | skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); |
1889 | if (!skb) | 2491 | if (!skb) { |
2492 | error = -ENOMEM; | ||
1890 | goto err; | 2493 | goto err; |
2494 | } | ||
1891 | 2495 | ||
1892 | /* NIC driver is going to write the sge hdr. */ | 2496 | /* NIC driver is going to write the sge hdr. */ |
1893 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); | 2497 | skb_reserve(skb, sizeof(struct sge_opaque_hdr)); |
@@ -1896,19 +2500,19 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, | |||
1896 | memset(chcr_req, 0, transhdr_len); | 2500 | memset(chcr_req, 0, transhdr_len); |
1897 | 2501 | ||
1898 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) | 2502 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) |
1899 | req->assoclen -= 8; | 2503 | assoclen = req->assoclen - 8; |
1900 | 2504 | ||
1901 | tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; | 2505 | tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; |
1902 | chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR( | 2506 | chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR( |
1903 | ctx->dev->rx_channel_id, 2, (ivsize ? | 2507 | ctx->dev->rx_channel_id, 2, (ivsize ? |
1904 | (req->assoclen + 1) : 0)); | 2508 | (assoclen + 1) : 0)); |
1905 | chcr_req->sec_cpl.pldlen = | 2509 | chcr_req->sec_cpl.pldlen = |
1906 | htonl(req->assoclen + ivsize + req->cryptlen); | 2510 | htonl(assoclen + ivsize + req->cryptlen); |
1907 | chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( | 2511 | chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( |
1908 | req->assoclen ? 1 : 0, req->assoclen, | 2512 | assoclen ? 1 : 0, assoclen, |
1909 | req->assoclen + ivsize + 1, 0); | 2513 | assoclen + ivsize + 1, 0); |
1910 | chcr_req->sec_cpl.cipherstop_lo_authinsert = | 2514 | chcr_req->sec_cpl.cipherstop_lo_authinsert = |
1911 | FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1, | 2515 | FILL_SEC_CPL_AUTHINSERT(0, assoclen + ivsize + 1, |
1912 | tag_offset, tag_offset); | 2516 | tag_offset, tag_offset); |
1913 | chcr_req->sec_cpl.seqno_numivs = | 2517 | chcr_req->sec_cpl.seqno_numivs = |
1914 | FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type == | 2518 | FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type == |
@@ -1938,19 +2542,19 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, | |||
1938 | sg_param.nents = reqctx->dst_nents; | 2542 | sg_param.nents = reqctx->dst_nents; |
1939 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); | 2543 | sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); |
1940 | sg_param.qid = qid; | 2544 | sg_param.qid = qid; |
1941 | sg_param.align = 0; | 2545 | error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, |
1942 | if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst, | 2546 | reqctx->dst, &sg_param); |
1943 | &sg_param)) | 2547 | if (error) |
1944 | goto dstmap_fail; | 2548 | goto dstmap_fail; |
1945 | 2549 | ||
1946 | skb_set_transport_header(skb, transhdr_len); | 2550 | skb_set_transport_header(skb, transhdr_len); |
1947 | 2551 | write_sg_to_skb(skb, &frags, req->src, assoclen); | |
1948 | write_sg_to_skb(skb, &frags, req->src, req->assoclen); | ||
1949 | |||
1950 | write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize); | 2552 | write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize); |
1951 | write_sg_to_skb(skb, &frags, src, req->cryptlen); | 2553 | write_sg_to_skb(skb, &frags, src, req->cryptlen); |
1952 | create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1, | 2554 | atomic_inc(&adap->chcr_stats.aead_rqst); |
1953 | sizeof(struct cpl_rx_phys_dsgl) + dst_size); | 2555 | create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1, |
2556 | sizeof(struct cpl_rx_phys_dsgl) + dst_size, | ||
2557 | reqctx->verify); | ||
1954 | reqctx->skb = skb; | 2558 | reqctx->skb = skb; |
1955 | skb_get(skb); | 2559 | skb_get(skb); |
1956 | return skb; | 2560 | return skb; |
@@ -1958,9 +2562,10 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, | |||
1958 | dstmap_fail: | 2562 | dstmap_fail: |
1959 | /* ivmap_fail: */ | 2563 | /* ivmap_fail: */ |
1960 | kfree_skb(skb); | 2564 | kfree_skb(skb); |
1961 | skb = NULL; | ||
1962 | err: | 2565 | err: |
1963 | return skb; | 2566 | free_new_sg(reqctx->newdstsg); |
2567 | reqctx->newdstsg = NULL; | ||
2568 | return ERR_PTR(error); | ||
1964 | } | 2569 | } |
1965 | 2570 | ||
1966 | 2571 | ||
@@ -1972,7 +2577,8 @@ static int chcr_aead_cra_init(struct crypto_aead *tfm) | |||
1972 | struct aead_alg *alg = crypto_aead_alg(tfm); | 2577 | struct aead_alg *alg = crypto_aead_alg(tfm); |
1973 | 2578 | ||
1974 | aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0, | 2579 | aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0, |
1975 | CRYPTO_ALG_NEED_FALLBACK); | 2580 | CRYPTO_ALG_NEED_FALLBACK | |
2581 | CRYPTO_ALG_ASYNC); | ||
1976 | if (IS_ERR(aeadctx->sw_cipher)) | 2582 | if (IS_ERR(aeadctx->sw_cipher)) |
1977 | return PTR_ERR(aeadctx->sw_cipher); | 2583 | return PTR_ERR(aeadctx->sw_cipher); |
1978 | crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx), | 2584 | crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx), |
@@ -2206,7 +2812,8 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, | |||
2206 | unsigned int keylen) | 2812 | unsigned int keylen) |
2207 | { | 2813 | { |
2208 | struct chcr_context *ctx = crypto_aead_ctx(aead); | 2814 | struct chcr_context *ctx = crypto_aead_ctx(aead); |
2209 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); | 2815 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); |
2816 | int error; | ||
2210 | 2817 | ||
2211 | if (keylen < 3) { | 2818 | if (keylen < 3) { |
2212 | crypto_tfm_set_flags((struct crypto_tfm *)aead, | 2819 | crypto_tfm_set_flags((struct crypto_tfm *)aead, |
@@ -2214,6 +2821,15 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, | |||
2214 | aeadctx->enckey_len = 0; | 2821 | aeadctx->enckey_len = 0; |
2215 | return -EINVAL; | 2822 | return -EINVAL; |
2216 | } | 2823 | } |
2824 | crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); | ||
2825 | crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) & | ||
2826 | CRYPTO_TFM_REQ_MASK); | ||
2827 | error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); | ||
2828 | crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK); | ||
2829 | crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) & | ||
2830 | CRYPTO_TFM_RES_MASK); | ||
2831 | if (error) | ||
2832 | return error; | ||
2217 | keylen -= 3; | 2833 | keylen -= 3; |
2218 | memcpy(aeadctx->salt, key + keylen, 3); | 2834 | memcpy(aeadctx->salt, key + keylen, 3); |
2219 | return chcr_ccm_common_setkey(aead, key, keylen); | 2835 | return chcr_ccm_common_setkey(aead, key, keylen); |
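The rfc4309 setkey now keys the software fallback first, mirroring the caller's CRYPTO_TFM_REQ_* flags onto it and copying any CRYPTO_TFM_RES_* result flags back, so the caller sees the same failure the fallback reports. The same round-trip, written as a standalone helper purely for illustration (the driver does this inline):

/* Illustrative helper: propagate request flags to a fallback AEAD, key it,
 * and reflect its result flags on the hardware tfm. */
static int example_aead_fallback_setkey(struct crypto_aead *aead,
                                        struct crypto_aead *sw_cipher,
                                        const u8 *key, unsigned int keylen)
{
        int err;

        crypto_aead_clear_flags(sw_cipher, CRYPTO_TFM_REQ_MASK);
        crypto_aead_set_flags(sw_cipher, crypto_aead_get_flags(aead) &
                              CRYPTO_TFM_REQ_MASK);
        err = crypto_aead_setkey(sw_cipher, key, keylen);
        crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
        crypto_aead_set_flags(aead, crypto_aead_get_flags(sw_cipher) &
                              CRYPTO_TFM_RES_MASK);
        return err;
}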
@@ -2552,22 +3168,14 @@ static int chcr_aead_op(struct aead_request *req, | |||
2552 | static struct chcr_alg_template driver_algs[] = { | 3168 | static struct chcr_alg_template driver_algs[] = { |
2553 | /* AES-CBC */ | 3169 | /* AES-CBC */ |
2554 | { | 3170 | { |
2555 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | 3171 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC, |
2556 | .is_registered = 0, | 3172 | .is_registered = 0, |
2557 | .alg.crypto = { | 3173 | .alg.crypto = { |
2558 | .cra_name = "cbc(aes)", | 3174 | .cra_name = "cbc(aes)", |
2559 | .cra_driver_name = "cbc-aes-chcr", | 3175 | .cra_driver_name = "cbc-aes-chcr", |
2560 | .cra_priority = CHCR_CRA_PRIORITY, | ||
2561 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
2562 | CRYPTO_ALG_ASYNC, | ||
2563 | .cra_blocksize = AES_BLOCK_SIZE, | 3176 | .cra_blocksize = AES_BLOCK_SIZE, |
2564 | .cra_ctxsize = sizeof(struct chcr_context) | ||
2565 | + sizeof(struct ablk_ctx), | ||
2566 | .cra_alignmask = 0, | ||
2567 | .cra_type = &crypto_ablkcipher_type, | ||
2568 | .cra_module = THIS_MODULE, | ||
2569 | .cra_init = chcr_cra_init, | 3177 | .cra_init = chcr_cra_init, |
2570 | .cra_exit = NULL, | 3178 | .cra_exit = chcr_cra_exit, |
2571 | .cra_u.ablkcipher = { | 3179 | .cra_u.ablkcipher = { |
2572 | .min_keysize = AES_MIN_KEY_SIZE, | 3180 | .min_keysize = AES_MIN_KEY_SIZE, |
2573 | .max_keysize = AES_MAX_KEY_SIZE, | 3181 | .max_keysize = AES_MAX_KEY_SIZE, |
@@ -2579,24 +3187,15 @@ static struct chcr_alg_template driver_algs[] = { | |||
2579 | } | 3187 | } |
2580 | }, | 3188 | }, |
2581 | { | 3189 | { |
2582 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | 3190 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS, |
2583 | .is_registered = 0, | 3191 | .is_registered = 0, |
2584 | .alg.crypto = { | 3192 | .alg.crypto = { |
2585 | .cra_name = "xts(aes)", | 3193 | .cra_name = "xts(aes)", |
2586 | .cra_driver_name = "xts-aes-chcr", | 3194 | .cra_driver_name = "xts-aes-chcr", |
2587 | .cra_priority = CHCR_CRA_PRIORITY, | ||
2588 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
2589 | CRYPTO_ALG_ASYNC, | ||
2590 | .cra_blocksize = AES_BLOCK_SIZE, | 3195 | .cra_blocksize = AES_BLOCK_SIZE, |
2591 | .cra_ctxsize = sizeof(struct chcr_context) + | ||
2592 | sizeof(struct ablk_ctx), | ||
2593 | .cra_alignmask = 0, | ||
2594 | .cra_type = &crypto_ablkcipher_type, | ||
2595 | .cra_module = THIS_MODULE, | ||
2596 | .cra_init = chcr_cra_init, | 3196 | .cra_init = chcr_cra_init, |
2597 | .cra_exit = NULL, | 3197 | .cra_exit = NULL, |
2598 | .cra_u = { | 3198 | .cra_u .ablkcipher = { |
2599 | .ablkcipher = { | ||
2600 | .min_keysize = 2 * AES_MIN_KEY_SIZE, | 3199 | .min_keysize = 2 * AES_MIN_KEY_SIZE, |
2601 | .max_keysize = 2 * AES_MAX_KEY_SIZE, | 3200 | .max_keysize = 2 * AES_MAX_KEY_SIZE, |
2602 | .ivsize = AES_BLOCK_SIZE, | 3201 | .ivsize = AES_BLOCK_SIZE, |
@@ -2605,6 +3204,47 @@ static struct chcr_alg_template driver_algs[] = { | |||
2605 | .decrypt = chcr_aes_decrypt, | 3204 | .decrypt = chcr_aes_decrypt, |
2606 | } | 3205 | } |
2607 | } | 3206 | } |
3207 | }, | ||
3208 | { | ||
3209 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR, | ||
3210 | .is_registered = 0, | ||
3211 | .alg.crypto = { | ||
3212 | .cra_name = "ctr(aes)", | ||
3213 | .cra_driver_name = "ctr-aes-chcr", | ||
3214 | .cra_blocksize = 1, | ||
3215 | .cra_init = chcr_cra_init, | ||
3216 | .cra_exit = chcr_cra_exit, | ||
3217 | .cra_u.ablkcipher = { | ||
3218 | .min_keysize = AES_MIN_KEY_SIZE, | ||
3219 | .max_keysize = AES_MAX_KEY_SIZE, | ||
3220 | .ivsize = AES_BLOCK_SIZE, | ||
3221 | .setkey = chcr_aes_ctr_setkey, | ||
3222 | .encrypt = chcr_aes_encrypt, | ||
3223 | .decrypt = chcr_aes_decrypt, | ||
3224 | } | ||
3225 | } | ||
3226 | }, | ||
3227 | { | ||
3228 | .type = CRYPTO_ALG_TYPE_ABLKCIPHER | | ||
3229 | CRYPTO_ALG_SUB_TYPE_CTR_RFC3686, | ||
3230 | .is_registered = 0, | ||
3231 | .alg.crypto = { | ||
3232 | .cra_name = "rfc3686(ctr(aes))", | ||
3233 | .cra_driver_name = "rfc3686-ctr-aes-chcr", | ||
3234 | .cra_blocksize = 1, | ||
3235 | .cra_init = chcr_rfc3686_init, | ||
3236 | .cra_exit = chcr_cra_exit, | ||
3237 | .cra_u.ablkcipher = { | ||
3238 | .min_keysize = AES_MIN_KEY_SIZE + | ||
3239 | CTR_RFC3686_NONCE_SIZE, | ||
3240 | .max_keysize = AES_MAX_KEY_SIZE + | ||
3241 | CTR_RFC3686_NONCE_SIZE, | ||
3242 | .ivsize = CTR_RFC3686_IV_SIZE, | ||
3243 | .setkey = chcr_aes_rfc3686_setkey, | ||
3244 | .encrypt = chcr_aes_encrypt, | ||
3245 | .decrypt = chcr_aes_decrypt, | ||
3246 | .geniv = "seqiv", | ||
3247 | } | ||
2608 | } | 3248 | } |
2609 | }, | 3249 | }, |
2610 | /* SHA */ | 3250 | /* SHA */ |
@@ -2986,6 +3626,18 @@ static int chcr_register_alg(void) | |||
2986 | continue; | 3626 | continue; |
2987 | switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { | 3627 | switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { |
2988 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | 3628 | case CRYPTO_ALG_TYPE_ABLKCIPHER: |
3629 | driver_algs[i].alg.crypto.cra_priority = | ||
3630 | CHCR_CRA_PRIORITY; | ||
3631 | driver_algs[i].alg.crypto.cra_module = THIS_MODULE; | ||
3632 | driver_algs[i].alg.crypto.cra_flags = | ||
3633 | CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC | | ||
3634 | CRYPTO_ALG_NEED_FALLBACK; | ||
3635 | driver_algs[i].alg.crypto.cra_ctxsize = | ||
3636 | sizeof(struct chcr_context) + | ||
3637 | sizeof(struct ablk_ctx); | ||
3638 | driver_algs[i].alg.crypto.cra_alignmask = 0; | ||
3639 | driver_algs[i].alg.crypto.cra_type = | ||
3640 | &crypto_ablkcipher_type; | ||
2989 | err = crypto_register_alg(&driver_algs[i].alg.crypto); | 3641 | err = crypto_register_alg(&driver_algs[i].alg.crypto); |
2990 | name = driver_algs[i].alg.crypto.cra_driver_name; | 3642 | name = driver_algs[i].alg.crypto.cra_driver_name; |
2991 | break; | 3643 | break; |
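Rather than repeating cra_priority, cra_flags, cra_ctxsize and the other boilerplate in every driver_algs[] entry, the ABLKCIPHER case of chcr_register_alg() now fills those common fields just before crypto_register_alg(), which is why the cbc/xts/ctr table entries above shrank; the flags also gain CRYPTO_ALG_NEED_FALLBACK, matching the sw_cipher fallback freed in chcr_cra_exit(). A condensed sketch of the pattern, with an illustrative function name:

/* Illustrative only: fill fields shared by every ablkcipher entry at
 * registration time, keeping the static table minimal. */
static int example_register_ablkcipher(struct crypto_alg *alg)
{
        alg->cra_priority  = CHCR_CRA_PRIORITY;
        alg->cra_module    = THIS_MODULE;
        alg->cra_flags     = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
                             CRYPTO_ALG_NEED_FALLBACK;
        alg->cra_ctxsize   = sizeof(struct chcr_context) +
                             sizeof(struct ablk_ctx);
        alg->cra_alignmask = 0;
        alg->cra_type      = &crypto_ablkcipher_type;
        return crypto_register_alg(alg);
}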
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h index 751d06a58101..583008de51a3 100644 --- a/drivers/crypto/chelsio/chcr_algo.h +++ b/drivers/crypto/chelsio/chcr_algo.h | |||
@@ -185,11 +185,11 @@ | |||
185 | FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(1) | \ | 185 | FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(1) | \ |
186 | FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V((ctx_len))) | 186 | FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V((ctx_len))) |
187 | 187 | ||
188 | #define FILL_WR_RX_Q_ID(cid, qid, wr_iv, fid) \ | 188 | #define FILL_WR_RX_Q_ID(cid, qid, wr_iv, lcb, fid) \ |
189 | htonl( \ | 189 | htonl( \ |
190 | FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V((cid)) | \ | 190 | FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V((cid)) | \ |
191 | FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V((qid)) | \ | 191 | FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V((qid)) | \ |
192 | FW_CRYPTO_LOOKASIDE_WR_LCB_V(0) | \ | 192 | FW_CRYPTO_LOOKASIDE_WR_LCB_V((lcb)) | \ |
193 | FW_CRYPTO_LOOKASIDE_WR_IV_V((wr_iv)) | \ | 193 | FW_CRYPTO_LOOKASIDE_WR_IV_V((wr_iv)) | \ |
194 | FW_CRYPTO_LOOKASIDE_WR_FQIDX_V(fid)) | 194 | FW_CRYPTO_LOOKASIDE_WR_FQIDX_V(fid)) |
195 | 195 | ||
@@ -219,9 +219,26 @@ | |||
219 | #define MAX_NK 8 | 219 | #define MAX_NK 8 |
220 | #define CRYPTO_MAX_IMM_TX_PKT_LEN 256 | 220 | #define CRYPTO_MAX_IMM_TX_PKT_LEN 256 |
221 | #define MAX_WR_SIZE 512 | 221 | #define MAX_WR_SIZE 512 |
222 | #define ROUND_16(bytes) ((bytes) & 0xFFFFFFF0) | ||
223 | #define MAX_DSGL_ENT 32 | ||
224 | #define MAX_DIGEST_SKB_SGE (MAX_SKB_FRAGS - 2) | ||
225 | #define MIN_CIPHER_SG 1 /* IV */ | ||
222 | #define MIN_AUTH_SG 2 /*IV + AAD*/ | 226 | #define MIN_AUTH_SG 2 /*IV + AAD*/ |
223 | #define MIN_GCM_SG 2 /* IV + AAD*/ | 227 | #define MIN_GCM_SG 2 /* IV + AAD*/ |
228 | #define MIN_DIGEST_SG 1 /*Partial Buffer*/ | ||
224 | #define MIN_CCM_SG 3 /*IV+AAD+B0*/ | 229 | #define MIN_CCM_SG 3 /*IV+AAD+B0*/ |
230 | #define SPACE_LEFT(len) \ | ||
231 | ((MAX_WR_SIZE - WR_MIN_LEN - (len))) | ||
232 | |||
233 | unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, | ||
234 | 48, 64, 72, 88, | ||
235 | 96, 112, 120, 136, | ||
236 | 144, 160, 168, 184, | ||
237 | 192}; | ||
238 | unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80, | ||
239 | 112, 112, 128, 128, 144, 144, 160, 160, | ||
240 | 192, 192, 208, 208, 224, 224, 240, 240, | ||
241 | 272, 272, 288, 288, 304, 304, 320, 320}; | ||
225 | 242 | ||
226 | struct algo_param { | 243 | struct algo_param { |
227 | unsigned int auth_mode; | 244 | unsigned int auth_mode; |
@@ -239,6 +256,14 @@ struct hash_wr_param { | |||
239 | u64 scmd1; | 256 | u64 scmd1; |
240 | }; | 257 | }; |
241 | 258 | ||
259 | struct cipher_wr_param { | ||
260 | struct ablkcipher_request *req; | ||
261 | struct scatterlist *srcsg; | ||
262 | char *iv; | ||
263 | int bytes; | ||
264 | short int snent; | ||
265 | unsigned short qid; | ||
266 | }; | ||
242 | enum { | 267 | enum { |
243 | AES_KEYLENGTH_128BIT = 128, | 268 | AES_KEYLENGTH_128BIT = 128, |
244 | AES_KEYLENGTH_192BIT = 192, | 269 | AES_KEYLENGTH_192BIT = 192, |
@@ -293,7 +318,6 @@ struct phys_sge_parm { | |||
293 | unsigned int nents; | 318 | unsigned int nents; |
294 | unsigned int obsize; | 319 | unsigned int obsize; |
295 | unsigned short qid; | 320 | unsigned short qid; |
296 | unsigned char align; | ||
297 | }; | 321 | }; |
298 | 322 | ||
299 | struct crypto_result { | 323 | struct crypto_result { |
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c index c28e018e0773..b6dd9cbe815f 100644 --- a/drivers/crypto/chelsio/chcr_core.c +++ b/drivers/crypto/chelsio/chcr_core.c | |||
@@ -29,6 +29,7 @@ | |||
29 | static LIST_HEAD(uld_ctx_list); | 29 | static LIST_HEAD(uld_ctx_list); |
30 | static DEFINE_MUTEX(dev_mutex); | 30 | static DEFINE_MUTEX(dev_mutex); |
31 | static atomic_t dev_count; | 31 | static atomic_t dev_count; |
32 | static struct uld_ctx *ctx_rr; | ||
32 | 33 | ||
33 | typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input); | 34 | typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input); |
34 | static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input); | 35 | static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input); |
@@ -49,25 +50,28 @@ static struct cxgb4_uld_info chcr_uld_info = { | |||
49 | .rx_handler = chcr_uld_rx_handler, | 50 | .rx_handler = chcr_uld_rx_handler, |
50 | }; | 51 | }; |
51 | 52 | ||
52 | int assign_chcr_device(struct chcr_dev **dev) | 53 | struct uld_ctx *assign_chcr_device(void) |
53 | { | 54 | { |
54 | struct uld_ctx *u_ctx; | 55 | struct uld_ctx *u_ctx = NULL; |
55 | int ret = -ENXIO; | ||
56 | 56 | ||
57 | /* | 57 | /* |
58 | * Which device to use if multiple devices are available TODO | 58 | * When multiple devices are present in the system, select |
59 | * May be select the device based on round robin. One session | 59 | * a device in round-robin fashion for crypto operations, |
60 | * must go to the same device to maintain the ordering. | 60 | * although one session must use the same device to |
61 | * maintain request-response ordering. | ||
61 | */ | 62 | */ |
62 | mutex_lock(&dev_mutex); /* TODO ? */ | 63 | mutex_lock(&dev_mutex); |
63 | list_for_each_entry(u_ctx, &uld_ctx_list, entry) | 64 | if (!list_empty(&uld_ctx_list)) { |
64 | if (u_ctx->dev) { | 65 | u_ctx = ctx_rr; |
65 | *dev = u_ctx->dev; | 66 | if (list_is_last(&ctx_rr->entry, &uld_ctx_list)) |
66 | ret = 0; | 67 | ctx_rr = list_first_entry(&uld_ctx_list, |
67 | break; | 68 | struct uld_ctx, |
69 | entry); | ||
70 | else | ||
71 | ctx_rr = list_next_entry(ctx_rr, entry); | ||
68 | } | 72 | } |
69 | mutex_unlock(&dev_mutex); | 73 | mutex_unlock(&dev_mutex); |
70 | return ret; | 74 | return u_ctx; |
71 | } | 75 | } |
72 | 76 | ||
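assign_chcr_device() now walks uld_ctx_list with a persistent cursor (ctx_rr), so successive transforms are spread across adapters while each session keeps the device it was assigned. The cursor-advance step, isolated as a generic sketch (example_rr_next is illustrative only):

/* Illustrative only: advance a round-robin cursor over a list, wrapping
 * back to the head; mirrors how ctx_rr is moved above and in
 * chcr_dev_remove() below. Assumes the caller holds dev_mutex. */
static struct uld_ctx *example_rr_next(struct uld_ctx *cur,
                                       struct list_head *head)
{
        if (list_is_last(&cur->entry, head))
                return list_first_entry(head, struct uld_ctx, entry);
        return list_next_entry(cur, entry);
}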
73 | static int chcr_dev_add(struct uld_ctx *u_ctx) | 77 | static int chcr_dev_add(struct uld_ctx *u_ctx) |
@@ -82,11 +86,27 @@ static int chcr_dev_add(struct uld_ctx *u_ctx) | |||
82 | u_ctx->dev = dev; | 86 | u_ctx->dev = dev; |
83 | dev->u_ctx = u_ctx; | 87 | dev->u_ctx = u_ctx; |
84 | atomic_inc(&dev_count); | 88 | atomic_inc(&dev_count); |
89 | mutex_lock(&dev_mutex); | ||
90 | list_add_tail(&u_ctx->entry, &uld_ctx_list); | ||
91 | if (!ctx_rr) | ||
92 | ctx_rr = u_ctx; | ||
93 | mutex_unlock(&dev_mutex); | ||
85 | return 0; | 94 | return 0; |
86 | } | 95 | } |
87 | 96 | ||
88 | static int chcr_dev_remove(struct uld_ctx *u_ctx) | 97 | static int chcr_dev_remove(struct uld_ctx *u_ctx) |
89 | { | 98 | { |
99 | if (ctx_rr == u_ctx) { | ||
100 | if (list_is_last(&ctx_rr->entry, &uld_ctx_list)) | ||
101 | ctx_rr = list_first_entry(&uld_ctx_list, | ||
102 | struct uld_ctx, | ||
103 | entry); | ||
104 | else | ||
105 | ctx_rr = list_next_entry(ctx_rr, entry); | ||
106 | } | ||
107 | list_del(&u_ctx->entry); | ||
108 | if (list_empty(&uld_ctx_list)) | ||
109 | ctx_rr = NULL; | ||
90 | kfree(u_ctx->dev); | 110 | kfree(u_ctx->dev); |
91 | u_ctx->dev = NULL; | 111 | u_ctx->dev = NULL; |
92 | atomic_dec(&dev_count); | 112 | atomic_dec(&dev_count); |
@@ -100,6 +120,7 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev, | |||
100 | struct cpl_fw6_pld *fw6_pld; | 120 | struct cpl_fw6_pld *fw6_pld; |
101 | u32 ack_err_status = 0; | 121 | u32 ack_err_status = 0; |
102 | int error_status = 0; | 122 | int error_status = 0; |
123 | struct adapter *adap = padap(dev); | ||
103 | 124 | ||
104 | fw6_pld = (struct cpl_fw6_pld *)input; | 125 | fw6_pld = (struct cpl_fw6_pld *)input; |
105 | req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu( | 126 | req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu( |
@@ -111,11 +132,11 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev, | |||
111 | if (CHK_MAC_ERR_BIT(ack_err_status) || | 132 | if (CHK_MAC_ERR_BIT(ack_err_status) || |
112 | CHK_PAD_ERR_BIT(ack_err_status)) | 133 | CHK_PAD_ERR_BIT(ack_err_status)) |
113 | error_status = -EBADMSG; | 134 | error_status = -EBADMSG; |
135 | atomic_inc(&adap->chcr_stats.error); | ||
114 | } | 136 | } |
115 | /* call completion callback with failure status */ | 137 | /* call completion callback with failure status */ |
116 | if (req) { | 138 | if (req) { |
117 | error_status = chcr_handle_resp(req, input, error_status); | 139 | error_status = chcr_handle_resp(req, input, error_status); |
118 | req->complete(req, error_status); | ||
119 | } else { | 140 | } else { |
120 | pr_err("Incorrect request address from the firmware\n"); | 141 | pr_err("Incorrect request address from the firmware\n"); |
121 | return -EFAULT; | 142 | return -EFAULT; |
@@ -138,10 +159,11 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld) | |||
138 | u_ctx = ERR_PTR(-ENOMEM); | 159 | u_ctx = ERR_PTR(-ENOMEM); |
139 | goto out; | 160 | goto out; |
140 | } | 161 | } |
162 | if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE)) { | ||
163 | u_ctx = ERR_PTR(-ENOMEM); | ||
164 | goto out; | ||
165 | } | ||
141 | u_ctx->lldi = *lld; | 166 | u_ctx->lldi = *lld; |
142 | mutex_lock(&dev_mutex); | ||
143 | list_add_tail(&u_ctx->entry, &uld_ctx_list); | ||
144 | mutex_unlock(&dev_mutex); | ||
145 | out: | 167 | out: |
146 | return u_ctx; | 168 | return u_ctx; |
147 | } | 169 | } |
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h index cd0c35a18d92..c9a19b2a1e9f 100644 --- a/drivers/crypto/chelsio/chcr_core.h +++ b/drivers/crypto/chelsio/chcr_core.h | |||
@@ -53,6 +53,9 @@ | |||
53 | #define MAC_ERROR_BIT 0 | 53 | #define MAC_ERROR_BIT 0 |
54 | #define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1) | 54 | #define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1) |
55 | #define MAX_SALT 4 | 55 | #define MAX_SALT 4 |
56 | #define WR_MIN_LEN (sizeof(struct chcr_wr) + \ | ||
57 | sizeof(struct cpl_rx_phys_dsgl) + \ | ||
58 | sizeof(struct ulptx_sgl)) | ||
56 | 59 | ||
57 | #define padap(dev) pci_get_drvdata(dev->u_ctx->lldi.pdev) | 60 | #define padap(dev) pci_get_drvdata(dev->u_ctx->lldi.pdev) |
58 | 61 | ||
@@ -86,7 +89,7 @@ struct uld_ctx { | |||
86 | struct chcr_dev *dev; | 89 | struct chcr_dev *dev; |
87 | }; | 90 | }; |
88 | 91 | ||
89 | int assign_chcr_device(struct chcr_dev **dev); | 92 | struct uld_ctx * assign_chcr_device(void); |
90 | int chcr_send_wr(struct sk_buff *skb); | 93 | int chcr_send_wr(struct sk_buff *skb); |
91 | int start_crypto(void); | 94 | int start_crypto(void); |
92 | int stop_crypto(void); | 95 | int stop_crypto(void); |
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h index 5b2fabb14229..a4f95b014b46 100644 --- a/drivers/crypto/chelsio/chcr_crypto.h +++ b/drivers/crypto/chelsio/chcr_crypto.h | |||
@@ -139,6 +139,9 @@ | |||
139 | #define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309 0x06000000 | 139 | #define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309 0x06000000 |
140 | #define CRYPTO_ALG_SUB_TYPE_AEAD_NULL 0x07000000 | 140 | #define CRYPTO_ALG_SUB_TYPE_AEAD_NULL 0x07000000 |
141 | #define CRYPTO_ALG_SUB_TYPE_CTR 0x08000000 | 141 | #define CRYPTO_ALG_SUB_TYPE_CTR 0x08000000 |
142 | #define CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 0x09000000 | ||
143 | #define CRYPTO_ALG_SUB_TYPE_XTS 0x0a000000 | ||
144 | #define CRYPTO_ALG_SUB_TYPE_CBC 0x0b000000 | ||
142 | #define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\ | 145 | #define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\ |
143 | CRYPTO_ALG_SUB_TYPE_HASH_HMAC) | 146 | CRYPTO_ALG_SUB_TYPE_HASH_HMAC) |
144 | 147 | ||
@@ -146,19 +149,23 @@ | |||
146 | 149 | ||
147 | #define CHCR_HASH_MAX_BLOCK_SIZE_64 64 | 150 | #define CHCR_HASH_MAX_BLOCK_SIZE_64 64 |
148 | #define CHCR_HASH_MAX_BLOCK_SIZE_128 128 | 151 | #define CHCR_HASH_MAX_BLOCK_SIZE_128 128 |
152 | #define CHCR_SG_SIZE 2048 | ||
149 | 153 | ||
150 | /* Aligned to 128 bit boundary */ | 154 | /* Aligned to 128 bit boundary */ |
151 | 155 | ||
152 | struct ablk_ctx { | 156 | struct ablk_ctx { |
157 | struct crypto_skcipher *sw_cipher; | ||
153 | __be32 key_ctx_hdr; | 158 | __be32 key_ctx_hdr; |
154 | unsigned int enckey_len; | 159 | unsigned int enckey_len; |
155 | u8 key[CHCR_AES_MAX_KEY_LEN]; | ||
156 | unsigned char ciph_mode; | 160 | unsigned char ciph_mode; |
161 | u8 key[CHCR_AES_MAX_KEY_LEN]; | ||
162 | u8 nonce[4]; | ||
157 | u8 rrkey[AES_MAX_KEY_SIZE]; | 163 | u8 rrkey[AES_MAX_KEY_SIZE]; |
158 | }; | 164 | }; |
159 | struct chcr_aead_reqctx { | 165 | struct chcr_aead_reqctx { |
160 | struct sk_buff *skb; | 166 | struct sk_buff *skb; |
161 | struct scatterlist *dst; | 167 | struct scatterlist *dst; |
168 | struct scatterlist *newdstsg; | ||
162 | struct scatterlist srcffwd[2]; | 169 | struct scatterlist srcffwd[2]; |
163 | struct scatterlist dstffwd[2]; | 170 | struct scatterlist dstffwd[2]; |
164 | short int dst_nents; | 171 | short int dst_nents; |
@@ -233,7 +240,14 @@ struct chcr_ahash_req_ctx { | |||
233 | 240 | ||
234 | struct chcr_blkcipher_req_ctx { | 241 | struct chcr_blkcipher_req_ctx { |
235 | struct sk_buff *skb; | 242 | struct sk_buff *skb; |
236 | unsigned int dst_nents; | 243 | struct scatterlist srcffwd[2]; |
244 | struct scatterlist dstffwd[2]; | ||
245 | struct scatterlist *dstsg; | ||
246 | struct scatterlist *dst; | ||
247 | struct scatterlist *newdstsg; | ||
248 | unsigned int processed; | ||
249 | unsigned int op; | ||
250 | short int dst_nents; | ||
237 | u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; | 251 | u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; |
238 | }; | 252 | }; |
239 | 253 | ||
@@ -275,5 +289,10 @@ static int chcr_aead_op(struct aead_request *req_base, | |||
275 | int size, | 289 | int size, |
276 | create_wr_t create_wr_fn); | 290 | create_wr_t create_wr_fn); |
277 | static inline int get_aead_subtype(struct crypto_aead *aead); | 291 | static inline int get_aead_subtype(struct crypto_aead *aead); |
278 | 292 | static int is_newsg(struct scatterlist *sgl, unsigned int *newents); | |
293 | static struct scatterlist *alloc_new_sg(struct scatterlist *sgl, | ||
294 | unsigned int nents); | ||
295 | static inline void free_new_sg(struct scatterlist *sgl); | ||
296 | static int chcr_handle_cipher_resp(struct ablkcipher_request *req, | ||
297 | unsigned char *input, int err); | ||
279 | #endif /* __CHCR_CRYPTO_H__ */ | 298 | #endif /* __CHCR_CRYPTO_H__ */ |
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index 9b07f3d88feb..0c6a917a9ab8 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c | |||
@@ -1088,9 +1088,17 @@ static int img_hash_suspend(struct device *dev) | |||
1088 | static int img_hash_resume(struct device *dev) | 1088 | static int img_hash_resume(struct device *dev) |
1089 | { | 1089 | { |
1090 | struct img_hash_dev *hdev = dev_get_drvdata(dev); | 1090 | struct img_hash_dev *hdev = dev_get_drvdata(dev); |
1091 | int ret; | ||
1091 | 1092 | ||
1092 | clk_prepare_enable(hdev->hash_clk); | 1093 | ret = clk_prepare_enable(hdev->hash_clk); |
1093 | clk_prepare_enable(hdev->sys_clk); | 1094 | if (ret) |
1095 | return ret; | ||
1096 | |||
1097 | ret = clk_prepare_enable(hdev->sys_clk); | ||
1098 | if (ret) { | ||
1099 | clk_disable_unprepare(hdev->hash_clk); | ||
1100 | return ret; | ||
1101 | } | ||
1094 | 1102 | ||
1095 | return 0; | 1103 | return 0; |
1096 | } | 1104 | } |
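img_hash_resume() now checks the return value of each clk_prepare_enable() and unwinds the first clock if the second one fails, instead of ignoring errors. Generalized, the usual kernel idiom is goto-based unwinding, sketched below with hypothetical clock names (not the driver's fields):

/* Illustrative only: enable-or-unwind for a pair of clocks. */
static int example_enable_clks(struct clk *hash_clk, struct clk *sys_clk)
{
        int ret;

        ret = clk_prepare_enable(hash_clk);
        if (ret)
                return ret;

        ret = clk_prepare_enable(sys_clk);
        if (ret)
                goto err_hash;

        return 0;

err_hash:
        clk_disable_unprepare(hash_clk);
        return ret;
}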
diff --git a/drivers/crypto/inside-secure/Makefile b/drivers/crypto/inside-secure/Makefile new file mode 100644 index 000000000000..302f07dde98c --- /dev/null +++ b/drivers/crypto/inside-secure/Makefile | |||
@@ -0,0 +1,2 @@ | |||
1 | obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += crypto_safexcel.o | ||
2 | crypto_safexcel-objs := safexcel.o safexcel_ring.o safexcel_cipher.o safexcel_hash.o | ||
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c new file mode 100644 index 000000000000..e7f87ac12685 --- /dev/null +++ b/drivers/crypto/inside-secure/safexcel.c | |||
@@ -0,0 +1,926 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2017 Marvell | ||
3 | * | ||
4 | * Antoine Tenart <antoine.tenart@free-electrons.com> | ||
5 | * | ||
6 | * This file is licensed under the terms of the GNU General Public | ||
7 | * License version 2. This program is licensed "as is" without any | ||
8 | * warranty of any kind, whether express or implied. | ||
9 | */ | ||
10 | |||
11 | #include <linux/clk.h> | ||
12 | #include <linux/device.h> | ||
13 | #include <linux/dma-mapping.h> | ||
14 | #include <linux/dmapool.h> | ||
15 | #include <linux/firmware.h> | ||
16 | #include <linux/interrupt.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/of_platform.h> | ||
19 | #include <linux/of_irq.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/workqueue.h> | ||
22 | |||
23 | #include <crypto/internal/hash.h> | ||
24 | #include <crypto/internal/skcipher.h> | ||
25 | |||
26 | #include "safexcel.h" | ||
27 | |||
28 | static u32 max_rings = EIP197_MAX_RINGS; | ||
29 | module_param(max_rings, uint, 0644); | ||
30 | MODULE_PARM_DESC(max_rings, "Maximum number of rings to use."); | ||
31 | |||
32 | static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv) | ||
33 | { | ||
34 | u32 val, htable_offset; | ||
35 | int i; | ||
36 | |||
37 | /* Enable the record cache memory access */ | ||
38 | val = readl(priv->base + EIP197_CS_RAM_CTRL); | ||
39 | val &= ~EIP197_TRC_ENABLE_MASK; | ||
40 | val |= EIP197_TRC_ENABLE_0; | ||
41 | writel(val, priv->base + EIP197_CS_RAM_CTRL); | ||
42 | |||
43 | /* Clear all ECC errors */ | ||
44 | writel(0, priv->base + EIP197_TRC_ECCCTRL); | ||
45 | |||
46 | /* | ||
47 | * Make sure the cache memory is accessible by taking record cache into | ||
48 | * reset. | ||
49 | */ | ||
50 | val = readl(priv->base + EIP197_TRC_PARAMS); | ||
51 | val |= EIP197_TRC_PARAMS_SW_RESET; | ||
52 | val &= ~EIP197_TRC_PARAMS_DATA_ACCESS; | ||
53 | writel(val, priv->base + EIP197_TRC_PARAMS); | ||
54 | |||
55 | /* Clear all records */ | ||
56 | for (i = 0; i < EIP197_CS_RC_MAX; i++) { | ||
57 | u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE; | ||
58 | |||
59 | writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) | | ||
60 | EIP197_CS_RC_PREV(EIP197_RC_NULL), | ||
61 | priv->base + offset); | ||
62 | |||
63 | val = EIP197_CS_RC_NEXT(i+1) | EIP197_CS_RC_PREV(i-1); | ||
64 | if (i == 0) | ||
65 | val |= EIP197_CS_RC_PREV(EIP197_RC_NULL); | ||
66 | else if (i == EIP197_CS_RC_MAX - 1) | ||
67 | val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL); | ||
68 | writel(val, priv->base + offset + sizeof(u32)); | ||
69 | } | ||
70 | |||
71 | /* Clear the hash table entries */ | ||
72 | htable_offset = EIP197_CS_RC_MAX * EIP197_CS_RC_SIZE; | ||
73 | for (i = 0; i < 64; i++) | ||
74 | writel(GENMASK(29, 0), | ||
75 | priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32)); | ||
76 | |||
77 | /* Disable the record cache memory access */ | ||
78 | val = readl(priv->base + EIP197_CS_RAM_CTRL); | ||
79 | val &= ~EIP197_TRC_ENABLE_MASK; | ||
80 | writel(val, priv->base + EIP197_CS_RAM_CTRL); | ||
81 | |||
82 | /* Write head and tail pointers of the record free chain */ | ||
83 | val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) | | ||
84 | EIP197_TRC_FREECHAIN_TAIL_PTR(EIP197_CS_RC_MAX - 1); | ||
85 | writel(val, priv->base + EIP197_TRC_FREECHAIN); | ||
86 | |||
87 | /* Configure the record cache #1 */ | ||
88 | val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(EIP197_CS_TRC_REC_WC) | | ||
89 | EIP197_TRC_PARAMS2_HTABLE_PTR(EIP197_CS_RC_MAX); | ||
90 | writel(val, priv->base + EIP197_TRC_PARAMS2); | ||
91 | |||
92 | /* Configure the record cache #2 */ | ||
93 | val = EIP197_TRC_PARAMS_RC_SZ_LARGE(EIP197_CS_TRC_LG_REC_WC) | | ||
94 | EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) | | ||
95 | EIP197_TRC_PARAMS_HTABLE_SZ(2); | ||
96 | writel(val, priv->base + EIP197_TRC_PARAMS); | ||
97 | } | ||
98 | |||
99 | static void eip197_write_firmware(struct safexcel_crypto_priv *priv, | ||
100 | const struct firmware *fw, u32 ctrl, | ||
101 | u32 prog_en) | ||
102 | { | ||
103 | const u32 *data = (const u32 *)fw->data; | ||
104 | u32 val; | ||
105 | int i; | ||
106 | |||
107 | /* Reset the engine to make its program memory accessible */ | ||
108 | writel(EIP197_PE_ICE_x_CTRL_SW_RESET | | ||
109 | EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR | | ||
110 | EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR, | ||
111 | priv->base + ctrl); | ||
112 | |||
113 | /* Enable access to the program memory */ | ||
114 | writel(prog_en, priv->base + EIP197_PE_ICE_RAM_CTRL); | ||
115 | |||
116 | /* Write the firmware */ | ||
117 | for (i = 0; i < fw->size / sizeof(u32); i++) | ||
118 | writel(be32_to_cpu(data[i]), | ||
119 | priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32)); | ||
120 | |||
121 | /* Disable access to the program memory */ | ||
122 | writel(0, priv->base + EIP197_PE_ICE_RAM_CTRL); | ||
123 | |||
124 | /* Release engine from reset */ | ||
125 | val = readl(priv->base + ctrl); | ||
126 | val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET; | ||
127 | writel(val, priv->base + ctrl); | ||
128 | } | ||
129 | |||
130 | static int eip197_load_firmwares(struct safexcel_crypto_priv *priv) | ||
131 | { | ||
132 | const char *fw_name[] = {"ifpp.bin", "ipue.bin"}; | ||
133 | const struct firmware *fw[FW_NB]; | ||
134 | int i, j, ret = 0; | ||
135 | u32 val; | ||
136 | |||
137 | for (i = 0; i < FW_NB; i++) { | ||
138 | ret = request_firmware(&fw[i], fw_name[i], priv->dev); | ||
139 | if (ret) { | ||
140 | dev_err(priv->dev, | ||
141 | "Failed to request firmware %s (%d)\n", | ||
142 | fw_name[i], ret); | ||
143 | goto release_fw; | ||
144 | } | ||
145 | } | ||
146 | |||
147 | /* Clear the scratchpad memory */ | ||
148 | val = readl(priv->base + EIP197_PE_ICE_SCRATCH_CTRL); | ||
149 | val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER | | ||
150 | EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN | | ||
151 | EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS | | ||
152 | EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS; | ||
153 | writel(val, priv->base + EIP197_PE_ICE_SCRATCH_CTRL); | ||
154 | |||
155 | memset(priv->base + EIP197_PE_ICE_SCRATCH_RAM, 0, | ||
156 | EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32)); | ||
157 | |||
158 | eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL, | ||
159 | EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN); | ||
160 | |||
161 | eip197_write_firmware(priv, fw[FW_IPUE], EIP197_PE_ICE_PUE_CTRL, | ||
162 | EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN); | ||
163 | |||
164 | release_fw: | ||
165 | for (j = 0; j < i; j++) | ||
166 | release_firmware(fw[j]); | ||
167 | |||
168 | return ret; | ||
169 | } | ||
170 | |||
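eip197_load_firmwares() requests both microcode images up front and, on failure, releases only the ones already obtained (the release_fw loop runs for j < i); the words are then pushed to the engine with be32_to_cpu(), the images apparently being stored big-endian. The request/rollback pattern, as a small self-contained sketch with illustrative names:

/* Illustrative only: request a fixed set of firmware images, releasing
 * the ones already loaded if any request fails. */
static int example_request_firmwares(struct device *dev,
                                     const char *const names[], int n,
                                     const struct firmware *fw[])
{
        int i, j, ret;

        for (i = 0; i < n; i++) {
                ret = request_firmware(&fw[i], names[i], dev);
                if (ret)
                        goto err;
        }
        return 0;

err:
        for (j = 0; j < i; j++)
                release_firmware(fw[j]);
        return ret;
}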
171 | static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv) | ||
172 | { | ||
173 | u32 hdw, cd_size_rnd, val; | ||
174 | int i; | ||
175 | |||
176 | hdw = readl(priv->base + EIP197_HIA_OPTIONS); | ||
177 | hdw &= GENMASK(27, 25); | ||
178 | hdw >>= 25; | ||
179 | |||
180 | cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw; | ||
181 | |||
182 | for (i = 0; i < priv->config.rings; i++) { | ||
183 | /* ring base address */ | ||
184 | writel(lower_32_bits(priv->ring[i].cdr.base_dma), | ||
185 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO); | ||
186 | writel(upper_32_bits(priv->ring[i].cdr.base_dma), | ||
187 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI); | ||
188 | |||
189 | writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) | | ||
190 | priv->config.cd_size, | ||
191 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DESC_SIZE); | ||
192 | writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) | | ||
193 | (EIP197_FETCH_COUNT * priv->config.cd_offset), | ||
194 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG); | ||
195 | |||
196 | /* Configure DMA tx control */ | ||
197 | val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS); | ||
198 | val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS); | ||
199 | writel(val, | ||
200 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DMA_CFG); | ||
201 | |||
202 | /* clear any pending interrupt */ | ||
203 | writel(GENMASK(5, 0), | ||
204 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_STAT); | ||
205 | } | ||
206 | |||
207 | return 0; | ||
208 | } | ||
209 | |||
210 | static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv) | ||
211 | { | ||
212 | u32 hdw, rd_size_rnd, val; | ||
213 | int i; | ||
214 | |||
215 | hdw = readl(priv->base + EIP197_HIA_OPTIONS); | ||
216 | hdw &= GENMASK(27, 25); | ||
217 | hdw >>= 25; | ||
218 | |||
219 | rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw; | ||
220 | |||
221 | for (i = 0; i < priv->config.rings; i++) { | ||
222 | /* ring base address */ | ||
223 | writel(lower_32_bits(priv->ring[i].rdr.base_dma), | ||
224 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO); | ||
225 | writel(upper_32_bits(priv->ring[i].rdr.base_dma), | ||
226 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI); | ||
227 | |||
228 | writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) | | ||
229 | priv->config.rd_size, | ||
230 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DESC_SIZE); | ||
231 | |||
232 | writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) | | ||
233 | (EIP197_FETCH_COUNT * priv->config.rd_offset), | ||
234 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG); | ||
235 | |||
236 | /* Configure DMA tx control */ | ||
237 | val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS); | ||
238 | val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS); | ||
239 | val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUG; | ||
240 | writel(val, | ||
241 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DMA_CFG); | ||
242 | |||
243 | /* clear any pending interrupt */ | ||
244 | writel(GENMASK(7, 0), | ||
245 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_STAT); | ||
246 | |||
247 | /* enable ring interrupt */ | ||
248 | val = readl(priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i)); | ||
249 | val |= EIP197_RDR_IRQ(i); | ||
250 | writel(val, priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i)); | ||
251 | } | ||
252 | |||
253 | return 0; | ||
254 | } | ||
255 | |||
256 | static int safexcel_hw_init(struct safexcel_crypto_priv *priv) | ||
257 | { | ||
258 | u32 version, val; | ||
259 | int i, ret; | ||
260 | |||
261 | /* Determine endianness and configure byte swap */ | ||
262 | version = readl(priv->base + EIP197_HIA_VERSION); | ||
263 | val = readl(priv->base + EIP197_HIA_MST_CTRL); | ||
264 | |||
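	/* EIP197_HIA_VERSION_BE is the byte-swapped form of _LE: if the version | ||
	 * register reads back swapped, the host and engine endianness differ | ||
	 * and master byte swapping must be enabled. | ||
	 */ | ||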
265 | if ((version & 0xffff) == EIP197_HIA_VERSION_BE) | ||
266 | val |= EIP197_MST_CTRL_BYTE_SWAP; | ||
267 | else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE) | ||
268 | val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24); | ||
269 | |||
270 | writel(val, priv->base + EIP197_HIA_MST_CTRL); | ||
271 | |||
272 | |||
273 | /* Configure wr/rd cache values */ | ||
274 | writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) | | ||
275 | EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS), | ||
276 | priv->base + EIP197_MST_CTRL); | ||
277 | |||
278 | /* Interrupts reset */ | ||
279 | |||
280 | /* Disable all global interrupts */ | ||
281 | writel(0, priv->base + EIP197_HIA_AIC_G_ENABLE_CTRL); | ||
282 | |||
283 | /* Clear any pending interrupt */ | ||
284 | writel(GENMASK(31, 0), priv->base + EIP197_HIA_AIC_G_ACK); | ||
285 | |||
286 | /* Data Fetch Engine configuration */ | ||
287 | |||
288 | /* Reset all DFE threads */ | ||
289 | writel(EIP197_DxE_THR_CTRL_RESET_PE, | ||
290 | priv->base + EIP197_HIA_DFE_THR_CTRL); | ||
291 | |||
292 | /* Reset HIA input interface arbiter */ | ||
293 | writel(EIP197_HIA_RA_PE_CTRL_RESET, | ||
294 | priv->base + EIP197_HIA_RA_PE_CTRL); | ||
295 | |||
296 | /* DMA transfer size to use */ | ||
297 | val = EIP197_HIA_DFE_CFG_DIS_DEBUG; | ||
298 | val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9); | ||
299 | val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7); | ||
300 | val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS); | ||
301 | val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS); | ||
302 | writel(val, priv->base + EIP197_HIA_DFE_CFG); | ||
303 | |||
304 | /* Take the DFE threads out of the reset state */ | ||
305 | writel(0, priv->base + EIP197_HIA_DFE_THR_CTRL); | ||
306 | |||
307 | /* Configure the processing engine thresholds */ | ||
308 | writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9), | ||
309 | priv->base + EIP197_PE_IN_DBUF_THRES); | ||
310 | writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7), | ||
311 | priv->base + EIP197_PE_IN_TBUF_THRES); | ||
312 | |||
313 | /* enable HIA input interface arbiter and rings */ | ||
314 | writel(EIP197_HIA_RA_PE_CTRL_EN | GENMASK(priv->config.rings - 1, 0), | ||
315 | priv->base + EIP197_HIA_RA_PE_CTRL); | ||
316 | |||
317 | /* Data Store Engine configuration */ | ||
318 | |||
319 | /* Reset all DSE threads */ | ||
320 | writel(EIP197_DxE_THR_CTRL_RESET_PE, | ||
321 | priv->base + EIP197_HIA_DSE_THR_CTRL); | ||
322 | |||
323 | /* Wait for all DSE threads to complete */ | ||
324 | while ((readl(priv->base + EIP197_HIA_DSE_THR_STAT) & | ||
325 | GENMASK(15, 12)) != GENMASK(15, 12)) | ||
326 | ; | ||
327 | |||
328 | /* DMA transfer size to use */ | ||
329 | val = EIP197_HIA_DSE_CFG_DIS_DEBUG; | ||
330 | val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8); | ||
331 | val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS); | ||
332 | val |= EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE; | ||
333 | val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR; | ||
334 | writel(val, priv->base + EIP197_HIA_DSE_CFG); | ||
335 | |||
336 | /* Take the DSE threads out of the reset state */ | ||
337 | writel(0, priv->base + EIP197_HIA_DSE_THR_CTRL); | ||
338 | |||
339 | /* Configure the processing engine thresholds */ | ||
340 | writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8), | ||
341 | priv->base + EIP197_PE_OUT_DBUF_THRES); | ||
342 | |||
343 | /* Processing Engine configuration */ | ||
344 | |||
345 | /* H/W capabilities selection */ | ||
346 | val = EIP197_FUNCTION_RSVD; | ||
347 | val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY; | ||
348 | val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC; | ||
349 | val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1; | ||
350 | val |= EIP197_ALG_SHA2; | ||
351 | writel(val, priv->base + EIP197_PE_EIP96_FUNCTION_EN); | ||
352 | |||
353 | /* Command Descriptor Rings prepare */ | ||
354 | for (i = 0; i < priv->config.rings; i++) { | ||
355 | /* Clear interrupts for this ring */ | ||
356 | writel(GENMASK(31, 0), | ||
357 | priv->base + EIP197_HIA_AIC_R_ENABLE_CLR(i)); | ||
358 | |||
359 | /* Disable external triggering */ | ||
360 | writel(0, priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG); | ||
361 | |||
362 | /* Clear the pending prepared counter */ | ||
363 | writel(EIP197_xDR_PREP_CLR_COUNT, | ||
364 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_COUNT); | ||
365 | |||
366 | /* Clear the pending processed counter */ | ||
367 | writel(EIP197_xDR_PROC_CLR_COUNT, | ||
368 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_COUNT); | ||
369 | |||
370 | writel(0, | ||
371 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_PNTR); | ||
372 | writel(0, | ||
373 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_PNTR); | ||
374 | |||
375 | writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2, | ||
376 | priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_SIZE); | ||
377 | } | ||
378 | |||
379 | /* Result Descriptor Rings prepare */ | ||
380 | for (i = 0; i < priv->config.rings; i++) { | ||
381 | /* Disable external triggering */ | ||
382 | writel(0, priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG); | ||
383 | |||
384 | /* Clear the pending prepared counter */ | ||
385 | writel(EIP197_xDR_PREP_CLR_COUNT, | ||
386 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_COUNT); | ||
387 | |||
388 | /* Clear the pending processed counter */ | ||
389 | writel(EIP197_xDR_PROC_CLR_COUNT, | ||
390 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_COUNT); | ||
391 | |||
392 | writel(0, | ||
393 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_PNTR); | ||
394 | writel(0, | ||
395 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_PNTR); | ||
396 | |||
397 | /* Ring size */ | ||
398 | writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2, | ||
399 | priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_SIZE); | ||
400 | } | ||
401 | |||
402 | /* Enable command descriptor rings */ | ||
403 | writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0), | ||
404 | priv->base + EIP197_HIA_DFE_THR_CTRL); | ||
405 | |||
406 | /* Enable result descriptor rings */ | ||
407 | writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0), | ||
408 | priv->base + EIP197_HIA_DSE_THR_CTRL); | ||
409 | |||
410 | /* Clear any HIA interrupt */ | ||
411 | writel(GENMASK(30, 20), priv->base + EIP197_HIA_AIC_G_ACK); | ||
412 | |||
413 | eip197_trc_cache_init(priv); | ||
414 | |||
415 | ret = eip197_load_firmwares(priv); | ||
416 | if (ret) | ||
417 | return ret; | ||
418 | |||
419 | safexcel_hw_setup_cdesc_rings(priv); | ||
420 | safexcel_hw_setup_rdesc_rings(priv); | ||
421 | |||
422 | return 0; | ||
423 | } | ||
424 | |||
425 | void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring) | ||
426 | { | ||
427 | struct crypto_async_request *req, *backlog; | ||
428 | struct safexcel_context *ctx; | ||
429 | struct safexcel_request *request; | ||
430 | int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results; | ||
431 | |||
432 | priv->ring[ring].need_dequeue = false; | ||
433 | |||
434 | do { | ||
435 | spin_lock_bh(&priv->ring[ring].queue_lock); | ||
436 | backlog = crypto_get_backlog(&priv->ring[ring].queue); | ||
437 | req = crypto_dequeue_request(&priv->ring[ring].queue); | ||
438 | spin_unlock_bh(&priv->ring[ring].queue_lock); | ||
439 | |||
440 | if (!req) | ||
441 | goto finalize; | ||
442 | |||
443 | request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req)); | ||
444 | if (!request) { | ||
445 | spin_lock_bh(&priv->ring[ring].queue_lock); | ||
446 | crypto_enqueue_request(&priv->ring[ring].queue, req); | ||
447 | spin_unlock_bh(&priv->ring[ring].queue_lock); | ||
448 | |||
449 | priv->ring[ring].need_dequeue = true; | ||
450 | goto finalize; | ||
451 | } | ||
452 | |||
453 | ctx = crypto_tfm_ctx(req->tfm); | ||
454 | ret = ctx->send(req, ring, request, &commands, &results); | ||
455 | if (ret) { | ||
456 | kfree(request); | ||
457 | req->complete(req, ret); | ||
458 | priv->ring[ring].need_dequeue = true; | ||
459 | goto finalize; | ||
460 | } | ||
461 | |||
462 | if (backlog) | ||
463 | backlog->complete(backlog, -EINPROGRESS); | ||
464 | |||
465 | spin_lock_bh(&priv->ring[ring].egress_lock); | ||
466 | list_add_tail(&request->list, &priv->ring[ring].list); | ||
467 | spin_unlock_bh(&priv->ring[ring].egress_lock); | ||
468 | |||
469 | cdesc += commands; | ||
470 | rdesc += results; | ||
471 | } while (nreq++ < EIP197_MAX_BATCH_SZ); | ||
472 | |||
473 | finalize: | ||
474 | if (nreq == EIP197_MAX_BATCH_SZ) | ||
475 | priv->ring[ring].need_dequeue = true; | ||
476 | else if (!nreq) | ||
477 | return; | ||
478 | |||
479 | spin_lock_bh(&priv->ring[ring].lock); | ||
480 | |||
481 | /* Configure when we want an interrupt */ | ||
482 | writel(EIP197_HIA_RDR_THRESH_PKT_MODE | | ||
483 | EIP197_HIA_RDR_THRESH_PROC_PKT(nreq), | ||
484 | priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_THRESH); | ||
485 | |||
486 | /* let the RDR know we have pending descriptors */ | ||
487 | writel((rdesc * priv->config.rd_offset) << 2, | ||
488 | priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PREP_COUNT); | ||
489 | |||
490 | /* let the CDR know we have pending descriptors */ | ||
491 | writel((cdesc * priv->config.cd_offset) << 2, | ||
492 | priv->base + EIP197_HIA_CDR(ring) + EIP197_HIA_xDR_PREP_COUNT); | ||
493 | |||
494 | spin_unlock_bh(&priv->ring[ring].lock); | ||
495 | } | ||
496 | |||
497 | void safexcel_free_context(struct safexcel_crypto_priv *priv, | ||
498 | struct crypto_async_request *req, | ||
499 | int result_sz) | ||
500 | { | ||
501 | struct safexcel_context *ctx = crypto_tfm_ctx(req->tfm); | ||
502 | |||
503 | if (ctx->result_dma) | ||
504 | dma_unmap_single(priv->dev, ctx->result_dma, result_sz, | ||
505 | DMA_FROM_DEVICE); | ||
506 | |||
507 | if (ctx->cache) { | ||
508 | dma_unmap_single(priv->dev, ctx->cache_dma, ctx->cache_sz, | ||
509 | DMA_TO_DEVICE); | ||
510 | kfree(ctx->cache); | ||
511 | ctx->cache = NULL; | ||
512 | ctx->cache_sz = 0; | ||
513 | } | ||
514 | } | ||
515 | |||
516 | void safexcel_complete(struct safexcel_crypto_priv *priv, int ring) | ||
517 | { | ||
518 | struct safexcel_command_desc *cdesc; | ||
519 | |||
520 | /* Acknowledge the command descriptors */ | ||
521 | do { | ||
522 | cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr); | ||
523 | if (IS_ERR(cdesc)) { | ||
524 | dev_err(priv->dev, | ||
525 | "Could not retrieve the command descriptor\n"); | ||
526 | return; | ||
527 | } | ||
528 | } while (!cdesc->last_seg); | ||
529 | } | ||
530 | |||
531 | void safexcel_inv_complete(struct crypto_async_request *req, int error) | ||
532 | { | ||
533 | struct safexcel_inv_result *result = req->data; | ||
534 | |||
535 | if (error == -EINPROGRESS) | ||
536 | return; | ||
537 | |||
538 | result->error = error; | ||
539 | complete(&result->completion); | ||
540 | } | ||
541 | |||
542 | int safexcel_invalidate_cache(struct crypto_async_request *async, | ||
543 | struct safexcel_context *ctx, | ||
544 | struct safexcel_crypto_priv *priv, | ||
545 | dma_addr_t ctxr_dma, int ring, | ||
546 | struct safexcel_request *request) | ||
547 | { | ||
548 | struct safexcel_command_desc *cdesc; | ||
549 | struct safexcel_result_desc *rdesc; | ||
550 | int ret = 0; | ||
551 | |||
552 | spin_lock_bh(&priv->ring[ring].egress_lock); | ||
553 | |||
554 | /* Prepare command descriptor */ | ||
555 | cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma); | ||
556 | if (IS_ERR(cdesc)) { | ||
557 | ret = PTR_ERR(cdesc); | ||
558 | goto unlock; | ||
559 | } | ||
560 | |||
561 | cdesc->control_data.type = EIP197_TYPE_EXTENDED; | ||
562 | cdesc->control_data.options = 0; | ||
563 | cdesc->control_data.refresh = 0; | ||
564 | cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR; | ||
565 | |||
566 | /* Prepare result descriptor */ | ||
567 | rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0); | ||
568 | |||
569 | if (IS_ERR(rdesc)) { | ||
570 | ret = PTR_ERR(rdesc); | ||
571 | goto cdesc_rollback; | ||
572 | } | ||
573 | |||
574 | request->req = async; | ||
575 | goto unlock; | ||
576 | |||
577 | cdesc_rollback: | ||
578 | safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr); | ||
579 | |||
580 | unlock: | ||
581 | spin_unlock_bh(&priv->ring[ring].egress_lock); | ||
582 | return ret; | ||
583 | } | ||
584 | |||
585 | static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv, | ||
586 | int ring) | ||
587 | { | ||
588 | struct safexcel_request *sreq; | ||
589 | struct safexcel_context *ctx; | ||
590 | int ret, i, nreq, ndesc = 0; | ||
591 | bool should_complete; | ||
592 | |||
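	/* Bits 30:24 of PROC_COUNT report how many result packets the | ||
	 * engine has processed since they were last acknowledged. | ||
	 */ | ||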
593 | nreq = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT); | ||
594 | nreq >>= 24; | ||
595 | nreq &= GENMASK(6, 0); | ||
596 | if (!nreq) | ||
597 | return; | ||
598 | |||
599 | for (i = 0; i < nreq; i++) { | ||
600 | spin_lock_bh(&priv->ring[ring].egress_lock); | ||
601 | sreq = list_first_entry(&priv->ring[ring].list, | ||
602 | struct safexcel_request, list); | ||
603 | list_del(&sreq->list); | ||
604 | spin_unlock_bh(&priv->ring[ring].egress_lock); | ||
605 | |||
606 | ctx = crypto_tfm_ctx(sreq->req->tfm); | ||
607 | ndesc = ctx->handle_result(priv, ring, sreq->req, | ||
608 | &should_complete, &ret); | ||
609 | if (ndesc < 0) { | ||
610 | dev_err(priv->dev, "failed to handle result (%d)\n", ndesc); | ||
611 | return; | ||
612 | } | ||
613 | |||
614 | writel(EIP197_xDR_PROC_xD_PKT(1) | | ||
615 | EIP197_xDR_PROC_xD_COUNT(ndesc * priv->config.rd_offset), | ||
616 | priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT); | ||
617 | |||
618 | if (should_complete) { | ||
619 | local_bh_disable(); | ||
620 | sreq->req->complete(sreq->req, ret); | ||
621 | local_bh_enable(); | ||
622 | } | ||
623 | |||
624 | kfree(sreq); | ||
625 | } | ||
626 | } | ||
627 | |||
628 | static void safexcel_handle_result_work(struct work_struct *work) | ||
629 | { | ||
630 | struct safexcel_work_data *data = | ||
631 | container_of(work, struct safexcel_work_data, work); | ||
632 | struct safexcel_crypto_priv *priv = data->priv; | ||
633 | |||
634 | safexcel_handle_result_descriptor(priv, data->ring); | ||
635 | |||
636 | if (priv->ring[data->ring].need_dequeue) | ||
637 | safexcel_dequeue(data->priv, data->ring); | ||
638 | } | ||
639 | |||
640 | struct safexcel_ring_irq_data { | ||
641 | struct safexcel_crypto_priv *priv; | ||
642 | int ring; | ||
643 | }; | ||
644 | |||
645 | static irqreturn_t safexcel_irq_ring(int irq, void *data) | ||
646 | { | ||
647 | struct safexcel_ring_irq_data *irq_data = data; | ||
648 | struct safexcel_crypto_priv *priv = irq_data->priv; | ||
649 | int ring = irq_data->ring; | ||
650 | u32 status, stat; | ||
651 | |||
652 | status = readl(priv->base + EIP197_HIA_AIC_R_ENABLED_STAT(ring)); | ||
653 | if (!status) | ||
654 | return IRQ_NONE; | ||
655 | |||
656 | /* RDR interrupts */ | ||
657 | if (status & EIP197_RDR_IRQ(ring)) { | ||
658 | stat = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT); | ||
659 | |||
660 | if (unlikely(stat & EIP197_xDR_ERR)) { | ||
661 | /* | ||
662 | * Fatal error, the RDR is unusable and must be | ||
663 | * reinitialized. This should not happen under | ||
664 | * normal circumstances. | ||
665 | */ | ||
666 | dev_err(priv->dev, "RDR: fatal error.\n"); | ||
667 | } else if (likely(stat & EIP197_xDR_THRESH)) { | ||
668 | queue_work(priv->ring[ring].workqueue, &priv->ring[ring].work_data.work); | ||
669 | } | ||
670 | |||
671 | /* ACK the interrupts */ | ||
672 | writel(stat & 0xff, | ||
673 | priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT); | ||
674 | } | ||
675 | |||
676 | /* ACK the interrupts */ | ||
677 | writel(status, priv->base + EIP197_HIA_AIC_R_ACK(ring)); | ||
678 | |||
679 | return IRQ_HANDLED; | ||
680 | } | ||
681 | |||
682 | static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name, | ||
683 | irq_handler_t handler, | ||
684 | struct safexcel_ring_irq_data *ring_irq_priv) | ||
685 | { | ||
686 | int ret, irq = platform_get_irq_byname(pdev, name); | ||
687 | |||
688 | if (irq < 0) { | ||
689 | dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name); | ||
690 | return irq; | ||
691 | } | ||
692 | |||
693 | ret = devm_request_irq(&pdev->dev, irq, handler, 0, | ||
694 | dev_name(&pdev->dev), ring_irq_priv); | ||
695 | if (ret) { | ||
696 | dev_err(&pdev->dev, "unable to request IRQ %d\n", irq); | ||
697 | return ret; | ||
698 | } | ||
699 | |||
700 | return irq; | ||
701 | } | ||
702 | |||
703 | static struct safexcel_alg_template *safexcel_algs[] = { | ||
704 | &safexcel_alg_ecb_aes, | ||
705 | &safexcel_alg_cbc_aes, | ||
706 | &safexcel_alg_sha1, | ||
707 | &safexcel_alg_sha224, | ||
708 | &safexcel_alg_sha256, | ||
709 | &safexcel_alg_hmac_sha1, | ||
710 | }; | ||
711 | |||
712 | static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) | ||
713 | { | ||
714 | int i, j, ret = 0; | ||
715 | |||
716 | for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) { | ||
717 | safexcel_algs[i]->priv = priv; | ||
718 | |||
719 | if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) | ||
720 | ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher); | ||
721 | else | ||
722 | ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash); | ||
723 | |||
724 | if (ret) | ||
725 | goto fail; | ||
726 | } | ||
727 | |||
728 | return 0; | ||
729 | |||
730 | fail: | ||
731 | for (j = 0; j < i; j++) { | ||
732 | if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) | ||
733 | crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher); | ||
734 | else | ||
735 | crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash); | ||
736 | } | ||
737 | |||
738 | return ret; | ||
739 | } | ||
740 | |||
741 | static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv) | ||
742 | { | ||
743 | int i; | ||
744 | |||
745 | for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) { | ||
746 | if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) | ||
747 | crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher); | ||
748 | else | ||
749 | crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash); | ||
750 | } | ||
751 | } | ||
752 | |||
753 | static void safexcel_configure(struct safexcel_crypto_priv *priv) | ||
754 | { | ||
755 | u32 val, mask; | ||
756 | |||
757 | val = readl(priv->base + EIP197_HIA_OPTIONS); | ||
758 | val = (val & GENMASK(27, 25)) >> 25; | ||
759 | mask = BIT(val) - 1; | ||
760 | |||
761 | val = readl(priv->base + EIP197_HIA_OPTIONS); | ||
762 | priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings); | ||
763 | |||
764 | priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32)); | ||
765 | priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask; | ||
766 | |||
767 | priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32)); | ||
768 | priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask; | ||
769 | } | ||
770 | |||
771 | static int safexcel_probe(struct platform_device *pdev) | ||
772 | { | ||
773 | struct device *dev = &pdev->dev; | ||
774 | struct resource *res; | ||
775 | struct safexcel_crypto_priv *priv; | ||
776 | u64 dma_mask; | ||
777 | int i, ret; | ||
778 | |||
779 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); | ||
780 | if (!priv) | ||
781 | return -ENOMEM; | ||
782 | |||
783 | priv->dev = dev; | ||
784 | |||
785 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
786 | priv->base = devm_ioremap_resource(dev, res); | ||
787 | if (IS_ERR(priv->base)) { | ||
788 | dev_err(dev, "failed to get resource\n"); | ||
789 | return PTR_ERR(priv->base); | ||
790 | } | ||
791 | |||
792 | priv->clk = of_clk_get(dev->of_node, 0); | ||
793 | if (!IS_ERR(priv->clk)) { | ||
794 | ret = clk_prepare_enable(priv->clk); | ||
795 | if (ret) { | ||
796 | dev_err(dev, "unable to enable clk (%d)\n", ret); | ||
797 | return ret; | ||
798 | } | ||
799 | } else { | ||
800 | /* The clock isn't mandatory */ | ||
801 | if (PTR_ERR(priv->clk) == -EPROBE_DEFER) | ||
802 | return -EPROBE_DEFER; | ||
803 | } | ||
804 | |||
805 | if (of_property_read_u64(dev->of_node, "dma-mask", &dma_mask)) | ||
806 | dma_mask = DMA_BIT_MASK(64); | ||
807 | ret = dma_set_mask_and_coherent(dev, dma_mask); | ||
808 | if (ret) | ||
809 | goto err_clk; | ||
810 | |||
811 | priv->context_pool = dmam_pool_create("safexcel-context", dev, | ||
812 | sizeof(struct safexcel_context_record), | ||
813 | 1, 0); | ||
814 | if (!priv->context_pool) { | ||
815 | ret = -ENOMEM; | ||
816 | goto err_clk; | ||
817 | } | ||
818 | |||
819 | safexcel_configure(priv); | ||
820 | |||
821 | for (i = 0; i < priv->config.rings; i++) { | ||
822 | char irq_name[6] = {0}; /* "ringX\0" */ | ||
823 | char wq_name[9] = {0}; /* "wq_ringX\0" */ | ||
824 | int irq; | ||
825 | struct safexcel_ring_irq_data *ring_irq; | ||
826 | |||
827 | ret = safexcel_init_ring_descriptors(priv, | ||
828 | &priv->ring[i].cdr, | ||
829 | &priv->ring[i].rdr); | ||
830 | if (ret) | ||
831 | goto err_clk; | ||
832 | |||
833 | ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL); | ||
834 | if (!ring_irq) { | ||
835 | ret = -ENOMEM; | ||
836 | goto err_clk; | ||
837 | } | ||
838 | |||
839 | ring_irq->priv = priv; | ||
840 | ring_irq->ring = i; | ||
841 | |||
842 | snprintf(irq_name, 6, "ring%d", i); | ||
843 | irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring, | ||
844 | ring_irq); | ||
845 | |||
846 | if (irq < 0) | ||
847 | goto err_clk; | ||
848 | |||
849 | priv->ring[i].work_data.priv = priv; | ||
850 | priv->ring[i].work_data.ring = i; | ||
851 | INIT_WORK(&priv->ring[i].work_data.work, safexcel_handle_result_work); | ||
852 | |||
853 | snprintf(wq_name, 9, "wq_ring%d", i); | ||
854 | priv->ring[i].workqueue = create_singlethread_workqueue(wq_name); | ||
855 | if (!priv->ring[i].workqueue) { | ||
856 | ret = -ENOMEM; | ||
857 | goto err_clk; | ||
858 | } | ||
859 | |||
860 | crypto_init_queue(&priv->ring[i].queue, | ||
861 | EIP197_DEFAULT_RING_SIZE); | ||
862 | |||
863 | INIT_LIST_HEAD(&priv->ring[i].list); | ||
864 | spin_lock_init(&priv->ring[i].lock); | ||
865 | spin_lock_init(&priv->ring[i].egress_lock); | ||
866 | spin_lock_init(&priv->ring[i].queue_lock); | ||
867 | } | ||
868 | |||
869 | platform_set_drvdata(pdev, priv); | ||
870 | atomic_set(&priv->ring_used, 0); | ||
871 | |||
872 | ret = safexcel_hw_init(priv); | ||
873 | if (ret) { | ||
874 | dev_err(dev, "EIP h/w init failed (%d)\n", ret); | ||
875 | goto err_clk; | ||
876 | } | ||
877 | |||
878 | ret = safexcel_register_algorithms(priv); | ||
879 | if (ret) { | ||
880 | dev_err(dev, "Failed to register algorithms (%d)\n", ret); | ||
881 | goto err_clk; | ||
882 | } | ||
883 | |||
884 | return 0; | ||
885 | |||
886 | err_clk: | ||
887 | clk_disable_unprepare(priv->clk); | ||
888 | return ret; | ||
889 | } | ||
890 | |||
891 | |||
892 | static int safexcel_remove(struct platform_device *pdev) | ||
893 | { | ||
894 | struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev); | ||
895 | int i; | ||
896 | |||
897 | safexcel_unregister_algorithms(priv); | ||
898 | clk_disable_unprepare(priv->clk); | ||
899 | |||
900 | for (i = 0; i < priv->config.rings; i++) | ||
901 | destroy_workqueue(priv->ring[i].workqueue); | ||
902 | |||
903 | return 0; | ||
904 | } | ||
905 | |||
906 | static const struct of_device_id safexcel_of_match_table[] = { | ||
907 | { .compatible = "inside-secure,safexcel-eip197" }, | ||
908 | {}, | ||
909 | }; | ||
910 | |||
911 | |||
912 | static struct platform_driver crypto_safexcel = { | ||
913 | .probe = safexcel_probe, | ||
914 | .remove = safexcel_remove, | ||
915 | .driver = { | ||
916 | .name = "crypto-safexcel", | ||
917 | .of_match_table = safexcel_of_match_table, | ||
918 | }, | ||
919 | }; | ||
920 | module_platform_driver(crypto_safexcel); | ||
921 | |||
922 | MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>"); | ||
923 | MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>"); | ||
924 | MODULE_AUTHOR("Igal Liberman <igall@marvell.com>"); | ||
925 | MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197"); | ||
926 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h new file mode 100644 index 000000000000..304c5838c11a --- /dev/null +++ b/drivers/crypto/inside-secure/safexcel.h | |||
@@ -0,0 +1,574 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2017 Marvell | ||
3 | * | ||
4 | * Antoine Tenart <antoine.tenart@free-electrons.com> | ||
5 | * | ||
6 | * This file is licensed under the terms of the GNU General Public | ||
7 | * License version 2. This program is licensed "as is" without any | ||
8 | * warranty of any kind, whether express or implied. | ||
9 | */ | ||
10 | |||
11 | #ifndef __SAFEXCEL_H__ | ||
12 | #define __SAFEXCEL_H__ | ||
13 | |||
14 | #include <crypto/algapi.h> | ||
15 | #include <crypto/internal/hash.h> | ||
16 | #include <crypto/skcipher.h> | ||
17 | |||
18 | #define EIP197_HIA_VERSION_LE 0xca35 | ||
19 | #define EIP197_HIA_VERSION_BE 0x35ca | ||
20 | |||
21 | /* Static configuration */ | ||
22 | #define EIP197_DEFAULT_RING_SIZE 64 | ||
23 | #define EIP197_MAX_TOKENS 5 | ||
24 | #define EIP197_MAX_RINGS 4 | ||
25 | #define EIP197_FETCH_COUNT 1 | ||
26 | #define EIP197_MAX_BATCH_SZ EIP197_DEFAULT_RING_SIZE | ||
27 | |||
28 | #define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \ | ||
29 | GFP_KERNEL : GFP_ATOMIC) | ||
30 | |||
31 | /* CDR/RDR register offsets */ | ||
32 | #define EIP197_HIA_xDR_OFF(r) (0x80000 + (r) * 0x1000) | ||
33 | #define EIP197_HIA_CDR(r) (EIP197_HIA_xDR_OFF(r)) | ||
34 | #define EIP197_HIA_RDR(r) (EIP197_HIA_xDR_OFF(r) + 0x800) | ||
35 | #define EIP197_HIA_xDR_RING_BASE_ADDR_LO 0x0 | ||
36 | #define EIP197_HIA_xDR_RING_BASE_ADDR_HI 0x4 | ||
37 | #define EIP197_HIA_xDR_RING_SIZE 0x18 | ||
38 | #define EIP197_HIA_xDR_DESC_SIZE 0x1c | ||
39 | #define EIP197_HIA_xDR_CFG 0x20 | ||
40 | #define EIP197_HIA_xDR_DMA_CFG 0x24 | ||
41 | #define EIP197_HIA_xDR_THRESH 0x28 | ||
42 | #define EIP197_HIA_xDR_PREP_COUNT 0x2c | ||
43 | #define EIP197_HIA_xDR_PROC_COUNT 0x30 | ||
44 | #define EIP197_HIA_xDR_PREP_PNTR 0x34 | ||
45 | #define EIP197_HIA_xDR_PROC_PNTR 0x38 | ||
46 | #define EIP197_HIA_xDR_STAT 0x3c | ||
47 | |||
48 | /* register offsets */ | ||
49 | #define EIP197_HIA_DFE_CFG 0x8c000 | ||
50 | #define EIP197_HIA_DFE_THR_CTRL 0x8c040 | ||
51 | #define EIP197_HIA_DFE_THR_STAT 0x8c044 | ||
52 | #define EIP197_HIA_DSE_CFG 0x8d000 | ||
53 | #define EIP197_HIA_DSE_THR_CTRL 0x8d040 | ||
54 | #define EIP197_HIA_DSE_THR_STAT 0x8d044 | ||
55 | #define EIP197_HIA_RA_PE_CTRL 0x90010 | ||
56 | #define EIP197_HIA_RA_PE_STAT 0x90014 | ||
57 | #define EIP197_HIA_AIC_R_OFF(r) ((r) * 0x1000) | ||
58 | #define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0x9e808 - EIP197_HIA_AIC_R_OFF(r)) | ||
59 | #define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0x9e810 - EIP197_HIA_AIC_R_OFF(r)) | ||
60 | #define EIP197_HIA_AIC_R_ACK(r) (0x9e810 - EIP197_HIA_AIC_R_OFF(r)) | ||
61 | #define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0x9e814 - EIP197_HIA_AIC_R_OFF(r)) | ||
62 | #define EIP197_HIA_AIC_G_ENABLE_CTRL 0x9f808 | ||
63 | #define EIP197_HIA_AIC_G_ENABLED_STAT 0x9f810 | ||
64 | #define EIP197_HIA_AIC_G_ACK 0x9f810 | ||
65 | #define EIP197_HIA_MST_CTRL 0x9fff4 | ||
66 | #define EIP197_HIA_OPTIONS 0x9fff8 | ||
67 | #define EIP197_HIA_VERSION 0x9fffc | ||
68 | #define EIP197_PE_IN_DBUF_THRES 0xa0000 | ||
69 | #define EIP197_PE_IN_TBUF_THRES 0xa0100 | ||
70 | #define EIP197_PE_ICE_SCRATCH_RAM 0xa0800 | ||
71 | #define EIP197_PE_ICE_PUE_CTRL 0xa0c80 | ||
72 | #define EIP197_PE_ICE_SCRATCH_CTRL 0xa0d04 | ||
73 | #define EIP197_PE_ICE_FPP_CTRL 0xa0d80 | ||
74 | #define EIP197_PE_ICE_RAM_CTRL 0xa0ff0 | ||
75 | #define EIP197_PE_EIP96_FUNCTION_EN 0xa1004 | ||
76 | #define EIP197_PE_EIP96_CONTEXT_CTRL 0xa1008 | ||
77 | #define EIP197_PE_EIP96_CONTEXT_STAT 0xa100c | ||
78 | #define EIP197_PE_OUT_DBUF_THRES 0xa1c00 | ||
79 | #define EIP197_PE_OUT_TBUF_THRES 0xa1d00 | ||
80 | #define EIP197_CLASSIFICATION_RAMS 0xe0000 | ||
81 | #define EIP197_TRC_CTRL 0xf0800 | ||
82 | #define EIP197_TRC_LASTRES 0xf0804 | ||
83 | #define EIP197_TRC_REGINDEX 0xf0808 | ||
84 | #define EIP197_TRC_PARAMS 0xf0820 | ||
85 | #define EIP197_TRC_FREECHAIN 0xf0824 | ||
86 | #define EIP197_TRC_PARAMS2 0xf0828 | ||
87 | #define EIP197_TRC_ECCCTRL 0xf0830 | ||
88 | #define EIP197_TRC_ECCSTAT 0xf0834 | ||
89 | #define EIP197_TRC_ECCADMINSTAT 0xf0838 | ||
90 | #define EIP197_TRC_ECCDATASTAT 0xf083c | ||
91 | #define EIP197_TRC_ECCDATA 0xf0840 | ||
92 | #define EIP197_CS_RAM_CTRL 0xf7ff0 | ||
93 | #define EIP197_MST_CTRL 0xffff4 | ||
94 | |||
95 | /* EIP197_HIA_xDR_DESC_SIZE */ | ||
96 | #define EIP197_xDR_DESC_MODE_64BIT BIT(31) | ||
97 | |||
98 | /* EIP197_HIA_xDR_DMA_CFG */ | ||
99 | #define EIP197_HIA_xDR_WR_RES_BUF BIT(22) | ||
100 | #define EIP197_HIA_xDR_WR_CTRL_BUG BIT(23) | ||
101 | #define EIP197_HIA_xDR_WR_OWN_BUF BIT(24) | ||
102 | #define EIP197_HIA_xDR_CFG_WR_CACHE(n) (((n) & 0x7) << 25) | ||
103 | #define EIP197_HIA_xDR_CFG_RD_CACHE(n) (((n) & 0x7) << 29) | ||
104 | |||
105 | /* EIP197_HIA_CDR_THRESH */ | ||
106 | #define EIP197_HIA_CDR_THRESH_PROC_PKT(n) (n) | ||
107 | #define EIP197_HIA_CDR_THRESH_PROC_MODE BIT(22) | ||
108 | #define EIP197_HIA_CDR_THRESH_PKT_MODE BIT(23) | ||
109 | #define EIP197_HIA_CDR_THRESH_TIMEOUT(n) ((n) << 24) /* x256 clk cycles */ | ||
110 | |||
111 | /* EIP197_HIA_RDR_THRESH */ | ||
112 | #define EIP197_HIA_RDR_THRESH_PROC_PKT(n) (n) | ||
113 | #define EIP197_HIA_RDR_THRESH_PKT_MODE BIT(23) | ||
114 | #define EIP197_HIA_RDR_THRESH_TIMEOUT(n) ((n) << 24) /* x256 clk cycles */ | ||
115 | |||
116 | /* EIP197_HIA_xDR_PREP_COUNT */ | ||
117 | #define EIP197_xDR_PREP_CLR_COUNT BIT(31) | ||
118 | |||
119 | /* EIP197_HIA_xDR_PROC_COUNT */ | ||
120 | #define EIP197_xDR_PROC_xD_COUNT(n) ((n) << 2) | ||
121 | #define EIP197_xDR_PROC_xD_PKT(n) ((n) << 24) | ||
122 | #define EIP197_xDR_PROC_CLR_COUNT BIT(31) | ||
123 | |||
124 | /* EIP197_HIA_xDR_STAT */ | ||
125 | #define EIP197_xDR_DMA_ERR BIT(0) | ||
126 | #define EIP197_xDR_PREP_CMD_THRES BIT(1) | ||
127 | #define EIP197_xDR_ERR BIT(2) | ||
128 | #define EIP197_xDR_THRESH BIT(4) | ||
129 | #define EIP197_xDR_TIMEOUT BIT(5) | ||
130 | |||
131 | #define EIP197_HIA_RA_PE_CTRL_RESET BIT(31) | ||
132 | #define EIP197_HIA_RA_PE_CTRL_EN BIT(30) | ||
133 | |||
134 | /* EIP197_HIA_AIC_R_ENABLE_CTRL */ | ||
135 | #define EIP197_CDR_IRQ(n) BIT((n) * 2) | ||
136 | #define EIP197_RDR_IRQ(n) BIT((n) * 2 + 1) | ||
137 | |||
138 | /* EIP197_HIA_DFE/DSE_CFG */ | ||
139 | #define EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(n) ((n) << 0) | ||
140 | #define EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(n) (((n) & 0x7) << 4) | ||
141 | #define EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(n) ((n) << 8) | ||
142 | #define EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE GENMASK(15, 14) | ||
143 | #define EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(n) ((n) << 16) | ||
144 | #define EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(n) (((n) & 0x7) << 20) | ||
145 | #define EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(n) ((n) << 24) | ||
146 | #define EIP197_HIA_DFE_CFG_DIS_DEBUG (BIT(31) | BIT(29)) | ||
147 | #define EIP197_HIA_DSE_CFG_EN_SINGLE_WR BIT(29) | ||
148 | #define EIP197_HIA_DSE_CFG_DIS_DEBUG BIT(31) | ||
149 | |||
150 | /* EIP197_HIA_DFE/DSE_THR_CTRL */ | ||
151 | #define EIP197_DxE_THR_CTRL_EN BIT(30) | ||
152 | #define EIP197_DxE_THR_CTRL_RESET_PE BIT(31) | ||
153 | |||
154 | /* EIP197_HIA_AIC_G_ENABLED_STAT */ | ||
155 | #define EIP197_G_IRQ_DFE(n) BIT((n) << 1) | ||
156 | #define EIP197_G_IRQ_DSE(n) BIT(((n) << 1) + 1) | ||
157 | #define EIP197_G_IRQ_RING BIT(16) | ||
158 | #define EIP197_G_IRQ_PE(n) BIT((n) + 20) | ||
159 | |||
160 | /* EIP197_HIA_MST_CTRL */ | ||
161 | #define RD_CACHE_3BITS 0x5 | ||
162 | #define WR_CACHE_3BITS 0x3 | ||
163 | #define RD_CACHE_4BITS (RD_CACHE_3BITS << 1 | BIT(0)) | ||
164 | #define WR_CACHE_4BITS (WR_CACHE_3BITS << 1 | BIT(0)) | ||
165 | #define EIP197_MST_CTRL_RD_CACHE(n) (((n) & 0xf) << 0) | ||
166 | #define EIP197_MST_CTRL_WD_CACHE(n) (((n) & 0xf) << 4) | ||
167 | #define EIP197_MST_CTRL_BYTE_SWAP BIT(24) | ||
168 | #define EIP197_MST_CTRL_NO_BYTE_SWAP BIT(25) | ||
169 | |||
170 | /* EIP197_PE_IN_DBUF/TBUF_THRES */ | ||
171 | #define EIP197_PE_IN_xBUF_THRES_MIN(n) ((n) << 8) | ||
172 | #define EIP197_PE_IN_xBUF_THRES_MAX(n) ((n) << 12) | ||
173 | |||
174 | /* EIP197_PE_OUT_DBUF_THRES */ | ||
175 | #define EIP197_PE_OUT_DBUF_THRES_MIN(n) ((n) << 0) | ||
176 | #define EIP197_PE_OUT_DBUF_THRES_MAX(n) ((n) << 4) | ||
177 | |||
178 | /* EIP197_PE_ICE_SCRATCH_CTRL */ | ||
179 | #define EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER BIT(2) | ||
180 | #define EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN BIT(3) | ||
181 | #define EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS BIT(24) | ||
182 | #define EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS BIT(25) | ||
183 | |||
184 | /* EIP197_PE_ICE_SCRATCH_RAM */ | ||
185 | #define EIP197_NUM_OF_SCRATCH_BLOCKS 32 | ||
186 | |||
187 | /* EIP197_PE_ICE_PUE/FPP_CTRL */ | ||
188 | #define EIP197_PE_ICE_x_CTRL_SW_RESET BIT(0) | ||
189 | #define EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR BIT(14) | ||
190 | #define EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR BIT(15) | ||
191 | |||
192 | /* EIP197_PE_ICE_RAM_CTRL */ | ||
193 | #define EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN BIT(0) | ||
194 | #define EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN BIT(1) | ||
195 | |||
196 | /* EIP197_PE_EIP96_FUNCTION_EN */ | ||
197 | #define EIP197_FUNCTION_RSVD (BIT(6) | BIT(15) | BIT(20) | BIT(23)) | ||
198 | #define EIP197_PROTOCOL_HASH_ONLY BIT(0) | ||
199 | #define EIP197_PROTOCOL_ENCRYPT_ONLY BIT(1) | ||
200 | #define EIP197_PROTOCOL_HASH_ENCRYPT BIT(2) | ||
201 | #define EIP197_PROTOCOL_HASH_DECRYPT BIT(3) | ||
202 | #define EIP197_PROTOCOL_ENCRYPT_HASH BIT(4) | ||
203 | #define EIP197_PROTOCOL_DECRYPT_HASH BIT(5) | ||
204 | #define EIP197_ALG_ARC4 BIT(7) | ||
205 | #define EIP197_ALG_AES_ECB BIT(8) | ||
206 | #define EIP197_ALG_AES_CBC BIT(9) | ||
207 | #define EIP197_ALG_AES_CTR_ICM BIT(10) | ||
208 | #define EIP197_ALG_AES_OFB BIT(11) | ||
209 | #define EIP197_ALG_AES_CFB BIT(12) | ||
210 | #define EIP197_ALG_DES_ECB BIT(13) | ||
211 | #define EIP197_ALG_DES_CBC BIT(14) | ||
212 | #define EIP197_ALG_DES_OFB BIT(16) | ||
213 | #define EIP197_ALG_DES_CFB BIT(17) | ||
214 | #define EIP197_ALG_3DES_ECB BIT(18) | ||
215 | #define EIP197_ALG_3DES_CBC BIT(19) | ||
216 | #define EIP197_ALG_3DES_OFB BIT(21) | ||
217 | #define EIP197_ALG_3DES_CFB BIT(22) | ||
218 | #define EIP197_ALG_MD5 BIT(24) | ||
219 | #define EIP197_ALG_HMAC_MD5 BIT(25) | ||
220 | #define EIP197_ALG_SHA1 BIT(26) | ||
221 | #define EIP197_ALG_HMAC_SHA1 BIT(27) | ||
222 | #define EIP197_ALG_SHA2 BIT(28) | ||
223 | #define EIP197_ALG_HMAC_SHA2 BIT(29) | ||
224 | #define EIP197_ALG_AES_XCBC_MAC BIT(30) | ||
225 | #define EIP197_ALG_GCM_HASH BIT(31) | ||
226 | |||
227 | /* EIP197_PE_EIP96_CONTEXT_CTRL */ | ||
228 | #define EIP197_CONTEXT_SIZE(n) (n) | ||
229 | #define EIP197_ADDRESS_MODE BIT(8) | ||
230 | #define EIP197_CONTROL_MODE BIT(9) | ||
231 | |||
232 | /* Context Control */ | ||
233 | struct safexcel_context_record { | ||
234 | u32 control0; | ||
235 | u32 control1; | ||
236 | |||
237 | __le32 data[12]; | ||
238 | } __packed; | ||
239 | |||
240 | /* control0 */ | ||
241 | #define CONTEXT_CONTROL_TYPE_NULL_OUT 0x0 | ||
242 | #define CONTEXT_CONTROL_TYPE_NULL_IN 0x1 | ||
243 | #define CONTEXT_CONTROL_TYPE_HASH_OUT 0x2 | ||
244 | #define CONTEXT_CONTROL_TYPE_HASH_IN 0x3 | ||
245 | #define CONTEXT_CONTROL_TYPE_CRYPTO_OUT 0x4 | ||
246 | #define CONTEXT_CONTROL_TYPE_CRYPTO_IN 0x5 | ||
247 | #define CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT 0x6 | ||
248 | #define CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN 0x7 | ||
249 | #define CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT 0x14 | ||
250 | #define CONTEXT_CONTROL_TYPE_HASH_DECRYPT_OUT 0x15 | ||
251 | #define CONTEXT_CONTROL_RESTART_HASH BIT(4) | ||
252 | #define CONTEXT_CONTROL_NO_FINISH_HASH BIT(5) | ||
253 | #define CONTEXT_CONTROL_SIZE(n) ((n) << 8) | ||
254 | #define CONTEXT_CONTROL_KEY_EN BIT(16) | ||
255 | #define CONTEXT_CONTROL_CRYPTO_ALG_AES128 (0x5 << 17) | ||
256 | #define CONTEXT_CONTROL_CRYPTO_ALG_AES192 (0x6 << 17) | ||
257 | #define CONTEXT_CONTROL_CRYPTO_ALG_AES256 (0x7 << 17) | ||
258 | #define CONTEXT_CONTROL_DIGEST_PRECOMPUTED (0x1 << 21) | ||
259 | #define CONTEXT_CONTROL_DIGEST_HMAC (0x3 << 21) | ||
260 | #define CONTEXT_CONTROL_CRYPTO_ALG_SHA1 (0x2 << 23) | ||
261 | #define CONTEXT_CONTROL_CRYPTO_ALG_SHA224 (0x4 << 23) | ||
262 | #define CONTEXT_CONTROL_CRYPTO_ALG_SHA256 (0x3 << 23) | ||
263 | #define CONTEXT_CONTROL_INV_FR (0x5 << 24) | ||
264 | #define CONTEXT_CONTROL_INV_TR (0x6 << 24) | ||
265 | |||
266 | /* control1 */ | ||
267 | #define CONTEXT_CONTROL_CRYPTO_MODE_ECB (0 << 0) | ||
268 | #define CONTEXT_CONTROL_CRYPTO_MODE_CBC (1 << 0) | ||
269 | #define CONTEXT_CONTROL_IV0 BIT(5) | ||
270 | #define CONTEXT_CONTROL_IV1 BIT(6) | ||
271 | #define CONTEXT_CONTROL_IV2 BIT(7) | ||
272 | #define CONTEXT_CONTROL_IV3 BIT(8) | ||
273 | #define CONTEXT_CONTROL_DIGEST_CNT BIT(9) | ||
274 | #define CONTEXT_CONTROL_COUNTER_MODE BIT(10) | ||
275 | #define CONTEXT_CONTROL_HASH_STORE BIT(19) | ||
276 | |||
277 | /* EIP197_CS_RAM_CTRL */ | ||
278 | #define EIP197_TRC_ENABLE_0 BIT(4) | ||
279 | #define EIP197_TRC_ENABLE_1 BIT(5) | ||
280 | #define EIP197_TRC_ENABLE_2 BIT(6) | ||
281 | #define EIP197_TRC_ENABLE_MASK GENMASK(6, 4) | ||
282 | |||
283 | /* EIP197_TRC_PARAMS */ | ||
284 | #define EIP197_TRC_PARAMS_SW_RESET BIT(0) | ||
285 | #define EIP197_TRC_PARAMS_DATA_ACCESS BIT(2) | ||
286 | #define EIP197_TRC_PARAMS_HTABLE_SZ(x) ((x) << 4) | ||
287 | #define EIP197_TRC_PARAMS_BLK_TIMER_SPEED(x) ((x) << 10) | ||
288 | #define EIP197_TRC_PARAMS_RC_SZ_LARGE(n) ((n) << 18) | ||
289 | |||
290 | /* EIP197_TRC_FREECHAIN */ | ||
291 | #define EIP197_TRC_FREECHAIN_HEAD_PTR(p) (p) | ||
292 | #define EIP197_TRC_FREECHAIN_TAIL_PTR(p) ((p) << 16) | ||
293 | |||
294 | /* EIP197_TRC_PARAMS2 */ | ||
295 | #define EIP197_TRC_PARAMS2_HTABLE_PTR(p) (p) | ||
296 | #define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n) ((n) << 18) | ||
297 | |||
298 | /* Cache helpers */ | ||
299 | #define EIP197_CS_RC_MAX 52 | ||
300 | #define EIP197_CS_RC_SIZE (4 * sizeof(u32)) | ||
301 | #define EIP197_CS_RC_NEXT(x) (x) | ||
302 | #define EIP197_CS_RC_PREV(x) ((x) << 10) | ||
303 | #define EIP197_RC_NULL 0x3ff | ||
304 | #define EIP197_CS_TRC_REC_WC 59 | ||
305 | #define EIP197_CS_TRC_LG_REC_WC 73 | ||
306 | |||
307 | /* Result data */ | ||
308 | struct result_data_desc { | ||
309 | u32 packet_length:17; | ||
310 | u32 error_code:15; | ||
311 | |||
312 | u8 bypass_length:4; | ||
313 | u8 e15:1; | ||
314 | u16 rsvd0; | ||
315 | u8 hash_bytes:1; | ||
316 | u8 hash_length:6; | ||
317 | u8 generic_bytes:1; | ||
318 | u8 checksum:1; | ||
319 | u8 next_header:1; | ||
320 | u8 length:1; | ||
321 | |||
322 | u16 application_id; | ||
323 | u16 rsvd1; | ||
324 | |||
325 | u32 rsvd2; | ||
326 | } __packed; | ||
327 | |||
328 | |||
329 | /* Basic Result Descriptor format */ | ||
330 | struct safexcel_result_desc { | ||
331 | u32 particle_size:17; | ||
332 | u8 rsvd0:3; | ||
333 | u8 descriptor_overflow:1; | ||
334 | u8 buffer_overflow:1; | ||
335 | u8 last_seg:1; | ||
336 | u8 first_seg:1; | ||
337 | u16 result_size:8; | ||
338 | |||
339 | u32 rsvd1; | ||
340 | |||
341 | u32 data_lo; | ||
342 | u32 data_hi; | ||
343 | |||
344 | struct result_data_desc result_data; | ||
345 | } __packed; | ||
346 | |||
347 | struct safexcel_token { | ||
348 | u32 packet_length:17; | ||
349 | u8 stat:2; | ||
350 | u16 instructions:9; | ||
351 | u8 opcode:4; | ||
352 | } __packed; | ||
353 | |||
354 | #define EIP197_TOKEN_STAT_LAST_HASH BIT(0) | ||
355 | #define EIP197_TOKEN_STAT_LAST_PACKET BIT(1) | ||
356 | #define EIP197_TOKEN_OPCODE_DIRECTION 0x0 | ||
357 | #define EIP197_TOKEN_OPCODE_INSERT 0x2 | ||
358 | #define EIP197_TOKEN_OPCODE_NOOP EIP197_TOKEN_OPCODE_INSERT | ||
359 | #define EIP197_TOKEN_OPCODE_BYPASS GENMASK(3, 0) | ||
360 | |||
361 | static inline void eip197_noop_token(struct safexcel_token *token) | ||
362 | { | ||
363 | token->opcode = EIP197_TOKEN_OPCODE_NOOP; | ||
364 | token->packet_length = BIT(2); | ||
365 | } | ||
366 | |||
367 | /* Instructions */ | ||
368 | #define EIP197_TOKEN_INS_INSERT_HASH_DIGEST 0x1c | ||
369 | #define EIP197_TOKEN_INS_TYPE_OUTPUT BIT(5) | ||
370 | #define EIP197_TOKEN_INS_TYPE_HASH BIT(6) | ||
371 | #define EIP197_TOKEN_INS_TYPE_CRYTO BIT(7) | ||
372 | #define EIP197_TOKEN_INS_LAST BIT(8) | ||
373 | |||
374 | /* Processing Engine Control Data */ | ||
375 | struct safexcel_control_data_desc { | ||
376 | u32 packet_length:17; | ||
377 | u16 options:13; | ||
378 | u8 type:2; | ||
379 | |||
380 | u16 application_id; | ||
381 | u16 rsvd; | ||
382 | |||
383 | u8 refresh:2; | ||
384 | u32 context_lo:30; | ||
385 | u32 context_hi; | ||
386 | |||
387 | u32 control0; | ||
388 | u32 control1; | ||
389 | |||
390 | u32 token[EIP197_MAX_TOKENS]; | ||
391 | } __packed; | ||
392 | |||
393 | #define EIP197_OPTION_MAGIC_VALUE BIT(0) | ||
394 | #define EIP197_OPTION_64BIT_CTX BIT(1) | ||
395 | #define EIP197_OPTION_CTX_CTRL_IN_CMD BIT(8) | ||
396 | #define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9) | ||
397 | |||
398 | #define EIP197_TYPE_EXTENDED 0x3 | ||
399 | |||
400 | /* Basic Command Descriptor format */ | ||
401 | struct safexcel_command_desc { | ||
402 | u32 particle_size:17; | ||
403 | u8 rsvd0:5; | ||
404 | u8 last_seg:1; | ||
405 | u8 first_seg:1; | ||
406 | u16 additional_cdata_size:8; | ||
407 | |||
408 | u32 rsvd1; | ||
409 | |||
410 | u32 data_lo; | ||
411 | u32 data_hi; | ||
412 | |||
413 | struct safexcel_control_data_desc control_data; | ||
414 | } __packed; | ||
415 | |||
416 | /* | ||
417 | * Internal structures & functions | ||
418 | */ | ||
419 | |||
420 | enum eip197_fw { | ||
421 | FW_IFPP = 0, | ||
422 | FW_IPUE, | ||
423 | FW_NB | ||
424 | }; | ||
425 | |||
426 | struct safexcel_ring { | ||
427 | void *base; | ||
428 | void *base_end; | ||
429 | dma_addr_t base_dma; | ||
430 | |||
431 | /* write and read pointers */ | ||
432 | void *write; | ||
433 | void *read; | ||
434 | |||
435 | /* number of elements used in the ring */ | ||
436 | unsigned nr; | ||
437 | unsigned offset; | ||
438 | }; | ||
439 | |||
440 | enum safexcel_alg_type { | ||
441 | SAFEXCEL_ALG_TYPE_SKCIPHER, | ||
442 | SAFEXCEL_ALG_TYPE_AHASH, | ||
443 | }; | ||
444 | |||
445 | struct safexcel_request { | ||
446 | struct list_head list; | ||
447 | struct crypto_async_request *req; | ||
448 | }; | ||
449 | |||
450 | struct safexcel_config { | ||
451 | u32 rings; | ||
452 | |||
453 | u32 cd_size; | ||
454 | u32 cd_offset; | ||
455 | |||
456 | u32 rd_size; | ||
457 | u32 rd_offset; | ||
458 | }; | ||
459 | |||
460 | struct safexcel_work_data { | ||
461 | struct work_struct work; | ||
462 | struct safexcel_crypto_priv *priv; | ||
463 | int ring; | ||
464 | }; | ||
465 | |||
466 | struct safexcel_crypto_priv { | ||
467 | void __iomem *base; | ||
468 | struct device *dev; | ||
469 | struct clk *clk; | ||
470 | struct safexcel_config config; | ||
471 | |||
472 | /* context DMA pool */ | ||
473 | struct dma_pool *context_pool; | ||
474 | |||
475 | atomic_t ring_used; | ||
476 | |||
477 | struct { | ||
478 | spinlock_t lock; | ||
479 | spinlock_t egress_lock; | ||
480 | |||
481 | struct list_head list; | ||
482 | struct workqueue_struct *workqueue; | ||
483 | struct safexcel_work_data work_data; | ||
484 | |||
485 | /* command/result rings */ | ||
486 | struct safexcel_ring cdr; | ||
487 | struct safexcel_ring rdr; | ||
488 | |||
489 | /* queue */ | ||
490 | struct crypto_queue queue; | ||
491 | spinlock_t queue_lock; | ||
492 | bool need_dequeue; | ||
493 | } ring[EIP197_MAX_RINGS]; | ||
494 | }; | ||
495 | |||
496 | struct safexcel_context { | ||
497 | int (*send)(struct crypto_async_request *req, int ring, | ||
498 | struct safexcel_request *request, int *commands, | ||
499 | int *results); | ||
500 | int (*handle_result)(struct safexcel_crypto_priv *priv, int ring, | ||
501 | struct crypto_async_request *req, bool *complete, | ||
502 | int *ret); | ||
503 | struct safexcel_context_record *ctxr; | ||
504 | dma_addr_t ctxr_dma; | ||
505 | |||
506 | int ring; | ||
507 | bool needs_inv; | ||
508 | bool exit_inv; | ||
509 | |||
510 | /* Used for ahash requests */ | ||
511 | dma_addr_t result_dma; | ||
512 | void *cache; | ||
513 | dma_addr_t cache_dma; | ||
514 | unsigned int cache_sz; | ||
515 | }; | ||
516 | |||
517 | /* | ||
518 | * Template structure to describe the algorithms in order to register them. | ||
519 | * It also serves to hold our private structure and is actually the only | ||
520 | * way I know of in this framework to avoid having global pointers... | ||
521 | */ | ||
522 | struct safexcel_alg_template { | ||
523 | struct safexcel_crypto_priv *priv; | ||
524 | enum safexcel_alg_type type; | ||
525 | union { | ||
526 | struct skcipher_alg skcipher; | ||
527 | struct ahash_alg ahash; | ||
528 | } alg; | ||
529 | }; | ||
530 | |||
531 | struct safexcel_inv_result { | ||
532 | struct completion completion; | ||
533 | int error; | ||
534 | }; | ||
535 | |||
536 | void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring); | ||
537 | void safexcel_complete(struct safexcel_crypto_priv *priv, int ring); | ||
538 | void safexcel_free_context(struct safexcel_crypto_priv *priv, | ||
539 | struct crypto_async_request *req, | ||
540 | int result_sz); | ||
541 | int safexcel_invalidate_cache(struct crypto_async_request *async, | ||
542 | struct safexcel_context *ctx, | ||
543 | struct safexcel_crypto_priv *priv, | ||
544 | dma_addr_t ctxr_dma, int ring, | ||
545 | struct safexcel_request *request); | ||
546 | int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv, | ||
547 | struct safexcel_ring *cdr, | ||
548 | struct safexcel_ring *rdr); | ||
549 | int safexcel_select_ring(struct safexcel_crypto_priv *priv); | ||
550 | void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv, | ||
551 | struct safexcel_ring *ring); | ||
552 | void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv, | ||
553 | struct safexcel_ring *ring); | ||
554 | struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv, | ||
555 | int ring_id, | ||
556 | bool first, bool last, | ||
557 | dma_addr_t data, u32 len, | ||
558 | u32 full_data_len, | ||
559 | dma_addr_t context); | ||
560 | struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv, | ||
561 | int ring_id, | ||
562 | bool first, bool last, | ||
563 | dma_addr_t data, u32 len); | ||
564 | void safexcel_inv_complete(struct crypto_async_request *req, int error); | ||
565 | |||
566 | /* available algorithms */ | ||
567 | extern struct safexcel_alg_template safexcel_alg_ecb_aes; | ||
568 | extern struct safexcel_alg_template safexcel_alg_cbc_aes; | ||
569 | extern struct safexcel_alg_template safexcel_alg_sha1; | ||
570 | extern struct safexcel_alg_template safexcel_alg_sha224; | ||
571 | extern struct safexcel_alg_template safexcel_alg_sha256; | ||
572 | extern struct safexcel_alg_template safexcel_alg_hmac_sha1; | ||
573 | |||
574 | #endif | ||
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c new file mode 100644 index 000000000000..d2207ac5ba19 --- /dev/null +++ b/drivers/crypto/inside-secure/safexcel_cipher.c | |||
@@ -0,0 +1,561 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2017 Marvell | ||
3 | * | ||
4 | * Antoine Tenart <antoine.tenart@free-electrons.com> | ||
5 | * | ||
6 | * This file is licensed under the terms of the GNU General Public | ||
7 | * License version 2. This program is licensed "as is" without any | ||
8 | * warranty of any kind, whether express or implied. | ||
9 | */ | ||
10 | |||
11 | #include <linux/device.h> | ||
12 | #include <linux/dma-mapping.h> | ||
13 | #include <linux/dmapool.h> | ||
14 | |||
15 | #include <crypto/aes.h> | ||
16 | #include <crypto/skcipher.h> | ||
17 | |||
18 | #include "safexcel.h" | ||
19 | |||
20 | enum safexcel_cipher_direction { | ||
21 | SAFEXCEL_ENCRYPT, | ||
22 | SAFEXCEL_DECRYPT, | ||
23 | }; | ||
24 | |||
25 | struct safexcel_cipher_ctx { | ||
26 | struct safexcel_context base; | ||
27 | struct safexcel_crypto_priv *priv; | ||
28 | |||
29 | enum safexcel_cipher_direction direction; | ||
30 | u32 mode; | ||
31 | |||
32 | __le32 key[8]; | ||
33 | unsigned int key_len; | ||
34 | }; | ||
35 | |||
36 | static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, | ||
37 | struct crypto_async_request *async, | ||
38 | struct safexcel_command_desc *cdesc, | ||
39 | u32 length) | ||
40 | { | ||
41 | struct skcipher_request *req = skcipher_request_cast(async); | ||
42 | struct safexcel_token *token; | ||
43 | unsigned offset = 0; | ||
44 | |||
45 | if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) { | ||
46 | offset = AES_BLOCK_SIZE / sizeof(u32); | ||
47 | memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_SIZE); | ||
48 | |||
49 | cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD; | ||
50 | } | ||
51 | |||
52 | token = (struct safexcel_token *)(cdesc->control_data.token + offset); | ||
53 | |||
54 | token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; | ||
55 | token[0].packet_length = length; | ||
56 | token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET; | ||
57 | token[0].instructions = EIP197_TOKEN_INS_LAST | | ||
58 | EIP197_TOKEN_INS_TYPE_CRYTO | | ||
59 | EIP197_TOKEN_INS_TYPE_OUTPUT; | ||
60 | } | ||
61 | |||
62 | static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key, | ||
63 | unsigned int len) | ||
64 | { | ||
65 | struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); | ||
66 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
67 | struct crypto_aes_ctx aes; | ||
68 | int ret, i; | ||
69 | |||
70 | ret = crypto_aes_expand_key(&aes, key, len); | ||
71 | if (ret) { | ||
72 | crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
73 | return ret; | ||
74 | } | ||
75 | |||
76 | for (i = 0; i < len / sizeof(u32); i++) { | ||
77 | if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) { | ||
78 | ctx->base.needs_inv = true; | ||
79 | break; | ||
80 | } | ||
81 | } | ||
82 | |||
83 | for (i = 0; i < len / sizeof(u32); i++) | ||
84 | ctx->key[i] = cpu_to_le32(aes.key_enc[i]); | ||
85 | |||
86 | ctx->key_len = len; | ||
87 | |||
88 | memzero_explicit(&aes, sizeof(aes)); | ||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, | ||
93 | struct safexcel_command_desc *cdesc) | ||
94 | { | ||
95 | struct safexcel_crypto_priv *priv = ctx->priv; | ||
96 | int ctrl_size; | ||
97 | |||
98 | if (ctx->direction == SAFEXCEL_ENCRYPT) | ||
99 | cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT; | ||
100 | else | ||
101 | cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN; | ||
102 | |||
103 | cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN; | ||
104 | cdesc->control_data.control1 |= ctx->mode; | ||
105 | |||
106 | switch (ctx->key_len) { | ||
107 | case AES_KEYSIZE_128: | ||
108 | cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128; | ||
109 | ctrl_size = 4; | ||
110 | break; | ||
111 | case AES_KEYSIZE_192: | ||
112 | cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192; | ||
113 | ctrl_size = 6; | ||
114 | break; | ||
115 | case AES_KEYSIZE_256: | ||
116 | cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256; | ||
117 | ctrl_size = 8; | ||
118 | break; | ||
119 | default: | ||
120 | dev_err(priv->dev, "aes keysize not supported: %u\n", | ||
121 | ctx->key_len); | ||
122 | return -EINVAL; | ||
123 | } | ||
124 | cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size); | ||
125 | |||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, | ||
130 | struct crypto_async_request *async, | ||
131 | bool *should_complete, int *ret) | ||
132 | { | ||
133 | struct skcipher_request *req = skcipher_request_cast(async); | ||
134 | struct safexcel_result_desc *rdesc; | ||
135 | int ndesc = 0; | ||
136 | |||
137 | *ret = 0; | ||
138 | |||
139 | spin_lock_bh(&priv->ring[ring].egress_lock); | ||
140 | do { | ||
141 | rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); | ||
142 | if (IS_ERR(rdesc)) { | ||
143 | dev_err(priv->dev, | ||
144 | "cipher: result: could not retrieve the result descriptor\n"); | ||
145 | *ret = PTR_ERR(rdesc); | ||
146 | break; | ||
147 | } | ||
148 | |||
149 | if (rdesc->result_data.error_code) { | ||
150 | dev_err(priv->dev, | ||
151 | "cipher: result: result descriptor error (%d)\n", | ||
152 | rdesc->result_data.error_code); | ||
153 | *ret = -EIO; | ||
154 | } | ||
155 | |||
156 | ndesc++; | ||
157 | } while (!rdesc->last_seg); | ||
158 | |||
159 | safexcel_complete(priv, ring); | ||
160 | spin_unlock_bh(&priv->ring[ring].egress_lock); | ||
161 | |||
162 | if (req->src == req->dst) { | ||
163 | dma_unmap_sg(priv->dev, req->src, | ||
164 | sg_nents_for_len(req->src, req->cryptlen), | ||
165 | DMA_BIDIRECTIONAL); | ||
166 | } else { | ||
167 | dma_unmap_sg(priv->dev, req->src, | ||
168 | sg_nents_for_len(req->src, req->cryptlen), | ||
169 | DMA_TO_DEVICE); | ||
170 | dma_unmap_sg(priv->dev, req->dst, | ||
171 | sg_nents_for_len(req->dst, req->cryptlen), | ||
172 | DMA_FROM_DEVICE); | ||
173 | } | ||
174 | |||
175 | *should_complete = true; | ||
176 | |||
177 | return ndesc; | ||
178 | } | ||
179 | |||
180 | static int safexcel_aes_send(struct crypto_async_request *async, | ||
181 | int ring, struct safexcel_request *request, | ||
182 | int *commands, int *results) | ||
183 | { | ||
184 | struct skcipher_request *req = skcipher_request_cast(async); | ||
185 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
186 | struct safexcel_crypto_priv *priv = ctx->priv; | ||
187 | struct safexcel_command_desc *cdesc; | ||
188 | struct safexcel_result_desc *rdesc; | ||
189 | struct scatterlist *sg; | ||
190 | int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = req->cryptlen; | ||
191 | int i, ret = 0; | ||
192 | |||
193 | if (req->src == req->dst) { | ||
194 | nr_src = dma_map_sg(priv->dev, req->src, | ||
195 | sg_nents_for_len(req->src, req->cryptlen), | ||
196 | DMA_BIDIRECTIONAL); | ||
197 | nr_dst = nr_src; | ||
198 | if (!nr_src) | ||
199 | return -EINVAL; | ||
200 | } else { | ||
201 | nr_src = dma_map_sg(priv->dev, req->src, | ||
202 | sg_nents_for_len(req->src, req->cryptlen), | ||
203 | DMA_TO_DEVICE); | ||
204 | if (!nr_src) | ||
205 | return -EINVAL; | ||
206 | |||
207 | nr_dst = dma_map_sg(priv->dev, req->dst, | ||
208 | sg_nents_for_len(req->dst, req->cryptlen), | ||
209 | DMA_FROM_DEVICE); | ||
210 | if (!nr_dst) { | ||
211 | dma_unmap_sg(priv->dev, req->src, | ||
212 | sg_nents_for_len(req->src, req->cryptlen), | ||
213 | DMA_TO_DEVICE); | ||
214 | return -EINVAL; | ||
215 | } | ||
216 | } | ||
217 | |||
218 | memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len); | ||
219 | |||
220 | spin_lock_bh(&priv->ring[ring].egress_lock); | ||
221 | |||
222 | /* command descriptors */ | ||
223 | for_each_sg(req->src, sg, nr_src, i) { | ||
224 | int len = sg_dma_len(sg); | ||
225 | |||
226 | /* Do not overflow the request */ | ||
227 | if (queued - len < 0) | ||
228 | len = queued; | ||
229 | |||
230 | cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len), | ||
231 | sg_dma_address(sg), len, req->cryptlen, | ||
232 | ctx->base.ctxr_dma); | ||
233 | if (IS_ERR(cdesc)) { | ||
234 | /* No space left in the command descriptor ring */ | ||
235 | ret = PTR_ERR(cdesc); | ||
236 | goto cdesc_rollback; | ||
237 | } | ||
238 | n_cdesc++; | ||
239 | |||
240 | if (n_cdesc == 1) { | ||
241 | safexcel_context_control(ctx, cdesc); | ||
242 | safexcel_cipher_token(ctx, async, cdesc, req->cryptlen); | ||
243 | } | ||
244 | |||
245 | queued -= len; | ||
246 | if (!queued) | ||
247 | break; | ||
248 | } | ||
249 | |||
250 | /* result descriptors */ | ||
251 | for_each_sg(req->dst, sg, nr_dst, i) { | ||
252 | bool first = !i, last = (i == nr_dst - 1); | ||
253 | u32 len = sg_dma_len(sg); | ||
254 | |||
255 | rdesc = safexcel_add_rdesc(priv, ring, first, last, | ||
256 | sg_dma_address(sg), len); | ||
257 | if (IS_ERR(rdesc)) { | ||
258 | /* No space left in the result descriptor ring */ | ||
259 | ret = PTR_ERR(rdesc); | ||
260 | goto rdesc_rollback; | ||
261 | } | ||
262 | n_rdesc++; | ||
263 | } | ||
264 | |||
265 | spin_unlock_bh(&priv->ring[ring].egress_lock); | ||
266 | |||
267 | request->req = &req->base; | ||
268 | ctx->base.handle_result = safexcel_handle_result; | ||
269 | |||
270 | *commands = n_cdesc; | ||
271 | *results = n_rdesc; | ||
272 | return 0; | ||
273 | |||
274 | rdesc_rollback: | ||
275 | for (i = 0; i < n_rdesc; i++) | ||
276 | safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr); | ||
277 | cdesc_rollback: | ||
278 | for (i = 0; i < n_cdesc; i++) | ||
279 | safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr); | ||
280 | |||
281 | spin_unlock_bh(&priv->ring[ring].egress_lock); | ||
282 | |||
283 | if (req->src == req->dst) { | ||
284 | dma_unmap_sg(priv->dev, req->src, | ||
285 | sg_nents_for_len(req->src, req->cryptlen), | ||
286 | DMA_BIDIRECTIONAL); | ||
287 | } else { | ||
288 | dma_unmap_sg(priv->dev, req->src, | ||
289 | sg_nents_for_len(req->src, req->cryptlen), | ||
290 | DMA_TO_DEVICE); | ||
291 | dma_unmap_sg(priv->dev, req->dst, | ||
292 | sg_nents_for_len(req->dst, req->cryptlen), | ||
293 | DMA_FROM_DEVICE); | ||
294 | } | ||
295 | |||
296 | return ret; | ||
297 | } | ||
298 | |||
299 | static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, | ||
300 | int ring, | ||
301 | struct crypto_async_request *async, | ||
302 | bool *should_complete, int *ret) | ||
303 | { | ||
304 | struct skcipher_request *req = skcipher_request_cast(async); | ||
305 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
306 | struct safexcel_result_desc *rdesc; | ||
307 | int ndesc = 0, enq_ret; | ||
308 | |||
309 | *ret = 0; | ||
310 | |||
311 | spin_lock_bh(&priv->ring[ring].egress_lock); | ||
312 | do { | ||
313 | rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); | ||
314 | if (IS_ERR(rdesc)) { | ||
315 | dev_err(priv->dev, | ||
316 | "cipher: invalidate: could not retrieve the result descriptor\n"); | ||
317 | *ret = PTR_ERR(rdesc); | ||
318 | break; | ||
319 | } | ||
320 | |||
321 | if (rdesc->result_data.error_code) { | ||
322 | dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n", | ||
323 | rdesc->result_data.error_code); | ||
324 | *ret = -EIO; | ||
325 | } | ||
326 | |||
327 | ndesc++; | ||
328 | } while (!rdesc->last_seg); | ||
329 | |||
330 | safexcel_complete(priv, ring); | ||
331 | spin_unlock_bh(&priv->ring[ring].egress_lock); | ||
332 | |||
333 | if (ctx->base.exit_inv) { | ||
334 | dma_pool_free(priv->context_pool, ctx->base.ctxr, | ||
335 | ctx->base.ctxr_dma); | ||
336 | |||
337 | *should_complete = true; | ||
338 | |||
339 | return ndesc; | ||
340 | } | ||
341 | |||
342 | ring = safexcel_select_ring(priv); | ||
343 | ctx->base.ring = ring; | ||
344 | ctx->base.needs_inv = false; | ||
345 | ctx->base.send = safexcel_aes_send; | ||
346 | |||
347 | spin_lock_bh(&priv->ring[ring].queue_lock); | ||
348 | enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async); | ||
349 | spin_unlock_bh(&priv->ring[ring].queue_lock); | ||
350 | |||
351 | if (enq_ret != -EINPROGRESS) | ||
352 | *ret = enq_ret; | ||
353 | |||
354 | if (!priv->ring[ring].need_dequeue) | ||
355 | safexcel_dequeue(priv, ring); | ||
356 | |||
357 | *should_complete = false; | ||
358 | |||
359 | return ndesc; | ||
360 | } | ||
361 | |||
362 | static int safexcel_cipher_send_inv(struct crypto_async_request *async, | ||
363 | int ring, struct safexcel_request *request, | ||
364 | int *commands, int *results) | ||
365 | { | ||
366 | struct skcipher_request *req = skcipher_request_cast(async); | ||
367 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
368 | struct safexcel_crypto_priv *priv = ctx->priv; | ||
369 | int ret; | ||
370 | |||
371 | ctx->base.handle_result = safexcel_handle_inv_result; | ||
372 | |||
373 | ret = safexcel_invalidate_cache(async, &ctx->base, priv, | ||
374 | ctx->base.ctxr_dma, ring, request); | ||
375 | if (unlikely(ret)) | ||
376 | return ret; | ||
377 | |||
378 | *commands = 1; | ||
379 | *results = 1; | ||
380 | |||
381 | return 0; | ||
382 | } | ||
383 | |||
384 | static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm) | ||
385 | { | ||
386 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
387 | struct safexcel_crypto_priv *priv = ctx->priv; | ||
388 | struct skcipher_request req; | ||
389 | struct safexcel_inv_result result = { 0 }; | ||
390 | int ring = ctx->base.ring; | ||
391 | |||
392 | memset(&req, 0, sizeof(struct skcipher_request)); | ||
393 | |||
394 | /* create invalidation request */ | ||
395 | init_completion(&result.completion); | ||
396 | skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
397 | safexcel_inv_complete, &result); | ||
398 | |||
399 | skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm)); | ||
400 | ctx = crypto_tfm_ctx(req.base.tfm); | ||
401 | ctx->base.exit_inv = true; | ||
402 | ctx->base.send = safexcel_cipher_send_inv; | ||
403 | |||
404 | spin_lock_bh(&priv->ring[ring].queue_lock); | ||
405 | crypto_enqueue_request(&priv->ring[ring].queue, &req.base); | ||
406 | spin_unlock_bh(&priv->ring[ring].queue_lock); | ||
407 | |||
408 | if (!priv->ring[ring].need_dequeue) | ||
409 | safexcel_dequeue(priv, ring); | ||
410 | |||
411 | wait_for_completion_interruptible(&result.completion); | ||
412 | |||
413 | if (result.error) { | ||
414 | dev_warn(priv->dev, | ||
415 | "cipher: sync: invalidate: completion error %d\n", | ||
416 | result.error); | ||
417 | return result.error; | ||
418 | } | ||
419 | |||
420 | return 0; | ||
421 | } | ||
422 | |||
423 | static int safexcel_aes(struct skcipher_request *req, | ||
424 | enum safexcel_cipher_direction dir, u32 mode) | ||
425 | { | ||
426 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | ||
427 | struct safexcel_crypto_priv *priv = ctx->priv; | ||
428 | int ret, ring; | ||
429 | |||
430 | ctx->direction = dir; | ||
431 | ctx->mode = mode; | ||
432 | |||
433 | if (ctx->base.ctxr) { | ||
434 | if (ctx->base.needs_inv) | ||
435 | ctx->base.send = safexcel_cipher_send_inv; | ||
436 | } else { | ||
437 | ctx->base.ring = safexcel_select_ring(priv); | ||
438 | ctx->base.send = safexcel_aes_send; | ||
439 | |||
440 | ctx->base.ctxr = dma_pool_zalloc(priv->context_pool, | ||
441 | EIP197_GFP_FLAGS(req->base), | ||
442 | &ctx->base.ctxr_dma); | ||
443 | if (!ctx->base.ctxr) | ||
444 | return -ENOMEM; | ||
445 | } | ||
446 | |||
447 | ring = ctx->base.ring; | ||
448 | |||
449 | spin_lock_bh(&priv->ring[ring].queue_lock); | ||
450 | ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base); | ||
451 | spin_unlock_bh(&priv->ring[ring].queue_lock); | ||
452 | |||
453 | if (!priv->ring[ring].need_dequeue) | ||
454 | safexcel_dequeue(priv, ring); | ||
455 | |||
456 | return ret; | ||
457 | } | ||
458 | |||
459 | static int safexcel_ecb_aes_encrypt(struct skcipher_request *req) | ||
460 | { | ||
461 | return safexcel_aes(req, SAFEXCEL_ENCRYPT, | ||
462 | CONTEXT_CONTROL_CRYPTO_MODE_ECB); | ||
463 | } | ||
464 | |||
465 | static int safexcel_ecb_aes_decrypt(struct skcipher_request *req) | ||
466 | { | ||
467 | return safexcel_aes(req, SAFEXCEL_DECRYPT, | ||
468 | CONTEXT_CONTROL_CRYPTO_MODE_ECB); | ||
469 | } | ||
470 | |||
471 | static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm) | ||
472 | { | ||
473 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
474 | struct safexcel_alg_template *tmpl = | ||
475 | container_of(tfm->__crt_alg, struct safexcel_alg_template, | ||
476 | alg.skcipher.base); | ||
477 | |||
478 | ctx->priv = tmpl->priv; | ||
479 | |||
480 | return 0; | ||
481 | } | ||
482 | |||
483 | static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm) | ||
484 | { | ||
485 | struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); | ||
486 | struct safexcel_crypto_priv *priv = ctx->priv; | ||
487 | int ret; | ||
488 | |||
489 | memzero_explicit(ctx->key, 8 * sizeof(u32)); | ||
490 | |||
491 | /* context not allocated, skip invalidation */ | ||
492 | if (!ctx->base.ctxr) | ||
493 | return; | ||
494 | |||
495 | memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32)); | ||
496 | |||
497 | ret = safexcel_cipher_exit_inv(tfm); | ||
498 | if (ret) | ||
499 | dev_warn(priv->dev, "cipher: invalidation error %d\n", ret); | ||
500 | } | ||
501 | |||
502 | struct safexcel_alg_template safexcel_alg_ecb_aes = { | ||
503 | .type = SAFEXCEL_ALG_TYPE_SKCIPHER, | ||
504 | .alg.skcipher = { | ||
505 | .setkey = safexcel_aes_setkey, | ||
506 | .encrypt = safexcel_ecb_aes_encrypt, | ||
507 | .decrypt = safexcel_ecb_aes_decrypt, | ||
508 | .min_keysize = AES_MIN_KEY_SIZE, | ||
509 | .max_keysize = AES_MAX_KEY_SIZE, | ||
510 | .base = { | ||
511 | .cra_name = "ecb(aes)", | ||
512 | .cra_driver_name = "safexcel-ecb-aes", | ||
513 | .cra_priority = 300, | ||
514 | .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | | ||
515 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
516 | .cra_blocksize = AES_BLOCK_SIZE, | ||
517 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), | ||
518 | .cra_alignmask = 0, | ||
519 | .cra_init = safexcel_skcipher_cra_init, | ||
520 | .cra_exit = safexcel_skcipher_cra_exit, | ||
521 | .cra_module = THIS_MODULE, | ||
522 | }, | ||
523 | }, | ||
524 | }; | ||
525 | |||
526 | static int safexcel_cbc_aes_encrypt(struct skcipher_request *req) | ||
527 | { | ||
528 | return safexcel_aes(req, SAFEXCEL_ENCRYPT, | ||
529 | CONTEXT_CONTROL_CRYPTO_MODE_CBC); | ||
530 | } | ||
531 | |||
532 | static int safexcel_cbc_aes_decrypt(struct skcipher_request *req) | ||
533 | { | ||
534 | return safexcel_aes(req, SAFEXCEL_DECRYPT, | ||
535 | CONTEXT_CONTROL_CRYPTO_MODE_CBC); | ||
536 | } | ||
537 | |||
538 | struct safexcel_alg_template safexcel_alg_cbc_aes = { | ||
539 | .type = SAFEXCEL_ALG_TYPE_SKCIPHER, | ||
540 | .alg.skcipher = { | ||
541 | .setkey = safexcel_aes_setkey, | ||
542 | .encrypt = safexcel_cbc_aes_encrypt, | ||
543 | .decrypt = safexcel_cbc_aes_decrypt, | ||
544 | .min_keysize = AES_MIN_KEY_SIZE, | ||
545 | .max_keysize = AES_MAX_KEY_SIZE, | ||
546 | .ivsize = AES_BLOCK_SIZE, | ||
547 | .base = { | ||
548 | .cra_name = "cbc(aes)", | ||
549 | .cra_driver_name = "safexcel-cbc-aes", | ||
550 | .cra_priority = 300, | ||
551 | .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | | ||
552 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
553 | .cra_blocksize = AES_BLOCK_SIZE, | ||
554 | .cra_ctxsize = sizeof(struct safexcel_cipher_ctx), | ||
555 | .cra_alignmask = 0, | ||
556 | .cra_init = safexcel_skcipher_cra_init, | ||
557 | .cra_exit = safexcel_skcipher_cra_exit, | ||
558 | .cra_module = THIS_MODULE, | ||
559 | }, | ||
560 | }, | ||
561 | }; | ||
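This closes the cipher half of the new driver: the two templates register "ecb(aes)" and "cbc(aes)" as asynchronous, hardware-only skciphers, so in-kernel users reach the engine through the generic skcipher API rather than calling the driver directly. The code below is only a rough sketch of such a caller; the helper name, the fixed 128-bit key size and the completion handling are invented for the example and are not part of this commit.

	/* Illustrative sketch: exercise the "cbc(aes)" skcipher registered by
	 * the template above. Buffer, key and IV handling are assumptions. */
	#include <crypto/aes.h>
	#include <crypto/skcipher.h>
	#include <linux/completion.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	struct sketch_result {
		struct completion completion;
		int error;
	};

	static void sketch_complete(struct crypto_async_request *req, int error)
	{
		struct sketch_result *res = req->data;

		if (error == -EINPROGRESS)
			return;

		res->error = error;
		complete(&res->completion);
	}

	/* Encrypt 'len' bytes in place; 'iv' is AES_BLOCK_SIZE bytes long. */
	static int sketch_cbc_aes_encrypt(u8 *buf, unsigned int len,
					  const u8 *key, u8 *iv)
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		struct sketch_result res;
		struct scatterlist sg;
		int ret;

		tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
		if (ret)
			goto free_tfm;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto free_tfm;
		}

		init_completion(&res.completion);
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					      sketch_complete, &res);
		sg_init_one(&sg, buf, len);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);

		ret = crypto_skcipher_encrypt(req);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			wait_for_completion(&res.completion);
			ret = res.error;
		}

		skcipher_request_free(req);
	free_tfm:
		crypto_free_skcipher(tfm);
		return ret;
	}

For buffers spanning several scatterlist entries the caller would build a longer sg list; the driver above walks it with for_each_sg() and emits one command descriptor per DMA segment.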
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c new file mode 100644 index 000000000000..8527a5899a2f --- /dev/null +++ b/drivers/crypto/inside-secure/safexcel_hash.c | |||
@@ -0,0 +1,1052 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2017 Marvell | ||
3 | * | ||
4 | * Antoine Tenart <antoine.tenart@free-electrons.com> | ||
5 | * | ||
6 | * This file is licensed under the terms of the GNU General Public | ||
7 | * License version 2. This program is licensed "as is" without any | ||
8 | * warranty of any kind, whether express or implied. | ||
9 | */ | ||
10 | |||
11 | #include <crypto/hmac.h> | ||
12 | #include <crypto/sha.h> | ||
13 | #include <linux/device.h> | ||
14 | #include <linux/dma-mapping.h> | ||
15 | #include <linux/dmapool.h> | ||
16 | |||
17 | |||
18 | #include "safexcel.h" | ||
19 | |||
20 | struct safexcel_ahash_ctx { | ||
21 | struct safexcel_context base; | ||
22 | struct safexcel_crypto_priv *priv; | ||
23 | |||
24 | u32 alg; | ||
25 | u32 digest; | ||
26 | |||
27 | u32 ipad[SHA1_DIGEST_SIZE / sizeof(u32)]; | ||
28 | u32 opad[SHA1_DIGEST_SIZE / sizeof(u32)]; | ||
29 | }; | ||
30 | |||
31 | struct safexcel_ahash_req { | ||
32 | bool last_req; | ||
33 | bool finish; | ||
34 | bool hmac; | ||
35 | |||
36 | u8 state_sz; /* expected state size, only set once */ | ||
37 | u32 state[SHA256_DIGEST_SIZE / sizeof(u32)]; | ||
38 | |||
39 | u64 len; | ||
40 | u64 processed; | ||
41 | |||
42 | u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); | ||
43 | u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); | ||
44 | }; | ||
45 | |||
46 | struct safexcel_ahash_export_state { | ||
47 | u64 len; | ||
48 | u64 processed; | ||
49 | |||
50 | u32 state[SHA256_DIGEST_SIZE / sizeof(u32)]; | ||
51 | u8 cache[SHA256_BLOCK_SIZE]; | ||
52 | }; | ||
53 | |||
54 | static void safexcel_hash_token(struct safexcel_command_desc *cdesc, | ||
55 | u32 input_length, u32 result_length) | ||
56 | { | ||
57 | struct safexcel_token *token = | ||
58 | (struct safexcel_token *)cdesc->control_data.token; | ||
59 | |||
60 | token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION; | ||
61 | token[0].packet_length = input_length; | ||
62 | token[0].stat = EIP197_TOKEN_STAT_LAST_HASH; | ||
63 | token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH; | ||
64 | |||
65 | token[1].opcode = EIP197_TOKEN_OPCODE_INSERT; | ||
66 | token[1].packet_length = result_length; | ||
67 | token[1].stat = EIP197_TOKEN_STAT_LAST_HASH | | ||
68 | EIP197_TOKEN_STAT_LAST_PACKET; | ||
69 | token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT | | ||
70 | EIP197_TOKEN_INS_INSERT_HASH_DIGEST; | ||
71 | } | ||
72 | |||
73 | static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, | ||
74 | struct safexcel_ahash_req *req, | ||
75 | struct safexcel_command_desc *cdesc, | ||
76 | unsigned int digestsize, | ||
77 | unsigned int blocksize) | ||
78 | { | ||
79 | int i; | ||
80 | |||
81 | cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT; | ||
82 | cdesc->control_data.control0 |= ctx->alg; | ||
83 | cdesc->control_data.control0 |= ctx->digest; | ||
84 | |||
85 | if (ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) { | ||
86 | if (req->processed) { | ||
87 | if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1) | ||
88 | cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6); | ||
89 | else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 || | ||
90 | ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256) | ||
91 | cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9); | ||
92 | |||
93 | cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT; | ||
94 | } else { | ||
95 | cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH; | ||
96 | } | ||
97 | |||
98 | if (!req->finish) | ||
99 | cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH; | ||
100 | |||
101 | /* | ||
102 | * Copy the input digest if needed, and set up the context | ||
103 | * fields. Do this now as we need it to set up the first command | ||
104 | * descriptor. | ||
105 | */ | ||
106 | if (req->processed) { | ||
107 | for (i = 0; i < digestsize / sizeof(u32); i++) | ||
108 | ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]); | ||
109 | |||
110 | if (req->finish) | ||
111 | ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize); | ||
112 | } | ||
113 | } else if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) { | ||
114 | cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(10); | ||
115 | |||
116 | memcpy(ctx->base.ctxr->data, ctx->ipad, digestsize); | ||
117 | memcpy(ctx->base.ctxr->data + digestsize / sizeof(u32), | ||
118 | ctx->opad, digestsize); | ||
119 | } | ||
120 | } | ||
121 | |||
122 | static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, | ||
123 | struct crypto_async_request *async, | ||
124 | bool *should_complete, int *ret) | ||
125 | { | ||
126 | struct safexcel_result_desc *rdesc; | ||
127 | struct ahash_request *areq = ahash_request_cast(async); | ||
128 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | ||
129 | struct safexcel_ahash_req *sreq = ahash_request_ctx(areq); | ||
130 | int cache_len, result_sz = sreq->state_sz; | ||
131 | |||
132 | *ret = 0; | ||
133 | |||
134 | spin_lock_bh(&priv->ring[ring].egress_lock); | ||
135 | rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); | ||
136 | if (IS_ERR(rdesc)) { | ||
137 | dev_err(priv->dev, | ||
138 | "hash: result: could not retrieve the result descriptor\n"); | ||
139 | *ret = PTR_ERR(rdesc); | ||
140 | } else if (rdesc->result_data.error_code) { | ||
141 | dev_err(priv->dev, | ||
142 | "hash: result: result descriptor error (%d)\n", | ||
143 | rdesc->result_data.error_code); | ||
144 | *ret = -EINVAL; | ||
145 | } | ||
146 | |||
147 | safexcel_complete(priv, ring); | ||
148 | spin_unlock_bh(&priv->ring[ring].egress_lock); | ||
149 | |||
150 | if (sreq->finish) | ||
151 | result_sz = crypto_ahash_digestsize(ahash); | ||
152 | memcpy(sreq->state, areq->result, result_sz); | ||
153 | |||
154 | dma_unmap_sg(priv->dev, areq->src, | ||
155 | sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE); | ||
156 | |||
157 | safexcel_free_context(priv, async, sreq->state_sz); | ||
158 | |||
159 | cache_len = sreq->len - sreq->processed; | ||
160 | if (cache_len) | ||
161 | memcpy(sreq->cache, sreq->cache_next, cache_len); | ||
162 | |||
163 | *should_complete = true; | ||
164 | |||
165 | return 1; | ||
166 | } | ||
167 | |||
168 | static int safexcel_ahash_send(struct crypto_async_request *async, int ring, | ||
169 | struct safexcel_request *request, int *commands, | ||
170 | int *results) | ||
171 | { | ||
172 | struct ahash_request *areq = ahash_request_cast(async); | ||
173 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | ||
174 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | ||
175 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | ||
176 | struct safexcel_crypto_priv *priv = ctx->priv; | ||
177 | struct safexcel_command_desc *cdesc, *first_cdesc = NULL; | ||
178 | struct safexcel_result_desc *rdesc; | ||
179 | struct scatterlist *sg; | ||
180 | int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0; | ||
181 | |||
182 | queued = len = req->len - req->processed; | ||
183 | if (queued < crypto_ahash_blocksize(ahash)) | ||
184 | cache_len = queued; | ||
185 | else | ||
186 | cache_len = queued - areq->nbytes; | ||
187 | |||
188 | /* | ||
189 | * If this is not the last request and the queued data does not fit | ||
190 | * into full blocks, cache it for the next send() call. | ||
191 | */ | ||
192 | extra = queued & (crypto_ahash_blocksize(ahash) - 1); | ||
193 | if (!req->last_req && extra) { | ||
194 | sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), | ||
195 | req->cache_next, extra, areq->nbytes - extra); | ||
196 | |||
197 | queued -= extra; | ||
198 | len -= extra; | ||
199 | } | ||
200 | |||
201 | spin_lock_bh(&priv->ring[ring].egress_lock); | ||
202 | |||
203 | /* Add a command descriptor for the cached data, if any */ | ||
204 | if (cache_len) { | ||
205 | ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async)); | ||
206 | if (!ctx->base.cache) { | ||
207 | ret = -ENOMEM; | ||
208 | goto unlock; | ||
209 | } | ||
210 | memcpy(ctx->base.cache, req->cache, cache_len); | ||
211 | ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache, | ||
212 | cache_len, DMA_TO_DEVICE); | ||
213 | if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) { | ||
214 | ret = -EINVAL; | ||
215 | goto free_cache; | ||
216 | } | ||
217 | |||
218 | ctx->base.cache_sz = cache_len; | ||
219 | first_cdesc = safexcel_add_cdesc(priv, ring, 1, | ||
220 | (cache_len == len), | ||
221 | ctx->base.cache_dma, | ||
222 | cache_len, len, | ||
223 | ctx->base.ctxr_dma); | ||
224 | if (IS_ERR(first_cdesc)) { | ||
225 | ret = PTR_ERR(first_cdesc); | ||
226 | goto unmap_cache; | ||
227 | } | ||
228 | n_cdesc++; | ||
229 | |||
230 | queued -= cache_len; | ||
231 | if (!queued) | ||
232 | goto send_command; | ||
233 | } | ||
234 | |||
235 | /* Now handle the current ahash request buffer(s) */ | ||
236 | nents = dma_map_sg(priv->dev, areq->src, | ||
237 | sg_nents_for_len(areq->src, areq->nbytes), | ||
238 | DMA_TO_DEVICE); | ||
239 | if (!nents) { | ||
240 | ret = -ENOMEM; | ||
241 | goto cdesc_rollback; | ||
242 | } | ||
243 | |||
244 | for_each_sg(areq->src, sg, nents, i) { | ||
245 | int sglen = sg_dma_len(sg); | ||
246 | |||
247 | /* Do not overflow the request */ | ||
248 | if (queued - sglen < 0) | ||
249 | sglen = queued; | ||
250 | |||
251 | cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, | ||
252 | !(queued - sglen), sg_dma_address(sg), | ||
253 | sglen, len, ctx->base.ctxr_dma); | ||
254 | if (IS_ERR(cdesc)) { | ||
255 | ret = PTR_ERR(cdesc); | ||
256 | goto cdesc_rollback; | ||
257 | } | ||
258 | n_cdesc++; | ||
259 | |||
260 | if (n_cdesc == 1) | ||
261 | first_cdesc = cdesc; | ||
262 | |||
263 | queued -= sglen; | ||
264 | if (!queued) | ||
265 | break; | ||
266 | } | ||
267 | |||
268 | send_command: | ||
269 | /* Setup the context options */ | ||
270 | safexcel_context_control(ctx, req, first_cdesc, req->state_sz, | ||
271 | crypto_ahash_blocksize(ahash)); | ||
272 | |||
273 | /* Add the token */ | ||
274 | safexcel_hash_token(first_cdesc, len, req->state_sz); | ||
275 | |||
276 | ctx->base.result_dma = dma_map_single(priv->dev, areq->result, | ||
277 | req->state_sz, DMA_FROM_DEVICE); | ||
278 | if (dma_mapping_error(priv->dev, ctx->base.result_dma)) { | ||
279 | ret = -EINVAL; | ||
280 | goto cdesc_rollback; | ||
281 | } | ||
282 | |||
283 | /* Add a result descriptor */ | ||
284 | rdesc = safexcel_add_rdesc(priv, ring, 1, 1, ctx->base.result_dma, | ||
285 | req->state_sz); | ||
286 | if (IS_ERR(rdesc)) { | ||
287 | ret = PTR_ERR(rdesc); | ||
288 | goto cdesc_rollback; | ||
289 | } | ||
290 | |||
291 | spin_unlock_bh(&priv->ring[ring].egress_lock); | ||
292 | |||
293 | req->processed += len; | ||
294 | request->req = &areq->base; | ||
295 | ctx->base.handle_result = safexcel_handle_result; | ||
296 | |||
297 | *commands = n_cdesc; | ||
298 | *results = 1; | ||
299 | return 0; | ||
300 | |||
301 | cdesc_rollback: | ||
302 | for (i = 0; i < n_cdesc; i++) | ||
303 | safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr); | ||
304 | unmap_cache: | ||
305 | if (ctx->base.cache_dma) { | ||
306 | dma_unmap_single(priv->dev, ctx->base.cache_dma, | ||
307 | ctx->base.cache_sz, DMA_TO_DEVICE); | ||
308 | ctx->base.cache_sz = 0; | ||
309 | } | ||
310 | free_cache: | ||
311 | if (ctx->base.cache) { | ||
312 | kfree(ctx->base.cache); | ||
313 | ctx->base.cache = NULL; | ||
314 | } | ||
315 | |||
316 | unlock: | ||
317 | spin_unlock_bh(&priv->ring[ring].egress_lock); | ||
318 | return ret; | ||
319 | } | ||
320 | |||
321 | static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq) | ||
322 | { | ||
323 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | ||
324 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | ||
325 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | ||
326 | unsigned int state_w_sz = req->state_sz / sizeof(u32); | ||
327 | int i; | ||
328 | |||
329 | for (i = 0; i < state_w_sz; i++) | ||
330 | if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i])) | ||
331 | return true; | ||
332 | |||
333 | if (ctx->base.ctxr->data[state_w_sz] != | ||
334 | cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash))) | ||
335 | return true; | ||
336 | |||
337 | return false; | ||
338 | } | ||
339 | |||
340 | static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, | ||
341 | int ring, | ||
342 | struct crypto_async_request *async, | ||
343 | bool *should_complete, int *ret) | ||
344 | { | ||
345 | struct safexcel_result_desc *rdesc; | ||
346 | struct ahash_request *areq = ahash_request_cast(async); | ||
347 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | ||
348 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash); | ||
349 | int enq_ret; | ||
350 | |||
351 | *ret = 0; | ||
352 | |||
353 | spin_lock_bh(&priv->ring[ring].egress_lock); | ||
354 | rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr); | ||
355 | if (IS_ERR(rdesc)) { | ||
356 | dev_err(priv->dev, | ||
357 | "hash: invalidate: could not retrieve the result descriptor\n"); | ||
358 | *ret = PTR_ERR(rdesc); | ||
359 | } else if (rdesc->result_data.error_code) { | ||
360 | dev_err(priv->dev, | ||
361 | "hash: invalidate: result descriptor error (%d)\n", | ||
362 | rdesc->result_data.error_code); | ||
363 | *ret = -EINVAL; | ||
364 | } | ||
365 | |||
366 | safexcel_complete(priv, ring); | ||
367 | spin_unlock_bh(&priv->ring[ring].egress_lock); | ||
368 | |||
369 | if (ctx->base.exit_inv) { | ||
370 | dma_pool_free(priv->context_pool, ctx->base.ctxr, | ||
371 | ctx->base.ctxr_dma); | ||
372 | |||
373 | *should_complete = true; | ||
374 | return 1; | ||
375 | } | ||
376 | |||
377 | ring = safexcel_select_ring(priv); | ||
378 | ctx->base.ring = ring; | ||
379 | ctx->base.needs_inv = false; | ||
380 | ctx->base.send = safexcel_ahash_send; | ||
381 | |||
382 | spin_lock_bh(&priv->ring[ring].queue_lock); | ||
383 | enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async); | ||
384 | spin_unlock_bh(&priv->ring[ring].queue_lock); | ||
385 | |||
386 | if (enq_ret != -EINPROGRESS) | ||
387 | *ret = enq_ret; | ||
388 | |||
389 | if (!priv->ring[ring].need_dequeue) | ||
390 | safexcel_dequeue(priv, ring); | ||
391 | |||
392 | *should_complete = false; | ||
393 | |||
394 | return 1; | ||
395 | } | ||
396 | |||
397 | static int safexcel_ahash_send_inv(struct crypto_async_request *async, | ||
398 | int ring, struct safexcel_request *request, | ||
399 | int *commands, int *results) | ||
400 | { | ||
401 | struct ahash_request *areq = ahash_request_cast(async); | ||
402 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | ||
403 | int ret; | ||
404 | |||
405 | ctx->base.handle_result = safexcel_handle_inv_result; | ||
406 | ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv, | ||
407 | ctx->base.ctxr_dma, ring, request); | ||
408 | if (unlikely(ret)) | ||
409 | return ret; | ||
410 | |||
411 | *commands = 1; | ||
412 | *results = 1; | ||
413 | |||
414 | return 0; | ||
415 | } | ||
416 | |||
417 | static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) | ||
418 | { | ||
419 | struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); | ||
420 | struct safexcel_crypto_priv *priv = ctx->priv; | ||
421 | struct ahash_request req; | ||
422 | struct safexcel_inv_result result = { 0 }; | ||
423 | int ring = ctx->base.ring; | ||
424 | |||
425 | memset(&req, 0, sizeof(struct ahash_request)); | ||
426 | |||
427 | /* create invalidation request */ | ||
428 | init_completion(&result.completion); | ||
429 | ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
430 | safexcel_inv_complete, &result); | ||
431 | |||
432 | ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm)); | ||
433 | ctx = crypto_tfm_ctx(req.base.tfm); | ||
434 | ctx->base.exit_inv = true; | ||
435 | ctx->base.send = safexcel_ahash_send_inv; | ||
436 | |||
437 | spin_lock_bh(&priv->ring[ring].queue_lock); | ||
438 | crypto_enqueue_request(&priv->ring[ring].queue, &req.base); | ||
439 | spin_unlock_bh(&priv->ring[ring].queue_lock); | ||
440 | |||
441 | if (!priv->ring[ring].need_dequeue) | ||
442 | safexcel_dequeue(priv, ring); | ||
443 | |||
444 | wait_for_completion_interruptible(&result.completion); | ||
445 | |||
446 | if (result.error) { | ||
447 | dev_warn(priv->dev, "hash: completion error (%d)\n", | ||
448 | result.error); | ||
449 | return result.error; | ||
450 | } | ||
451 | |||
452 | return 0; | ||
453 | } | ||
454 | |||
455 | static int safexcel_ahash_cache(struct ahash_request *areq) | ||
456 | { | ||
457 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | ||
458 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | ||
459 | int queued, cache_len; | ||
460 | |||
461 | cache_len = req->len - areq->nbytes - req->processed; | ||
462 | queued = req->len - req->processed; | ||
463 | |||
464 | /* | ||
465 | * In case there aren't enough bytes to proceed (less than a | ||
466 | * block size), cache the data until we have enough. | ||
467 | */ | ||
468 | if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) { | ||
469 | sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), | ||
470 | req->cache + cache_len, | ||
471 | areq->nbytes, 0); | ||
472 | return areq->nbytes; | ||
473 | } | ||
474 | |||
475 | /* We couldn't cache all the data */ | ||
476 | return -E2BIG; | ||
477 | } | ||
478 | |||
479 | static int safexcel_ahash_enqueue(struct ahash_request *areq) | ||
480 | { | ||
481 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | ||
482 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | ||
483 | struct safexcel_crypto_priv *priv = ctx->priv; | ||
484 | int ret, ring; | ||
485 | |||
486 | ctx->base.send = safexcel_ahash_send; | ||
487 | |||
488 | if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) | ||
489 | ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq); | ||
490 | |||
491 | if (ctx->base.ctxr) { | ||
492 | if (ctx->base.needs_inv) | ||
493 | ctx->base.send = safexcel_ahash_send_inv; | ||
494 | } else { | ||
495 | ctx->base.ring = safexcel_select_ring(priv); | ||
496 | ctx->base.ctxr = dma_pool_zalloc(priv->context_pool, | ||
497 | EIP197_GFP_FLAGS(areq->base), | ||
498 | &ctx->base.ctxr_dma); | ||
499 | if (!ctx->base.ctxr) | ||
500 | return -ENOMEM; | ||
501 | } | ||
502 | |||
503 | ring = ctx->base.ring; | ||
504 | |||
505 | spin_lock_bh(&priv->ring[ring].queue_lock); | ||
506 | ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base); | ||
507 | spin_unlock_bh(&priv->ring[ring].queue_lock); | ||
508 | |||
509 | if (!priv->ring[ring].need_dequeue) | ||
510 | safexcel_dequeue(priv, ring); | ||
511 | |||
512 | return ret; | ||
513 | } | ||
514 | |||
515 | static int safexcel_ahash_update(struct ahash_request *areq) | ||
516 | { | ||
517 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | ||
518 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | ||
519 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | ||
520 | |||
521 | /* If the request is 0 length, do nothing */ | ||
522 | if (!areq->nbytes) | ||
523 | return 0; | ||
524 | |||
525 | req->len += areq->nbytes; | ||
526 | |||
527 | safexcel_ahash_cache(areq); | ||
528 | |||
529 | /* | ||
530 | * We're not doing partial updates when performing an hmac request. | ||
531 | * Everything will be handled by the final() call. | ||
532 | */ | ||
533 | if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) | ||
534 | return 0; | ||
535 | |||
536 | if (req->hmac) | ||
537 | return safexcel_ahash_enqueue(areq); | ||
538 | |||
539 | if (!req->last_req && | ||
540 | req->len - req->processed > crypto_ahash_blocksize(ahash)) | ||
541 | return safexcel_ahash_enqueue(areq); | ||
542 | |||
543 | return 0; | ||
544 | } | ||
545 | |||
546 | static int safexcel_ahash_final(struct ahash_request *areq) | ||
547 | { | ||
548 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | ||
549 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | ||
550 | |||
551 | req->last_req = true; | ||
552 | req->finish = true; | ||
553 | |||
554 | /* If we have an overall 0 length request */ | ||
555 | if (!(req->len + areq->nbytes)) { | ||
556 | if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1) | ||
557 | memcpy(areq->result, sha1_zero_message_hash, | ||
558 | SHA1_DIGEST_SIZE); | ||
559 | else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224) | ||
560 | memcpy(areq->result, sha224_zero_message_hash, | ||
561 | SHA224_DIGEST_SIZE); | ||
562 | else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256) | ||
563 | memcpy(areq->result, sha256_zero_message_hash, | ||
564 | SHA256_DIGEST_SIZE); | ||
565 | |||
566 | return 0; | ||
567 | } | ||
568 | |||
569 | return safexcel_ahash_enqueue(areq); | ||
570 | } | ||
571 | |||
572 | static int safexcel_ahash_finup(struct ahash_request *areq) | ||
573 | { | ||
574 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | ||
575 | |||
576 | req->last_req = true; | ||
577 | req->finish = true; | ||
578 | |||
579 | safexcel_ahash_update(areq); | ||
580 | return safexcel_ahash_final(areq); | ||
581 | } | ||
582 | |||
583 | static int safexcel_ahash_export(struct ahash_request *areq, void *out) | ||
584 | { | ||
585 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | ||
586 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | ||
587 | struct safexcel_ahash_export_state *export = out; | ||
588 | |||
589 | export->len = req->len; | ||
590 | export->processed = req->processed; | ||
591 | |||
592 | memcpy(export->state, req->state, req->state_sz); | ||
593 | memset(export->cache, 0, crypto_ahash_blocksize(ahash)); | ||
594 | memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash)); | ||
595 | |||
596 | return 0; | ||
597 | } | ||
598 | |||
599 | static int safexcel_ahash_import(struct ahash_request *areq, const void *in) | ||
600 | { | ||
601 | struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); | ||
602 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | ||
603 | const struct safexcel_ahash_export_state *export = in; | ||
604 | int ret; | ||
605 | |||
606 | ret = crypto_ahash_init(areq); | ||
607 | if (ret) | ||
608 | return ret; | ||
609 | |||
610 | req->len = export->len; | ||
611 | req->processed = export->processed; | ||
612 | |||
613 | memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash)); | ||
614 | memcpy(req->state, export->state, req->state_sz); | ||
615 | |||
616 | return 0; | ||
617 | } | ||
618 | |||
619 | static int safexcel_ahash_cra_init(struct crypto_tfm *tfm) | ||
620 | { | ||
621 | struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); | ||
622 | struct safexcel_alg_template *tmpl = | ||
623 | container_of(__crypto_ahash_alg(tfm->__crt_alg), | ||
624 | struct safexcel_alg_template, alg.ahash); | ||
625 | |||
626 | ctx->priv = tmpl->priv; | ||
627 | |||
628 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | ||
629 | sizeof(struct safexcel_ahash_req)); | ||
630 | return 0; | ||
631 | } | ||
632 | |||
633 | static int safexcel_sha1_init(struct ahash_request *areq) | ||
634 | { | ||
635 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | ||
636 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | ||
637 | |||
638 | memset(req, 0, sizeof(*req)); | ||
639 | |||
640 | req->state[0] = SHA1_H0; | ||
641 | req->state[1] = SHA1_H1; | ||
642 | req->state[2] = SHA1_H2; | ||
643 | req->state[3] = SHA1_H3; | ||
644 | req->state[4] = SHA1_H4; | ||
645 | |||
646 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1; | ||
647 | ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | ||
648 | req->state_sz = SHA1_DIGEST_SIZE; | ||
649 | |||
650 | return 0; | ||
651 | } | ||
652 | |||
653 | static int safexcel_sha1_digest(struct ahash_request *areq) | ||
654 | { | ||
655 | int ret = safexcel_sha1_init(areq); | ||
656 | |||
657 | if (ret) | ||
658 | return ret; | ||
659 | |||
660 | return safexcel_ahash_finup(areq); | ||
661 | } | ||
662 | |||
663 | static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm) | ||
664 | { | ||
665 | struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); | ||
666 | struct safexcel_crypto_priv *priv = ctx->priv; | ||
667 | int ret; | ||
668 | |||
669 | /* context not allocated, skip invalidation */ | ||
670 | if (!ctx->base.ctxr) | ||
671 | return; | ||
672 | |||
673 | ret = safexcel_ahash_exit_inv(tfm); | ||
674 | if (ret) | ||
675 | dev_warn(priv->dev, "hash: invalidation error %d\n", ret); | ||
676 | } | ||
677 | |||
678 | struct safexcel_alg_template safexcel_alg_sha1 = { | ||
679 | .type = SAFEXCEL_ALG_TYPE_AHASH, | ||
680 | .alg.ahash = { | ||
681 | .init = safexcel_sha1_init, | ||
682 | .update = safexcel_ahash_update, | ||
683 | .final = safexcel_ahash_final, | ||
684 | .finup = safexcel_ahash_finup, | ||
685 | .digest = safexcel_sha1_digest, | ||
686 | .export = safexcel_ahash_export, | ||
687 | .import = safexcel_ahash_import, | ||
688 | .halg = { | ||
689 | .digestsize = SHA1_DIGEST_SIZE, | ||
690 | .statesize = sizeof(struct safexcel_ahash_export_state), | ||
691 | .base = { | ||
692 | .cra_name = "sha1", | ||
693 | .cra_driver_name = "safexcel-sha1", | ||
694 | .cra_priority = 300, | ||
695 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
696 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
697 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
698 | .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), | ||
699 | .cra_init = safexcel_ahash_cra_init, | ||
700 | .cra_exit = safexcel_ahash_cra_exit, | ||
701 | .cra_module = THIS_MODULE, | ||
702 | }, | ||
703 | }, | ||
704 | }, | ||
705 | }; | ||
706 | |||
707 | static int safexcel_hmac_sha1_init(struct ahash_request *areq) | ||
708 | { | ||
709 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | ||
710 | |||
711 | safexcel_sha1_init(areq); | ||
712 | ctx->digest = CONTEXT_CONTROL_DIGEST_HMAC; | ||
713 | return 0; | ||
714 | } | ||
715 | |||
716 | static int safexcel_hmac_sha1_digest(struct ahash_request *areq) | ||
717 | { | ||
718 | int ret = safexcel_hmac_sha1_init(areq); | ||
719 | |||
720 | if (ret) | ||
721 | return ret; | ||
722 | |||
723 | return safexcel_ahash_finup(areq); | ||
724 | } | ||
725 | |||
726 | struct safexcel_ahash_result { | ||
727 | struct completion completion; | ||
728 | int error; | ||
729 | }; | ||
730 | |||
731 | static void safexcel_ahash_complete(struct crypto_async_request *req, int error) | ||
732 | { | ||
733 | struct safexcel_ahash_result *result = req->data; | ||
734 | |||
735 | if (error == -EINPROGRESS) | ||
736 | return; | ||
737 | |||
738 | result->error = error; | ||
739 | complete(&result->completion); | ||
740 | } | ||
741 | |||
742 | static int safexcel_hmac_init_pad(struct ahash_request *areq, | ||
743 | unsigned int blocksize, const u8 *key, | ||
744 | unsigned int keylen, u8 *ipad, u8 *opad) | ||
745 | { | ||
746 | struct safexcel_ahash_result result; | ||
747 | struct scatterlist sg; | ||
748 | int ret, i; | ||
749 | u8 *keydup; | ||
750 | |||
751 | if (keylen <= blocksize) { | ||
752 | memcpy(ipad, key, keylen); | ||
753 | } else { | ||
754 | keydup = kmemdup(key, keylen, GFP_KERNEL); | ||
755 | if (!keydup) | ||
756 | return -ENOMEM; | ||
757 | |||
758 | ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
759 | safexcel_ahash_complete, &result); | ||
760 | sg_init_one(&sg, keydup, keylen); | ||
761 | ahash_request_set_crypt(areq, &sg, ipad, keylen); | ||
762 | init_completion(&result.completion); | ||
763 | |||
764 | ret = crypto_ahash_digest(areq); | ||
765 | if (ret == -EINPROGRESS) { | ||
766 | wait_for_completion_interruptible(&result.completion); | ||
767 | ret = result.error; | ||
768 | } | ||
769 | |||
770 | /* Avoid leaking */ | ||
771 | memzero_explicit(keydup, keylen); | ||
772 | kfree(keydup); | ||
773 | |||
774 | if (ret) | ||
775 | return ret; | ||
776 | |||
777 | keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq)); | ||
778 | } | ||
779 | |||
780 | memset(ipad + keylen, 0, blocksize - keylen); | ||
781 | memcpy(opad, ipad, blocksize); | ||
782 | |||
783 | for (i = 0; i < blocksize; i++) { | ||
784 | ipad[i] ^= HMAC_IPAD_VALUE; | ||
785 | opad[i] ^= HMAC_OPAD_VALUE; | ||
786 | } | ||
787 | |||
788 | return 0; | ||
789 | } | ||
790 | |||
791 | static int safexcel_hmac_init_iv(struct ahash_request *areq, | ||
792 | unsigned int blocksize, u8 *pad, void *state) | ||
793 | { | ||
794 | struct safexcel_ahash_result result; | ||
795 | struct safexcel_ahash_req *req; | ||
796 | struct scatterlist sg; | ||
797 | int ret; | ||
798 | |||
799 | ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
800 | safexcel_ahash_complete, &result); | ||
801 | sg_init_one(&sg, pad, blocksize); | ||
802 | ahash_request_set_crypt(areq, &sg, pad, blocksize); | ||
803 | init_completion(&result.completion); | ||
804 | |||
805 | ret = crypto_ahash_init(areq); | ||
806 | if (ret) | ||
807 | return ret; | ||
808 | |||
809 | req = ahash_request_ctx(areq); | ||
810 | req->hmac = true; | ||
811 | req->last_req = true; | ||
812 | |||
813 | ret = crypto_ahash_update(areq); | ||
814 | if (ret && ret != -EINPROGRESS) | ||
815 | return ret; | ||
816 | |||
817 | wait_for_completion_interruptible(&result.completion); | ||
818 | if (result.error) | ||
819 | return result.error; | ||
820 | |||
821 | return crypto_ahash_export(areq, state); | ||
822 | } | ||
823 | |||
824 | static int safexcel_hmac_setkey(const char *alg, const u8 *key, | ||
825 | unsigned int keylen, void *istate, void *ostate) | ||
826 | { | ||
827 | struct ahash_request *areq; | ||
828 | struct crypto_ahash *tfm; | ||
829 | unsigned int blocksize; | ||
830 | u8 *ipad, *opad; | ||
831 | int ret; | ||
832 | |||
833 | tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH, | ||
834 | CRYPTO_ALG_TYPE_AHASH_MASK); | ||
835 | if (IS_ERR(tfm)) | ||
836 | return PTR_ERR(tfm); | ||
837 | |||
838 | areq = ahash_request_alloc(tfm, GFP_KERNEL); | ||
839 | if (!areq) { | ||
840 | ret = -ENOMEM; | ||
841 | goto free_ahash; | ||
842 | } | ||
843 | |||
844 | crypto_ahash_clear_flags(tfm, ~0); | ||
845 | blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); | ||
846 | |||
847 | ipad = kzalloc(2 * blocksize, GFP_KERNEL); | ||
848 | if (!ipad) { | ||
849 | ret = -ENOMEM; | ||
850 | goto free_request; | ||
851 | } | ||
852 | |||
853 | opad = ipad + blocksize; | ||
854 | |||
855 | ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad); | ||
856 | if (ret) | ||
857 | goto free_ipad; | ||
858 | |||
859 | ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate); | ||
860 | if (ret) | ||
861 | goto free_ipad; | ||
862 | |||
863 | ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate); | ||
864 | |||
865 | free_ipad: | ||
866 | kfree(ipad); | ||
867 | free_request: | ||
868 | ahash_request_free(areq); | ||
869 | free_ahash: | ||
870 | crypto_free_ahash(tfm); | ||
871 | |||
872 | return ret; | ||
873 | } | ||
874 | |||
875 | static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, | ||
876 | unsigned int keylen) | ||
877 | { | ||
878 | struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); | ||
879 | struct safexcel_ahash_export_state istate, ostate; | ||
880 | int ret, i; | ||
881 | |||
882 | ret = safexcel_hmac_setkey("safexcel-sha1", key, keylen, &istate, &ostate); | ||
883 | if (ret) | ||
884 | return ret; | ||
885 | |||
886 | memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE); | ||
887 | memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE); | ||
888 | |||
889 | for (i = 0; i < ARRAY_SIZE(istate.state); i++) { | ||
890 | if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || | ||
891 | ctx->opad[i] != le32_to_cpu(ostate.state[i])) { | ||
892 | ctx->base.needs_inv = true; | ||
893 | break; | ||
894 | } | ||
895 | } | ||
896 | |||
897 | return 0; | ||
898 | } | ||
899 | |||
900 | struct safexcel_alg_template safexcel_alg_hmac_sha1 = { | ||
901 | .type = SAFEXCEL_ALG_TYPE_AHASH, | ||
902 | .alg.ahash = { | ||
903 | .init = safexcel_hmac_sha1_init, | ||
904 | .update = safexcel_ahash_update, | ||
905 | .final = safexcel_ahash_final, | ||
906 | .finup = safexcel_ahash_finup, | ||
907 | .digest = safexcel_hmac_sha1_digest, | ||
908 | .setkey = safexcel_hmac_sha1_setkey, | ||
909 | .export = safexcel_ahash_export, | ||
910 | .import = safexcel_ahash_import, | ||
911 | .halg = { | ||
912 | .digestsize = SHA1_DIGEST_SIZE, | ||
913 | .statesize = sizeof(struct safexcel_ahash_export_state), | ||
914 | .base = { | ||
915 | .cra_name = "hmac(sha1)", | ||
916 | .cra_driver_name = "safexcel-hmac-sha1", | ||
917 | .cra_priority = 300, | ||
918 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
919 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
920 | .cra_blocksize = SHA1_BLOCK_SIZE, | ||
921 | .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), | ||
922 | .cra_init = safexcel_ahash_cra_init, | ||
923 | .cra_exit = safexcel_ahash_cra_exit, | ||
924 | .cra_module = THIS_MODULE, | ||
925 | }, | ||
926 | }, | ||
927 | }, | ||
928 | }; | ||
929 | |||
930 | static int safexcel_sha256_init(struct ahash_request *areq) | ||
931 | { | ||
932 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | ||
933 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | ||
934 | |||
935 | memset(req, 0, sizeof(*req)); | ||
936 | |||
937 | req->state[0] = SHA256_H0; | ||
938 | req->state[1] = SHA256_H1; | ||
939 | req->state[2] = SHA256_H2; | ||
940 | req->state[3] = SHA256_H3; | ||
941 | req->state[4] = SHA256_H4; | ||
942 | req->state[5] = SHA256_H5; | ||
943 | req->state[6] = SHA256_H6; | ||
944 | req->state[7] = SHA256_H7; | ||
945 | |||
946 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256; | ||
947 | ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | ||
948 | req->state_sz = SHA256_DIGEST_SIZE; | ||
949 | |||
950 | return 0; | ||
951 | } | ||
952 | |||
953 | static int safexcel_sha256_digest(struct ahash_request *areq) | ||
954 | { | ||
955 | int ret = safexcel_sha256_init(areq); | ||
956 | |||
957 | if (ret) | ||
958 | return ret; | ||
959 | |||
960 | return safexcel_ahash_finup(areq); | ||
961 | } | ||
962 | |||
963 | struct safexcel_alg_template safexcel_alg_sha256 = { | ||
964 | .type = SAFEXCEL_ALG_TYPE_AHASH, | ||
965 | .alg.ahash = { | ||
966 | .init = safexcel_sha256_init, | ||
967 | .update = safexcel_ahash_update, | ||
968 | .final = safexcel_ahash_final, | ||
969 | .finup = safexcel_ahash_finup, | ||
970 | .digest = safexcel_sha256_digest, | ||
971 | .export = safexcel_ahash_export, | ||
972 | .import = safexcel_ahash_import, | ||
973 | .halg = { | ||
974 | .digestsize = SHA256_DIGEST_SIZE, | ||
975 | .statesize = sizeof(struct safexcel_ahash_export_state), | ||
976 | .base = { | ||
977 | .cra_name = "sha256", | ||
978 | .cra_driver_name = "safexcel-sha256", | ||
979 | .cra_priority = 300, | ||
980 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
981 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
982 | .cra_blocksize = SHA256_BLOCK_SIZE, | ||
983 | .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), | ||
984 | .cra_init = safexcel_ahash_cra_init, | ||
985 | .cra_exit = safexcel_ahash_cra_exit, | ||
986 | .cra_module = THIS_MODULE, | ||
987 | }, | ||
988 | }, | ||
989 | }, | ||
990 | }; | ||
991 | |||
992 | static int safexcel_sha224_init(struct ahash_request *areq) | ||
993 | { | ||
994 | struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); | ||
995 | struct safexcel_ahash_req *req = ahash_request_ctx(areq); | ||
996 | |||
997 | memset(req, 0, sizeof(*req)); | ||
998 | |||
999 | req->state[0] = SHA224_H0; | ||
1000 | req->state[1] = SHA224_H1; | ||
1001 | req->state[2] = SHA224_H2; | ||
1002 | req->state[3] = SHA224_H3; | ||
1003 | req->state[4] = SHA224_H4; | ||
1004 | req->state[5] = SHA224_H5; | ||
1005 | req->state[6] = SHA224_H6; | ||
1006 | req->state[7] = SHA224_H7; | ||
1007 | |||
1008 | ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224; | ||
1009 | ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED; | ||
1010 | req->state_sz = SHA256_DIGEST_SIZE; | ||
1011 | |||
1012 | return 0; | ||
1013 | } | ||
1014 | |||
1015 | static int safexcel_sha224_digest(struct ahash_request *areq) | ||
1016 | { | ||
1017 | int ret = safexcel_sha224_init(areq); | ||
1018 | |||
1019 | if (ret) | ||
1020 | return ret; | ||
1021 | |||
1022 | return safexcel_ahash_finup(areq); | ||
1023 | } | ||
1024 | |||
1025 | struct safexcel_alg_template safexcel_alg_sha224 = { | ||
1026 | .type = SAFEXCEL_ALG_TYPE_AHASH, | ||
1027 | .alg.ahash = { | ||
1028 | .init = safexcel_sha224_init, | ||
1029 | .update = safexcel_ahash_update, | ||
1030 | .final = safexcel_ahash_final, | ||
1031 | .finup = safexcel_ahash_finup, | ||
1032 | .digest = safexcel_sha224_digest, | ||
1033 | .export = safexcel_ahash_export, | ||
1034 | .import = safexcel_ahash_import, | ||
1035 | .halg = { | ||
1036 | .digestsize = SHA224_DIGEST_SIZE, | ||
1037 | .statesize = sizeof(struct safexcel_ahash_export_state), | ||
1038 | .base = { | ||
1039 | .cra_name = "sha224", | ||
1040 | .cra_driver_name = "safexcel-sha224", | ||
1041 | .cra_priority = 300, | ||
1042 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
1043 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
1044 | .cra_blocksize = SHA224_BLOCK_SIZE, | ||
1045 | .cra_ctxsize = sizeof(struct safexcel_ahash_ctx), | ||
1046 | .cra_init = safexcel_ahash_cra_init, | ||
1047 | .cra_exit = safexcel_ahash_cra_exit, | ||
1048 | .cra_module = THIS_MODULE, | ||
1049 | }, | ||
1050 | }, | ||
1051 | }, | ||
1052 | }; | ||
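safexcel_hmac_setkey() above runs the driver's own "safexcel-sha1" in software to turn the HMAC key into inner and outer digest midstates, which safexcel_context_control() later loads into the context record. The padding it starts from is the standard RFC 2104 key preparation; the sketch below isolates just that step, under the assumption that the key already fits in one block (longer keys would first be hashed down to digestsize, as the driver does with crypto_ahash_digest()). The helper name and buffers are invented for illustration.

	/* Illustrative RFC 2104 pad derivation, mirroring the short-key path of
	 * safexcel_hmac_init_pad(). Not part of the commit. */
	#include <crypto/hmac.h>
	#include <linux/string.h>
	#include <linux/types.h>

	static void sketch_hmac_derive_pads(const u8 *key, unsigned int keylen,
					    unsigned int blocksize,
					    u8 *ipad, u8 *opad)
	{
		unsigned int i;

		/* Zero-pad the key to one block, then XOR in the two constants. */
		memcpy(ipad, key, keylen);
		memset(ipad + keylen, 0, blocksize - keylen);
		memcpy(opad, ipad, blocksize);

		for (i = 0; i < blocksize; i++) {
			ipad[i] ^= HMAC_IPAD_VALUE;
			opad[i] ^= HMAC_OPAD_VALUE;
		}
	}

The two padded blocks are then hashed once each by safexcel_hmac_init_iv() to obtain the ipad/opad states stored in the tfm context.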
diff --git a/drivers/crypto/inside-secure/safexcel_ring.c b/drivers/crypto/inside-secure/safexcel_ring.c new file mode 100644 index 000000000000..c9d2a8716b5b --- /dev/null +++ b/drivers/crypto/inside-secure/safexcel_ring.c | |||
@@ -0,0 +1,157 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2017 Marvell | ||
3 | * | ||
4 | * Antoine Tenart <antoine.tenart@free-electrons.com> | ||
5 | * | ||
6 | * This file is licensed under the terms of the GNU General Public | ||
7 | * License version 2. This program is licensed "as is" without any | ||
8 | * warranty of any kind, whether express or implied. | ||
9 | */ | ||
10 | |||
11 | #include <linux/dma-mapping.h> | ||
12 | #include <linux/spinlock.h> | ||
13 | |||
14 | #include "safexcel.h" | ||
15 | |||
16 | int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv, | ||
17 | struct safexcel_ring *cdr, | ||
18 | struct safexcel_ring *rdr) | ||
19 | { | ||
20 | cdr->offset = sizeof(u32) * priv->config.cd_offset; | ||
21 | cdr->base = dmam_alloc_coherent(priv->dev, | ||
22 | cdr->offset * EIP197_DEFAULT_RING_SIZE, | ||
23 | &cdr->base_dma, GFP_KERNEL); | ||
24 | if (!cdr->base) | ||
25 | return -ENOMEM; | ||
26 | cdr->write = cdr->base; | ||
27 | cdr->base_end = cdr->base + cdr->offset * EIP197_DEFAULT_RING_SIZE; | ||
28 | cdr->read = cdr->base; | ||
29 | |||
30 | rdr->offset = sizeof(u32) * priv->config.rd_offset; | ||
31 | rdr->base = dmam_alloc_coherent(priv->dev, | ||
32 | rdr->offset * EIP197_DEFAULT_RING_SIZE, | ||
33 | &rdr->base_dma, GFP_KERNEL); | ||
34 | if (!rdr->base) | ||
35 | return -ENOMEM; | ||
36 | rdr->write = rdr->base; | ||
37 | rdr->base_end = rdr->base + rdr->offset * EIP197_DEFAULT_RING_SIZE; | ||
38 | rdr->read = rdr->base; | ||
39 | |||
40 | return 0; | ||
41 | } | ||
42 | |||
43 | inline int safexcel_select_ring(struct safexcel_crypto_priv *priv) | ||
44 | { | ||
45 | return (atomic_inc_return(&priv->ring_used) % priv->config.rings); | ||
46 | } | ||
47 | |||
48 | static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv, | ||
49 | struct safexcel_ring *ring) | ||
50 | { | ||
51 | void *ptr = ring->write; | ||
52 | |||
53 | if (ring->nr == EIP197_DEFAULT_RING_SIZE - 1) | ||
54 | return ERR_PTR(-ENOMEM); | ||
55 | |||
56 | ring->write += ring->offset; | ||
57 | if (ring->write == ring->base_end) | ||
58 | ring->write = ring->base; | ||
59 | |||
60 | ring->nr++; | ||
61 | return ptr; | ||
62 | } | ||
63 | |||
64 | void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv, | ||
65 | struct safexcel_ring *ring) | ||
66 | { | ||
67 | void *ptr = ring->read; | ||
68 | |||
69 | if (!ring->nr) | ||
70 | return ERR_PTR(-ENOENT); | ||
71 | |||
72 | ring->read += ring->offset; | ||
73 | if (ring->read == ring->base_end) | ||
74 | ring->read = ring->base; | ||
75 | |||
76 | ring->nr--; | ||
77 | return ptr; | ||
78 | } | ||
79 | |||
80 | void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv, | ||
81 | struct safexcel_ring *ring) | ||
82 | { | ||
83 | if (!ring->nr) | ||
84 | return; | ||
85 | |||
86 | if (ring->write == ring->base) | ||
87 | ring->write = ring->base_end - ring->offset; | ||
88 | else | ||
89 | ring->write -= ring->offset; | ||
90 | |||
91 | ring->nr--; | ||
92 | } | ||
93 | |||
94 | struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv, | ||
95 | int ring_id, | ||
96 | bool first, bool last, | ||
97 | dma_addr_t data, u32 data_len, | ||
98 | u32 full_data_len, | ||
99 | dma_addr_t context) { | ||
100 | struct safexcel_command_desc *cdesc; | ||
101 | int i; | ||
102 | |||
103 | cdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].cdr); | ||
104 | if (IS_ERR(cdesc)) | ||
105 | return cdesc; | ||
106 | |||
107 | memset(cdesc, 0, sizeof(struct safexcel_command_desc)); | ||
108 | |||
109 | cdesc->first_seg = first; | ||
110 | cdesc->last_seg = last; | ||
111 | cdesc->particle_size = data_len; | ||
112 | cdesc->data_lo = lower_32_bits(data); | ||
113 | cdesc->data_hi = upper_32_bits(data); | ||
114 | |||
115 | if (first && context) { | ||
116 | struct safexcel_token *token = | ||
117 | (struct safexcel_token *)cdesc->control_data.token; | ||
118 | |||
119 | cdesc->control_data.packet_length = full_data_len; | ||
120 | cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE | | ||
121 | EIP197_OPTION_64BIT_CTX | | ||
122 | EIP197_OPTION_CTX_CTRL_IN_CMD; | ||
123 | cdesc->control_data.context_lo = | ||
124 | (lower_32_bits(context) & GENMASK(31, 2)) >> 2; | ||
125 | cdesc->control_data.context_hi = upper_32_bits(context); | ||
126 | |||
127 | /* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */ | ||
128 | cdesc->control_data.refresh = 2; | ||
129 | |||
130 | for (i = 0; i < EIP197_MAX_TOKENS; i++) | ||
131 | eip197_noop_token(&token[i]); | ||
132 | } | ||
133 | |||
134 | return cdesc; | ||
135 | } | ||
136 | |||
137 | struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv, | ||
138 | int ring_id, | ||
139 | bool first, bool last, | ||
140 | dma_addr_t data, u32 len) | ||
141 | { | ||
142 | struct safexcel_result_desc *rdesc; | ||
143 | |||
144 | rdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].rdr); | ||
145 | if (IS_ERR(rdesc)) | ||
146 | return rdesc; | ||
147 | |||
148 | memset(rdesc, 0, sizeof(struct safexcel_result_desc)); | ||
149 | |||
150 | rdesc->first_seg = first; | ||
151 | rdesc->last_seg = last; | ||
152 | rdesc->particle_size = len; | ||
153 | rdesc->data_lo = lower_32_bits(data); | ||
154 | rdesc->data_hi = upper_32_bits(data); | ||
155 | |||
156 | return rdesc; | ||
157 | } | ||
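
The three helpers above implement a fixed-capacity descriptor ring: safexcel_ring_next_wptr() hands out the next free slot, safexcel_ring_next_rptr() consumes the oldest filled one, both wrap at base_end, and safexcel_ring_rollback_wptr() undoes the most recent write when a request has to be unwound. A minimal userspace sketch of the same bookkeeping (names and sizes are illustrative only, not the driver's API):

    #include <stdio.h>
    #include <stdlib.h>

    struct demo_ring {
            char *base, *base_end, *write, *read;
            int offset;     /* bytes per descriptor slot */
            int nr;         /* slots currently in use */
            int capacity;   /* total number of slots */
    };

    /* Reserve the next write slot; refuse when only one slot is left,
     * mirroring the EIP197_DEFAULT_RING_SIZE - 1 check above. */
    static void *ring_next_wptr(struct demo_ring *r)
    {
            void *ptr = r->write;

            if (r->nr == r->capacity - 1)
                    return NULL;
            r->write += r->offset;
            if (r->write == r->base_end)
                    r->write = r->base;     /* wrap around */
            r->nr++;
            return ptr;
    }

    /* Consume the oldest filled slot, if any. */
    static void *ring_next_rptr(struct demo_ring *r)
    {
            void *ptr = r->read;

            if (!r->nr)
                    return NULL;
            r->read += r->offset;
            if (r->read == r->base_end)
                    r->read = r->base;
            r->nr--;
            return ptr;
    }

    /* Undo the most recent write, un-wrapping if that write had wrapped. */
    static void ring_rollback_wptr(struct demo_ring *r)
    {
            if (!r->nr)
                    return;
            if (r->write == r->base)
                    r->write = r->base_end - r->offset;
            else
                    r->write -= r->offset;
            r->nr--;
    }

    int main(void)
    {
            struct demo_ring r = { .offset = 64, .capacity = 8 };
            void *slot;

            r.base = calloc(r.capacity, r.offset);
            r.base_end = r.base + r.capacity * r.offset;
            r.write = r.read = r.base;

            slot = ring_next_wptr(&r);
            printf("got slot %p, in use %d\n", slot, r.nr);
            ring_rollback_wptr(&r);
            printf("after rollback, in use %d\n", r.nr);
            printf("read from empty ring: %p\n", ring_next_rptr(&r));

            free(r.base);
            return 0;
    }
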
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 771dd26c7076..427cbe012729 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <crypto/ctr.h> | 23 | #include <crypto/ctr.h> |
24 | #include <crypto/des.h> | 24 | #include <crypto/des.h> |
25 | #include <crypto/aes.h> | 25 | #include <crypto/aes.h> |
26 | #include <crypto/hmac.h> | ||
26 | #include <crypto/sha.h> | 27 | #include <crypto/sha.h> |
27 | #include <crypto/algapi.h> | 28 | #include <crypto/algapi.h> |
28 | #include <crypto/internal/aead.h> | 29 | #include <crypto/internal/aead.h> |
@@ -90,8 +91,6 @@ | |||
90 | #define CTL_FLAG_PERFORM_AEAD 0x0008 | 91 | #define CTL_FLAG_PERFORM_AEAD 0x0008 |
91 | #define CTL_FLAG_MASK 0x000f | 92 | #define CTL_FLAG_MASK 0x000f |
92 | 93 | ||
93 | #define HMAC_IPAD_VALUE 0x36 | ||
94 | #define HMAC_OPAD_VALUE 0x5C | ||
95 | #define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE | 94 | #define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE |
96 | 95 | ||
97 | #define MD5_DIGEST_SIZE 16 | 96 | #define MD5_DIGEST_SIZE 16 |
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index 77c0fb936f47..e61b08566093 100644 --- a/drivers/crypto/marvell/hash.c +++ b/drivers/crypto/marvell/hash.c | |||
@@ -12,6 +12,7 @@ | |||
12 | * by the Free Software Foundation. | 12 | * by the Free Software Foundation. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <crypto/hmac.h> | ||
15 | #include <crypto/md5.h> | 16 | #include <crypto/md5.h> |
16 | #include <crypto/sha.h> | 17 | #include <crypto/sha.h> |
17 | 18 | ||
@@ -1164,8 +1165,8 @@ static int mv_cesa_ahmac_pad_init(struct ahash_request *req, | |||
1164 | memcpy(opad, ipad, blocksize); | 1165 | memcpy(opad, ipad, blocksize); |
1165 | 1166 | ||
1166 | for (i = 0; i < blocksize; i++) { | 1167 | for (i = 0; i < blocksize; i++) { |
1167 | ipad[i] ^= 0x36; | 1168 | ipad[i] ^= HMAC_IPAD_VALUE; |
1168 | opad[i] ^= 0x5c; | 1169 | opad[i] ^= HMAC_OPAD_VALUE; |
1169 | } | 1170 | } |
1170 | 1171 | ||
1171 | return 0; | 1172 | return 0; |
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c index b6ecc288b540..000b6500a22d 100644 --- a/drivers/crypto/mediatek/mtk-platform.c +++ b/drivers/crypto/mediatek/mtk-platform.c | |||
@@ -504,19 +504,14 @@ static int mtk_crypto_probe(struct platform_device *pdev) | |||
504 | } | 504 | } |
505 | } | 505 | } |
506 | 506 | ||
507 | cryp->clk_ethif = devm_clk_get(&pdev->dev, "ethif"); | ||
508 | cryp->clk_cryp = devm_clk_get(&pdev->dev, "cryp"); | 507 | cryp->clk_cryp = devm_clk_get(&pdev->dev, "cryp"); |
509 | if (IS_ERR(cryp->clk_ethif) || IS_ERR(cryp->clk_cryp)) | 508 | if (IS_ERR(cryp->clk_cryp)) |
510 | return -EPROBE_DEFER; | 509 | return -EPROBE_DEFER; |
511 | 510 | ||
512 | cryp->dev = &pdev->dev; | 511 | cryp->dev = &pdev->dev; |
513 | pm_runtime_enable(cryp->dev); | 512 | pm_runtime_enable(cryp->dev); |
514 | pm_runtime_get_sync(cryp->dev); | 513 | pm_runtime_get_sync(cryp->dev); |
515 | 514 | ||
516 | err = clk_prepare_enable(cryp->clk_ethif); | ||
517 | if (err) | ||
518 | goto err_clk_ethif; | ||
519 | |||
520 | err = clk_prepare_enable(cryp->clk_cryp); | 515 | err = clk_prepare_enable(cryp->clk_cryp); |
521 | if (err) | 516 | if (err) |
522 | goto err_clk_cryp; | 517 | goto err_clk_cryp; |
@@ -559,8 +554,6 @@ err_engine: | |||
559 | err_resource: | 554 | err_resource: |
560 | clk_disable_unprepare(cryp->clk_cryp); | 555 | clk_disable_unprepare(cryp->clk_cryp); |
561 | err_clk_cryp: | 556 | err_clk_cryp: |
562 | clk_disable_unprepare(cryp->clk_ethif); | ||
563 | err_clk_ethif: | ||
564 | pm_runtime_put_sync(cryp->dev); | 557 | pm_runtime_put_sync(cryp->dev); |
565 | pm_runtime_disable(cryp->dev); | 558 | pm_runtime_disable(cryp->dev); |
566 | 559 | ||
@@ -576,7 +569,6 @@ static int mtk_crypto_remove(struct platform_device *pdev) | |||
576 | mtk_desc_dma_free(cryp); | 569 | mtk_desc_dma_free(cryp); |
577 | 570 | ||
578 | clk_disable_unprepare(cryp->clk_cryp); | 571 | clk_disable_unprepare(cryp->clk_cryp); |
579 | clk_disable_unprepare(cryp->clk_ethif); | ||
580 | 572 | ||
581 | pm_runtime_put_sync(cryp->dev); | 573 | pm_runtime_put_sync(cryp->dev); |
582 | pm_runtime_disable(cryp->dev); | 574 | pm_runtime_disable(cryp->dev); |
@@ -596,7 +588,6 @@ static struct platform_driver mtk_crypto_driver = { | |||
596 | .remove = mtk_crypto_remove, | 588 | .remove = mtk_crypto_remove, |
597 | .driver = { | 589 | .driver = { |
598 | .name = "mtk-crypto", | 590 | .name = "mtk-crypto", |
599 | .owner = THIS_MODULE, | ||
600 | .of_match_table = of_crypto_id, | 591 | .of_match_table = of_crypto_id, |
601 | }, | 592 | }, |
602 | }; | 593 | }; |
diff --git a/drivers/crypto/mediatek/mtk-platform.h b/drivers/crypto/mediatek/mtk-platform.h index 303c152dc931..f0831f1742ab 100644 --- a/drivers/crypto/mediatek/mtk-platform.h +++ b/drivers/crypto/mediatek/mtk-platform.h | |||
@@ -200,7 +200,6 @@ struct mtk_sha_rec { | |||
200 | * struct mtk_cryp - Cryptographic device | 200 | * struct mtk_cryp - Cryptographic device |
201 | * @base: pointer to mapped register I/O base | 201 | * @base: pointer to mapped register I/O base |
202 | * @dev: pointer to device | 202 | * @dev: pointer to device |
203 | * @clk_ethif: pointer to ethif clock | ||
204 | * @clk_cryp: pointer to crypto clock | 203 | * @clk_cryp: pointer to crypto clock |
205 | * @irq: global system and rings IRQ | 204 | * @irq: global system and rings IRQ |
206 | * @ring: pointer to descriptor rings | 205 | * @ring: pointer to descriptor rings |
@@ -215,7 +214,6 @@ struct mtk_sha_rec { | |||
215 | struct mtk_cryp { | 214 | struct mtk_cryp { |
216 | void __iomem *base; | 215 | void __iomem *base; |
217 | struct device *dev; | 216 | struct device *dev; |
218 | struct clk *clk_ethif; | ||
219 | struct clk *clk_cryp; | 217 | struct clk *clk_cryp; |
220 | int irq[MTK_IRQ_NUM]; | 218 | int irq[MTK_IRQ_NUM]; |
221 | 219 | ||
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c index 2226f12d1c7a..5f4f845adbb8 100644 --- a/drivers/crypto/mediatek/mtk-sha.c +++ b/drivers/crypto/mediatek/mtk-sha.c | |||
@@ -12,6 +12,7 @@ | |||
12 | * Some ideas are from atmel-sha.c and omap-sham.c drivers. | 12 | * Some ideas are from atmel-sha.c and omap-sham.c drivers. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <crypto/hmac.h> | ||
15 | #include <crypto/sha.h> | 16 | #include <crypto/sha.h> |
16 | #include "mtk-platform.h" | 17 | #include "mtk-platform.h" |
17 | 18 | ||
@@ -825,8 +826,8 @@ static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
825 | memcpy(bctx->opad, bctx->ipad, bs); | 826 | memcpy(bctx->opad, bctx->ipad, bs); |
826 | 827 | ||
827 | for (i = 0; i < bs; i++) { | 828 | for (i = 0; i < bs; i++) { |
828 | bctx->ipad[i] ^= 0x36; | 829 | bctx->ipad[i] ^= HMAC_IPAD_VALUE; |
829 | bctx->opad[i] ^= 0x5c; | 830 | bctx->opad[i] ^= HMAC_OPAD_VALUE; |
830 | } | 831 | } |
831 | 832 | ||
832 | return 0; | 833 | return 0; |
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index 451fa18c1c7b..bf25f415eea6 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/clk.h> | 20 | #include <linux/clk.h> |
21 | #include <crypto/hmac.h> | ||
21 | #include <crypto/internal/hash.h> | 22 | #include <crypto/internal/hash.h> |
22 | #include <crypto/sha.h> | 23 | #include <crypto/sha.h> |
23 | #include <linux/of.h> | 24 | #include <linux/of.h> |
@@ -822,8 +823,8 @@ static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key, | |||
822 | memcpy(opad, ipad, bs); | 823 | memcpy(opad, ipad, bs); |
823 | 824 | ||
824 | for (i = 0; i < bs; i++) { | 825 | for (i = 0; i < bs; i++) { |
825 | ipad[i] ^= 0x36; | 826 | ipad[i] ^= HMAC_IPAD_VALUE; |
826 | opad[i] ^= 0x5c; | 827 | opad[i] ^= HMAC_OPAD_VALUE; |
827 | } | 828 | } |
828 | 829 | ||
829 | rc = crypto_shash_init(shash) ? : | 830 | rc = crypto_shash_init(shash) ? : |
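
Several hunks above (ixp4xx, marvell/hash, mediatek mtk-sha, mv_cesa) replace open-coded 0x36/0x5c magic numbers with HMAC_IPAD_VALUE and HMAC_OPAD_VALUE from the shared <crypto/hmac.h>. The constants come from RFC 2104: HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m)), where K' is the key zero-padded to the hash block size. A small userspace sketch of the pad derivation these drivers perform (block size and key are illustrative; over-long keys, which would first be hashed down, are omitted):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define HMAC_IPAD_VALUE 0x36
    #define HMAC_OPAD_VALUE 0x5c
    #define BLOCK_SIZE      64      /* SHA-1/SHA-256 block size */

    /* Derive the inner and outer pad blocks from a key that already fits
     * in one hash block. */
    static void hmac_prepare_pads(const uint8_t *key, size_t keylen,
                                  uint8_t ipad[BLOCK_SIZE],
                                  uint8_t opad[BLOCK_SIZE])
    {
            size_t i;

            memset(ipad, 0, BLOCK_SIZE);
            memcpy(ipad, key, keylen);
            memcpy(opad, ipad, BLOCK_SIZE);

            for (i = 0; i < BLOCK_SIZE; i++) {
                    ipad[i] ^= HMAC_IPAD_VALUE;
                    opad[i] ^= HMAC_OPAD_VALUE;
            }
    }

    int main(void)
    {
            uint8_t ipad[BLOCK_SIZE], opad[BLOCK_SIZE];

            hmac_prepare_pads((const uint8_t *)"secret", 6, ipad, opad);
            printf("ipad[0]=0x%02x opad[0]=0x%02x\n", ipad[0], opad[0]);
            return 0;
    }
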
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 4ecb77aa60e1..269451375b63 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c | |||
@@ -2169,7 +2169,7 @@ static int n2_mau_remove(struct platform_device *dev) | |||
2169 | return 0; | 2169 | return 0; |
2170 | } | 2170 | } |
2171 | 2171 | ||
2172 | static struct of_device_id n2_crypto_match[] = { | 2172 | static const struct of_device_id n2_crypto_match[] = { |
2173 | { | 2173 | { |
2174 | .name = "n2cp", | 2174 | .name = "n2cp", |
2175 | .compatible = "SUNW,n2-cwq", | 2175 | .compatible = "SUNW,n2-cwq", |
@@ -2196,7 +2196,7 @@ static struct platform_driver n2_crypto_driver = { | |||
2196 | .remove = n2_crypto_remove, | 2196 | .remove = n2_crypto_remove, |
2197 | }; | 2197 | }; |
2198 | 2198 | ||
2199 | static struct of_device_id n2_mau_match[] = { | 2199 | static const struct of_device_id n2_mau_match[] = { |
2200 | { | 2200 | { |
2201 | .name = "ncp", | 2201 | .name = "ncp", |
2202 | .compatible = "SUNW,n2-mau", | 2202 | .compatible = "SUNW,n2-mau", |
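
The n2 hunks mark the of_device_id match tables const; they are only read by the driver core at probe time, so they can live in read-only data. A minimal hypothetical table in the now-preferred form (a sketch only; device names and the missing probe/remove callbacks are illustrative):

    #include <linux/module.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    static const struct of_device_id demo_crypto_match[] = {
            { .compatible = "vendor,demo-crypto" },
            { /* sentinel */ },
    };
    MODULE_DEVICE_TABLE(of, demo_crypto_match);

    static struct platform_driver demo_crypto_driver = {
            /* .probe/.remove omitted in this sketch */
            .driver = {
                    .name = "demo-crypto",
                    .of_match_table = demo_crypto_match,
            },
    };
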
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c new file mode 100644 index 000000000000..7d4f8a4be6d8 --- /dev/null +++ b/drivers/crypto/omap-aes-gcm.c | |||
@@ -0,0 +1,408 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Support for OMAP AES GCM HW acceleration. | ||
5 | * | ||
6 | * Copyright (c) 2016 Texas Instruments Incorporated | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as published | ||
10 | * by the Free Software Foundation. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/errno.h> | ||
15 | #include <linux/scatterlist.h> | ||
16 | #include <linux/dma-mapping.h> | ||
17 | #include <linux/dmaengine.h> | ||
18 | #include <linux/omap-dma.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <crypto/aes.h> | ||
21 | #include <crypto/scatterwalk.h> | ||
22 | #include <crypto/skcipher.h> | ||
23 | #include <crypto/internal/aead.h> | ||
24 | |||
25 | #include "omap-crypto.h" | ||
26 | #include "omap-aes.h" | ||
27 | |||
28 | static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd, | ||
29 | struct aead_request *req); | ||
30 | |||
31 | static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret) | ||
32 | { | ||
33 | struct aead_request *req = dd->aead_req; | ||
34 | |||
35 | dd->flags &= ~FLAGS_BUSY; | ||
36 | dd->in_sg = NULL; | ||
37 | dd->out_sg = NULL; | ||
38 | |||
39 | req->base.complete(&req->base, ret); | ||
40 | } | ||
41 | |||
42 | static void omap_aes_gcm_done_task(struct omap_aes_dev *dd) | ||
43 | { | ||
44 | u8 *tag; | ||
45 | int alen, clen, i, ret = 0, nsg; | ||
46 | struct omap_aes_reqctx *rctx; | ||
47 | |||
48 | alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE); | ||
49 | clen = ALIGN(dd->total, AES_BLOCK_SIZE); | ||
50 | rctx = aead_request_ctx(dd->aead_req); | ||
51 | |||
52 | nsg = !!(dd->assoc_len && dd->total); | ||
53 | |||
54 | dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len, | ||
55 | DMA_FROM_DEVICE); | ||
56 | dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); | ||
57 | dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE); | ||
58 | omap_aes_crypt_dma_stop(dd); | ||
59 | |||
60 | omap_crypto_cleanup(dd->out_sg, dd->orig_out, | ||
61 | dd->aead_req->assoclen, dd->total, | ||
62 | FLAGS_OUT_DATA_ST_SHIFT, dd->flags); | ||
63 | |||
64 | if (dd->flags & FLAGS_ENCRYPT) | ||
65 | scatterwalk_map_and_copy(rctx->auth_tag, | ||
66 | dd->aead_req->dst, | ||
67 | dd->total + dd->aead_req->assoclen, | ||
68 | dd->authsize, 1); | ||
69 | |||
70 | omap_crypto_cleanup(&dd->in_sgl[0], NULL, 0, alen, | ||
71 | FLAGS_ASSOC_DATA_ST_SHIFT, dd->flags); | ||
72 | |||
73 | omap_crypto_cleanup(&dd->in_sgl[nsg], NULL, 0, clen, | ||
74 | FLAGS_IN_DATA_ST_SHIFT, dd->flags); | ||
75 | |||
76 | if (!(dd->flags & FLAGS_ENCRYPT)) { | ||
77 | tag = (u8 *)rctx->auth_tag; | ||
78 | for (i = 0; i < dd->authsize; i++) { | ||
79 | if (tag[i]) { | ||
80 | dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n"); | ||
81 | ret = -EBADMSG; | ||
82 | } | ||
83 | } | ||
84 | } | ||
85 | |||
86 | omap_aes_gcm_finish_req(dd, ret); | ||
87 | omap_aes_gcm_handle_queue(dd, NULL); | ||
88 | } | ||
89 | |||
90 | static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd, | ||
91 | struct aead_request *req) | ||
92 | { | ||
93 | int alen, clen, cryptlen, assoclen, ret; | ||
94 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
95 | unsigned int authlen = crypto_aead_authsize(aead); | ||
96 | struct scatterlist *tmp, sg_arr[2]; | ||
97 | int nsg; | ||
98 | u16 flags; | ||
99 | |||
100 | assoclen = req->assoclen; | ||
101 | cryptlen = req->cryptlen; | ||
102 | |||
103 | if (dd->flags & FLAGS_RFC4106_GCM) | ||
104 | assoclen -= 8; | ||
105 | |||
106 | if (!(dd->flags & FLAGS_ENCRYPT)) | ||
107 | cryptlen -= authlen; | ||
108 | |||
109 | alen = ALIGN(assoclen, AES_BLOCK_SIZE); | ||
110 | clen = ALIGN(cryptlen, AES_BLOCK_SIZE); | ||
111 | |||
112 | nsg = !!(assoclen && cryptlen); | ||
113 | |||
114 | omap_aes_clear_copy_flags(dd); | ||
115 | |||
116 | sg_init_table(dd->in_sgl, nsg + 1); | ||
117 | if (assoclen) { | ||
118 | tmp = req->src; | ||
119 | ret = omap_crypto_align_sg(&tmp, assoclen, | ||
120 | AES_BLOCK_SIZE, dd->in_sgl, | ||
121 | OMAP_CRYPTO_COPY_DATA | | ||
122 | OMAP_CRYPTO_ZERO_BUF | | ||
123 | OMAP_CRYPTO_FORCE_SINGLE_ENTRY, | ||
124 | FLAGS_ASSOC_DATA_ST_SHIFT, | ||
125 | &dd->flags); | ||
126 | } | ||
127 | |||
128 | if (cryptlen) { | ||
129 | tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen); | ||
130 | |||
131 | ret = omap_crypto_align_sg(&tmp, cryptlen, | ||
132 | AES_BLOCK_SIZE, &dd->in_sgl[nsg], | ||
133 | OMAP_CRYPTO_COPY_DATA | | ||
134 | OMAP_CRYPTO_ZERO_BUF | | ||
135 | OMAP_CRYPTO_FORCE_SINGLE_ENTRY, | ||
136 | FLAGS_IN_DATA_ST_SHIFT, | ||
137 | &dd->flags); | ||
138 | } | ||
139 | |||
140 | dd->in_sg = dd->in_sgl; | ||
141 | dd->total = cryptlen; | ||
142 | dd->assoc_len = assoclen; | ||
143 | dd->authsize = authlen; | ||
144 | |||
145 | dd->out_sg = req->dst; | ||
146 | dd->orig_out = req->dst; | ||
147 | |||
148 | dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen); | ||
149 | |||
150 | flags = 0; | ||
151 | if (req->src == req->dst || dd->out_sg == sg_arr) | ||
152 | flags |= OMAP_CRYPTO_FORCE_COPY; | ||
153 | |||
154 | ret = omap_crypto_align_sg(&dd->out_sg, cryptlen, | ||
155 | AES_BLOCK_SIZE, &dd->out_sgl, | ||
156 | flags, | ||
157 | FLAGS_OUT_DATA_ST_SHIFT, &dd->flags); | ||
158 | if (ret) | ||
159 | return ret; | ||
160 | |||
161 | dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen); | ||
162 | dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen); | ||
163 | |||
164 | return 0; | ||
165 | } | ||
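
omap_aes_gcm_copy_buffers() rounds both the associated data and the payload up to the AES block size before handing them to the accelerator, since the hardware consumes whole 16-byte blocks; the kernel's ALIGN() macro does the rounding. For power-of-two alignments this is (x + a - 1) & ~(a - 1), e.g. (the macro below is a simplified stand-in, not the kernel's definition):

    #include <stdio.h>

    /* Simplified stand-in for the kernel's ALIGN(); power-of-two 'a' only. */
    #define DEMO_ALIGN(x, a)        (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            printf("%d\n", DEMO_ALIGN(20, 16));  /* 32: 20 bytes of AAD occupy two blocks */
            printf("%d\n", DEMO_ALIGN(16, 16));  /* 16: already aligned */
            printf("%d\n", DEMO_ALIGN(0, 16));   /* 0 */
            return 0;
    }
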
166 | |||
167 | static void omap_aes_gcm_complete(struct crypto_async_request *req, int err) | ||
168 | { | ||
169 | struct omap_aes_gcm_result *res = req->data; | ||
170 | |||
171 | if (err == -EINPROGRESS) | ||
172 | return; | ||
173 | |||
174 | res->err = err; | ||
175 | complete(&res->completion); | ||
176 | } | ||
177 | |||
178 | static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv) | ||
179 | { | ||
180 | struct scatterlist iv_sg, tag_sg; | ||
181 | struct skcipher_request *sk_req; | ||
182 | struct omap_aes_gcm_result result; | ||
183 | struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
184 | int ret = 0; | ||
185 | |||
186 | sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL); | ||
187 | if (!sk_req) { | ||
188 | pr_err("skcipher: Failed to allocate request\n"); | ||
189 | return -1; | ||
190 | } | ||
191 | |||
192 | init_completion(&result.completion); | ||
193 | |||
194 | sg_init_one(&iv_sg, iv, AES_BLOCK_SIZE); | ||
195 | sg_init_one(&tag_sg, tag, AES_BLOCK_SIZE); | ||
196 | skcipher_request_set_callback(sk_req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
197 | omap_aes_gcm_complete, &result); | ||
198 | ret = crypto_skcipher_setkey(ctx->ctr, (u8 *)ctx->key, ctx->keylen); | ||
199 | skcipher_request_set_crypt(sk_req, &iv_sg, &tag_sg, AES_BLOCK_SIZE, | ||
200 | NULL); | ||
201 | ret = crypto_skcipher_encrypt(sk_req); | ||
202 | switch (ret) { | ||
203 | case 0: | ||
204 | break; | ||
205 | case -EINPROGRESS: | ||
206 | case -EBUSY: | ||
207 | ret = wait_for_completion_interruptible(&result.completion); | ||
208 | if (!ret) { | ||
209 | ret = result.err; | ||
210 | if (!ret) { | ||
211 | reinit_completion(&result.completion); | ||
212 | break; | ||
213 | } | ||
214 | } | ||
215 | /* fall through */ | ||
216 | default: | ||
217 | pr_err("Encryption of IV failed for GCM mode"); | ||
218 | break; | ||
219 | } | ||
220 | |||
221 | skcipher_request_free(sk_req); | ||
222 | return ret; | ||
223 | } | ||
224 | |||
225 | void omap_aes_gcm_dma_out_callback(void *data) | ||
226 | { | ||
227 | struct omap_aes_dev *dd = data; | ||
228 | struct omap_aes_reqctx *rctx; | ||
229 | int i, val; | ||
230 | u32 *auth_tag, tag[4]; | ||
231 | |||
232 | if (!(dd->flags & FLAGS_ENCRYPT)) | ||
233 | scatterwalk_map_and_copy(tag, dd->aead_req->src, | ||
234 | dd->total + dd->aead_req->assoclen, | ||
235 | dd->authsize, 0); | ||
236 | |||
237 | rctx = aead_request_ctx(dd->aead_req); | ||
238 | auth_tag = (u32 *)rctx->auth_tag; | ||
239 | for (i = 0; i < 4; i++) { | ||
240 | val = omap_aes_read(dd, AES_REG_TAG_N(dd, i)); | ||
241 | auth_tag[i] = val ^ auth_tag[i]; | ||
242 | if (!(dd->flags & FLAGS_ENCRYPT)) | ||
243 | auth_tag[i] = auth_tag[i] ^ tag[i]; | ||
244 | } | ||
245 | |||
246 | omap_aes_gcm_done_task(dd); | ||
247 | } | ||
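
As the two functions above show, GCM tag handling is split in two steps: the hardware leaves a partial tag in the TAG registers, the DMA-out callback folds in E_K(J0) (pre-computed into rctx->auth_tag by do_encrypt_iv()), and for decryption it additionally XORs in the tag received with the ciphertext, so that a correct message leaves rctx->auth_tag all-zero for omap_aes_gcm_done_task() to check. A hedged userspace sketch of that arithmetic (all values illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define TAG_WORDS 4     /* 16-byte GCM tag as four 32-bit words */

    /* Encrypt side: final tag = hw_partial ^ E_K(J0). */
    static void fold_tag_encrypt(uint32_t out[TAG_WORDS],
                                 const uint32_t hw_partial[TAG_WORDS],
                                 const uint32_t ek_j0[TAG_WORDS])
    {
            for (int i = 0; i < TAG_WORDS; i++)
                    out[i] = hw_partial[i] ^ ek_j0[i];
    }

    /* Decrypt side: additionally XOR in the received tag, so the result is
     * zero exactly when the received tag matches the computed one. */
    static int check_tag_decrypt(const uint32_t hw_partial[TAG_WORDS],
                                 const uint32_t ek_j0[TAG_WORDS],
                                 const uint32_t received[TAG_WORDS])
    {
            uint32_t acc = 0;

            for (int i = 0; i < TAG_WORDS; i++)
                    acc |= hw_partial[i] ^ ek_j0[i] ^ received[i];

            return acc ? -1 : 0;    /* -1 stands in for -EBADMSG */
    }

    int main(void)
    {
            uint32_t hw[TAG_WORDS] = { 1, 2, 3, 4 };
            uint32_t j0[TAG_WORDS] = { 5, 6, 7, 8 };
            uint32_t tag[TAG_WORDS];

            fold_tag_encrypt(tag, hw, j0);
            printf("valid: %d\n", check_tag_decrypt(hw, j0, tag));    /* 0 */
            tag[0] ^= 1;
            printf("tampered: %d\n", check_tag_decrypt(hw, j0, tag)); /* -1 */
            return 0;
    }
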
248 | |||
249 | static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd, | ||
250 | struct aead_request *req) | ||
251 | { | ||
252 | struct omap_aes_ctx *ctx; | ||
253 | struct aead_request *backlog; | ||
254 | struct omap_aes_reqctx *rctx; | ||
255 | unsigned long flags; | ||
256 | int err, ret = 0; | ||
257 | |||
258 | spin_lock_irqsave(&dd->lock, flags); | ||
259 | if (req) | ||
260 | ret = aead_enqueue_request(&dd->aead_queue, req); | ||
261 | if (dd->flags & FLAGS_BUSY) { | ||
262 | spin_unlock_irqrestore(&dd->lock, flags); | ||
263 | return ret; | ||
264 | } | ||
265 | |||
266 | backlog = aead_get_backlog(&dd->aead_queue); | ||
267 | req = aead_dequeue_request(&dd->aead_queue); | ||
268 | if (req) | ||
269 | dd->flags |= FLAGS_BUSY; | ||
270 | spin_unlock_irqrestore(&dd->lock, flags); | ||
271 | |||
272 | if (!req) | ||
273 | return ret; | ||
274 | |||
275 | if (backlog) | ||
276 | backlog->base.complete(&backlog->base, -EINPROGRESS); | ||
277 | |||
278 | ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
279 | rctx = aead_request_ctx(req); | ||
280 | |||
281 | dd->ctx = ctx; | ||
282 | rctx->dd = dd; | ||
283 | dd->aead_req = req; | ||
284 | |||
285 | rctx->mode &= FLAGS_MODE_MASK; | ||
286 | dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode; | ||
287 | |||
288 | err = omap_aes_gcm_copy_buffers(dd, req); | ||
289 | if (err) | ||
290 | return err; | ||
291 | |||
292 | err = omap_aes_write_ctrl(dd); | ||
293 | if (!err) | ||
294 | err = omap_aes_crypt_dma_start(dd); | ||
295 | |||
296 | if (err) { | ||
297 | omap_aes_gcm_finish_req(dd, err); | ||
298 | omap_aes_gcm_handle_queue(dd, NULL); | ||
299 | } | ||
300 | |||
301 | return ret; | ||
302 | } | ||
303 | |||
304 | static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode) | ||
305 | { | ||
306 | struct omap_aes_reqctx *rctx = aead_request_ctx(req); | ||
307 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
308 | unsigned int authlen = crypto_aead_authsize(aead); | ||
309 | struct omap_aes_dev *dd; | ||
310 | __be32 counter = cpu_to_be32(1); | ||
311 | int err, assoclen; | ||
312 | |||
313 | memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag)); | ||
314 | memcpy(rctx->iv + 12, &counter, 4); | ||
315 | |||
316 | err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv); | ||
317 | if (err) | ||
318 | return err; | ||
319 | |||
320 | if (mode & FLAGS_RFC4106_GCM) | ||
321 | assoclen = req->assoclen - 8; | ||
322 | else | ||
323 | assoclen = req->assoclen; | ||
324 | if (assoclen + req->cryptlen == 0) { | ||
325 | scatterwalk_map_and_copy(rctx->auth_tag, req->dst, 0, authlen, | ||
326 | 1); | ||
327 | return 0; | ||
328 | } | ||
329 | |||
330 | dd = omap_aes_find_dev(rctx); | ||
331 | if (!dd) | ||
332 | return -ENODEV; | ||
333 | rctx->mode = mode; | ||
334 | |||
335 | return omap_aes_gcm_handle_queue(dd, req); | ||
336 | } | ||
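
omap_aes_gcm_crypt() builds the initial GCM counter block J0 in rctx->iv: for the 96-bit IV case the layout is IV[0..11] followed by a big-endian 32-bit counter initialized to 1, and do_encrypt_iv() then runs that block through the software ecb(aes) transform to obtain E_K(J0). When both the AAD and the payload are empty, GHASH contributes nothing and the tag is E_K(J0) itself, which is why the function can short-circuit without touching the hardware. A small sketch of the J0 layout (illustrative only):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Build the 16-byte pre-counter block J0 for a 96-bit GCM IV:
     * bytes 0..11 are the IV, bytes 12..15 are a big-endian counter = 1.
     * This mirrors the memcpy(rctx->iv + 12, &counter, 4) above. */
    static void gcm_build_j0(uint8_t j0[16], const uint8_t iv[12])
    {
            uint8_t counter[4] = { 0x00, 0x00, 0x00, 0x01 };  /* be32(1) */

            memcpy(j0, iv, 12);
            memcpy(j0 + 12, counter, 4);
    }

    int main(void)
    {
            uint8_t iv[12] = { 0 };
            uint8_t j0[16];

            gcm_build_j0(j0, iv);
            for (int i = 0; i < 16; i++)
                    printf("%02x", j0[i]);
            printf("\n");   /* ...00000001: the block whose AES encryption masks the tag */
            return 0;
    }
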
337 | |||
338 | int omap_aes_gcm_encrypt(struct aead_request *req) | ||
339 | { | ||
340 | struct omap_aes_reqctx *rctx = aead_request_ctx(req); | ||
341 | |||
342 | memcpy(rctx->iv, req->iv, 12); | ||
343 | return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM); | ||
344 | } | ||
345 | |||
346 | int omap_aes_gcm_decrypt(struct aead_request *req) | ||
347 | { | ||
348 | struct omap_aes_reqctx *rctx = aead_request_ctx(req); | ||
349 | |||
350 | memcpy(rctx->iv, req->iv, 12); | ||
351 | return omap_aes_gcm_crypt(req, FLAGS_GCM); | ||
352 | } | ||
353 | |||
354 | int omap_aes_4106gcm_encrypt(struct aead_request *req) | ||
355 | { | ||
356 | struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
357 | struct omap_aes_reqctx *rctx = aead_request_ctx(req); | ||
358 | |||
359 | memcpy(rctx->iv, ctx->nonce, 4); | ||
360 | memcpy(rctx->iv + 4, req->iv, 8); | ||
361 | return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM | | ||
362 | FLAGS_RFC4106_GCM); | ||
363 | } | ||
364 | |||
365 | int omap_aes_4106gcm_decrypt(struct aead_request *req) | ||
366 | { | ||
367 | struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); | ||
368 | struct omap_aes_reqctx *rctx = aead_request_ctx(req); | ||
369 | |||
370 | memcpy(rctx->iv, ctx->nonce, 4); | ||
371 | memcpy(rctx->iv + 4, req->iv, 8); | ||
372 | return omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM); | ||
373 | } | ||
374 | |||
375 | int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, | ||
376 | unsigned int keylen) | ||
377 | { | ||
378 | struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm); | ||
379 | |||
380 | if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && | ||
381 | keylen != AES_KEYSIZE_256) | ||
382 | return -EINVAL; | ||
383 | |||
384 | memcpy(ctx->key, key, keylen); | ||
385 | ctx->keylen = keylen; | ||
386 | |||
387 | return 0; | ||
388 | } | ||
389 | |||
390 | int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key, | ||
391 | unsigned int keylen) | ||
392 | { | ||
393 | struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm); | ||
394 | |||
395 | if (keylen < 4) | ||
396 | return -EINVAL; | ||
397 | |||
398 | keylen -= 4; | ||
399 | if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && | ||
400 | keylen != AES_KEYSIZE_256) | ||
401 | return -EINVAL; | ||
402 | |||
403 | memcpy(ctx->key, key, keylen); | ||
404 | memcpy(ctx->nonce, key + keylen, 4); | ||
405 | ctx->keylen = keylen; | ||
406 | |||
407 | return 0; | ||
408 | } | ||
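
The rfc4106 variants encode extra material in the key and IV: setkey receives the AES key followed by a 4-byte salt (stored as ctx->nonce), and each request carries only an 8-byte explicit IV, so the 12-byte GCM nonce used above is salt || explicit_iv. A quick illustration of that split, mirroring omap_aes_4106gcm_setkey() and omap_aes_4106gcm_encrypt() (all values are made up):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint8_t key_blob[16 + 4];       /* AES-128 key followed by 4-byte salt */
            uint8_t aes_key[16], salt[4];
            uint8_t explicit_iv[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
            uint8_t nonce[12];

            memset(key_blob, 0xaa, sizeof(key_blob));

            /* setkey side: peel the salt off the end of the key blob */
            memcpy(aes_key, key_blob, sizeof(aes_key));
            memcpy(salt, key_blob + sizeof(aes_key), sizeof(salt));

            /* per-request side: nonce = salt || explicit IV */
            memcpy(nonce, salt, 4);                 /* rctx->iv[0..3]  */
            memcpy(nonce + 4, explicit_iv, 8);      /* rctx->iv[4..11] */

            for (int i = 0; i < 12; i++)
                    printf("%02x", nonce[i]);
            printf("\n");
            return 0;
    }
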
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index fe32dd95ae4f..5120a17731d0 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c | |||
@@ -37,155 +37,10 @@ | |||
37 | #include <crypto/aes.h> | 37 | #include <crypto/aes.h> |
38 | #include <crypto/engine.h> | 38 | #include <crypto/engine.h> |
39 | #include <crypto/internal/skcipher.h> | 39 | #include <crypto/internal/skcipher.h> |
40 | #include <crypto/internal/aead.h> | ||
40 | 41 | ||
41 | #define DST_MAXBURST 4 | 42 | #include "omap-crypto.h" |
42 | #define DMA_MIN (DST_MAXBURST * sizeof(u32)) | 43 | #include "omap-aes.h" |
43 | |||
44 | #define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset) | ||
45 | |||
46 | /* OMAP TRM gives bitfields as start:end, where start is the higher bit | ||
47 | number. For example 7:0 */ | ||
48 | #define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) | ||
49 | #define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end)) | ||
50 | |||
51 | #define AES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \ | ||
52 | ((x ^ 0x01) * 0x04)) | ||
53 | #define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04)) | ||
54 | |||
55 | #define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs) | ||
56 | #define AES_REG_CTRL_CTR_WIDTH_MASK GENMASK(8, 7) | ||
57 | #define AES_REG_CTRL_CTR_WIDTH_32 0 | ||
58 | #define AES_REG_CTRL_CTR_WIDTH_64 BIT(7) | ||
59 | #define AES_REG_CTRL_CTR_WIDTH_96 BIT(8) | ||
60 | #define AES_REG_CTRL_CTR_WIDTH_128 GENMASK(8, 7) | ||
61 | #define AES_REG_CTRL_CTR BIT(6) | ||
62 | #define AES_REG_CTRL_CBC BIT(5) | ||
63 | #define AES_REG_CTRL_KEY_SIZE GENMASK(4, 3) | ||
64 | #define AES_REG_CTRL_DIRECTION BIT(2) | ||
65 | #define AES_REG_CTRL_INPUT_READY BIT(1) | ||
66 | #define AES_REG_CTRL_OUTPUT_READY BIT(0) | ||
67 | #define AES_REG_CTRL_MASK GENMASK(24, 2) | ||
68 | |||
69 | #define AES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04)) | ||
70 | |||
71 | #define AES_REG_REV(dd) ((dd)->pdata->rev_ofs) | ||
72 | |||
73 | #define AES_REG_MASK(dd) ((dd)->pdata->mask_ofs) | ||
74 | #define AES_REG_MASK_SIDLE BIT(6) | ||
75 | #define AES_REG_MASK_START BIT(5) | ||
76 | #define AES_REG_MASK_DMA_OUT_EN BIT(3) | ||
77 | #define AES_REG_MASK_DMA_IN_EN BIT(2) | ||
78 | #define AES_REG_MASK_SOFTRESET BIT(1) | ||
79 | #define AES_REG_AUTOIDLE BIT(0) | ||
80 | |||
81 | #define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04)) | ||
82 | |||
83 | #define AES_REG_IRQ_STATUS(dd) ((dd)->pdata->irq_status_ofs) | ||
84 | #define AES_REG_IRQ_ENABLE(dd) ((dd)->pdata->irq_enable_ofs) | ||
85 | #define AES_REG_IRQ_DATA_IN BIT(1) | ||
86 | #define AES_REG_IRQ_DATA_OUT BIT(2) | ||
87 | #define DEFAULT_TIMEOUT (5*HZ) | ||
88 | |||
89 | #define DEFAULT_AUTOSUSPEND_DELAY 1000 | ||
90 | |||
91 | #define FLAGS_MODE_MASK 0x000f | ||
92 | #define FLAGS_ENCRYPT BIT(0) | ||
93 | #define FLAGS_CBC BIT(1) | ||
94 | #define FLAGS_GIV BIT(2) | ||
95 | #define FLAGS_CTR BIT(3) | ||
96 | |||
97 | #define FLAGS_INIT BIT(4) | ||
98 | #define FLAGS_FAST BIT(5) | ||
99 | #define FLAGS_BUSY BIT(6) | ||
100 | |||
101 | #define AES_BLOCK_WORDS (AES_BLOCK_SIZE >> 2) | ||
102 | |||
103 | struct omap_aes_ctx { | ||
104 | struct omap_aes_dev *dd; | ||
105 | |||
106 | int keylen; | ||
107 | u32 key[AES_KEYSIZE_256 / sizeof(u32)]; | ||
108 | unsigned long flags; | ||
109 | struct crypto_skcipher *fallback; | ||
110 | }; | ||
111 | |||
112 | struct omap_aes_reqctx { | ||
113 | unsigned long mode; | ||
114 | }; | ||
115 | |||
116 | #define OMAP_AES_QUEUE_LENGTH 1 | ||
117 | #define OMAP_AES_CACHE_SIZE 0 | ||
118 | |||
119 | struct omap_aes_algs_info { | ||
120 | struct crypto_alg *algs_list; | ||
121 | unsigned int size; | ||
122 | unsigned int registered; | ||
123 | }; | ||
124 | |||
125 | struct omap_aes_pdata { | ||
126 | struct omap_aes_algs_info *algs_info; | ||
127 | unsigned int algs_info_size; | ||
128 | |||
129 | void (*trigger)(struct omap_aes_dev *dd, int length); | ||
130 | |||
131 | u32 key_ofs; | ||
132 | u32 iv_ofs; | ||
133 | u32 ctrl_ofs; | ||
134 | u32 data_ofs; | ||
135 | u32 rev_ofs; | ||
136 | u32 mask_ofs; | ||
137 | u32 irq_enable_ofs; | ||
138 | u32 irq_status_ofs; | ||
139 | |||
140 | u32 dma_enable_in; | ||
141 | u32 dma_enable_out; | ||
142 | u32 dma_start; | ||
143 | |||
144 | u32 major_mask; | ||
145 | u32 major_shift; | ||
146 | u32 minor_mask; | ||
147 | u32 minor_shift; | ||
148 | }; | ||
149 | |||
150 | struct omap_aes_dev { | ||
151 | struct list_head list; | ||
152 | unsigned long phys_base; | ||
153 | void __iomem *io_base; | ||
154 | struct omap_aes_ctx *ctx; | ||
155 | struct device *dev; | ||
156 | unsigned long flags; | ||
157 | int err; | ||
158 | |||
159 | struct tasklet_struct done_task; | ||
160 | |||
161 | struct ablkcipher_request *req; | ||
162 | struct crypto_engine *engine; | ||
163 | |||
164 | /* | ||
165 | * total is used by PIO mode for book keeping so introduce | ||
166 | * variable total_save as need it to calc page_order | ||
167 | */ | ||
168 | size_t total; | ||
169 | size_t total_save; | ||
170 | |||
171 | struct scatterlist *in_sg; | ||
172 | struct scatterlist *out_sg; | ||
173 | |||
174 | /* Buffers for copying for unaligned cases */ | ||
175 | struct scatterlist in_sgl; | ||
176 | struct scatterlist out_sgl; | ||
177 | struct scatterlist *orig_out; | ||
178 | int sgs_copied; | ||
179 | |||
180 | struct scatter_walk in_walk; | ||
181 | struct scatter_walk out_walk; | ||
182 | struct dma_chan *dma_lch_in; | ||
183 | struct dma_chan *dma_lch_out; | ||
184 | int in_sg_len; | ||
185 | int out_sg_len; | ||
186 | int pio_only; | ||
187 | const struct omap_aes_pdata *pdata; | ||
188 | }; | ||
189 | 44 | ||
190 | /* keep registered devices data here */ | 45 | /* keep registered devices data here */ |
191 | static LIST_HEAD(dev_list); | 46 | static LIST_HEAD(dev_list); |
@@ -201,7 +56,7 @@ static DEFINE_SPINLOCK(list_lock); | |||
201 | _read_ret; \ | 56 | _read_ret; \ |
202 | }) | 57 | }) |
203 | #else | 58 | #else |
204 | static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset) | 59 | inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset) |
205 | { | 60 | { |
206 | return __raw_readl(dd->io_base + offset); | 61 | return __raw_readl(dd->io_base + offset); |
207 | } | 62 | } |
@@ -215,7 +70,7 @@ static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset) | |||
215 | __raw_writel(value, dd->io_base + offset); \ | 70 | __raw_writel(value, dd->io_base + offset); \ |
216 | } while (0) | 71 | } while (0) |
217 | #else | 72 | #else |
218 | static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset, | 73 | inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset, |
219 | u32 value) | 74 | u32 value) |
220 | { | 75 | { |
221 | __raw_writel(value, dd->io_base + offset); | 76 | __raw_writel(value, dd->io_base + offset); |
@@ -258,8 +113,16 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd) | |||
258 | return 0; | 113 | return 0; |
259 | } | 114 | } |
260 | 115 | ||
261 | static int omap_aes_write_ctrl(struct omap_aes_dev *dd) | 116 | void omap_aes_clear_copy_flags(struct omap_aes_dev *dd) |
117 | { | ||
118 | dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_IN_DATA_ST_SHIFT); | ||
119 | dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_OUT_DATA_ST_SHIFT); | ||
120 | dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_ASSOC_DATA_ST_SHIFT); | ||
121 | } | ||
122 | |||
123 | int omap_aes_write_ctrl(struct omap_aes_dev *dd) | ||
262 | { | 124 | { |
125 | struct omap_aes_reqctx *rctx; | ||
263 | unsigned int key32; | 126 | unsigned int key32; |
264 | int i, err; | 127 | int i, err; |
265 | u32 val; | 128 | u32 val; |
@@ -270,7 +133,11 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd) | |||
270 | 133 | ||
271 | key32 = dd->ctx->keylen / sizeof(u32); | 134 | key32 = dd->ctx->keylen / sizeof(u32); |
272 | 135 | ||
273 | /* it seems a key should always be set even if it has not changed */ | 136 | /* RESET the key as previous HASH keys should not get affected*/ |
137 | if (dd->flags & FLAGS_GCM) | ||
138 | for (i = 0; i < 0x40; i = i + 4) | ||
139 | omap_aes_write(dd, i, 0x0); | ||
140 | |||
274 | for (i = 0; i < key32; i++) { | 141 | for (i = 0; i < key32; i++) { |
275 | omap_aes_write(dd, AES_REG_KEY(dd, i), | 142 | omap_aes_write(dd, AES_REG_KEY(dd, i), |
276 | __le32_to_cpu(dd->ctx->key[i])); | 143 | __le32_to_cpu(dd->ctx->key[i])); |
@@ -279,12 +146,21 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd) | |||
279 | if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info) | 146 | if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info) |
280 | omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4); | 147 | omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4); |
281 | 148 | ||
149 | if ((dd->flags & (FLAGS_GCM)) && dd->aead_req->iv) { | ||
150 | rctx = aead_request_ctx(dd->aead_req); | ||
151 | omap_aes_write_n(dd, AES_REG_IV(dd, 0), (u32 *)rctx->iv, 4); | ||
152 | } | ||
153 | |||
282 | val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3); | 154 | val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3); |
283 | if (dd->flags & FLAGS_CBC) | 155 | if (dd->flags & FLAGS_CBC) |
284 | val |= AES_REG_CTRL_CBC; | 156 | val |= AES_REG_CTRL_CBC; |
285 | if (dd->flags & FLAGS_CTR) | 157 | |
158 | if (dd->flags & (FLAGS_CTR | FLAGS_GCM)) | ||
286 | val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128; | 159 | val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128; |
287 | 160 | ||
161 | if (dd->flags & FLAGS_GCM) | ||
162 | val |= AES_REG_CTRL_GCM; | ||
163 | |||
288 | if (dd->flags & FLAGS_ENCRYPT) | 164 | if (dd->flags & FLAGS_ENCRYPT) |
289 | val |= AES_REG_CTRL_DIRECTION; | 165 | val |= AES_REG_CTRL_DIRECTION; |
290 | 166 | ||
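
The control word assembled in omap_aes_write_ctrl() packs the key size, chaining mode, GCM enable and direction bits using the FLD_MASK()/FLD_VAL() helpers that move into the new omap-aes.h below (the OMAP TRM gives fields as high:low bit numbers). A userspace sketch of what those helpers compute:

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as the driver's FLD_MASK/FLD_VAL helpers. */
    #define FLD_MASK(start, end)     (((1u << ((start) - (end) + 1)) - 1) << (end))
    #define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))

    int main(void)
    {
            /* The key-size field lives in bits 4:3; (keylen >> 3) - 1 is
             * 1 for AES-128, 2 for AES-192 and 3 for AES-256. */
            uint32_t aes128 = FLD_VAL(1u, 4, 3);
            uint32_t aes256 = FLD_VAL(3u, 4, 3);

            printf("mask 4:3 = 0x%02x\n", FLD_MASK(4, 3));  /* 0x18 */
            printf("AES-128  = 0x%02x\n", aes128);          /* 0x08 */
            printf("AES-256  = 0x%02x\n", aes256);          /* 0x18 */
            return 0;
    }
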
@@ -315,6 +191,8 @@ static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length) | |||
315 | { | 191 | { |
316 | omap_aes_write(dd, AES_REG_LENGTH_N(0), length); | 192 | omap_aes_write(dd, AES_REG_LENGTH_N(0), length); |
317 | omap_aes_write(dd, AES_REG_LENGTH_N(1), 0); | 193 | omap_aes_write(dd, AES_REG_LENGTH_N(1), 0); |
194 | if (dd->flags & FLAGS_GCM) | ||
195 | omap_aes_write(dd, AES_REG_A_LEN, dd->assoc_len); | ||
318 | 196 | ||
319 | omap_aes_dma_trigger_omap2(dd, length); | 197 | omap_aes_dma_trigger_omap2(dd, length); |
320 | } | 198 | } |
@@ -329,14 +207,14 @@ static void omap_aes_dma_stop(struct omap_aes_dev *dd) | |||
329 | omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask); | 207 | omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask); |
330 | } | 208 | } |
331 | 209 | ||
332 | static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx) | 210 | struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_reqctx *rctx) |
333 | { | 211 | { |
334 | struct omap_aes_dev *dd; | 212 | struct omap_aes_dev *dd; |
335 | 213 | ||
336 | spin_lock_bh(&list_lock); | 214 | spin_lock_bh(&list_lock); |
337 | dd = list_first_entry(&dev_list, struct omap_aes_dev, list); | 215 | dd = list_first_entry(&dev_list, struct omap_aes_dev, list); |
338 | list_move_tail(&dd->list, &dev_list); | 216 | list_move_tail(&dd->list, &dev_list); |
339 | ctx->dd = dd; | 217 | rctx->dd = dd; |
340 | spin_unlock_bh(&list_lock); | 218 | spin_unlock_bh(&list_lock); |
341 | 219 | ||
342 | return dd; | 220 | return dd; |
@@ -387,26 +265,11 @@ static void omap_aes_dma_cleanup(struct omap_aes_dev *dd) | |||
387 | dma_release_channel(dd->dma_lch_in); | 265 | dma_release_channel(dd->dma_lch_in); |
388 | } | 266 | } |
389 | 267 | ||
390 | static void sg_copy_buf(void *buf, struct scatterlist *sg, | 268 | static int omap_aes_crypt_dma(struct omap_aes_dev *dd, |
391 | unsigned int start, unsigned int nbytes, int out) | 269 | struct scatterlist *in_sg, |
270 | struct scatterlist *out_sg, | ||
271 | int in_sg_len, int out_sg_len) | ||
392 | { | 272 | { |
393 | struct scatter_walk walk; | ||
394 | |||
395 | if (!nbytes) | ||
396 | return; | ||
397 | |||
398 | scatterwalk_start(&walk, sg); | ||
399 | scatterwalk_advance(&walk, start); | ||
400 | scatterwalk_copychunks(buf, &walk, nbytes, out); | ||
401 | scatterwalk_done(&walk, out, 0); | ||
402 | } | ||
403 | |||
404 | static int omap_aes_crypt_dma(struct crypto_tfm *tfm, | ||
405 | struct scatterlist *in_sg, struct scatterlist *out_sg, | ||
406 | int in_sg_len, int out_sg_len) | ||
407 | { | ||
408 | struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); | ||
409 | struct omap_aes_dev *dd = ctx->dd; | ||
410 | struct dma_async_tx_descriptor *tx_in, *tx_out; | 273 | struct dma_async_tx_descriptor *tx_in, *tx_out; |
411 | struct dma_slave_config cfg; | 274 | struct dma_slave_config cfg; |
412 | int ret; | 275 | int ret; |
@@ -467,7 +330,10 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, | |||
467 | return -EINVAL; | 330 | return -EINVAL; |
468 | } | 331 | } |
469 | 332 | ||
470 | tx_out->callback = omap_aes_dma_out_callback; | 333 | if (dd->flags & FLAGS_GCM) |
334 | tx_out->callback = omap_aes_gcm_dma_out_callback; | ||
335 | else | ||
336 | tx_out->callback = omap_aes_dma_out_callback; | ||
471 | tx_out->callback_param = dd; | 337 | tx_out->callback_param = dd; |
472 | 338 | ||
473 | dmaengine_submit(tx_in); | 339 | dmaengine_submit(tx_in); |
@@ -482,10 +348,8 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, | |||
482 | return 0; | 348 | return 0; |
483 | } | 349 | } |
484 | 350 | ||
485 | static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) | 351 | int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) |
486 | { | 352 | { |
487 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm( | ||
488 | crypto_ablkcipher_reqtfm(dd->req)); | ||
489 | int err; | 353 | int err; |
490 | 354 | ||
491 | pr_debug("total: %d\n", dd->total); | 355 | pr_debug("total: %d\n", dd->total); |
@@ -506,7 +370,7 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) | |||
506 | } | 370 | } |
507 | } | 371 | } |
508 | 372 | ||
509 | err = omap_aes_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len, | 373 | err = omap_aes_crypt_dma(dd, dd->in_sg, dd->out_sg, dd->in_sg_len, |
510 | dd->out_sg_len); | 374 | dd->out_sg_len); |
511 | if (err && !dd->pio_only) { | 375 | if (err && !dd->pio_only) { |
512 | dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); | 376 | dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE); |
@@ -529,7 +393,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) | |||
529 | pm_runtime_put_autosuspend(dd->dev); | 393 | pm_runtime_put_autosuspend(dd->dev); |
530 | } | 394 | } |
531 | 395 | ||
532 | static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) | 396 | int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) |
533 | { | 397 | { |
534 | pr_debug("total: %d\n", dd->total); | 398 | pr_debug("total: %d\n", dd->total); |
535 | 399 | ||
@@ -539,62 +403,6 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) | |||
539 | return 0; | 403 | return 0; |
540 | } | 404 | } |
541 | 405 | ||
542 | static int omap_aes_check_aligned(struct scatterlist *sg, int total) | ||
543 | { | ||
544 | int len = 0; | ||
545 | |||
546 | if (!IS_ALIGNED(total, AES_BLOCK_SIZE)) | ||
547 | return -EINVAL; | ||
548 | |||
549 | while (sg) { | ||
550 | if (!IS_ALIGNED(sg->offset, 4)) | ||
551 | return -1; | ||
552 | if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE)) | ||
553 | return -1; | ||
554 | |||
555 | len += sg->length; | ||
556 | sg = sg_next(sg); | ||
557 | } | ||
558 | |||
559 | if (len != total) | ||
560 | return -1; | ||
561 | |||
562 | return 0; | ||
563 | } | ||
564 | |||
565 | static int omap_aes_copy_sgs(struct omap_aes_dev *dd) | ||
566 | { | ||
567 | void *buf_in, *buf_out; | ||
568 | int pages, total; | ||
569 | |||
570 | total = ALIGN(dd->total, AES_BLOCK_SIZE); | ||
571 | pages = get_order(total); | ||
572 | |||
573 | buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages); | ||
574 | buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages); | ||
575 | |||
576 | if (!buf_in || !buf_out) { | ||
577 | pr_err("Couldn't allocated pages for unaligned cases.\n"); | ||
578 | return -1; | ||
579 | } | ||
580 | |||
581 | dd->orig_out = dd->out_sg; | ||
582 | |||
583 | sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0); | ||
584 | |||
585 | sg_init_table(&dd->in_sgl, 1); | ||
586 | sg_set_buf(&dd->in_sgl, buf_in, total); | ||
587 | dd->in_sg = &dd->in_sgl; | ||
588 | dd->in_sg_len = 1; | ||
589 | |||
590 | sg_init_table(&dd->out_sgl, 1); | ||
591 | sg_set_buf(&dd->out_sgl, buf_out, total); | ||
592 | dd->out_sg = &dd->out_sgl; | ||
593 | dd->out_sg_len = 1; | ||
594 | |||
595 | return 0; | ||
596 | } | ||
597 | |||
598 | static int omap_aes_handle_queue(struct omap_aes_dev *dd, | 406 | static int omap_aes_handle_queue(struct omap_aes_dev *dd, |
599 | struct ablkcipher_request *req) | 407 | struct ablkcipher_request *req) |
600 | { | 408 | { |
@@ -609,8 +417,10 @@ static int omap_aes_prepare_req(struct crypto_engine *engine, | |||
609 | { | 417 | { |
610 | struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx( | 418 | struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx( |
611 | crypto_ablkcipher_reqtfm(req)); | 419 | crypto_ablkcipher_reqtfm(req)); |
612 | struct omap_aes_dev *dd = ctx->dd; | 420 | struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req); |
613 | struct omap_aes_reqctx *rctx; | 421 | struct omap_aes_dev *dd = rctx->dd; |
422 | int ret; | ||
423 | u16 flags; | ||
614 | 424 | ||
615 | if (!dd) | 425 | if (!dd) |
616 | return -ENODEV; | 426 | return -ENODEV; |
@@ -621,6 +431,23 @@ static int omap_aes_prepare_req(struct crypto_engine *engine, | |||
621 | dd->total_save = req->nbytes; | 431 | dd->total_save = req->nbytes; |
622 | dd->in_sg = req->src; | 432 | dd->in_sg = req->src; |
623 | dd->out_sg = req->dst; | 433 | dd->out_sg = req->dst; |
434 | dd->orig_out = req->dst; | ||
435 | |||
436 | flags = OMAP_CRYPTO_COPY_DATA; | ||
437 | if (req->src == req->dst) | ||
438 | flags |= OMAP_CRYPTO_FORCE_COPY; | ||
439 | |||
440 | ret = omap_crypto_align_sg(&dd->in_sg, dd->total, AES_BLOCK_SIZE, | ||
441 | dd->in_sgl, flags, | ||
442 | FLAGS_IN_DATA_ST_SHIFT, &dd->flags); | ||
443 | if (ret) | ||
444 | return ret; | ||
445 | |||
446 | ret = omap_crypto_align_sg(&dd->out_sg, dd->total, AES_BLOCK_SIZE, | ||
447 | &dd->out_sgl, 0, | ||
448 | FLAGS_OUT_DATA_ST_SHIFT, &dd->flags); | ||
449 | if (ret) | ||
450 | return ret; | ||
624 | 451 | ||
625 | dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total); | 452 | dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total); |
626 | if (dd->in_sg_len < 0) | 453 | if (dd->in_sg_len < 0) |
@@ -630,22 +457,11 @@ static int omap_aes_prepare_req(struct crypto_engine *engine, | |||
630 | if (dd->out_sg_len < 0) | 457 | if (dd->out_sg_len < 0) |
631 | return dd->out_sg_len; | 458 | return dd->out_sg_len; |
632 | 459 | ||
633 | if (omap_aes_check_aligned(dd->in_sg, dd->total) || | ||
634 | omap_aes_check_aligned(dd->out_sg, dd->total)) { | ||
635 | if (omap_aes_copy_sgs(dd)) | ||
636 | pr_err("Failed to copy SGs for unaligned cases\n"); | ||
637 | dd->sgs_copied = 1; | ||
638 | } else { | ||
639 | dd->sgs_copied = 0; | ||
640 | } | ||
641 | |||
642 | rctx = ablkcipher_request_ctx(req); | ||
643 | ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); | ||
644 | rctx->mode &= FLAGS_MODE_MASK; | 460 | rctx->mode &= FLAGS_MODE_MASK; |
645 | dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode; | 461 | dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode; |
646 | 462 | ||
647 | dd->ctx = ctx; | 463 | dd->ctx = ctx; |
648 | ctx->dd = dd; | 464 | rctx->dd = dd; |
649 | 465 | ||
650 | return omap_aes_write_ctrl(dd); | 466 | return omap_aes_write_ctrl(dd); |
651 | } | 467 | } |
@@ -653,9 +469,8 @@ static int omap_aes_prepare_req(struct crypto_engine *engine, | |||
653 | static int omap_aes_crypt_req(struct crypto_engine *engine, | 469 | static int omap_aes_crypt_req(struct crypto_engine *engine, |
654 | struct ablkcipher_request *req) | 470 | struct ablkcipher_request *req) |
655 | { | 471 | { |
656 | struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx( | 472 | struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req); |
657 | crypto_ablkcipher_reqtfm(req)); | 473 | struct omap_aes_dev *dd = rctx->dd; |
658 | struct omap_aes_dev *dd = ctx->dd; | ||
659 | 474 | ||
660 | if (!dd) | 475 | if (!dd) |
661 | return -ENODEV; | 476 | return -ENODEV; |
@@ -666,8 +481,6 @@ static int omap_aes_crypt_req(struct crypto_engine *engine, | |||
666 | static void omap_aes_done_task(unsigned long data) | 481 | static void omap_aes_done_task(unsigned long data) |
667 | { | 482 | { |
668 | struct omap_aes_dev *dd = (struct omap_aes_dev *)data; | 483 | struct omap_aes_dev *dd = (struct omap_aes_dev *)data; |
669 | void *buf_in, *buf_out; | ||
670 | int pages, len; | ||
671 | 484 | ||
672 | pr_debug("enter done_task\n"); | 485 | pr_debug("enter done_task\n"); |
673 | 486 | ||
@@ -680,17 +493,11 @@ static void omap_aes_done_task(unsigned long data) | |||
680 | omap_aes_crypt_dma_stop(dd); | 493 | omap_aes_crypt_dma_stop(dd); |
681 | } | 494 | } |
682 | 495 | ||
683 | if (dd->sgs_copied) { | 496 | omap_crypto_cleanup(dd->in_sgl, NULL, 0, dd->total_save, |
684 | buf_in = sg_virt(&dd->in_sgl); | 497 | FLAGS_IN_DATA_ST_SHIFT, dd->flags); |
685 | buf_out = sg_virt(&dd->out_sgl); | ||
686 | 498 | ||
687 | sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1); | 499 | omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save, |
688 | 500 | FLAGS_OUT_DATA_ST_SHIFT, dd->flags); | |
689 | len = ALIGN(dd->total_save, AES_BLOCK_SIZE); | ||
690 | pages = get_order(len); | ||
691 | free_pages((unsigned long)buf_in, pages); | ||
692 | free_pages((unsigned long)buf_out, pages); | ||
693 | } | ||
694 | 501 | ||
695 | omap_aes_finish_req(dd, 0); | 502 | omap_aes_finish_req(dd, 0); |
696 | 503 | ||
@@ -726,7 +533,7 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode) | |||
726 | skcipher_request_zero(subreq); | 533 | skcipher_request_zero(subreq); |
727 | return ret; | 534 | return ret; |
728 | } | 535 | } |
729 | dd = omap_aes_find_dev(ctx); | 536 | dd = omap_aes_find_dev(rctx); |
730 | if (!dd) | 537 | if (!dd) |
731 | return -ENODEV; | 538 | return -ENODEV; |
732 | 539 | ||
@@ -811,6 +618,36 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm) | |||
811 | return 0; | 618 | return 0; |
812 | } | 619 | } |
813 | 620 | ||
621 | static int omap_aes_gcm_cra_init(struct crypto_aead *tfm) | ||
622 | { | ||
623 | struct omap_aes_dev *dd = NULL; | ||
624 | struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm); | ||
625 | int err; | ||
626 | |||
627 | /* Find AES device, currently picks the first device */ | ||
628 | spin_lock_bh(&list_lock); | ||
629 | list_for_each_entry(dd, &dev_list, list) { | ||
630 | break; | ||
631 | } | ||
632 | spin_unlock_bh(&list_lock); | ||
633 | |||
634 | err = pm_runtime_get_sync(dd->dev); | ||
635 | if (err < 0) { | ||
636 | dev_err(dd->dev, "%s: failed to get_sync(%d)\n", | ||
637 | __func__, err); | ||
638 | return err; | ||
639 | } | ||
640 | |||
641 | tfm->reqsize = sizeof(struct omap_aes_reqctx); | ||
642 | ctx->ctr = crypto_alloc_skcipher("ecb(aes)", 0, 0); | ||
643 | if (IS_ERR(ctx->ctr)) { | ||
644 | pr_warn("could not load aes driver for encrypting IV\n"); | ||
645 | return PTR_ERR(ctx->ctr); | ||
646 | } | ||
647 | |||
648 | return 0; | ||
649 | } | ||
650 | |||
814 | static void omap_aes_cra_exit(struct crypto_tfm *tfm) | 651 | static void omap_aes_cra_exit(struct crypto_tfm *tfm) |
815 | { | 652 | { |
816 | struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); | 653 | struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); |
@@ -821,6 +658,16 @@ static void omap_aes_cra_exit(struct crypto_tfm *tfm) | |||
821 | ctx->fallback = NULL; | 658 | ctx->fallback = NULL; |
822 | } | 659 | } |
823 | 660 | ||
661 | static void omap_aes_gcm_cra_exit(struct crypto_aead *tfm) | ||
662 | { | ||
663 | struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm); | ||
664 | |||
665 | omap_aes_cra_exit(crypto_aead_tfm(tfm)); | ||
666 | |||
667 | if (ctx->ctr) | ||
668 | crypto_free_skcipher(ctx->ctr); | ||
669 | } | ||
670 | |||
824 | /* ********************** ALGS ************************************ */ | 671 | /* ********************** ALGS ************************************ */ |
825 | 672 | ||
826 | static struct crypto_alg algs_ecb_cbc[] = { | 673 | static struct crypto_alg algs_ecb_cbc[] = { |
@@ -905,6 +752,54 @@ static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = { | |||
905 | }, | 752 | }, |
906 | }; | 753 | }; |
907 | 754 | ||
755 | static struct aead_alg algs_aead_gcm[] = { | ||
756 | { | ||
757 | .base = { | ||
758 | .cra_name = "gcm(aes)", | ||
759 | .cra_driver_name = "gcm-aes-omap", | ||
760 | .cra_priority = 300, | ||
761 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
762 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
763 | .cra_blocksize = 1, | ||
764 | .cra_ctxsize = sizeof(struct omap_aes_ctx), | ||
765 | .cra_alignmask = 0xf, | ||
766 | .cra_module = THIS_MODULE, | ||
767 | }, | ||
768 | .init = omap_aes_gcm_cra_init, | ||
769 | .exit = omap_aes_gcm_cra_exit, | ||
770 | .ivsize = 12, | ||
771 | .maxauthsize = AES_BLOCK_SIZE, | ||
772 | .setkey = omap_aes_gcm_setkey, | ||
773 | .encrypt = omap_aes_gcm_encrypt, | ||
774 | .decrypt = omap_aes_gcm_decrypt, | ||
775 | }, | ||
776 | { | ||
777 | .base = { | ||
778 | .cra_name = "rfc4106(gcm(aes))", | ||
779 | .cra_driver_name = "rfc4106-gcm-aes-omap", | ||
780 | .cra_priority = 300, | ||
781 | .cra_flags = CRYPTO_ALG_ASYNC | | ||
782 | CRYPTO_ALG_KERN_DRIVER_ONLY, | ||
783 | .cra_blocksize = 1, | ||
784 | .cra_ctxsize = sizeof(struct omap_aes_ctx), | ||
785 | .cra_alignmask = 0xf, | ||
786 | .cra_module = THIS_MODULE, | ||
787 | }, | ||
788 | .init = omap_aes_gcm_cra_init, | ||
789 | .exit = omap_aes_gcm_cra_exit, | ||
790 | .maxauthsize = AES_BLOCK_SIZE, | ||
791 | .ivsize = 8, | ||
792 | .setkey = omap_aes_4106gcm_setkey, | ||
793 | .encrypt = omap_aes_4106gcm_encrypt, | ||
794 | .decrypt = omap_aes_4106gcm_decrypt, | ||
795 | }, | ||
796 | }; | ||
797 | |||
798 | static struct omap_aes_aead_algs omap_aes_aead_info = { | ||
799 | .algs_list = algs_aead_gcm, | ||
800 | .size = ARRAY_SIZE(algs_aead_gcm), | ||
801 | }; | ||
802 | |||
908 | static const struct omap_aes_pdata omap_aes_pdata_omap2 = { | 803 | static const struct omap_aes_pdata omap_aes_pdata_omap2 = { |
909 | .algs_info = omap_aes_algs_info_ecb_cbc, | 804 | .algs_info = omap_aes_algs_info_ecb_cbc, |
910 | .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc), | 805 | .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc), |
@@ -958,6 +853,7 @@ static const struct omap_aes_pdata omap_aes_pdata_omap3 = { | |||
958 | static const struct omap_aes_pdata omap_aes_pdata_omap4 = { | 853 | static const struct omap_aes_pdata omap_aes_pdata_omap4 = { |
959 | .algs_info = omap_aes_algs_info_ecb_cbc_ctr, | 854 | .algs_info = omap_aes_algs_info_ecb_cbc_ctr, |
960 | .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr), | 855 | .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr), |
856 | .aead_algs_info = &omap_aes_aead_info, | ||
961 | .trigger = omap_aes_dma_trigger_omap4, | 857 | .trigger = omap_aes_dma_trigger_omap4, |
962 | .key_ofs = 0x3c, | 858 | .key_ofs = 0x3c, |
963 | .iv_ofs = 0x40, | 859 | .iv_ofs = 0x40, |
@@ -1140,6 +1036,7 @@ static int omap_aes_probe(struct platform_device *pdev) | |||
1140 | struct device *dev = &pdev->dev; | 1036 | struct device *dev = &pdev->dev; |
1141 | struct omap_aes_dev *dd; | 1037 | struct omap_aes_dev *dd; |
1142 | struct crypto_alg *algp; | 1038 | struct crypto_alg *algp; |
1039 | struct aead_alg *aalg; | ||
1143 | struct resource res; | 1040 | struct resource res; |
1144 | int err = -ENOMEM, i, j, irq = -1; | 1041 | int err = -ENOMEM, i, j, irq = -1; |
1145 | u32 reg; | 1042 | u32 reg; |
@@ -1152,6 +1049,8 @@ static int omap_aes_probe(struct platform_device *pdev) | |||
1152 | dd->dev = dev; | 1049 | dd->dev = dev; |
1153 | platform_set_drvdata(pdev, dd); | 1050 | platform_set_drvdata(pdev, dd); |
1154 | 1051 | ||
1052 | aead_init_queue(&dd->aead_queue, OMAP_AES_QUEUE_LENGTH); | ||
1053 | |||
1155 | err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) : | 1054 | err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) : |
1156 | omap_aes_get_res_pdev(dd, pdev, &res); | 1055 | omap_aes_get_res_pdev(dd, pdev, &res); |
1157 | if (err) | 1056 | if (err) |
@@ -1207,6 +1106,7 @@ static int omap_aes_probe(struct platform_device *pdev) | |||
1207 | } | 1106 | } |
1208 | } | 1107 | } |
1209 | 1108 | ||
1109 | spin_lock_init(&dd->lock); | ||
1210 | 1110 | ||
1211 | INIT_LIST_HEAD(&dd->list); | 1111 | INIT_LIST_HEAD(&dd->list); |
1212 | spin_lock(&list_lock); | 1112 | spin_lock(&list_lock); |
@@ -1243,7 +1143,29 @@ static int omap_aes_probe(struct platform_device *pdev) | |||
1243 | } | 1143 | } |
1244 | } | 1144 | } |
1245 | 1145 | ||
1146 | if (dd->pdata->aead_algs_info && | ||
1147 | !dd->pdata->aead_algs_info->registered) { | ||
1148 | for (i = 0; i < dd->pdata->aead_algs_info->size; i++) { | ||
1149 | aalg = &dd->pdata->aead_algs_info->algs_list[i]; | ||
1150 | algp = &aalg->base; | ||
1151 | |||
1152 | pr_debug("reg alg: %s\n", algp->cra_name); | ||
1153 | INIT_LIST_HEAD(&algp->cra_list); | ||
1154 | |||
1155 | err = crypto_register_aead(aalg); | ||
1156 | if (err) | ||
1157 | goto err_aead_algs; | ||
1158 | |||
1159 | dd->pdata->aead_algs_info->registered++; | ||
1160 | } | ||
1161 | } | ||
1162 | |||
1246 | return 0; | 1163 | return 0; |
1164 | err_aead_algs: | ||
1165 | for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) { | ||
1166 | aalg = &dd->pdata->aead_algs_info->algs_list[i]; | ||
1167 | crypto_unregister_aead(aalg); | ||
1168 | } | ||
1247 | err_algs: | 1169 | err_algs: |
1248 | for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) | 1170 | for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) |
1249 | for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) | 1171 | for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) |
@@ -1268,6 +1190,7 @@ err_data: | |||
1268 | static int omap_aes_remove(struct platform_device *pdev) | 1190 | static int omap_aes_remove(struct platform_device *pdev) |
1269 | { | 1191 | { |
1270 | struct omap_aes_dev *dd = platform_get_drvdata(pdev); | 1192 | struct omap_aes_dev *dd = platform_get_drvdata(pdev); |
1193 | struct aead_alg *aalg; | ||
1271 | int i, j; | 1194 | int i, j; |
1272 | 1195 | ||
1273 | if (!dd) | 1196 | if (!dd) |
@@ -1282,7 +1205,13 @@ static int omap_aes_remove(struct platform_device *pdev) | |||
1282 | crypto_unregister_alg( | 1205 | crypto_unregister_alg( |
1283 | &dd->pdata->algs_info[i].algs_list[j]); | 1206 | &dd->pdata->algs_info[i].algs_list[j]); |
1284 | 1207 | ||
1208 | for (i = dd->pdata->aead_algs_info->size - 1; i >= 0; i--) { | ||
1209 | aalg = &dd->pdata->aead_algs_info->algs_list[i]; | ||
1210 | crypto_unregister_aead(aalg); | ||
1211 | } | ||
1212 | |||
1285 | crypto_engine_exit(dd->engine); | 1213 | crypto_engine_exit(dd->engine); |
1214 | |||
1286 | tasklet_kill(&dd->done_task); | 1215 | tasklet_kill(&dd->done_task); |
1287 | omap_aes_dma_cleanup(dd); | 1216 | omap_aes_dma_cleanup(dd); |
1288 | pm_runtime_disable(dd->dev); | 1217 | pm_runtime_disable(dd->dev); |
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h new file mode 100644 index 000000000000..8906342e2b9a --- /dev/null +++ b/drivers/crypto/omap-aes.h | |||
@@ -0,0 +1,214 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Support for OMAP AES HW ACCELERATOR defines | ||
5 | * | ||
6 | * Copyright (c) 2015 Texas Instruments Incorporated | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as published | ||
10 | * by the Free Software Foundation. | ||
11 | * | ||
12 | */ | ||
13 | #ifndef __OMAP_AES_H__ | ||
14 | #define __OMAP_AES_H__ | ||
15 | |||
16 | #define DST_MAXBURST 4 | ||
17 | #define DMA_MIN (DST_MAXBURST * sizeof(u32)) | ||
18 | |||
19 | #define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset) | ||
20 | |||
21 | /* | ||
22 | * OMAP TRM gives bitfields as start:end, where start is the higher bit | ||
23 | * number. For example 7:0 | ||
24 | */ | ||
25 | #define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) | ||
26 | #define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end)) | ||
27 | |||
28 | #define AES_REG_KEY(dd, x) ((dd)->pdata->key_ofs - \ | ||
29 | (((x) ^ 0x01) * 0x04)) | ||
30 | #define AES_REG_IV(dd, x) ((dd)->pdata->iv_ofs + ((x) * 0x04)) | ||
31 | |||
32 | #define AES_REG_CTRL(dd) ((dd)->pdata->ctrl_ofs) | ||
33 | #define AES_REG_CTRL_CONTEXT_READY BIT(31) | ||
34 | #define AES_REG_CTRL_CTR_WIDTH_MASK GENMASK(8, 7) | ||
35 | #define AES_REG_CTRL_CTR_WIDTH_32 0 | ||
36 | #define AES_REG_CTRL_CTR_WIDTH_64 BIT(7) | ||
37 | #define AES_REG_CTRL_CTR_WIDTH_96 BIT(8) | ||
38 | #define AES_REG_CTRL_CTR_WIDTH_128 GENMASK(8, 7) | ||
39 | #define AES_REG_CTRL_GCM GENMASK(17, 16) | ||
40 | #define AES_REG_CTRL_CTR BIT(6) | ||
41 | #define AES_REG_CTRL_CBC BIT(5) | ||
42 | #define AES_REG_CTRL_KEY_SIZE GENMASK(4, 3) | ||
43 | #define AES_REG_CTRL_DIRECTION BIT(2) | ||
44 | #define AES_REG_CTRL_INPUT_READY BIT(1) | ||
45 | #define AES_REG_CTRL_OUTPUT_READY BIT(0) | ||
46 | #define AES_REG_CTRL_MASK GENMASK(24, 2) | ||
47 | |||
48 | #define AES_REG_C_LEN_0 0x54 | ||
49 | #define AES_REG_C_LEN_1 0x58 | ||
50 | #define AES_REG_A_LEN 0x5C | ||
51 | |||
52 | #define AES_REG_DATA_N(dd, x) ((dd)->pdata->data_ofs + ((x) * 0x04)) | ||
53 | #define AES_REG_TAG_N(dd, x) (0x70 + ((x) * 0x04)) | ||
54 | |||
55 | #define AES_REG_REV(dd) ((dd)->pdata->rev_ofs) | ||
56 | |||
57 | #define AES_REG_MASK(dd) ((dd)->pdata->mask_ofs) | ||
58 | #define AES_REG_MASK_SIDLE BIT(6) | ||
59 | #define AES_REG_MASK_START BIT(5) | ||
60 | #define AES_REG_MASK_DMA_OUT_EN BIT(3) | ||
61 | #define AES_REG_MASK_DMA_IN_EN BIT(2) | ||
62 | #define AES_REG_MASK_SOFTRESET BIT(1) | ||
63 | #define AES_REG_AUTOIDLE BIT(0) | ||
64 | |||
65 | #define AES_REG_LENGTH_N(x) (0x54 + ((x) * 0x04)) | ||
66 | |||
67 | #define AES_REG_IRQ_STATUS(dd) ((dd)->pdata->irq_status_ofs) | ||
68 | #define AES_REG_IRQ_ENABLE(dd) ((dd)->pdata->irq_enable_ofs) | ||
69 | #define AES_REG_IRQ_DATA_IN BIT(1) | ||
70 | #define AES_REG_IRQ_DATA_OUT BIT(2) | ||
71 | #define DEFAULT_TIMEOUT (5 * HZ) | ||
72 | |||
73 | #define DEFAULT_AUTOSUSPEND_DELAY 1000 | ||
74 | |||
75 | #define FLAGS_MODE_MASK 0x001f | ||
76 | #define FLAGS_ENCRYPT BIT(0) | ||
77 | #define FLAGS_CBC BIT(1) | ||
78 | #define FLAGS_CTR BIT(2) | ||
79 | #define FLAGS_GCM BIT(3) | ||
80 | #define FLAGS_RFC4106_GCM BIT(4) | ||
81 | |||
82 | #define FLAGS_INIT BIT(5) | ||
83 | #define FLAGS_FAST BIT(6) | ||
84 | #define FLAGS_BUSY BIT(7) | ||
85 | |||
86 | #define FLAGS_IN_DATA_ST_SHIFT 8 | ||
87 | #define FLAGS_OUT_DATA_ST_SHIFT 10 | ||
88 | #define FLAGS_ASSOC_DATA_ST_SHIFT 12 | ||
89 | |||
90 | #define AES_BLOCK_WORDS (AES_BLOCK_SIZE >> 2) | ||
91 | |||
92 | struct omap_aes_gcm_result { | ||
93 | struct completion completion; | ||
94 | int err; | ||
95 | }; | ||
96 | |||
97 | struct omap_aes_ctx { | ||
98 | int keylen; | ||
99 | u32 key[AES_KEYSIZE_256 / sizeof(u32)]; | ||
100 | u8 nonce[4]; | ||
101 | struct crypto_skcipher *fallback; | ||
102 | struct crypto_skcipher *ctr; | ||
103 | }; | ||
104 | |||
105 | struct omap_aes_reqctx { | ||
106 | struct omap_aes_dev *dd; | ||
107 | unsigned long mode; | ||
108 | u8 iv[AES_BLOCK_SIZE]; | ||
109 | u32 auth_tag[AES_BLOCK_SIZE / sizeof(u32)]; | ||
110 | }; | ||
111 | |||
112 | #define OMAP_AES_QUEUE_LENGTH 1 | ||
113 | #define OMAP_AES_CACHE_SIZE 0 | ||
114 | |||
115 | struct omap_aes_algs_info { | ||
116 | struct crypto_alg *algs_list; | ||
117 | unsigned int size; | ||
118 | unsigned int registered; | ||
119 | }; | ||
120 | |||
121 | struct omap_aes_aead_algs { | ||
122 | struct aead_alg *algs_list; | ||
123 | unsigned int size; | ||
124 | unsigned int registered; | ||
125 | }; | ||
126 | |||
127 | struct omap_aes_pdata { | ||
128 | struct omap_aes_algs_info *algs_info; | ||
129 | unsigned int algs_info_size; | ||
130 | struct omap_aes_aead_algs *aead_algs_info; | ||
131 | |||
132 | void (*trigger)(struct omap_aes_dev *dd, int length); | ||
133 | |||
134 | u32 key_ofs; | ||
135 | u32 iv_ofs; | ||
136 | u32 ctrl_ofs; | ||
137 | u32 data_ofs; | ||
138 | u32 rev_ofs; | ||
139 | u32 mask_ofs; | ||
140 | u32 irq_enable_ofs; | ||
141 | u32 irq_status_ofs; | ||
142 | |||
143 | u32 dma_enable_in; | ||
144 | u32 dma_enable_out; | ||
145 | u32 dma_start; | ||
146 | |||
147 | u32 major_mask; | ||
148 | u32 major_shift; | ||
149 | u32 minor_mask; | ||
150 | u32 minor_shift; | ||
151 | }; | ||
152 | |||
153 | struct omap_aes_dev { | ||
154 | struct list_head list; | ||
155 | unsigned long phys_base; | ||
156 | void __iomem *io_base; | ||
157 | struct omap_aes_ctx *ctx; | ||
158 | struct device *dev; | ||
159 | unsigned long flags; | ||
160 | int err; | ||
161 | |||
162 | struct tasklet_struct done_task; | ||
163 | struct aead_queue aead_queue; | ||
164 | spinlock_t lock; | ||
165 | |||
166 | struct ablkcipher_request *req; | ||
167 | struct aead_request *aead_req; | ||
168 | struct crypto_engine *engine; | ||
169 | |||
170 | /* | ||
171 | * total is used by PIO mode for book keeping so introduce | ||
172 | * variable total_save as need it to calc page_order | ||
173 | */ | ||
174 | size_t total; | ||
175 | size_t total_save; | ||
176 | size_t assoc_len; | ||
177 | size_t authsize; | ||
178 | |||
179 | struct scatterlist *in_sg; | ||
180 | struct scatterlist *out_sg; | ||
181 | |||
182 | /* Buffers for copying for unaligned cases */ | ||
183 | struct scatterlist in_sgl[2]; | ||
184 | struct scatterlist out_sgl; | ||
185 | struct scatterlist *orig_out; | ||
186 | |||
187 | struct scatter_walk in_walk; | ||
188 | struct scatter_walk out_walk; | ||
189 | struct dma_chan *dma_lch_in; | ||
190 | struct dma_chan *dma_lch_out; | ||
191 | int in_sg_len; | ||
192 | int out_sg_len; | ||
193 | int pio_only; | ||
194 | const struct omap_aes_pdata *pdata; | ||
195 | }; | ||
196 | |||
197 | u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset); | ||
198 | void omap_aes_write(struct omap_aes_dev *dd, u32 offset, u32 value); | ||
199 | struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_reqctx *rctx); | ||
200 | int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, | ||
201 | unsigned int keylen); | ||
202 | int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key, | ||
203 | unsigned int keylen); | ||
204 | int omap_aes_gcm_encrypt(struct aead_request *req); | ||
205 | int omap_aes_gcm_decrypt(struct aead_request *req); | ||
206 | int omap_aes_4106gcm_encrypt(struct aead_request *req); | ||
207 | int omap_aes_4106gcm_decrypt(struct aead_request *req); | ||
208 | int omap_aes_write_ctrl(struct omap_aes_dev *dd); | ||
209 | int omap_aes_crypt_dma_start(struct omap_aes_dev *dd); | ||
210 | int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd); | ||
211 | void omap_aes_gcm_dma_out_callback(void *data); | ||
212 | void omap_aes_clear_copy_flags(struct omap_aes_dev *dd); | ||
213 | |||
214 | #endif | ||
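The new omap-aes.h header pulls the register layout and bitfield helpers out of omap-aes.c so the GCM code can share them. As a minimal sketch of how the FLD_MASK()/FLD_VAL() helpers and the CTRL bit definitions compose (an illustration under assumed names, not code from the patch):

/*
 * Illustration only: FLD_MASK(start, end) covers bits start..end per the
 * TRM "start:end" convention, and FLD_VAL(val, start, end) shifts val into
 * that field. The key-size encoding value is left as a parameter here.
 */
static inline u32 example_build_aes_ctrl(u32 key_size_code, bool encrypt, bool cbc)
{
	u32 val = 0;

	val |= FLD_VAL(key_size_code, 4, 3);	/* AES_REG_CTRL_KEY_SIZE is bits 4:3 */
	if (encrypt)
		val |= AES_REG_CTRL_DIRECTION;	/* bit 2 */
	if (cbc)
		val |= AES_REG_CTRL_CBC;	/* bit 5 */

	/* software only owns bits 24:2, per AES_REG_CTRL_MASK */
	return val & AES_REG_CTRL_MASK;
}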
diff --git a/drivers/crypto/omap-crypto.c b/drivers/crypto/omap-crypto.c new file mode 100644 index 000000000000..23e37779317e --- /dev/null +++ b/drivers/crypto/omap-crypto.c | |||
@@ -0,0 +1,184 @@ | |||
1 | /* | ||
2 | * OMAP Crypto driver common support routines. | ||
3 | * | ||
4 | * Copyright (c) 2017 Texas Instruments Incorporated | ||
5 | * Tero Kristo <t-kristo@ti.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as published | ||
9 | * by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/scatterlist.h> | ||
15 | #include <crypto/scatterwalk.h> | ||
16 | |||
17 | #include "omap-crypto.h" | ||
18 | |||
19 | static int omap_crypto_copy_sg_lists(int total, int bs, | ||
20 | struct scatterlist **sg, | ||
21 | struct scatterlist *new_sg, u16 flags) | ||
22 | { | ||
23 | int n = sg_nents(*sg); | ||
24 | struct scatterlist *tmp; | ||
25 | |||
26 | if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY)) { | ||
27 | new_sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL); | ||
28 | if (!new_sg) | ||
29 | return -ENOMEM; | ||
30 | |||
31 | sg_init_table(new_sg, n); | ||
32 | } | ||
33 | |||
34 | tmp = new_sg; | ||
35 | |||
36 | while (*sg && total) { | ||
37 | int len = (*sg)->length; | ||
38 | |||
39 | if (total < len) | ||
40 | len = total; | ||
41 | |||
42 | if (len > 0) { | ||
43 | total -= len; | ||
44 | sg_set_page(tmp, sg_page(*sg), len, (*sg)->offset); | ||
45 | if (total <= 0) | ||
46 | sg_mark_end(tmp); | ||
47 | tmp = sg_next(tmp); | ||
48 | } | ||
49 | |||
50 | *sg = sg_next(*sg); | ||
51 | } | ||
52 | |||
53 | *sg = new_sg; | ||
54 | |||
55 | return 0; | ||
56 | } | ||
57 | |||
58 | static int omap_crypto_copy_sgs(int total, int bs, struct scatterlist **sg, | ||
59 | struct scatterlist *new_sg, u16 flags) | ||
60 | { | ||
61 | void *buf; | ||
62 | int pages; | ||
63 | int new_len; | ||
64 | |||
65 | new_len = ALIGN(total, bs); | ||
66 | pages = get_order(new_len); | ||
67 | |||
68 | buf = (void *)__get_free_pages(GFP_ATOMIC, pages); | ||
69 | if (!buf) { | ||
70 | pr_err("%s: Couldn't allocate pages for unaligned cases.\n", | ||
71 | __func__); | ||
72 | return -ENOMEM; | ||
73 | } | ||
74 | |||
75 | if (flags & OMAP_CRYPTO_COPY_DATA) { | ||
76 | scatterwalk_map_and_copy(buf, *sg, 0, total, 0); | ||
77 | if (flags & OMAP_CRYPTO_ZERO_BUF) | ||
78 | memset(buf + total, 0, new_len - total); | ||
79 | } | ||
80 | |||
81 | if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY)) | ||
82 | sg_init_table(new_sg, 1); | ||
83 | |||
84 | sg_set_buf(new_sg, buf, new_len); | ||
85 | |||
86 | *sg = new_sg; | ||
87 | |||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | static int omap_crypto_check_sg(struct scatterlist *sg, int total, int bs, | ||
92 | u16 flags) | ||
93 | { | ||
94 | int len = 0; | ||
95 | int num_sg = 0; | ||
96 | |||
97 | if (!IS_ALIGNED(total, bs)) | ||
98 | return OMAP_CRYPTO_NOT_ALIGNED; | ||
99 | |||
100 | while (sg) { | ||
101 | num_sg++; | ||
102 | |||
103 | if (!IS_ALIGNED(sg->offset, 4)) | ||
104 | return OMAP_CRYPTO_NOT_ALIGNED; | ||
105 | if (!IS_ALIGNED(sg->length, bs)) | ||
106 | return OMAP_CRYPTO_NOT_ALIGNED; | ||
107 | |||
108 | len += sg->length; | ||
109 | sg = sg_next(sg); | ||
110 | |||
111 | if (len >= total) | ||
112 | break; | ||
113 | } | ||
114 | |||
115 | if ((flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY) && num_sg > 1) | ||
116 | return OMAP_CRYPTO_NOT_ALIGNED; | ||
117 | |||
118 | if (len != total) | ||
119 | return OMAP_CRYPTO_BAD_DATA_LENGTH; | ||
120 | |||
121 | return 0; | ||
122 | } | ||
123 | |||
124 | int omap_crypto_align_sg(struct scatterlist **sg, int total, int bs, | ||
125 | struct scatterlist *new_sg, u16 flags, | ||
126 | u8 flags_shift, unsigned long *dd_flags) | ||
127 | { | ||
128 | int ret; | ||
129 | |||
130 | *dd_flags &= ~(OMAP_CRYPTO_COPY_MASK << flags_shift); | ||
131 | |||
132 | if (flags & OMAP_CRYPTO_FORCE_COPY) | ||
133 | ret = OMAP_CRYPTO_NOT_ALIGNED; | ||
134 | else | ||
135 | ret = omap_crypto_check_sg(*sg, total, bs, flags); | ||
136 | |||
137 | if (ret == OMAP_CRYPTO_NOT_ALIGNED) { | ||
138 | ret = omap_crypto_copy_sgs(total, bs, sg, new_sg, flags); | ||
139 | if (ret) | ||
140 | return ret; | ||
141 | *dd_flags |= OMAP_CRYPTO_DATA_COPIED << flags_shift; | ||
142 | } else if (ret == OMAP_CRYPTO_BAD_DATA_LENGTH) { | ||
143 | ret = omap_crypto_copy_sg_lists(total, bs, sg, new_sg, flags); | ||
144 | if (ret) | ||
145 | return ret; | ||
146 | if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY)) | ||
147 | *dd_flags |= OMAP_CRYPTO_SG_COPIED << flags_shift; | ||
148 | } else if (flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY) { | ||
149 | sg_set_buf(new_sg, sg_virt(*sg), (*sg)->length); | ||
150 | } | ||
151 | |||
152 | return 0; | ||
153 | } | ||
154 | EXPORT_SYMBOL_GPL(omap_crypto_align_sg); | ||
155 | |||
156 | void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig, | ||
157 | int offset, int len, u8 flags_shift, | ||
158 | unsigned long flags) | ||
159 | { | ||
160 | void *buf; | ||
161 | int pages; | ||
162 | |||
163 | flags >>= flags_shift; | ||
164 | flags &= OMAP_CRYPTO_COPY_MASK; | ||
165 | |||
166 | if (!flags) | ||
167 | return; | ||
168 | |||
169 | buf = sg_virt(sg); | ||
170 | pages = get_order(len); | ||
171 | |||
172 | if (orig && (flags & OMAP_CRYPTO_COPY_MASK)) | ||
173 | scatterwalk_map_and_copy(buf, orig, offset, len, 1); | ||
174 | |||
175 | if (flags & OMAP_CRYPTO_DATA_COPIED) | ||
176 | free_pages((unsigned long)buf, pages); | ||
177 | else if (flags & OMAP_CRYPTO_SG_COPIED) | ||
178 | kfree(sg); | ||
179 | } | ||
180 | EXPORT_SYMBOL_GPL(omap_crypto_cleanup); | ||
181 | |||
182 | MODULE_DESCRIPTION("OMAP crypto support library."); | ||
183 | MODULE_LICENSE("GPL v2"); | ||
184 | MODULE_AUTHOR("Tero Kristo <t-kristo@ti.com>"); | ||
diff --git a/drivers/crypto/omap-crypto.h b/drivers/crypto/omap-crypto.h new file mode 100644 index 000000000000..36a230eb87af --- /dev/null +++ b/drivers/crypto/omap-crypto.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * OMAP Crypto driver common support routines. | ||
3 | * | ||
4 | * Copyright (c) 2017 Texas Instruments Incorporated | ||
5 | * Tero Kristo <t-kristo@ti.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as published | ||
9 | * by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #ifndef __CRYPTO_OMAP_CRYPTO_H | ||
13 | #define __CRYPTO_OMAP_CRYPTO_H | ||
14 | |||
15 | enum { | ||
16 | OMAP_CRYPTO_NOT_ALIGNED = 1, | ||
17 | OMAP_CRYPTO_BAD_DATA_LENGTH, | ||
18 | }; | ||
19 | |||
20 | #define OMAP_CRYPTO_DATA_COPIED BIT(0) | ||
21 | #define OMAP_CRYPTO_SG_COPIED BIT(1) | ||
22 | |||
23 | #define OMAP_CRYPTO_COPY_MASK 0x3 | ||
24 | |||
25 | #define OMAP_CRYPTO_COPY_DATA BIT(0) | ||
26 | #define OMAP_CRYPTO_FORCE_COPY BIT(1) | ||
27 | #define OMAP_CRYPTO_ZERO_BUF BIT(2) | ||
28 | #define OMAP_CRYPTO_FORCE_SINGLE_ENTRY BIT(3) | ||
29 | |||
30 | int omap_crypto_align_sg(struct scatterlist **sg, int total, int bs, | ||
31 | struct scatterlist *new_sg, u16 flags, | ||
32 | u8 flags_shift, unsigned long *dd_flags); | ||
33 | void omap_crypto_cleanup(struct scatterlist *sg, struct scatterlist *orig, | ||
34 | int offset, int len, u8 flags_shift, | ||
35 | unsigned long flags); | ||
36 | |||
37 | #endif | ||
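omap-crypto.h exposes only two entry points: omap_crypto_align_sg() bounces or re-lists a scatterlist that the hardware cannot consume directly, and omap_crypto_cleanup() copies the result back and frees any bounce pages. A hedged usage sketch, mirroring the omap-des conversion further down (the dd/req field names and FLAGS_*_ST_SHIFT values are taken from that driver and assumed here):

static int example_prepare(struct omap_des_dev *dd,
			   struct ablkcipher_request *req)
{
	u16 flags = OMAP_CRYPTO_COPY_DATA;
	int ret;

	/* in-place requests must bounce, or the output would clobber the input */
	if (req->src == req->dst)
		flags |= OMAP_CRYPTO_FORCE_COPY;

	dd->in_sg = req->src;
	ret = omap_crypto_align_sg(&dd->in_sg, dd->total, DES_BLOCK_SIZE,
				   &dd->in_sgl, flags,
				   FLAGS_IN_DATA_ST_SHIFT, &dd->flags);
	if (ret)
		return ret;

	/* ... DMA or PIO processing of dd->in_sg ... */
	return 0;
}

static void example_finish(struct omap_des_dev *dd)
{
	/* copies bounce-buffer output back to the original sg and frees pages */
	omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save,
			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);
}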
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index a6f65532fd16..0bcab00e0ff5 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c | |||
@@ -41,6 +41,8 @@ | |||
41 | #include <crypto/algapi.h> | 41 | #include <crypto/algapi.h> |
42 | #include <crypto/engine.h> | 42 | #include <crypto/engine.h> |
43 | 43 | ||
44 | #include "omap-crypto.h" | ||
45 | |||
44 | #define DST_MAXBURST 2 | 46 | #define DST_MAXBURST 2 |
45 | 47 | ||
46 | #define DES_BLOCK_WORDS (DES_BLOCK_SIZE >> 2) | 48 | #define DES_BLOCK_WORDS (DES_BLOCK_SIZE >> 2) |
@@ -78,6 +80,11 @@ | |||
78 | #define FLAGS_INIT BIT(4) | 80 | #define FLAGS_INIT BIT(4) |
79 | #define FLAGS_BUSY BIT(6) | 81 | #define FLAGS_BUSY BIT(6) |
80 | 82 | ||
83 | #define DEFAULT_AUTOSUSPEND_DELAY 1000 | ||
84 | |||
85 | #define FLAGS_IN_DATA_ST_SHIFT 8 | ||
86 | #define FLAGS_OUT_DATA_ST_SHIFT 10 | ||
87 | |||
81 | struct omap_des_ctx { | 88 | struct omap_des_ctx { |
82 | struct omap_des_dev *dd; | 89 | struct omap_des_dev *dd; |
83 | 90 | ||
@@ -151,7 +158,6 @@ struct omap_des_dev { | |||
151 | struct scatterlist in_sgl; | 158 | struct scatterlist in_sgl; |
152 | struct scatterlist out_sgl; | 159 | struct scatterlist out_sgl; |
153 | struct scatterlist *orig_out; | 160 | struct scatterlist *orig_out; |
154 | int sgs_copied; | ||
155 | 161 | ||
156 | struct scatter_walk in_walk; | 162 | struct scatter_walk in_walk; |
157 | struct scatter_walk out_walk; | 163 | struct scatter_walk out_walk; |
@@ -370,20 +376,6 @@ static void omap_des_dma_cleanup(struct omap_des_dev *dd) | |||
370 | dma_release_channel(dd->dma_lch_in); | 376 | dma_release_channel(dd->dma_lch_in); |
371 | } | 377 | } |
372 | 378 | ||
373 | static void sg_copy_buf(void *buf, struct scatterlist *sg, | ||
374 | unsigned int start, unsigned int nbytes, int out) | ||
375 | { | ||
376 | struct scatter_walk walk; | ||
377 | |||
378 | if (!nbytes) | ||
379 | return; | ||
380 | |||
381 | scatterwalk_start(&walk, sg); | ||
382 | scatterwalk_advance(&walk, start); | ||
383 | scatterwalk_copychunks(buf, &walk, nbytes, out); | ||
384 | scatterwalk_done(&walk, out, 0); | ||
385 | } | ||
386 | |||
387 | static int omap_des_crypt_dma(struct crypto_tfm *tfm, | 379 | static int omap_des_crypt_dma(struct crypto_tfm *tfm, |
388 | struct scatterlist *in_sg, struct scatterlist *out_sg, | 380 | struct scatterlist *in_sg, struct scatterlist *out_sg, |
389 | int in_sg_len, int out_sg_len) | 381 | int in_sg_len, int out_sg_len) |
@@ -506,8 +498,10 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err) | |||
506 | 498 | ||
507 | pr_debug("err: %d\n", err); | 499 | pr_debug("err: %d\n", err); |
508 | 500 | ||
509 | pm_runtime_put(dd->dev); | ||
510 | crypto_finalize_cipher_request(dd->engine, req, err); | 501 | crypto_finalize_cipher_request(dd->engine, req, err); |
502 | |||
503 | pm_runtime_mark_last_busy(dd->dev); | ||
504 | pm_runtime_put_autosuspend(dd->dev); | ||
511 | } | 505 | } |
512 | 506 | ||
513 | static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) | 507 | static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) |
@@ -522,55 +516,6 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) | |||
522 | return 0; | 516 | return 0; |
523 | } | 517 | } |
524 | 518 | ||
525 | static int omap_des_copy_needed(struct scatterlist *sg) | ||
526 | { | ||
527 | while (sg) { | ||
528 | if (!IS_ALIGNED(sg->offset, 4)) | ||
529 | return -1; | ||
530 | if (!IS_ALIGNED(sg->length, DES_BLOCK_SIZE)) | ||
531 | return -1; | ||
532 | sg = sg_next(sg); | ||
533 | } | ||
534 | return 0; | ||
535 | } | ||
536 | |||
537 | static int omap_des_copy_sgs(struct omap_des_dev *dd) | ||
538 | { | ||
539 | void *buf_in, *buf_out; | ||
540 | int pages; | ||
541 | |||
542 | pages = dd->total >> PAGE_SHIFT; | ||
543 | |||
544 | if (dd->total & (PAGE_SIZE-1)) | ||
545 | pages++; | ||
546 | |||
547 | BUG_ON(!pages); | ||
548 | |||
549 | buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages); | ||
550 | buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages); | ||
551 | |||
552 | if (!buf_in || !buf_out) { | ||
553 | pr_err("Couldn't allocated pages for unaligned cases.\n"); | ||
554 | return -1; | ||
555 | } | ||
556 | |||
557 | dd->orig_out = dd->out_sg; | ||
558 | |||
559 | sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0); | ||
560 | |||
561 | sg_init_table(&dd->in_sgl, 1); | ||
562 | sg_set_buf(&dd->in_sgl, buf_in, dd->total); | ||
563 | dd->in_sg = &dd->in_sgl; | ||
564 | dd->in_sg_len = 1; | ||
565 | |||
566 | sg_init_table(&dd->out_sgl, 1); | ||
567 | sg_set_buf(&dd->out_sgl, buf_out, dd->total); | ||
568 | dd->out_sg = &dd->out_sgl; | ||
569 | dd->out_sg_len = 1; | ||
570 | |||
571 | return 0; | ||
572 | } | ||
573 | |||
574 | static int omap_des_handle_queue(struct omap_des_dev *dd, | 519 | static int omap_des_handle_queue(struct omap_des_dev *dd, |
575 | struct ablkcipher_request *req) | 520 | struct ablkcipher_request *req) |
576 | { | 521 | { |
@@ -587,6 +532,8 @@ static int omap_des_prepare_req(struct crypto_engine *engine, | |||
587 | crypto_ablkcipher_reqtfm(req)); | 532 | crypto_ablkcipher_reqtfm(req)); |
588 | struct omap_des_dev *dd = omap_des_find_dev(ctx); | 533 | struct omap_des_dev *dd = omap_des_find_dev(ctx); |
589 | struct omap_des_reqctx *rctx; | 534 | struct omap_des_reqctx *rctx; |
535 | int ret; | ||
536 | u16 flags; | ||
590 | 537 | ||
591 | if (!dd) | 538 | if (!dd) |
592 | return -ENODEV; | 539 | return -ENODEV; |
@@ -597,6 +544,23 @@ static int omap_des_prepare_req(struct crypto_engine *engine, | |||
597 | dd->total_save = req->nbytes; | 544 | dd->total_save = req->nbytes; |
598 | dd->in_sg = req->src; | 545 | dd->in_sg = req->src; |
599 | dd->out_sg = req->dst; | 546 | dd->out_sg = req->dst; |
547 | dd->orig_out = req->dst; | ||
548 | |||
549 | flags = OMAP_CRYPTO_COPY_DATA; | ||
550 | if (req->src == req->dst) | ||
551 | flags |= OMAP_CRYPTO_FORCE_COPY; | ||
552 | |||
553 | ret = omap_crypto_align_sg(&dd->in_sg, dd->total, DES_BLOCK_SIZE, | ||
554 | &dd->in_sgl, flags, | ||
555 | FLAGS_IN_DATA_ST_SHIFT, &dd->flags); | ||
556 | if (ret) | ||
557 | return ret; | ||
558 | |||
559 | ret = omap_crypto_align_sg(&dd->out_sg, dd->total, DES_BLOCK_SIZE, | ||
560 | &dd->out_sgl, 0, | ||
561 | FLAGS_OUT_DATA_ST_SHIFT, &dd->flags); | ||
562 | if (ret) | ||
563 | return ret; | ||
600 | 564 | ||
601 | dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total); | 565 | dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total); |
602 | if (dd->in_sg_len < 0) | 566 | if (dd->in_sg_len < 0) |
@@ -606,15 +570,6 @@ static int omap_des_prepare_req(struct crypto_engine *engine, | |||
606 | if (dd->out_sg_len < 0) | 570 | if (dd->out_sg_len < 0) |
607 | return dd->out_sg_len; | 571 | return dd->out_sg_len; |
608 | 572 | ||
609 | if (omap_des_copy_needed(dd->in_sg) || | ||
610 | omap_des_copy_needed(dd->out_sg)) { | ||
611 | if (omap_des_copy_sgs(dd)) | ||
612 | pr_err("Failed to copy SGs for unaligned cases\n"); | ||
613 | dd->sgs_copied = 1; | ||
614 | } else { | ||
615 | dd->sgs_copied = 0; | ||
616 | } | ||
617 | |||
618 | rctx = ablkcipher_request_ctx(req); | 573 | rctx = ablkcipher_request_ctx(req); |
619 | ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); | 574 | ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); |
620 | rctx->mode &= FLAGS_MODE_MASK; | 575 | rctx->mode &= FLAGS_MODE_MASK; |
@@ -642,8 +597,6 @@ static int omap_des_crypt_req(struct crypto_engine *engine, | |||
642 | static void omap_des_done_task(unsigned long data) | 597 | static void omap_des_done_task(unsigned long data) |
643 | { | 598 | { |
644 | struct omap_des_dev *dd = (struct omap_des_dev *)data; | 599 | struct omap_des_dev *dd = (struct omap_des_dev *)data; |
645 | void *buf_in, *buf_out; | ||
646 | int pages; | ||
647 | 600 | ||
648 | pr_debug("enter done_task\n"); | 601 | pr_debug("enter done_task\n"); |
649 | 602 | ||
@@ -656,16 +609,11 @@ static void omap_des_done_task(unsigned long data) | |||
656 | omap_des_crypt_dma_stop(dd); | 609 | omap_des_crypt_dma_stop(dd); |
657 | } | 610 | } |
658 | 611 | ||
659 | if (dd->sgs_copied) { | 612 | omap_crypto_cleanup(&dd->in_sgl, NULL, 0, dd->total_save, |
660 | buf_in = sg_virt(&dd->in_sgl); | 613 | FLAGS_IN_DATA_ST_SHIFT, dd->flags); |
661 | buf_out = sg_virt(&dd->out_sgl); | ||
662 | 614 | ||
663 | sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1); | 615 | omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save, |
664 | 616 | FLAGS_OUT_DATA_ST_SHIFT, dd->flags); | |
665 | pages = get_order(dd->total_save); | ||
666 | free_pages((unsigned long)buf_in, pages); | ||
667 | free_pages((unsigned long)buf_out, pages); | ||
668 | } | ||
669 | 617 | ||
670 | omap_des_finish_req(dd, 0); | 618 | omap_des_finish_req(dd, 0); |
671 | 619 | ||
@@ -699,16 +647,28 @@ static int omap_des_crypt(struct ablkcipher_request *req, unsigned long mode) | |||
699 | 647 | ||
700 | /* ********************** ALG API ************************************ */ | 648 | /* ********************** ALG API ************************************ */ |
701 | 649 | ||
702 | static int omap_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | 650 | static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, |
703 | unsigned int keylen) | 651 | unsigned int keylen) |
704 | { | 652 | { |
705 | struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(tfm); | 653 | struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
654 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | ||
706 | 655 | ||
707 | if (keylen != DES_KEY_SIZE && keylen != (3*DES_KEY_SIZE)) | 656 | if (keylen != DES_KEY_SIZE && keylen != (3*DES_KEY_SIZE)) |
708 | return -EINVAL; | 657 | return -EINVAL; |
709 | 658 | ||
710 | pr_debug("enter, keylen: %d\n", keylen); | 659 | pr_debug("enter, keylen: %d\n", keylen); |
711 | 660 | ||
661 | /* Do we need to test against weak key? */ | ||
662 | if (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY) { | ||
663 | u32 tmp[DES_EXPKEY_WORDS]; | ||
664 | int ret = des_ekey(tmp, key); | ||
665 | |||
666 | if (!ret) { | ||
667 | tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; | ||
668 | return -EINVAL; | ||
669 | } | ||
670 | } | ||
671 | |||
712 | memcpy(ctx->key, key, keylen); | 672 | memcpy(ctx->key, key, keylen); |
713 | ctx->keylen = keylen; | 673 | ctx->keylen = keylen; |
714 | 674 | ||
@@ -1032,8 +992,10 @@ static int omap_des_probe(struct platform_device *pdev) | |||
1032 | } | 992 | } |
1033 | dd->phys_base = res->start; | 993 | dd->phys_base = res->start; |
1034 | 994 | ||
995 | pm_runtime_use_autosuspend(dev); | ||
996 | pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY); | ||
997 | |||
1035 | pm_runtime_enable(dev); | 998 | pm_runtime_enable(dev); |
1036 | pm_runtime_irq_safe(dev); | ||
1037 | err = pm_runtime_get_sync(dev); | 999 | err = pm_runtime_get_sync(dev); |
1038 | if (err < 0) { | 1000 | if (err < 0) { |
1039 | pm_runtime_put_noidle(dev); | 1001 | pm_runtime_put_noidle(dev); |
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index d0b16e5e4ee5..9ad9d399daf1 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <crypto/algapi.h> | 41 | #include <crypto/algapi.h> |
42 | #include <crypto/sha.h> | 42 | #include <crypto/sha.h> |
43 | #include <crypto/hash.h> | 43 | #include <crypto/hash.h> |
44 | #include <crypto/hmac.h> | ||
44 | #include <crypto/internal/hash.h> | 45 | #include <crypto/internal/hash.h> |
45 | 46 | ||
46 | #define MD5_DIGEST_SIZE 16 | 47 | #define MD5_DIGEST_SIZE 16 |
@@ -225,7 +226,7 @@ struct omap_sham_dev { | |||
225 | struct dma_chan *dma_lch; | 226 | struct dma_chan *dma_lch; |
226 | struct tasklet_struct done_task; | 227 | struct tasklet_struct done_task; |
227 | u8 polling_mode; | 228 | u8 polling_mode; |
228 | u8 xmit_buf[BUFLEN]; | 229 | u8 xmit_buf[BUFLEN] OMAP_ALIGNED; |
229 | 230 | ||
230 | unsigned long flags; | 231 | unsigned long flags; |
231 | struct crypto_queue queue; | 232 | struct crypto_queue queue; |
@@ -750,7 +751,10 @@ static int omap_sham_align_sgs(struct scatterlist *sg, | |||
750 | if (final) | 751 | if (final) |
751 | new_len = DIV_ROUND_UP(new_len, bs) * bs; | 752 | new_len = DIV_ROUND_UP(new_len, bs) * bs; |
752 | else | 753 | else |
753 | new_len = new_len / bs * bs; | 754 | new_len = (new_len - 1) / bs * bs; |
755 | |||
756 | if (nbytes != new_len) | ||
757 | list_ok = false; | ||
754 | 758 | ||
755 | while (nbytes > 0 && sg_tmp) { | 759 | while (nbytes > 0 && sg_tmp) { |
756 | n++; | 760 | n++; |
@@ -846,6 +850,8 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update) | |||
846 | xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs; | 850 | xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs; |
847 | else | 851 | else |
848 | xmit_len = xmit_len / bs * bs; | 852 | xmit_len = xmit_len / bs * bs; |
853 | } else if (!final) { | ||
854 | xmit_len -= bs; | ||
849 | } | 855 | } |
850 | 856 | ||
851 | hash_later = rctx->total - xmit_len; | 857 | hash_later = rctx->total - xmit_len; |
@@ -873,14 +879,21 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update) | |||
873 | } | 879 | } |
874 | 880 | ||
875 | if (hash_later) { | 881 | if (hash_later) { |
876 | if (req->nbytes) { | 882 | int offset = 0; |
877 | scatterwalk_map_and_copy(rctx->buffer, req->src, | 883 | |
878 | req->nbytes - hash_later, | 884 | if (hash_later > req->nbytes) { |
879 | hash_later, 0); | ||
880 | } else { | ||
881 | memcpy(rctx->buffer, rctx->buffer + xmit_len, | 885 | memcpy(rctx->buffer, rctx->buffer + xmit_len, |
882 | hash_later); | 886 | hash_later - req->nbytes); |
887 | offset = hash_later - req->nbytes; | ||
883 | } | 888 | } |
889 | |||
890 | if (req->nbytes) { | ||
891 | scatterwalk_map_and_copy(rctx->buffer + offset, | ||
892 | req->src, | ||
893 | offset + req->nbytes - | ||
894 | hash_later, hash_later, 0); | ||
895 | } | ||
896 | |||
884 | rctx->bufcnt = hash_later; | 897 | rctx->bufcnt = hash_later; |
885 | } else { | 898 | } else { |
886 | rctx->bufcnt = 0; | 899 | rctx->bufcnt = 0; |
@@ -1130,7 +1143,7 @@ retry: | |||
1130 | ctx = ahash_request_ctx(req); | 1143 | ctx = ahash_request_ctx(req); |
1131 | 1144 | ||
1132 | err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE); | 1145 | err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE); |
1133 | if (err) | 1146 | if (err || !ctx->total) |
1134 | goto err1; | 1147 | goto err1; |
1135 | 1148 | ||
1136 | dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", | 1149 | dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", |
@@ -1189,11 +1202,10 @@ static int omap_sham_update(struct ahash_request *req) | |||
1189 | if (!req->nbytes) | 1202 | if (!req->nbytes) |
1190 | return 0; | 1203 | return 0; |
1191 | 1204 | ||
1192 | if (ctx->total + req->nbytes < ctx->buflen) { | 1205 | if (ctx->bufcnt + req->nbytes <= ctx->buflen) { |
1193 | scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src, | 1206 | scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src, |
1194 | 0, req->nbytes, 0); | 1207 | 0, req->nbytes, 0); |
1195 | ctx->bufcnt += req->nbytes; | 1208 | ctx->bufcnt += req->nbytes; |
1196 | ctx->total += req->nbytes; | ||
1197 | return 0; | 1209 | return 0; |
1198 | } | 1210 | } |
1199 | 1211 | ||
@@ -1326,8 +1338,8 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
1326 | memcpy(bctx->opad, bctx->ipad, bs); | 1338 | memcpy(bctx->opad, bctx->ipad, bs); |
1327 | 1339 | ||
1328 | for (i = 0; i < bs; i++) { | 1340 | for (i = 0; i < bs; i++) { |
1329 | bctx->ipad[i] ^= 0x36; | 1341 | bctx->ipad[i] ^= HMAC_IPAD_VALUE; |
1330 | bctx->opad[i] ^= 0x5c; | 1342 | bctx->opad[i] ^= HMAC_OPAD_VALUE; |
1331 | } | 1343 | } |
1332 | } | 1344 | } |
1333 | 1345 | ||
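The 0x36/0x5c literals in omap-sham (and in qat below) become the named HMAC_IPAD_VALUE/HMAC_OPAD_VALUE constants from <crypto/hmac.h>. For context, a minimal sketch of the standard HMAC pad derivation these constants belong to (an illustration, not code from the patch; it assumes the key has already been reduced to at most one block):

#include <linux/string.h>
#include <crypto/hmac.h>

static void example_hmac_pads(u8 *ipad, u8 *opad, const u8 *key,
			      unsigned int keylen, unsigned int bs)
{
	unsigned int i;

	memset(ipad, 0, bs);
	memcpy(ipad, key, keylen);	/* keylen <= bs assumed */
	memcpy(opad, ipad, bs);

	for (i = 0; i < bs; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;	/* 0x36 */
		opad[i] ^= HMAC_OPAD_VALUE;	/* 0x5c */
	}
}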
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c index 2839fccdd84b..d3e25c37dc33 100644 --- a/drivers/crypto/qat/qat_common/adf_aer.c +++ b/drivers/crypto/qat/qat_common/adf_aer.c | |||
@@ -109,20 +109,7 @@ EXPORT_SYMBOL_GPL(adf_reset_sbr); | |||
109 | 109 | ||
110 | void adf_reset_flr(struct adf_accel_dev *accel_dev) | 110 | void adf_reset_flr(struct adf_accel_dev *accel_dev) |
111 | { | 111 | { |
112 | struct pci_dev *pdev = accel_to_pci_dev(accel_dev); | 112 | pcie_flr(accel_to_pci_dev(accel_dev)); |
113 | u16 control = 0; | ||
114 | int pos = 0; | ||
115 | |||
116 | dev_info(&GET_DEV(accel_dev), "Function level reset\n"); | ||
117 | pos = pci_pcie_cap(pdev); | ||
118 | if (!pos) { | ||
119 | dev_err(&GET_DEV(accel_dev), "Restart device failed\n"); | ||
120 | return; | ||
121 | } | ||
122 | pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &control); | ||
123 | control |= PCI_EXP_DEVCTL_BCR_FLR; | ||
124 | pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, control); | ||
125 | msleep(100); | ||
126 | } | 113 | } |
127 | EXPORT_SYMBOL_GPL(adf_reset_flr); | 114 | EXPORT_SYMBOL_GPL(adf_reset_flr); |
128 | 115 | ||
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 20f35df8a01f..5b5efcc52cb5 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <crypto/aes.h> | 51 | #include <crypto/aes.h> |
52 | #include <crypto/sha.h> | 52 | #include <crypto/sha.h> |
53 | #include <crypto/hash.h> | 53 | #include <crypto/hash.h> |
54 | #include <crypto/hmac.h> | ||
54 | #include <crypto/algapi.h> | 55 | #include <crypto/algapi.h> |
55 | #include <crypto/authenc.h> | 56 | #include <crypto/authenc.h> |
56 | #include <linux/dma-mapping.h> | 57 | #include <linux/dma-mapping.h> |
@@ -178,8 +179,8 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash, | |||
178 | for (i = 0; i < block_size; i++) { | 179 | for (i = 0; i < block_size; i++) { |
179 | char *ipad_ptr = ipad + i; | 180 | char *ipad_ptr = ipad + i; |
180 | char *opad_ptr = opad + i; | 181 | char *opad_ptr = opad + i; |
181 | *ipad_ptr ^= 0x36; | 182 | *ipad_ptr ^= HMAC_IPAD_VALUE; |
182 | *opad_ptr ^= 0x5C; | 183 | *opad_ptr ^= HMAC_OPAD_VALUE; |
183 | } | 184 | } |
184 | 185 | ||
185 | if (crypto_shash_init(shash)) | 186 | if (crypto_shash_init(shash)) |
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c index 2aab80bc241f..6f5dd68449c6 100644 --- a/drivers/crypto/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c | |||
@@ -521,11 +521,11 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf, | |||
521 | return 0; | 521 | return 0; |
522 | } | 522 | } |
523 | 523 | ||
524 | static int qat_dh_max_size(struct crypto_kpp *tfm) | 524 | static unsigned int qat_dh_max_size(struct crypto_kpp *tfm) |
525 | { | 525 | { |
526 | struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); | 526 | struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); |
527 | 527 | ||
528 | return ctx->p ? ctx->p_size : -EINVAL; | 528 | return ctx->p_size; |
529 | } | 529 | } |
530 | 530 | ||
531 | static int qat_dh_init_tfm(struct crypto_kpp *tfm) | 531 | static int qat_dh_init_tfm(struct crypto_kpp *tfm) |
@@ -1256,11 +1256,11 @@ static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key, | |||
1256 | return qat_rsa_setkey(tfm, key, keylen, true); | 1256 | return qat_rsa_setkey(tfm, key, keylen, true); |
1257 | } | 1257 | } |
1258 | 1258 | ||
1259 | static int qat_rsa_max_size(struct crypto_akcipher *tfm) | 1259 | static unsigned int qat_rsa_max_size(struct crypto_akcipher *tfm) |
1260 | { | 1260 | { |
1261 | struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | 1261 | struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); |
1262 | 1262 | ||
1263 | return (ctx->n) ? ctx->key_sz : -EINVAL; | 1263 | return ctx->key_sz; |
1264 | } | 1264 | } |
1265 | 1265 | ||
1266 | static int qat_rsa_init_tfm(struct crypto_akcipher *tfm) | 1266 | static int qat_rsa_init_tfm(struct crypto_akcipher *tfm) |
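With max_size returning unsigned int, the callback no longer smuggles -EINVAL into the size; callers treat the value purely as an output-buffer bound. A caller-side sketch using the generic akcipher helper (an assumption for illustration, nothing qat-specific):

#include <linux/slab.h>
#include <crypto/akcipher.h>

static int example_alloc_result(struct crypto_akcipher *tfm, u8 **out)
{
	/* size the result buffer from the transform's maximum output length */
	unsigned int len = crypto_akcipher_maxsize(tfm);

	*out = kzalloc(len, GFP_KERNEL);
	return *out ? 0 : -ENOMEM;
}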
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c index 90efd10d57a1..5cf64746731a 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | |||
@@ -16,13 +16,13 @@ | |||
16 | */ | 16 | */ |
17 | #include "sun4i-ss.h" | 17 | #include "sun4i-ss.h" |
18 | 18 | ||
19 | static int sun4i_ss_opti_poll(struct ablkcipher_request *areq) | 19 | static int sun4i_ss_opti_poll(struct skcipher_request *areq) |
20 | { | 20 | { |
21 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | 21 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); |
22 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | 22 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); |
23 | struct sun4i_ss_ctx *ss = op->ss; | 23 | struct sun4i_ss_ctx *ss = op->ss; |
24 | unsigned int ivsize = crypto_ablkcipher_ivsize(tfm); | 24 | unsigned int ivsize = crypto_skcipher_ivsize(tfm); |
25 | struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq); | 25 | struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq); |
26 | u32 mode = ctx->mode; | 26 | u32 mode = ctx->mode; |
27 | /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */ | 27 | /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */ |
28 | u32 rx_cnt = SS_RX_DEFAULT; | 28 | u32 rx_cnt = SS_RX_DEFAULT; |
@@ -31,17 +31,17 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq) | |||
31 | u32 v; | 31 | u32 v; |
32 | int err = 0; | 32 | int err = 0; |
33 | unsigned int i; | 33 | unsigned int i; |
34 | unsigned int ileft = areq->nbytes; | 34 | unsigned int ileft = areq->cryptlen; |
35 | unsigned int oleft = areq->nbytes; | 35 | unsigned int oleft = areq->cryptlen; |
36 | unsigned int todo; | 36 | unsigned int todo; |
37 | struct sg_mapping_iter mi, mo; | 37 | struct sg_mapping_iter mi, mo; |
38 | unsigned int oi, oo; /* offset for in and out */ | 38 | unsigned int oi, oo; /* offset for in and out */ |
39 | unsigned long flags; | 39 | unsigned long flags; |
40 | 40 | ||
41 | if (areq->nbytes == 0) | 41 | if (!areq->cryptlen) |
42 | return 0; | 42 | return 0; |
43 | 43 | ||
44 | if (!areq->info) { | 44 | if (!areq->iv) { |
45 | dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n"); | 45 | dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n"); |
46 | return -EINVAL; | 46 | return -EINVAL; |
47 | } | 47 | } |
@@ -56,9 +56,9 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq) | |||
56 | for (i = 0; i < op->keylen; i += 4) | 56 | for (i = 0; i < op->keylen; i += 4) |
57 | writel(*(op->key + i / 4), ss->base + SS_KEY0 + i); | 57 | writel(*(op->key + i / 4), ss->base + SS_KEY0 + i); |
58 | 58 | ||
59 | if (areq->info) { | 59 | if (areq->iv) { |
60 | for (i = 0; i < 4 && i < ivsize / 4; i++) { | 60 | for (i = 0; i < 4 && i < ivsize / 4; i++) { |
61 | v = *(u32 *)(areq->info + i * 4); | 61 | v = *(u32 *)(areq->iv + i * 4); |
62 | writel(v, ss->base + SS_IV0 + i * 4); | 62 | writel(v, ss->base + SS_IV0 + i * 4); |
63 | } | 63 | } |
64 | } | 64 | } |
@@ -76,13 +76,13 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq) | |||
76 | goto release_ss; | 76 | goto release_ss; |
77 | } | 77 | } |
78 | 78 | ||
79 | ileft = areq->nbytes / 4; | 79 | ileft = areq->cryptlen / 4; |
80 | oleft = areq->nbytes / 4; | 80 | oleft = areq->cryptlen / 4; |
81 | oi = 0; | 81 | oi = 0; |
82 | oo = 0; | 82 | oo = 0; |
83 | do { | 83 | do { |
84 | todo = min3(rx_cnt, ileft, (mi.length - oi) / 4); | 84 | todo = min3(rx_cnt, ileft, (mi.length - oi) / 4); |
85 | if (todo > 0) { | 85 | if (todo) { |
86 | ileft -= todo; | 86 | ileft -= todo; |
87 | writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo); | 87 | writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo); |
88 | oi += todo * 4; | 88 | oi += todo * 4; |
@@ -97,7 +97,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq) | |||
97 | tx_cnt = SS_TXFIFO_SPACES(spaces); | 97 | tx_cnt = SS_TXFIFO_SPACES(spaces); |
98 | 98 | ||
99 | todo = min3(tx_cnt, oleft, (mo.length - oo) / 4); | 99 | todo = min3(tx_cnt, oleft, (mo.length - oo) / 4); |
100 | if (todo > 0) { | 100 | if (todo) { |
101 | oleft -= todo; | 101 | oleft -= todo; |
102 | readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo); | 102 | readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo); |
103 | oo += todo * 4; | 103 | oo += todo * 4; |
@@ -106,12 +106,12 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq) | |||
106 | sg_miter_next(&mo); | 106 | sg_miter_next(&mo); |
107 | oo = 0; | 107 | oo = 0; |
108 | } | 108 | } |
109 | } while (oleft > 0); | 109 | } while (oleft); |
110 | 110 | ||
111 | if (areq->info) { | 111 | if (areq->iv) { |
112 | for (i = 0; i < 4 && i < ivsize / 4; i++) { | 112 | for (i = 0; i < 4 && i < ivsize / 4; i++) { |
113 | v = readl(ss->base + SS_IV0 + i * 4); | 113 | v = readl(ss->base + SS_IV0 + i * 4); |
114 | *(u32 *)(areq->info + i * 4) = v; | 114 | *(u32 *)(areq->iv + i * 4) = v; |
115 | } | 115 | } |
116 | } | 116 | } |
117 | 117 | ||
@@ -124,16 +124,16 @@ release_ss: | |||
124 | } | 124 | } |
125 | 125 | ||
126 | /* Generic function that support SG with size not multiple of 4 */ | 126 | /* Generic function that support SG with size not multiple of 4 */ |
127 | static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) | 127 | static int sun4i_ss_cipher_poll(struct skcipher_request *areq) |
128 | { | 128 | { |
129 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | 129 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); |
130 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | 130 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); |
131 | struct sun4i_ss_ctx *ss = op->ss; | 131 | struct sun4i_ss_ctx *ss = op->ss; |
132 | int no_chunk = 1; | 132 | int no_chunk = 1; |
133 | struct scatterlist *in_sg = areq->src; | 133 | struct scatterlist *in_sg = areq->src; |
134 | struct scatterlist *out_sg = areq->dst; | 134 | struct scatterlist *out_sg = areq->dst; |
135 | unsigned int ivsize = crypto_ablkcipher_ivsize(tfm); | 135 | unsigned int ivsize = crypto_skcipher_ivsize(tfm); |
136 | struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq); | 136 | struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq); |
137 | u32 mode = ctx->mode; | 137 | u32 mode = ctx->mode; |
138 | /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */ | 138 | /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */ |
139 | u32 rx_cnt = SS_RX_DEFAULT; | 139 | u32 rx_cnt = SS_RX_DEFAULT; |
@@ -142,8 +142,8 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) | |||
142 | u32 spaces; | 142 | u32 spaces; |
143 | int err = 0; | 143 | int err = 0; |
144 | unsigned int i; | 144 | unsigned int i; |
145 | unsigned int ileft = areq->nbytes; | 145 | unsigned int ileft = areq->cryptlen; |
146 | unsigned int oleft = areq->nbytes; | 146 | unsigned int oleft = areq->cryptlen; |
147 | unsigned int todo; | 147 | unsigned int todo; |
148 | struct sg_mapping_iter mi, mo; | 148 | struct sg_mapping_iter mi, mo; |
149 | unsigned int oi, oo; /* offset for in and out */ | 149 | unsigned int oi, oo; /* offset for in and out */ |
@@ -154,10 +154,10 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) | |||
154 | unsigned int obl = 0; /* length of data in bufo */ | 154 | unsigned int obl = 0; /* length of data in bufo */ |
155 | unsigned long flags; | 155 | unsigned long flags; |
156 | 156 | ||
157 | if (areq->nbytes == 0) | 157 | if (!areq->cryptlen) |
158 | return 0; | 158 | return 0; |
159 | 159 | ||
160 | if (!areq->info) { | 160 | if (!areq->iv) { |
161 | dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n"); | 161 | dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n"); |
162 | return -EINVAL; | 162 | return -EINVAL; |
163 | } | 163 | } |
@@ -172,12 +172,12 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) | |||
172 | * we can use the SS optimized function | 172 | * we can use the SS optimized function |
173 | */ | 173 | */ |
174 | while (in_sg && no_chunk == 1) { | 174 | while (in_sg && no_chunk == 1) { |
175 | if ((in_sg->length % 4) != 0) | 175 | if (in_sg->length % 4) |
176 | no_chunk = 0; | 176 | no_chunk = 0; |
177 | in_sg = sg_next(in_sg); | 177 | in_sg = sg_next(in_sg); |
178 | } | 178 | } |
179 | while (out_sg && no_chunk == 1) { | 179 | while (out_sg && no_chunk == 1) { |
180 | if ((out_sg->length % 4) != 0) | 180 | if (out_sg->length % 4) |
181 | no_chunk = 0; | 181 | no_chunk = 0; |
182 | out_sg = sg_next(out_sg); | 182 | out_sg = sg_next(out_sg); |
183 | } | 183 | } |
@@ -190,9 +190,9 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) | |||
190 | for (i = 0; i < op->keylen; i += 4) | 190 | for (i = 0; i < op->keylen; i += 4) |
191 | writel(*(op->key + i / 4), ss->base + SS_KEY0 + i); | 191 | writel(*(op->key + i / 4), ss->base + SS_KEY0 + i); |
192 | 192 | ||
193 | if (areq->info) { | 193 | if (areq->iv) { |
194 | for (i = 0; i < 4 && i < ivsize / 4; i++) { | 194 | for (i = 0; i < 4 && i < ivsize / 4; i++) { |
195 | v = *(u32 *)(areq->info + i * 4); | 195 | v = *(u32 *)(areq->iv + i * 4); |
196 | writel(v, ss->base + SS_IV0 + i * 4); | 196 | writel(v, ss->base + SS_IV0 + i * 4); |
197 | } | 197 | } |
198 | } | 198 | } |
@@ -209,19 +209,19 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) | |||
209 | err = -EINVAL; | 209 | err = -EINVAL; |
210 | goto release_ss; | 210 | goto release_ss; |
211 | } | 211 | } |
212 | ileft = areq->nbytes; | 212 | ileft = areq->cryptlen; |
213 | oleft = areq->nbytes; | 213 | oleft = areq->cryptlen; |
214 | oi = 0; | 214 | oi = 0; |
215 | oo = 0; | 215 | oo = 0; |
216 | 216 | ||
217 | while (oleft > 0) { | 217 | while (oleft) { |
218 | if (ileft > 0) { | 218 | if (ileft) { |
219 | /* | 219 | /* |
220 | * todo is the number of consecutive 4byte word that we | 220 | * todo is the number of consecutive 4byte word that we |
221 | * can read from current SG | 221 | * can read from current SG |
222 | */ | 222 | */ |
223 | todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4); | 223 | todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4); |
224 | if (todo > 0 && ob == 0) { | 224 | if (todo && !ob) { |
225 | writesl(ss->base + SS_RXFIFO, mi.addr + oi, | 225 | writesl(ss->base + SS_RXFIFO, mi.addr + oi, |
226 | todo); | 226 | todo); |
227 | ileft -= todo * 4; | 227 | ileft -= todo * 4; |
@@ -240,7 +240,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) | |||
240 | ileft -= todo; | 240 | ileft -= todo; |
241 | oi += todo; | 241 | oi += todo; |
242 | ob += todo; | 242 | ob += todo; |
243 | if (ob % 4 == 0) { | 243 | if (!(ob % 4)) { |
244 | writesl(ss->base + SS_RXFIFO, buf, | 244 | writesl(ss->base + SS_RXFIFO, buf, |
245 | ob / 4); | 245 | ob / 4); |
246 | ob = 0; | 246 | ob = 0; |
@@ -257,14 +257,14 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) | |||
257 | tx_cnt = SS_TXFIFO_SPACES(spaces); | 257 | tx_cnt = SS_TXFIFO_SPACES(spaces); |
258 | dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n", | 258 | dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n", |
259 | mode, | 259 | mode, |
260 | oi, mi.length, ileft, areq->nbytes, rx_cnt, | 260 | oi, mi.length, ileft, areq->cryptlen, rx_cnt, |
261 | oo, mo.length, oleft, areq->nbytes, tx_cnt, ob); | 261 | oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob); |
262 | 262 | ||
263 | if (tx_cnt == 0) | 263 | if (!tx_cnt) |
264 | continue; | 264 | continue; |
265 | /* todo in 4bytes word */ | 265 | /* todo in 4bytes word */ |
266 | todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4); | 266 | todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4); |
267 | if (todo > 0) { | 267 | if (todo) { |
268 | readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo); | 268 | readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo); |
269 | oleft -= todo * 4; | 269 | oleft -= todo * 4; |
270 | oo += todo * 4; | 270 | oo += todo * 4; |
@@ -300,10 +300,10 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) | |||
300 | /* bufo must be fully used here */ | 300 | /* bufo must be fully used here */ |
301 | } | 301 | } |
302 | } | 302 | } |
303 | if (areq->info) { | 303 | if (areq->iv) { |
304 | for (i = 0; i < 4 && i < ivsize / 4; i++) { | 304 | for (i = 0; i < 4 && i < ivsize / 4; i++) { |
305 | v = readl(ss->base + SS_IV0 + i * 4); | 305 | v = readl(ss->base + SS_IV0 + i * 4); |
306 | *(u32 *)(areq->info + i * 4) = v; | 306 | *(u32 *)(areq->iv + i * 4) = v; |
307 | } | 307 | } |
308 | } | 308 | } |
309 | 309 | ||
@@ -317,22 +317,22 @@ release_ss: | |||
317 | } | 317 | } |
318 | 318 | ||
319 | /* CBC AES */ | 319 | /* CBC AES */ |
320 | int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq) | 320 | int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq) |
321 | { | 321 | { |
322 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | 322 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); |
323 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | 323 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); |
324 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | 324 | struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); |
325 | 325 | ||
326 | rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | | 326 | rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | |
327 | op->keymode; | 327 | op->keymode; |
328 | return sun4i_ss_cipher_poll(areq); | 328 | return sun4i_ss_cipher_poll(areq); |
329 | } | 329 | } |
330 | 330 | ||
331 | int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq) | 331 | int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq) |
332 | { | 332 | { |
333 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | 333 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); |
334 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | 334 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); |
335 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | 335 | struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); |
336 | 336 | ||
337 | rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION | | 337 | rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION | |
338 | op->keymode; | 338 | op->keymode; |
@@ -340,22 +340,22 @@ int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq) | |||
340 | } | 340 | } |
341 | 341 | ||
342 | /* ECB AES */ | 342 | /* ECB AES */ |
343 | int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq) | 343 | int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq) |
344 | { | 344 | { |
345 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | 345 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); |
346 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | 346 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); |
347 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | 347 | struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); |
348 | 348 | ||
349 | rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | | 349 | rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | |
350 | op->keymode; | 350 | op->keymode; |
351 | return sun4i_ss_cipher_poll(areq); | 351 | return sun4i_ss_cipher_poll(areq); |
352 | } | 352 | } |
353 | 353 | ||
354 | int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq) | 354 | int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq) |
355 | { | 355 | { |
356 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | 356 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); |
357 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | 357 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); |
358 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | 358 | struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); |
359 | 359 | ||
360 | rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION | | 360 | rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION | |
361 | op->keymode; | 361 | op->keymode; |
@@ -363,22 +363,22 @@ int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq) | |||
363 | } | 363 | } |
364 | 364 | ||
365 | /* CBC DES */ | 365 | /* CBC DES */ |
366 | int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq) | 366 | int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq) |
367 | { | 367 | { |
368 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | 368 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); |
369 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | 369 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); |
370 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | 370 | struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); |
371 | 371 | ||
372 | rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | | 372 | rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | |
373 | op->keymode; | 373 | op->keymode; |
374 | return sun4i_ss_cipher_poll(areq); | 374 | return sun4i_ss_cipher_poll(areq); |
375 | } | 375 | } |
376 | 376 | ||
377 | int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq) | 377 | int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq) |
378 | { | 378 | { |
379 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | 379 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); |
380 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | 380 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); |
381 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | 381 | struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); |
382 | 382 | ||
383 | rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION | | 383 | rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION | |
384 | op->keymode; | 384 | op->keymode; |
@@ -386,22 +386,22 @@ int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq) | |||
386 | } | 386 | } |
387 | 387 | ||
388 | /* ECB DES */ | 388 | /* ECB DES */ |
389 | int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq) | 389 | int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq) |
390 | { | 390 | { |
391 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | 391 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); |
392 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | 392 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); |
393 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | 393 | struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); |
394 | 394 | ||
395 | rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | | 395 | rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | |
396 | op->keymode; | 396 | op->keymode; |
397 | return sun4i_ss_cipher_poll(areq); | 397 | return sun4i_ss_cipher_poll(areq); |
398 | } | 398 | } |
399 | 399 | ||
400 | int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq) | 400 | int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq) |
401 | { | 401 | { |
402 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | 402 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); |
403 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | 403 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); |
404 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | 404 | struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); |
405 | 405 | ||
406 | rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION | | 406 | rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION | |
407 | op->keymode; | 407 | op->keymode; |
@@ -409,22 +409,22 @@ int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq) | |||
409 | } | 409 | } |
410 | 410 | ||
411 | /* CBC 3DES */ | 411 | /* CBC 3DES */ |
412 | int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq) | 412 | int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq) |
413 | { | 413 | { |
414 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | 414 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); |
415 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | 415 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); |
416 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | 416 | struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); |
417 | 417 | ||
418 | rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | | 418 | rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | |
419 | op->keymode; | 419 | op->keymode; |
420 | return sun4i_ss_cipher_poll(areq); | 420 | return sun4i_ss_cipher_poll(areq); |
421 | } | 421 | } |
422 | 422 | ||
423 | int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq) | 423 | int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq) |
424 | { | 424 | { |
425 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | 425 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); |
426 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | 426 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); |
427 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | 427 | struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); |
428 | 428 | ||
429 | rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION | | 429 | rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION | |
430 | op->keymode; | 430 | op->keymode; |
@@ -432,22 +432,22 @@ int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq) | |||
432 | } | 432 | } |
433 | 433 | ||
434 | /* ECB 3DES */ | 434 | /* ECB 3DES */ |
435 | int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq) | 435 | int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq) |
436 | { | 436 | { |
437 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | 437 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); |
438 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | 438 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); |
439 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | 439 | struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); |
440 | 440 | ||
441 | rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | | 441 | rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | |
442 | op->keymode; | 442 | op->keymode; |
443 | return sun4i_ss_cipher_poll(areq); | 443 | return sun4i_ss_cipher_poll(areq); |
444 | } | 444 | } |
445 | 445 | ||
446 | int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq) | 446 | int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq) |
447 | { | 447 | { |
448 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); | 448 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); |
449 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | 449 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); |
450 | struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); | 450 | struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); |
451 | 451 | ||
452 | rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION | | 452 | rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION | |
453 | op->keymode; | 453 | op->keymode; |
@@ -457,24 +457,25 @@ int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq) | |||
457 | int sun4i_ss_cipher_init(struct crypto_tfm *tfm) | 457 | int sun4i_ss_cipher_init(struct crypto_tfm *tfm) |
458 | { | 458 | { |
459 | struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); | 459 | struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); |
460 | struct crypto_alg *alg = tfm->__crt_alg; | ||
461 | struct sun4i_ss_alg_template *algt; | 460 | struct sun4i_ss_alg_template *algt; |
462 | 461 | ||
463 | memset(op, 0, sizeof(struct sun4i_tfm_ctx)); | 462 | memset(op, 0, sizeof(struct sun4i_tfm_ctx)); |
464 | 463 | ||
465 | algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto); | 464 | algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template, |
465 | alg.crypto.base); | ||
466 | op->ss = algt->ss; | 466 | op->ss = algt->ss; |
467 | 467 | ||
468 | tfm->crt_ablkcipher.reqsize = sizeof(struct sun4i_cipher_req_ctx); | 468 | crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), |
469 | sizeof(struct sun4i_cipher_req_ctx)); | ||
469 | 470 | ||
470 | return 0; | 471 | return 0; |
471 | } | 472 | } |
472 | 473 | ||
473 | /* check and set the AES key, prepare the mode to be used */ | 474 | /* check and set the AES key, prepare the mode to be used */ |
474 | int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | 475 | int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, |
475 | unsigned int keylen) | 476 | unsigned int keylen) |
476 | { | 477 | { |
477 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | 478 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); |
478 | struct sun4i_ss_ctx *ss = op->ss; | 479 | struct sun4i_ss_ctx *ss = op->ss; |
479 | 480 | ||
480 | switch (keylen) { | 481 | switch (keylen) { |
@@ -489,7 +490,7 @@ int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | |||
489 | break; | 490 | break; |
490 | default: | 491 | default: |
491 | dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen); | 492 | dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen); |
492 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | 493 | crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
493 | return -EINVAL; | 494 | return -EINVAL; |
494 | } | 495 | } |
495 | op->keylen = keylen; | 496 | op->keylen = keylen; |
@@ -498,10 +499,10 @@ int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | |||
498 | } | 499 | } |
499 | 500 | ||
500 | /* check and set the DES key, prepare the mode to be used */ | 501 | /* check and set the DES key, prepare the mode to be used */ |
501 | int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | 502 | int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key, |
502 | unsigned int keylen) | 503 | unsigned int keylen) |
503 | { | 504 | { |
504 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | 505 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); |
505 | struct sun4i_ss_ctx *ss = op->ss; | 506 | struct sun4i_ss_ctx *ss = op->ss; |
506 | u32 flags; | 507 | u32 flags; |
507 | u32 tmp[DES_EXPKEY_WORDS]; | 508 | u32 tmp[DES_EXPKEY_WORDS]; |
@@ -509,15 +510,15 @@ int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | |||
509 | 510 | ||
510 | if (unlikely(keylen != DES_KEY_SIZE)) { | 511 | if (unlikely(keylen != DES_KEY_SIZE)) { |
511 | dev_err(ss->dev, "Invalid keylen %u\n", keylen); | 512 | dev_err(ss->dev, "Invalid keylen %u\n", keylen); |
512 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | 513 | crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
513 | return -EINVAL; | 514 | return -EINVAL; |
514 | } | 515 | } |
515 | 516 | ||
516 | flags = crypto_ablkcipher_get_flags(tfm); | 517 | flags = crypto_skcipher_get_flags(tfm); |
517 | 518 | ||
518 | ret = des_ekey(tmp, key); | 519 | ret = des_ekey(tmp, key); |
519 | if (unlikely(ret == 0) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) { | 520 | if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) { |
520 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); | 521 | crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); |
521 | dev_dbg(ss->dev, "Weak key %u\n", keylen); | 522 | dev_dbg(ss->dev, "Weak key %u\n", keylen); |
522 | return -EINVAL; | 523 | return -EINVAL; |
523 | } | 524 | } |
@@ -528,15 +529,15 @@ int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | |||
528 | } | 529 | } |
529 | 530 | ||
530 | /* check and set the 3DES key, prepare the mode to be used */ | 531 | /* check and set the 3DES key, prepare the mode to be used */ |
531 | int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | 532 | int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, |
532 | unsigned int keylen) | 533 | unsigned int keylen) |
533 | { | 534 | { |
534 | struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); | 535 | struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); |
535 | struct sun4i_ss_ctx *ss = op->ss; | 536 | struct sun4i_ss_ctx *ss = op->ss; |
536 | 537 | ||
537 | if (unlikely(keylen != 3 * DES_KEY_SIZE)) { | 538 | if (unlikely(keylen != 3 * DES_KEY_SIZE)) { |
538 | dev_err(ss->dev, "Invalid keylen %u\n", keylen); | 539 | dev_err(ss->dev, "Invalid keylen %u\n", keylen); |
539 | crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | 540 | crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
540 | return -EINVAL; | 541 | return -EINVAL; |
541 | } | 542 | } |
542 | op->keylen = keylen; | 543 | op->keylen = keylen; |
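The pattern repeated throughout this file is worth stating once: with the skcipher API the driver keeps two levels of state, and each converted handler only differs in which mode bits it sets. A sketch of that common shape, using only the driver internals visible in the hunks above (SS_* mode bits, the two context structs, sun4i_ss_cipher_poll()); it is a restatement for orientation, not new code:

/*
 * Common skeleton of the converted sun4i_ss_*_{en,de}crypt() handlers.
 */
static int sun4i_ss_example_encrypt(struct skcipher_request *areq)
{
	/* per-tfm state: key, keylen, keymode, filled in once by ->setkey() */
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	/* per-request state: sized via crypto_skcipher_set_reqsize() in cipher_init */
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | op->keymode;
	return sun4i_ss_cipher_poll(areq);
}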
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c index 3ac6c6c4ad18..02ad8256e900 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c | |||
@@ -83,134 +83,133 @@ static struct sun4i_ss_alg_template ss_algs[] = { | |||
83 | } | 83 | } |
84 | } | 84 | } |
85 | }, | 85 | }, |
86 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | 86 | { .type = CRYPTO_ALG_TYPE_SKCIPHER, |
87 | .alg.crypto = { | 87 | .alg.crypto = { |
88 | .cra_name = "cbc(aes)", | 88 | .setkey = sun4i_ss_aes_setkey, |
89 | .cra_driver_name = "cbc-aes-sun4i-ss", | 89 | .encrypt = sun4i_ss_cbc_aes_encrypt, |
90 | .cra_priority = 300, | 90 | .decrypt = sun4i_ss_cbc_aes_decrypt, |
91 | .cra_blocksize = AES_BLOCK_SIZE, | 91 | .min_keysize = AES_MIN_KEY_SIZE, |
92 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, | 92 | .max_keysize = AES_MAX_KEY_SIZE, |
93 | .cra_ctxsize = sizeof(struct sun4i_tfm_ctx), | 93 | .ivsize = AES_BLOCK_SIZE, |
94 | .cra_module = THIS_MODULE, | 94 | .base = { |
95 | .cra_alignmask = 3, | 95 | .cra_name = "cbc(aes)", |
96 | .cra_type = &crypto_ablkcipher_type, | 96 | .cra_driver_name = "cbc-aes-sun4i-ss", |
97 | .cra_init = sun4i_ss_cipher_init, | 97 | .cra_priority = 300, |
98 | .cra_ablkcipher = { | 98 | .cra_blocksize = AES_BLOCK_SIZE, |
99 | .min_keysize = AES_MIN_KEY_SIZE, | 99 | .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | |
100 | .max_keysize = AES_MAX_KEY_SIZE, | 100 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
101 | .ivsize = AES_BLOCK_SIZE, | 101 | .cra_ctxsize = sizeof(struct sun4i_tfm_ctx), |
102 | .setkey = sun4i_ss_aes_setkey, | 102 | .cra_module = THIS_MODULE, |
103 | .encrypt = sun4i_ss_cbc_aes_encrypt, | 103 | .cra_alignmask = 3, |
104 | .decrypt = sun4i_ss_cbc_aes_decrypt, | 104 | .cra_init = sun4i_ss_cipher_init, |
105 | } | 105 | } |
106 | } | 106 | } |
107 | }, | 107 | }, |
108 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | 108 | { .type = CRYPTO_ALG_TYPE_SKCIPHER, |
109 | .alg.crypto = { | 109 | .alg.crypto = { |
110 | .cra_name = "ecb(aes)", | 110 | .setkey = sun4i_ss_aes_setkey, |
111 | .cra_driver_name = "ecb-aes-sun4i-ss", | 111 | .encrypt = sun4i_ss_ecb_aes_encrypt, |
112 | .cra_priority = 300, | 112 | .decrypt = sun4i_ss_ecb_aes_decrypt, |
113 | .cra_blocksize = AES_BLOCK_SIZE, | 113 | .min_keysize = AES_MIN_KEY_SIZE, |
114 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, | 114 | .max_keysize = AES_MAX_KEY_SIZE, |
115 | .cra_ctxsize = sizeof(struct sun4i_tfm_ctx), | 115 | .ivsize = AES_BLOCK_SIZE, |
116 | .cra_module = THIS_MODULE, | 116 | .base = { |
117 | .cra_alignmask = 3, | 117 | .cra_name = "ecb(aes)", |
118 | .cra_type = &crypto_ablkcipher_type, | 118 | .cra_driver_name = "ecb-aes-sun4i-ss", |
119 | .cra_init = sun4i_ss_cipher_init, | 119 | .cra_priority = 300, |
120 | .cra_ablkcipher = { | 120 | .cra_blocksize = AES_BLOCK_SIZE, |
121 | .min_keysize = AES_MIN_KEY_SIZE, | 121 | .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | |
122 | .max_keysize = AES_MAX_KEY_SIZE, | 122 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
123 | .ivsize = AES_BLOCK_SIZE, | 123 | .cra_ctxsize = sizeof(struct sun4i_tfm_ctx), |
124 | .setkey = sun4i_ss_aes_setkey, | 124 | .cra_module = THIS_MODULE, |
125 | .encrypt = sun4i_ss_ecb_aes_encrypt, | 125 | .cra_alignmask = 3, |
126 | .decrypt = sun4i_ss_ecb_aes_decrypt, | 126 | .cra_init = sun4i_ss_cipher_init, |
127 | } | 127 | } |
128 | } | 128 | } |
129 | }, | 129 | }, |
130 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | 130 | { .type = CRYPTO_ALG_TYPE_SKCIPHER, |
131 | .alg.crypto = { | 131 | .alg.crypto = { |
132 | .cra_name = "cbc(des)", | 132 | .setkey = sun4i_ss_des_setkey, |
133 | .cra_driver_name = "cbc-des-sun4i-ss", | 133 | .encrypt = sun4i_ss_cbc_des_encrypt, |
134 | .cra_priority = 300, | 134 | .decrypt = sun4i_ss_cbc_des_decrypt, |
135 | .cra_blocksize = DES_BLOCK_SIZE, | 135 | .min_keysize = DES_KEY_SIZE, |
136 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, | 136 | .max_keysize = DES_KEY_SIZE, |
137 | .cra_ctxsize = sizeof(struct sun4i_req_ctx), | 137 | .ivsize = DES_BLOCK_SIZE, |
138 | .cra_module = THIS_MODULE, | 138 | .base = { |
139 | .cra_alignmask = 3, | 139 | .cra_name = "cbc(des)", |
140 | .cra_type = &crypto_ablkcipher_type, | 140 | .cra_driver_name = "cbc-des-sun4i-ss", |
141 | .cra_init = sun4i_ss_cipher_init, | 141 | .cra_priority = 300, |
142 | .cra_u.ablkcipher = { | 142 | .cra_blocksize = DES_BLOCK_SIZE, |
143 | .min_keysize = DES_KEY_SIZE, | 143 | .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | |
144 | .max_keysize = DES_KEY_SIZE, | 144 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
145 | .ivsize = DES_BLOCK_SIZE, | 145 | .cra_ctxsize = sizeof(struct sun4i_req_ctx), |
146 | .setkey = sun4i_ss_des_setkey, | 146 | .cra_module = THIS_MODULE, |
147 | .encrypt = sun4i_ss_cbc_des_encrypt, | 147 | .cra_alignmask = 3, |
148 | .decrypt = sun4i_ss_cbc_des_decrypt, | 148 | .cra_init = sun4i_ss_cipher_init, |
149 | } | 149 | } |
150 | } | 150 | } |
151 | }, | 151 | }, |
152 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | 152 | { .type = CRYPTO_ALG_TYPE_SKCIPHER, |
153 | .alg.crypto = { | 153 | .alg.crypto = { |
154 | .cra_name = "ecb(des)", | 154 | .setkey = sun4i_ss_des_setkey, |
155 | .cra_driver_name = "ecb-des-sun4i-ss", | 155 | .encrypt = sun4i_ss_ecb_des_encrypt, |
156 | .cra_priority = 300, | 156 | .decrypt = sun4i_ss_ecb_des_decrypt, |
157 | .cra_blocksize = DES_BLOCK_SIZE, | 157 | .min_keysize = DES_KEY_SIZE, |
158 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, | 158 | .max_keysize = DES_KEY_SIZE, |
159 | .cra_ctxsize = sizeof(struct sun4i_req_ctx), | 159 | .base = { |
160 | .cra_module = THIS_MODULE, | 160 | .cra_name = "ecb(des)", |
161 | .cra_alignmask = 3, | 161 | .cra_driver_name = "ecb-des-sun4i-ss", |
162 | .cra_type = &crypto_ablkcipher_type, | 162 | .cra_priority = 300, |
163 | .cra_init = sun4i_ss_cipher_init, | 163 | .cra_blocksize = DES_BLOCK_SIZE, |
164 | .cra_u.ablkcipher = { | 164 | .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | |
165 | .min_keysize = DES_KEY_SIZE, | 165 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
166 | .max_keysize = DES_KEY_SIZE, | 166 | .cra_ctxsize = sizeof(struct sun4i_req_ctx), |
167 | .setkey = sun4i_ss_des_setkey, | 167 | .cra_module = THIS_MODULE, |
168 | .encrypt = sun4i_ss_ecb_des_encrypt, | 168 | .cra_alignmask = 3, |
169 | .decrypt = sun4i_ss_ecb_des_decrypt, | 169 | .cra_init = sun4i_ss_cipher_init, |
170 | } | 170 | } |
171 | } | 171 | } |
172 | }, | 172 | }, |
173 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | 173 | { .type = CRYPTO_ALG_TYPE_SKCIPHER, |
174 | .alg.crypto = { | 174 | .alg.crypto = { |
175 | .cra_name = "cbc(des3_ede)", | 175 | .setkey = sun4i_ss_des3_setkey, |
176 | .cra_driver_name = "cbc-des3-sun4i-ss", | 176 | .encrypt = sun4i_ss_cbc_des3_encrypt, |
177 | .cra_priority = 300, | 177 | .decrypt = sun4i_ss_cbc_des3_decrypt, |
178 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 178 | .min_keysize = DES3_EDE_KEY_SIZE, |
179 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, | 179 | .max_keysize = DES3_EDE_KEY_SIZE, |
180 | .cra_ctxsize = sizeof(struct sun4i_req_ctx), | 180 | .ivsize = DES3_EDE_BLOCK_SIZE, |
181 | .cra_module = THIS_MODULE, | 181 | .base = { |
182 | .cra_alignmask = 3, | 182 | .cra_name = "cbc(des3_ede)", |
183 | .cra_type = &crypto_ablkcipher_type, | 183 | .cra_driver_name = "cbc-des3-sun4i-ss", |
184 | .cra_init = sun4i_ss_cipher_init, | 184 | .cra_priority = 300, |
185 | .cra_u.ablkcipher = { | 185 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
186 | .min_keysize = DES3_EDE_KEY_SIZE, | 186 | .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | |
187 | .max_keysize = DES3_EDE_KEY_SIZE, | 187 | CRYPTO_ALG_KERN_DRIVER_ONLY, |
188 | .ivsize = DES3_EDE_BLOCK_SIZE, | 188 | .cra_ctxsize = sizeof(struct sun4i_req_ctx), |
189 | .setkey = sun4i_ss_des3_setkey, | 189 | .cra_module = THIS_MODULE, |
190 | .encrypt = sun4i_ss_cbc_des3_encrypt, | 190 | .cra_alignmask = 3, |
191 | .decrypt = sun4i_ss_cbc_des3_decrypt, | 191 | .cra_init = sun4i_ss_cipher_init, |
192 | } | 192 | } |
193 | } | 193 | } |
194 | }, | 194 | }, |
195 | { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, | 195 | { .type = CRYPTO_ALG_TYPE_SKCIPHER, |
196 | .alg.crypto = { | 196 | .alg.crypto = { |
197 | .cra_name = "ecb(des3_ede)", | 197 | .setkey = sun4i_ss_des3_setkey, |
198 | .cra_driver_name = "ecb-des3-sun4i-ss", | 198 | .encrypt = sun4i_ss_ecb_des3_encrypt, |
199 | .cra_priority = 300, | 199 | .decrypt = sun4i_ss_ecb_des3_decrypt, |
200 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | 200 | .min_keysize = DES3_EDE_KEY_SIZE, |
201 | .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, | 201 | .max_keysize = DES3_EDE_KEY_SIZE, |
202 | .cra_ctxsize = sizeof(struct sun4i_req_ctx), | 202 | .ivsize = DES3_EDE_BLOCK_SIZE, |
203 | .cra_module = THIS_MODULE, | 203 | .base = { |
204 | .cra_alignmask = 3, | 204 | .cra_name = "ecb(des3_ede)", |
205 | .cra_type = &crypto_ablkcipher_type, | 205 | .cra_driver_name = "ecb-des3-sun4i-ss", |
206 | .cra_init = sun4i_ss_cipher_init, | 206 | .cra_priority = 300, |
207 | .cra_u.ablkcipher = { | 207 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, |
208 | .min_keysize = DES3_EDE_KEY_SIZE, | 208 | .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER, |
209 | .max_keysize = DES3_EDE_KEY_SIZE, | 209 | .cra_ctxsize = sizeof(struct sun4i_req_ctx), |
210 | .ivsize = DES3_EDE_BLOCK_SIZE, | 210 | .cra_module = THIS_MODULE, |
211 | .setkey = sun4i_ss_des3_setkey, | 211 | .cra_alignmask = 3, |
212 | .encrypt = sun4i_ss_ecb_des3_encrypt, | 212 | .cra_init = sun4i_ss_cipher_init, |
213 | .decrypt = sun4i_ss_ecb_des3_decrypt, | ||
214 | } | 213 | } |
215 | } | 214 | } |
216 | }, | 215 | }, |
@@ -266,12 +265,12 @@ static int sun4i_ss_probe(struct platform_device *pdev) | |||
266 | 265 | ||
267 | /* Enable both clocks */ | 266 | /* Enable both clocks */ |
268 | err = clk_prepare_enable(ss->busclk); | 267 | err = clk_prepare_enable(ss->busclk); |
269 | if (err != 0) { | 268 | if (err) { |
270 | dev_err(&pdev->dev, "Cannot prepare_enable busclk\n"); | 269 | dev_err(&pdev->dev, "Cannot prepare_enable busclk\n"); |
271 | return err; | 270 | return err; |
272 | } | 271 | } |
273 | err = clk_prepare_enable(ss->ssclk); | 272 | err = clk_prepare_enable(ss->ssclk); |
274 | if (err != 0) { | 273 | if (err) { |
275 | dev_err(&pdev->dev, "Cannot prepare_enable ssclk\n"); | 274 | dev_err(&pdev->dev, "Cannot prepare_enable ssclk\n"); |
276 | goto error_ssclk; | 275 | goto error_ssclk; |
277 | } | 276 | } |
@@ -281,7 +280,7 @@ static int sun4i_ss_probe(struct platform_device *pdev) | |||
281 | * Try to set the clock to the maximum allowed | 280 | * Try to set the clock to the maximum allowed |
282 | */ | 281 | */ |
283 | err = clk_set_rate(ss->ssclk, cr_mod); | 282 | err = clk_set_rate(ss->ssclk, cr_mod); |
284 | if (err != 0) { | 283 | if (err) { |
285 | dev_err(&pdev->dev, "Cannot set clock rate to ssclk\n"); | 284 | dev_err(&pdev->dev, "Cannot set clock rate to ssclk\n"); |
286 | goto error_clk; | 285 | goto error_clk; |
287 | } | 286 | } |
@@ -340,17 +339,17 @@ static int sun4i_ss_probe(struct platform_device *pdev) | |||
340 | for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { | 339 | for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { |
341 | ss_algs[i].ss = ss; | 340 | ss_algs[i].ss = ss; |
342 | switch (ss_algs[i].type) { | 341 | switch (ss_algs[i].type) { |
343 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | 342 | case CRYPTO_ALG_TYPE_SKCIPHER: |
344 | err = crypto_register_alg(&ss_algs[i].alg.crypto); | 343 | err = crypto_register_skcipher(&ss_algs[i].alg.crypto); |
345 | if (err != 0) { | 344 | if (err) { |
346 | dev_err(ss->dev, "Fail to register %s\n", | 345 | dev_err(ss->dev, "Fail to register %s\n", |
347 | ss_algs[i].alg.crypto.cra_name); | 346 | ss_algs[i].alg.crypto.base.cra_name); |
348 | goto error_alg; | 347 | goto error_alg; |
349 | } | 348 | } |
350 | break; | 349 | break; |
351 | case CRYPTO_ALG_TYPE_AHASH: | 350 | case CRYPTO_ALG_TYPE_AHASH: |
352 | err = crypto_register_ahash(&ss_algs[i].alg.hash); | 351 | err = crypto_register_ahash(&ss_algs[i].alg.hash); |
353 | if (err != 0) { | 352 | if (err) { |
354 | dev_err(ss->dev, "Fail to register %s\n", | 353 | dev_err(ss->dev, "Fail to register %s\n", |
355 | ss_algs[i].alg.hash.halg.base.cra_name); | 354 | ss_algs[i].alg.hash.halg.base.cra_name); |
356 | goto error_alg; | 355 | goto error_alg; |
@@ -364,8 +363,8 @@ error_alg: | |||
364 | i--; | 363 | i--; |
365 | for (; i >= 0; i--) { | 364 | for (; i >= 0; i--) { |
366 | switch (ss_algs[i].type) { | 365 | switch (ss_algs[i].type) { |
367 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | 366 | case CRYPTO_ALG_TYPE_SKCIPHER: |
368 | crypto_unregister_alg(&ss_algs[i].alg.crypto); | 367 | crypto_unregister_skcipher(&ss_algs[i].alg.crypto); |
369 | break; | 368 | break; |
370 | case CRYPTO_ALG_TYPE_AHASH: | 369 | case CRYPTO_ALG_TYPE_AHASH: |
371 | crypto_unregister_ahash(&ss_algs[i].alg.hash); | 370 | crypto_unregister_ahash(&ss_algs[i].alg.hash); |
@@ -388,8 +387,8 @@ static int sun4i_ss_remove(struct platform_device *pdev) | |||
388 | 387 | ||
389 | for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { | 388 | for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { |
390 | switch (ss_algs[i].type) { | 389 | switch (ss_algs[i].type) { |
391 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | 390 | case CRYPTO_ALG_TYPE_SKCIPHER: |
392 | crypto_unregister_alg(&ss_algs[i].alg.crypto); | 391 | crypto_unregister_skcipher(&ss_algs[i].alg.crypto); |
393 | break; | 392 | break; |
394 | case CRYPTO_ALG_TYPE_AHASH: | 393 | case CRYPTO_ALG_TYPE_AHASH: |
395 | crypto_unregister_ahash(&ss_algs[i].alg.hash); | 394 | crypto_unregister_ahash(&ss_algs[i].alg.hash); |
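Once registered through crypto_register_skcipher(), these algorithms are reached through the skcipher API rather than the removed ablkcipher one. A minimal, hedged consumer sketch follows; my_done(), key, iv, buf and len are placeholders, error handling is trimmed, and with mask 0 the core simply picks the highest-priority "cbc(des3_ede)" implementation, which may or may not be the driver above:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int example_encrypt(const u8 *key, u8 *iv, void *buf, unsigned int len)
{
	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(des3_ede)", 0, 0);
	struct skcipher_request *req;
	struct scatterlist sg;
	int err;

	crypto_skcipher_setkey(tfm, key, DES3_EDE_KEY_SIZE);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, my_done, NULL);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	err = crypto_skcipher_encrypt(req);	/* may return -EINPROGRESS for async hw */

	return err;
}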
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c index 0de2f62d51ff..a4b5ff2b72f8 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c | |||
@@ -60,7 +60,7 @@ int sun4i_hash_export_md5(struct ahash_request *areq, void *out) | |||
60 | 60 | ||
61 | memcpy(octx->block, op->buf, op->len); | 61 | memcpy(octx->block, op->buf, op->len); |
62 | 62 | ||
63 | if (op->byte_count > 0) { | 63 | if (op->byte_count) { |
64 | for (i = 0; i < 4; i++) | 64 | for (i = 0; i < 4; i++) |
65 | octx->hash[i] = op->hash[i]; | 65 | octx->hash[i] = op->hash[i]; |
66 | } else { | 66 | } else { |
@@ -102,7 +102,7 @@ int sun4i_hash_export_sha1(struct ahash_request *areq, void *out) | |||
102 | 102 | ||
103 | memcpy(octx->buffer, op->buf, op->len); | 103 | memcpy(octx->buffer, op->buf, op->len); |
104 | 104 | ||
105 | if (op->byte_count > 0) { | 105 | if (op->byte_count) { |
106 | for (i = 0; i < 5; i++) | 106 | for (i = 0; i < 5; i++) |
107 | octx->state[i] = op->hash[i]; | 107 | octx->state[i] = op->hash[i]; |
108 | } else { | 108 | } else { |
@@ -167,44 +167,34 @@ int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in) | |||
167 | */ | 167 | */ |
168 | static int sun4i_hash(struct ahash_request *areq) | 168 | static int sun4i_hash(struct ahash_request *areq) |
169 | { | 169 | { |
170 | u32 v, ivmode = 0; | ||
171 | unsigned int i = 0; | ||
172 | /* | 170 | /* |
173 | * i is the total bytes read from SGs, to be compared to areq->nbytes | 171 | * i is the total bytes read from SGs, to be compared to areq->nbytes |
174 | * i is important because we cannot rely on SG length since the sum of | 172 | * i is important because we cannot rely on SG length since the sum of |
175 | * SG->length could be greater than areq->nbytes | 173 | * SG->length could be greater than areq->nbytes |
174 | * | ||
175 | * end is the position when we need to stop writing to the device, | ||
176 | * to be compared to i | ||
177 | * | ||
178 | * in_i: advancement in the current SG | ||
176 | */ | 179 | */ |
177 | 180 | unsigned int i = 0, end, fill, min_fill, nwait, nbw = 0, j = 0, todo; | |
181 | unsigned int in_i = 0; | ||
182 | u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, wb = 0, v, ivmode = 0; | ||
178 | struct sun4i_req_ctx *op = ahash_request_ctx(areq); | 183 | struct sun4i_req_ctx *op = ahash_request_ctx(areq); |
179 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | 184 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); |
180 | struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); | 185 | struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); |
181 | struct sun4i_ss_ctx *ss = tfmctx->ss; | 186 | struct sun4i_ss_ctx *ss = tfmctx->ss; |
182 | unsigned int in_i = 0; /* advancement in the current SG */ | 187 | struct scatterlist *in_sg = areq->src; |
183 | unsigned int end; | 188 | struct sg_mapping_iter mi; |
184 | /* | ||
185 | * end is the position when we need to stop writing to the device, | ||
186 | * to be compared to i | ||
187 | */ | ||
188 | int in_r, err = 0; | 189 | int in_r, err = 0; |
189 | unsigned int todo; | ||
190 | u32 spaces, rx_cnt = SS_RX_DEFAULT; | ||
191 | size_t copied = 0; | 190 | size_t copied = 0; |
192 | struct sg_mapping_iter mi; | ||
193 | unsigned int j = 0; | ||
194 | int zeros; | ||
195 | unsigned int index, padlen; | ||
196 | __be64 bits; | ||
197 | u32 bf[32]; | ||
198 | u32 wb = 0; | ||
199 | unsigned int nwait, nbw = 0; | ||
200 | struct scatterlist *in_sg = areq->src; | ||
201 | 191 | ||
202 | dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x", | 192 | dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x", |
203 | __func__, crypto_tfm_alg_name(areq->base.tfm), | 193 | __func__, crypto_tfm_alg_name(areq->base.tfm), |
204 | op->byte_count, areq->nbytes, op->mode, | 194 | op->byte_count, areq->nbytes, op->mode, |
205 | op->len, op->hash[0]); | 195 | op->len, op->hash[0]); |
206 | 196 | ||
207 | if (unlikely(areq->nbytes == 0) && (op->flags & SS_HASH_FINAL) == 0) | 197 | if (unlikely(!areq->nbytes) && !(op->flags & SS_HASH_FINAL)) |
208 | return 0; | 198 | return 0; |
209 | 199 | ||
210 | /* protect against overflow */ | 200 | /* protect against overflow */ |
@@ -213,7 +203,7 @@ static int sun4i_hash(struct ahash_request *areq) | |||
213 | return -EINVAL; | 203 | return -EINVAL; |
214 | } | 204 | } |
215 | 205 | ||
216 | if (op->len + areq->nbytes < 64 && (op->flags & SS_HASH_FINAL) == 0) { | 206 | if (op->len + areq->nbytes < 64 && !(op->flags & SS_HASH_FINAL)) { |
217 | /* linearize data to op->buf */ | 207 | /* linearize data to op->buf */ |
218 | copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), | 208 | copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), |
219 | op->buf + op->len, areq->nbytes, 0); | 209 | op->buf + op->len, areq->nbytes, 0); |
@@ -227,7 +217,7 @@ static int sun4i_hash(struct ahash_request *areq) | |||
227 | * if some data have been processed before, | 217 | * if some data have been processed before, |
228 | * we need to restore the partial hash state | 218 | * we need to restore the partial hash state |
229 | */ | 219 | */ |
230 | if (op->byte_count > 0) { | 220 | if (op->byte_count) { |
231 | ivmode = SS_IV_ARBITRARY; | 221 | ivmode = SS_IV_ARBITRARY; |
232 | for (i = 0; i < 5; i++) | 222 | for (i = 0; i < 5; i++) |
233 | writel(op->hash[i], ss->base + SS_IV0 + i * 4); | 223 | writel(op->hash[i], ss->base + SS_IV0 + i * 4); |
@@ -235,11 +225,11 @@ static int sun4i_hash(struct ahash_request *areq) | |||
235 | /* Enable the device */ | 225 | /* Enable the device */ |
236 | writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL); | 226 | writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL); |
237 | 227 | ||
238 | if ((op->flags & SS_HASH_UPDATE) == 0) | 228 | if (!(op->flags & SS_HASH_UPDATE)) |
239 | goto hash_final; | 229 | goto hash_final; |
240 | 230 | ||
241 | /* start of handling data */ | 231 | /* start of handling data */ |
242 | if ((op->flags & SS_HASH_FINAL) == 0) { | 232 | if (!(op->flags & SS_HASH_FINAL)) { |
243 | end = ((areq->nbytes + op->len) / 64) * 64 - op->len; | 233 | end = ((areq->nbytes + op->len) / 64) * 64 - op->len; |
244 | 234 | ||
245 | if (end > areq->nbytes || areq->nbytes - end > 63) { | 235 | if (end > areq->nbytes || areq->nbytes - end > 63) { |
@@ -253,14 +243,14 @@ static int sun4i_hash(struct ahash_request *areq) | |||
253 | end = ((areq->nbytes + op->len) / 4) * 4 - op->len; | 243 | end = ((areq->nbytes + op->len) / 4) * 4 - op->len; |
254 | } | 244 | } |
255 | 245 | ||
256 | /* TODO if SGlen % 4 and op->len == 0 then DMA */ | 246 | /* TODO if SGlen % 4 and !op->len then DMA */ |
257 | i = 1; | 247 | i = 1; |
258 | while (in_sg && i == 1) { | 248 | while (in_sg && i == 1) { |
259 | if ((in_sg->length % 4) != 0) | 249 | if (in_sg->length % 4) |
260 | i = 0; | 250 | i = 0; |
261 | in_sg = sg_next(in_sg); | 251 | in_sg = sg_next(in_sg); |
262 | } | 252 | } |
263 | if (i == 1 && op->len == 0) | 253 | if (i == 1 && !op->len && areq->nbytes) |
264 | dev_dbg(ss->dev, "We can DMA\n"); | 254 | dev_dbg(ss->dev, "We can DMA\n"); |
265 | 255 | ||
266 | i = 0; | 256 | i = 0; |
@@ -275,7 +265,7 @@ static int sun4i_hash(struct ahash_request *areq) | |||
275 | * - the buffer is already used | 265 | * - the buffer is already used |
276 | * - the SG does not have enough byte remaining ( < 4) | 266 | * - the SG does not have enough byte remaining ( < 4) |
277 | */ | 267 | */ |
278 | if (op->len > 0 || (mi.length - in_i) < 4) { | 268 | if (op->len || (mi.length - in_i) < 4) { |
279 | /* | 269 | /* |
280 | * if we have entered here we have two reason to stop | 270 | * if we have entered here we have two reason to stop |
281 | * - the buffer is full | 271 | * - the buffer is full |
@@ -294,7 +284,7 @@ static int sun4i_hash(struct ahash_request *areq) | |||
294 | in_i = 0; | 284 | in_i = 0; |
295 | } | 285 | } |
296 | } | 286 | } |
297 | if (op->len > 3 && (op->len % 4) == 0) { | 287 | if (op->len > 3 && !(op->len % 4)) { |
298 | /* write buf to the device */ | 288 | /* write buf to the device */ |
299 | writesl(ss->base + SS_RXFIFO, op->buf, | 289 | writesl(ss->base + SS_RXFIFO, op->buf, |
300 | op->len / 4); | 290 | op->len / 4); |
@@ -313,7 +303,7 @@ static int sun4i_hash(struct ahash_request *areq) | |||
313 | i += todo * 4; | 303 | i += todo * 4; |
314 | in_i += todo * 4; | 304 | in_i += todo * 4; |
315 | rx_cnt -= todo; | 305 | rx_cnt -= todo; |
316 | if (rx_cnt == 0) { | 306 | if (!rx_cnt) { |
317 | spaces = readl(ss->base + SS_FCSR); | 307 | spaces = readl(ss->base + SS_FCSR); |
318 | rx_cnt = SS_RXFIFO_SPACES(spaces); | 308 | rx_cnt = SS_RXFIFO_SPACES(spaces); |
319 | } | 309 | } |
@@ -351,7 +341,7 @@ static int sun4i_hash(struct ahash_request *areq) | |||
351 | * Now if we have the flag final go to finalize part | 341 | * Now if we have the flag final go to finalize part |
352 | * If not, store the partial hash | 342 | * If not, store the partial hash |
353 | */ | 343 | */ |
354 | if ((op->flags & SS_HASH_FINAL) > 0) | 344 | if (op->flags & SS_HASH_FINAL) |
355 | goto hash_final; | 345 | goto hash_final; |
356 | 346 | ||
357 | writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL); | 347 | writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL); |
@@ -359,7 +349,7 @@ static int sun4i_hash(struct ahash_request *areq) | |||
359 | do { | 349 | do { |
360 | v = readl(ss->base + SS_CTL); | 350 | v = readl(ss->base + SS_CTL); |
361 | i++; | 351 | i++; |
362 | } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0); | 352 | } while (i < SS_TIMEOUT && (v & SS_DATA_END)); |
363 | if (unlikely(i >= SS_TIMEOUT)) { | 353 | if (unlikely(i >= SS_TIMEOUT)) { |
364 | dev_err_ratelimited(ss->dev, | 354 | dev_err_ratelimited(ss->dev, |
365 | "ERROR: hash end timeout %d>%d ctl=%x len=%u\n", | 355 | "ERROR: hash end timeout %d>%d ctl=%x len=%u\n", |
@@ -368,6 +358,15 @@ static int sun4i_hash(struct ahash_request *areq) | |||
368 | goto release_ss; | 358 | goto release_ss; |
369 | } | 359 | } |
370 | 360 | ||
361 | /* | ||
362 | * The datasheet isn't very clear about when to retrieve the digest. The | ||
363 | * bit SS_DATA_END is cleared when the engine has processed the data and | ||
364 | * when the digest is computed *but* it doesn't mean the digest is | ||
365 | * available in the digest registers. Hence the delay to be sure we can | ||
366 | * read it. | ||
367 | */ | ||
368 | ndelay(1); | ||
369 | |||
371 | for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) | 370 | for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) |
372 | op->hash[i] = readl(ss->base + SS_MD0 + i * 4); | 371 | op->hash[i] = readl(ss->base + SS_MD0 + i * 4); |
373 | 372 | ||
@@ -388,56 +387,50 @@ static int sun4i_hash(struct ahash_request *areq) | |||
388 | hash_final: | 387 | hash_final: |
389 | 388 | ||
390 | /* write the remaining words of the wait buffer */ | 389 | /* write the remaining words of the wait buffer */ |
391 | if (op->len > 0) { | 390 | if (op->len) { |
392 | nwait = op->len / 4; | 391 | nwait = op->len / 4; |
393 | if (nwait > 0) { | 392 | if (nwait) { |
394 | writesl(ss->base + SS_RXFIFO, op->buf, nwait); | 393 | writesl(ss->base + SS_RXFIFO, op->buf, nwait); |
395 | op->byte_count += 4 * nwait; | 394 | op->byte_count += 4 * nwait; |
396 | } | 395 | } |
396 | |||
397 | nbw = op->len - 4 * nwait; | 397 | nbw = op->len - 4 * nwait; |
398 | wb = *(u32 *)(op->buf + nwait * 4); | 398 | if (nbw) { |
399 | wb &= (0xFFFFFFFF >> (4 - nbw) * 8); | 399 | wb = *(u32 *)(op->buf + nwait * 4); |
400 | wb &= GENMASK((nbw * 8) - 1, 0); | ||
401 | |||
402 | op->byte_count += nbw; | ||
403 | } | ||
400 | } | 404 | } |
401 | 405 | ||
402 | /* write the remaining bytes of the nbw buffer */ | 406 | /* write the remaining bytes of the nbw buffer */ |
403 | if (nbw > 0) { | 407 | wb |= ((1 << 7) << (nbw * 8)); |
404 | wb |= ((1 << 7) << (nbw * 8)); | 408 | bf[j++] = wb; |
405 | bf[j++] = wb; | ||
406 | } else { | ||
407 | bf[j++] = 1 << 7; | ||
408 | } | ||
409 | 409 | ||
410 | /* | 410 | /* |
411 | * number of space to pad to obtain 64o minus 8(size) minus 4 (final 1) | 411 | * number of space to pad to obtain 64o minus 8(size) minus 4 (final 1) |
412 | * I take the operations from other MD5/SHA1 implementations | 412 | * I take the operations from other MD5/SHA1 implementations |
413 | */ | 413 | */ |
414 | 414 | ||
415 | /* we have already send 4 more byte of which nbw data */ | 415 | /* last block size */ |
416 | if (op->mode == SS_OP_MD5) { | 416 | fill = 64 - (op->byte_count % 64); |
417 | index = (op->byte_count + 4) & 0x3f; | 417 | min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32)); |
418 | op->byte_count += nbw; | 418 | |
419 | if (index > 56) | 419 | /* if we can't fill all data, jump to the next 64 block */ |
420 | zeros = (120 - index) / 4; | 420 | if (fill < min_fill) |
421 | else | 421 | fill += 64; |
422 | zeros = (56 - index) / 4; | ||
423 | } else { | ||
424 | op->byte_count += nbw; | ||
425 | index = op->byte_count & 0x3f; | ||
426 | padlen = (index < 56) ? (56 - index) : ((64 + 56) - index); | ||
427 | zeros = (padlen - 1) / 4; | ||
428 | } | ||
429 | 422 | ||
430 | memset(bf + j, 0, 4 * zeros); | 423 | j += (fill - min_fill) / sizeof(u32); |
431 | j += zeros; | ||
432 | 424 | ||
433 | /* write the length of data */ | 425 | /* write the length of data */ |
434 | if (op->mode == SS_OP_SHA1) { | 426 | if (op->mode == SS_OP_SHA1) { |
435 | bits = cpu_to_be64(op->byte_count << 3); | 427 | __be64 bits = cpu_to_be64(op->byte_count << 3); |
436 | bf[j++] = bits & 0xffffffff; | 428 | bf[j++] = lower_32_bits(bits); |
437 | bf[j++] = (bits >> 32) & 0xffffffff; | 429 | bf[j++] = upper_32_bits(bits); |
438 | } else { | 430 | } else { |
439 | bf[j++] = (op->byte_count << 3) & 0xffffffff; | 431 | __le64 bits = op->byte_count << 3; |
440 | bf[j++] = (op->byte_count >> 29) & 0xffffffff; | 432 | bf[j++] = lower_32_bits(bits); |
433 | bf[j++] = upper_32_bits(bits); | ||
441 | } | 434 | } |
442 | writesl(ss->base + SS_RXFIFO, bf, j); | 435 | writesl(ss->base + SS_RXFIFO, bf, j); |
443 | 436 | ||
@@ -453,7 +446,7 @@ hash_final: | |||
453 | do { | 446 | do { |
454 | v = readl(ss->base + SS_CTL); | 447 | v = readl(ss->base + SS_CTL); |
455 | i++; | 448 | i++; |
456 | } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0); | 449 | } while (i < SS_TIMEOUT && (v & SS_DATA_END)); |
457 | if (unlikely(i >= SS_TIMEOUT)) { | 450 | if (unlikely(i >= SS_TIMEOUT)) { |
458 | dev_err_ratelimited(ss->dev, | 451 | dev_err_ratelimited(ss->dev, |
459 | "ERROR: hash end timeout %d>%d ctl=%x len=%u\n", | 452 | "ERROR: hash end timeout %d>%d ctl=%x len=%u\n", |
@@ -462,6 +455,15 @@ hash_final: | |||
462 | goto release_ss; | 455 | goto release_ss; |
463 | } | 456 | } |
464 | 457 | ||
458 | /* | ||
459 | * The datasheet isn't very clear about when to retrieve the digest. The | ||
460 | * bit SS_DATA_END is cleared when the engine has processed the data and | ||
461 | * when the digest is computed *but* it doesn't mean the digest is | ||
462 | * available in the digest registers. Hence the delay to be sure we can | ||
463 | * read it. | ||
464 | */ | ||
465 | ndelay(1); | ||
466 | |||
465 | /* Get the hash from the device */ | 467 | /* Get the hash from the device */ |
466 | if (op->mode == SS_OP_SHA1) { | 468 | if (op->mode == SS_OP_SHA1) { |
467 | for (i = 0; i < 5; i++) { | 469 | for (i = 0; i < 5; i++) { |
@@ -513,7 +515,7 @@ int sun4i_hash_digest(struct ahash_request *areq) | |||
513 | struct sun4i_req_ctx *op = ahash_request_ctx(areq); | 515 | struct sun4i_req_ctx *op = ahash_request_ctx(areq); |
514 | 516 | ||
515 | err = sun4i_hash_init(areq); | 517 | err = sun4i_hash_init(areq); |
516 | if (err != 0) | 518 | if (err) |
517 | return err; | 519 | return err; |
518 | 520 | ||
519 | op->flags = SS_HASH_UPDATE | SS_HASH_FINAL; | 521 | op->flags = SS_HASH_UPDATE | SS_HASH_FINAL; |
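The reworked hash_final path above replaces the separate MD5/SHA-1 index arithmetic with a single fill/min_fill computation: byte_count already includes the nbw leftover bytes, the 0x80 terminator travels either inside the leftover word or in a word of its own, and the remaining space up to the 64-bit length is zero padding. A standalone C sanity check of that arithmetic (a sketch mirroring the hunk, not driver code):

#include <stdio.h>

static unsigned int pad_words(unsigned long long byte_count, unsigned int nbw)
{
	unsigned int fill = 64 - (byte_count % 64);	/* room left in the current block */
	unsigned int min_fill = 8 + (nbw ? 0 : 4);	/* 64-bit length, plus a lone 0x80 word */

	if (fill < min_fill)				/* not enough room: spill into the next block */
		fill += 64;
	return (fill - min_fill) / 4;			/* zero words between 0x80 and the length */
}

int main(void)
{
	/* e.g. 3 bytes hashed: one word carries data+0x80, then 13 zero words, then the length,
	 * for a 64-byte final block. */
	printf("%u\n", pad_words(3, 3));
	return 0;
}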
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h index f04c0f8cf026..a0e1efc1cb2a 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss.h +++ b/drivers/crypto/sunxi-ss/sun4i-ss.h | |||
@@ -24,9 +24,11 @@ | |||
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
26 | #include <crypto/md5.h> | 26 | #include <crypto/md5.h> |
27 | #include <crypto/skcipher.h> | ||
27 | #include <crypto/sha.h> | 28 | #include <crypto/sha.h> |
28 | #include <crypto/hash.h> | 29 | #include <crypto/hash.h> |
29 | #include <crypto/internal/hash.h> | 30 | #include <crypto/internal/hash.h> |
31 | #include <crypto/internal/skcipher.h> | ||
30 | #include <crypto/aes.h> | 32 | #include <crypto/aes.h> |
31 | #include <crypto/des.h> | 33 | #include <crypto/des.h> |
32 | #include <crypto/internal/rng.h> | 34 | #include <crypto/internal/rng.h> |
@@ -140,7 +142,7 @@ struct sun4i_ss_alg_template { | |||
140 | u32 type; | 142 | u32 type; |
141 | u32 mode; | 143 | u32 mode; |
142 | union { | 144 | union { |
143 | struct crypto_alg crypto; | 145 | struct skcipher_alg crypto; |
144 | struct ahash_alg hash; | 146 | struct ahash_alg hash; |
145 | } alg; | 147 | } alg; |
146 | struct sun4i_ss_ctx *ss; | 148 | struct sun4i_ss_ctx *ss; |
@@ -177,25 +179,25 @@ int sun4i_hash_import_md5(struct ahash_request *areq, const void *in); | |||
177 | int sun4i_hash_export_sha1(struct ahash_request *areq, void *out); | 179 | int sun4i_hash_export_sha1(struct ahash_request *areq, void *out); |
178 | int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in); | 180 | int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in); |
179 | 181 | ||
180 | int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq); | 182 | int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq); |
181 | int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq); | 183 | int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq); |
182 | int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq); | 184 | int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq); |
183 | int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq); | 185 | int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq); |
184 | 186 | ||
185 | int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq); | 187 | int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq); |
186 | int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq); | 188 | int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq); |
187 | int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq); | 189 | int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq); |
188 | int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq); | 190 | int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq); |
189 | 191 | ||
190 | int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq); | 192 | int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq); |
191 | int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq); | 193 | int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq); |
192 | int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq); | 194 | int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq); |
193 | int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq); | 195 | int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq); |
194 | 196 | ||
195 | int sun4i_ss_cipher_init(struct crypto_tfm *tfm); | 197 | int sun4i_ss_cipher_init(struct crypto_tfm *tfm); |
196 | int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | 198 | int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, |
197 | unsigned int keylen); | 199 | unsigned int keylen); |
198 | int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | 200 | int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key, |
199 | unsigned int keylen); | 201 | unsigned int keylen); |
200 | int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key, | 202 | int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, |
201 | unsigned int keylen); | 203 | unsigned int keylen); |
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 0bba6a19d36a..79791c690858 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -816,7 +816,7 @@ static void talitos_unregister_rng(struct device *dev) | |||
816 | * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP | 816 | * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP |
817 | */ | 817 | */ |
818 | #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1) | 818 | #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1) |
819 | #define TALITOS_MAX_KEY_SIZE 96 | 819 | #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE) |
820 | #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ | 820 | #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ |
821 | 821 | ||
822 | struct talitos_ctx { | 822 | struct talitos_ctx { |
@@ -1495,6 +1495,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, | |||
1495 | { | 1495 | { |
1496 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); | 1496 | struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); |
1497 | 1497 | ||
1498 | if (keylen > TALITOS_MAX_KEY_SIZE) { | ||
1499 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | ||
1500 | return -EINVAL; | ||
1501 | } | ||
1502 | |||
1498 | memcpy(&ctx->key, key, keylen); | 1503 | memcpy(&ctx->key, key, keylen); |
1499 | ctx->keylen = keylen; | 1504 | ctx->keylen = keylen; |
1500 | 1505 | ||
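The talitos change above is a bounds fix: ctx->key is a fixed TALITOS_MAX_KEY_SIZE buffer, and the old setkey copied whatever keylen the caller supplied into it. The added check is the standard guard for a fixed-size key store; a sketch with a made-up context struct, not talitos code:

#define EXAMPLE_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)

struct example_ctx {
	u8 key[EXAMPLE_MAX_KEY_SIZE];
	unsigned int keylen;
};

static int example_setkey(struct example_ctx *ctx, const u8 *key,
			  unsigned int keylen)
{
	if (keylen > EXAMPLE_MAX_KEY_SIZE)	/* without this, memcpy() overruns ctx->key */
		return -EINVAL;
	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	return 0;
}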
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c index 022c7ab7351a..96072b9b55c4 100644 --- a/drivers/crypto/vmx/aes.c +++ b/drivers/crypto/vmx/aes.c | |||
@@ -37,15 +37,10 @@ struct p8_aes_ctx { | |||
37 | 37 | ||
38 | static int p8_aes_init(struct crypto_tfm *tfm) | 38 | static int p8_aes_init(struct crypto_tfm *tfm) |
39 | { | 39 | { |
40 | const char *alg; | 40 | const char *alg = crypto_tfm_alg_name(tfm); |
41 | struct crypto_cipher *fallback; | 41 | struct crypto_cipher *fallback; |
42 | struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); | 42 | struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); |
43 | 43 | ||
44 | if (!(alg = crypto_tfm_alg_name(tfm))) { | ||
45 | printk(KERN_ERR "Failed to get algorithm name.\n"); | ||
46 | return -ENOENT; | ||
47 | } | ||
48 | |||
49 | fallback = crypto_alloc_cipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); | 44 | fallback = crypto_alloc_cipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); |
50 | if (IS_ERR(fallback)) { | 45 | if (IS_ERR(fallback)) { |
51 | printk(KERN_ERR | 46 | printk(KERN_ERR |
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 72a26eb4e954..7394d35d5936 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c | |||
@@ -39,15 +39,10 @@ struct p8_aes_cbc_ctx { | |||
39 | 39 | ||
40 | static int p8_aes_cbc_init(struct crypto_tfm *tfm) | 40 | static int p8_aes_cbc_init(struct crypto_tfm *tfm) |
41 | { | 41 | { |
42 | const char *alg; | 42 | const char *alg = crypto_tfm_alg_name(tfm); |
43 | struct crypto_skcipher *fallback; | 43 | struct crypto_skcipher *fallback; |
44 | struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); | 44 | struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); |
45 | 45 | ||
46 | if (!(alg = crypto_tfm_alg_name(tfm))) { | ||
47 | printk(KERN_ERR "Failed to get algorithm name.\n"); | ||
48 | return -ENOENT; | ||
49 | } | ||
50 | |||
51 | fallback = crypto_alloc_skcipher(alg, 0, | 46 | fallback = crypto_alloc_skcipher(alg, 0, |
52 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); | 47 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); |
53 | 48 | ||
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index 7cf6d31c1123..9c26d9e8dbea 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c | |||
@@ -36,15 +36,10 @@ struct p8_aes_ctr_ctx { | |||
36 | 36 | ||
37 | static int p8_aes_ctr_init(struct crypto_tfm *tfm) | 37 | static int p8_aes_ctr_init(struct crypto_tfm *tfm) |
38 | { | 38 | { |
39 | const char *alg; | 39 | const char *alg = crypto_tfm_alg_name(tfm); |
40 | struct crypto_blkcipher *fallback; | 40 | struct crypto_blkcipher *fallback; |
41 | struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); | 41 | struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); |
42 | 42 | ||
43 | if (!(alg = crypto_tfm_alg_name(tfm))) { | ||
44 | printk(KERN_ERR "Failed to get algorithm name.\n"); | ||
45 | return -ENOENT; | ||
46 | } | ||
47 | |||
48 | fallback = | 43 | fallback = |
49 | crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); | 44 | crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); |
50 | if (IS_ERR(fallback)) { | 45 | if (IS_ERR(fallback)) { |
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index 6adc9290557a..8cd6e62e4c90 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c | |||
@@ -41,15 +41,10 @@ struct p8_aes_xts_ctx { | |||
41 | 41 | ||
42 | static int p8_aes_xts_init(struct crypto_tfm *tfm) | 42 | static int p8_aes_xts_init(struct crypto_tfm *tfm) |
43 | { | 43 | { |
44 | const char *alg; | 44 | const char *alg = crypto_tfm_alg_name(tfm); |
45 | struct crypto_skcipher *fallback; | 45 | struct crypto_skcipher *fallback; |
46 | struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm); | 46 | struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm); |
47 | 47 | ||
48 | if (!(alg = crypto_tfm_alg_name(tfm))) { | ||
49 | printk(KERN_ERR "Failed to get algorithm name.\n"); | ||
50 | return -ENOENT; | ||
51 | } | ||
52 | |||
53 | fallback = crypto_alloc_skcipher(alg, 0, | 48 | fallback = crypto_alloc_skcipher(alg, 0, |
54 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); | 49 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); |
55 | if (IS_ERR(fallback)) { | 50 | if (IS_ERR(fallback)) { |
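The four vmx hunks above are the same cleanup: crypto_tfm_alg_name() returns the cra_name string embedded in the tfm's algorithm, so it cannot be NULL and checking it was dead code. What remains is the usual fallback-allocation pattern, sketched here with a hypothetical p8_example_ctx standing in for the per-mode context structs:

static int p8_example_init(struct crypto_tfm *tfm)
{
	const char *alg = crypto_tfm_alg_name(tfm);	/* always valid, no NULL check needed */
	struct crypto_skcipher *fallback;
	struct p8_example_ctx *ctx = crypto_tfm_ctx(tfm);

	fallback = crypto_alloc_skcipher(alg, 0,
					 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback))
		return PTR_ERR(fallback);	/* IS_ERR(), not NULL, signals failure here */

	ctx->fallback = fallback;
	return 0;
}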
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index e88c1808e46f..b32eb8c7c1e3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | |||
@@ -868,6 +868,7 @@ struct adapter { | |||
868 | 868 | ||
869 | /* TC u32 offload */ | 869 | /* TC u32 offload */ |
870 | struct cxgb4_tc_u32_table *tc_u32; | 870 | struct cxgb4_tc_u32_table *tc_u32; |
871 | struct chcr_stats_debug chcr_stats; | ||
871 | }; | 872 | }; |
872 | 873 | ||
873 | /* Support for "sched-class" command to allow a TX Scheduling Class to be | 874 | /* Support for "sched-class" command to allow a TX Scheduling Class to be |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 1fa34b009891..77a59d7db7f5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | |||
@@ -3069,6 +3069,40 @@ static const struct file_operations meminfo_fops = { | |||
3069 | .llseek = seq_lseek, | 3069 | .llseek = seq_lseek, |
3070 | .release = single_release, | 3070 | .release = single_release, |
3071 | }; | 3071 | }; |
3072 | |||
3073 | static int chcr_show(struct seq_file *seq, void *v) | ||
3074 | { | ||
3075 | struct adapter *adap = seq->private; | ||
3076 | |||
3077 | seq_puts(seq, "Chelsio Crypto Accelerator Stats \n"); | ||
3078 | seq_printf(seq, "Cipher Ops: %10u \n", | ||
3079 | atomic_read(&adap->chcr_stats.cipher_rqst)); | ||
3080 | seq_printf(seq, "Digest Ops: %10u \n", | ||
3081 | atomic_read(&adap->chcr_stats.digest_rqst)); | ||
3082 | seq_printf(seq, "Aead Ops: %10u \n", | ||
3083 | atomic_read(&adap->chcr_stats.aead_rqst)); | ||
3084 | seq_printf(seq, "Completion: %10u \n", | ||
3085 | atomic_read(&adap->chcr_stats.complete)); | ||
3086 | seq_printf(seq, "Error: %10u \n", | ||
3087 | atomic_read(&adap->chcr_stats.error)); | ||
3088 | seq_printf(seq, "Fallback: %10u \n", | ||
3089 | atomic_read(&adap->chcr_stats.fallback)); | ||
3090 | return 0; | ||
3091 | } | ||
3092 | |||
3093 | |||
3094 | static int chcr_stats_open(struct inode *inode, struct file *file) | ||
3095 | { | ||
3096 | return single_open(file, chcr_show, inode->i_private); | ||
3097 | } | ||
3098 | |||
3099 | static const struct file_operations chcr_stats_debugfs_fops = { | ||
3100 | .owner = THIS_MODULE, | ||
3101 | .open = chcr_stats_open, | ||
3102 | .read = seq_read, | ||
3103 | .llseek = seq_lseek, | ||
3104 | .release = single_release, | ||
3105 | }; | ||
3072 | /* Add an array of Debug FS files. | 3106 | /* Add an array of Debug FS files. |
3073 | */ | 3107 | */ |
3074 | void add_debugfs_files(struct adapter *adap, | 3108 | void add_debugfs_files(struct adapter *adap, |
@@ -3143,6 +3177,7 @@ int t4_setup_debugfs(struct adapter *adap) | |||
3143 | { "tids", &tid_info_debugfs_fops, S_IRUSR, 0}, | 3177 | { "tids", &tid_info_debugfs_fops, S_IRUSR, 0}, |
3144 | { "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 }, | 3178 | { "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 }, |
3145 | { "meminfo", &meminfo_fops, S_IRUSR, 0 }, | 3179 | { "meminfo", &meminfo_fops, S_IRUSR, 0 }, |
3180 | { "crypto", &chcr_stats_debugfs_fops, S_IRUSR, 0 }, | ||
3146 | }; | 3181 | }; |
3147 | 3182 | ||
3148 | /* Debug FS nodes common to all T5 and later adapters. | 3183 | /* Debug FS nodes common to all T5 and later adapters. |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index d0868c2320da..ec53fe9dec68 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | |||
@@ -642,6 +642,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) | |||
642 | lld->sge_ingpadboundary = adap->sge.fl_align; | 642 | lld->sge_ingpadboundary = adap->sge.fl_align; |
643 | lld->sge_egrstatuspagesize = adap->sge.stat_len; | 643 | lld->sge_egrstatuspagesize = adap->sge.stat_len; |
644 | lld->sge_pktshift = adap->sge.pktshift; | 644 | lld->sge_pktshift = adap->sge.pktshift; |
645 | lld->ulp_crypto = adap->params.crypto; | ||
645 | lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN; | 646 | lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN; |
646 | lld->max_ordird_qp = adap->params.max_ordird_qp; | 647 | lld->max_ordird_qp = adap->params.max_ordird_qp; |
647 | lld->max_ird_adapter = adap->params.max_ird_adapter; | 648 | lld->max_ird_adapter = adap->params.max_ird_adapter; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 6e74040af49a..8f1c874cfe21 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | |||
@@ -275,6 +275,15 @@ struct cxgb4_virt_res { /* virtualized HW resources */ | |||
275 | unsigned int ncrypto_fc; | 275 | unsigned int ncrypto_fc; |
276 | }; | 276 | }; |
277 | 277 | ||
278 | struct chcr_stats_debug { | ||
279 | atomic_t cipher_rqst; | ||
280 | atomic_t digest_rqst; | ||
281 | atomic_t aead_rqst; | ||
282 | atomic_t complete; | ||
283 | atomic_t error; | ||
284 | atomic_t fallback; | ||
285 | }; | ||
286 | |||
278 | #define OCQ_WIN_OFFSET(pdev, vres) \ | 287 | #define OCQ_WIN_OFFSET(pdev, vres) \ |
279 | (pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size)) | 288 | (pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size)) |
280 | 289 | ||
@@ -322,6 +331,7 @@ struct cxgb4_lld_info { | |||
322 | unsigned int iscsi_tagmask; /* iscsi ddp tag mask */ | 331 | unsigned int iscsi_tagmask; /* iscsi ddp tag mask */ |
323 | unsigned int iscsi_pgsz_order; /* iscsi ddp page size orders */ | 332 | unsigned int iscsi_pgsz_order; /* iscsi ddp page size orders */ |
324 | unsigned int iscsi_llimit; /* chip's iscsi region llimit */ | 333 | unsigned int iscsi_llimit; /* chip's iscsi region llimit */ |
334 | unsigned int ulp_crypto; /* crypto lookaside support */ | ||
325 | void **iscsi_ppm; /* iscsi page pod manager */ | 335 | void **iscsi_ppm; /* iscsi page pod manager */ |
326 | int nodeid; /* device numa node id */ | 336 | int nodeid; /* device numa node id */ |
327 | bool fr_nsmr_tpte_wr_support; /* FW supports FR_NSMR_TPTE_WR */ | 337 | bool fr_nsmr_tpte_wr_support; /* FW supports FR_NSMR_TPTE_WR */ |
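The new chcr_stats_debug counters are plain atomics: the chcr datapath bumps them and the chcr_show() debugfs file added earlier in this pull reads them back. A hedged illustration of the producer side; only the struct above is from the patch, the function name is hypothetical:

static void example_count_cipher_req(struct adapter *adap, int err)
{
	atomic_inc(&adap->chcr_stats.cipher_rqst);
	if (err)
		atomic_inc(&adap->chcr_stats.error);
	else
		atomic_inc(&adap->chcr_stats.complete);
}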
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index c37cc59e9bf2..b5e11de4d497 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h | |||
@@ -98,7 +98,7 @@ struct akcipher_alg { | |||
98 | unsigned int keylen); | 98 | unsigned int keylen); |
99 | int (*set_priv_key)(struct crypto_akcipher *tfm, const void *key, | 99 | int (*set_priv_key)(struct crypto_akcipher *tfm, const void *key, |
100 | unsigned int keylen); | 100 | unsigned int keylen); |
101 | int (*max_size)(struct crypto_akcipher *tfm); | 101 | unsigned int (*max_size)(struct crypto_akcipher *tfm); |
102 | int (*init)(struct crypto_akcipher *tfm); | 102 | int (*init)(struct crypto_akcipher *tfm); |
103 | void (*exit)(struct crypto_akcipher *tfm); | 103 | void (*exit)(struct crypto_akcipher *tfm); |
104 | 104 | ||
@@ -257,13 +257,14 @@ static inline void akcipher_request_set_crypt(struct akcipher_request *req, | |||
257 | /** | 257 | /** |
258 | * crypto_akcipher_maxsize() - Get len for output buffer | 258 | * crypto_akcipher_maxsize() - Get len for output buffer |
259 | * | 259 | * |
260 | * Function returns the dest buffer size required for a given key | 260 | * Function returns the dest buffer size required for a given key. |
261 | * Function assumes that the key is already set in the transformation. If this | ||
262 | * function is called without a setkey or with a failed setkey, you will end up | ||
263 | * in a NULL dereference. | ||
261 | * | 264 | * |
262 | * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() | 265 | * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() |
263 | * | ||
264 | * Return: minimum len for output buffer or error code in key hasn't been set | ||
265 | */ | 266 | */ |
266 | static inline int crypto_akcipher_maxsize(struct crypto_akcipher *tfm) | 267 | static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm) |
267 | { | 268 | { |
268 | struct akcipher_alg *alg = crypto_akcipher_alg(tfm); | 269 | struct akcipher_alg *alg = crypto_akcipher_alg(tfm); |
269 | 270 | ||
diff --git a/include/crypto/dh.h b/include/crypto/dh.h index 6b424ad3482e..f638998fb6d0 100644 --- a/include/crypto/dh.h +++ b/include/crypto/dh.h | |||
@@ -73,9 +73,9 @@ int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params); | |||
73 | /** | 73 | /** |
74 | * crypto_dh_decode_key() - decode a private key | 74 | * crypto_dh_decode_key() - decode a private key |
75 | * @buf: Buffer holding a packet key that should be decoded | 75 | * @buf: Buffer holding a packet key that should be decoded |
76 | * @len: Lenth of the packet private key buffer | 76 | * @len: Length of the packet private key buffer |
77 | * @params: Buffer allocated by the caller that is filled with the | 77 | * @params: Buffer allocated by the caller that is filled with the |
78 | * unpacket DH private key. | 78 | * unpacked DH private key. |
79 | * | 79 | * |
80 | * The unpacking obtains the private key by pointing @p to the correct location | 80 | * The unpacking obtains the private key by pointing @p to the correct location |
81 | * in @buf. Thus, both pointers refer to the same memory. | 81 | * in @buf. Thus, both pointers refer to the same memory. |
diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h index 03a64f62ba7a..1aff2a8a3a68 100644 --- a/include/crypto/ecdh.h +++ b/include/crypto/ecdh.h | |||
@@ -74,9 +74,9 @@ int crypto_ecdh_encode_key(char *buf, unsigned int len, const struct ecdh *p); | |||
74 | /** | 74 | /** |
75 | * crypto_ecdh_decode_key() - decode a private key | 75 | * crypto_ecdh_decode_key() - decode a private key |
76 | * @buf: Buffer holding a packet key that should be decoded | 76 | * @buf: Buffer holding a packet key that should be decoded |
77 | * @len: Lenth of the packet private key buffer | 77 | * @len: Length of the packet private key buffer |
78 | * @p: Buffer allocated by the caller that is filled with the | 78 | * @p: Buffer allocated by the caller that is filled with the |
79 | * unpacket ECDH private key. | 79 | * unpacked ECDH private key. |
80 | * | 80 | * |
81 | * The unpacking obtains the private key by pointing @p to the correct location | 81 | * The unpacking obtains the private key by pointing @p to the correct location |
82 | * in @buf. Thus, both pointers refer to the same memory. | 82 | * in @buf. Thus, both pointers refer to the same memory. |
diff --git a/include/crypto/engine.h b/include/crypto/engine.h index 1bf600fc99f7..dd04c1699b51 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h | |||
@@ -58,6 +58,7 @@ struct crypto_engine { | |||
58 | struct list_head list; | 58 | struct list_head list; |
59 | spinlock_t queue_lock; | 59 | spinlock_t queue_lock; |
60 | struct crypto_queue queue; | 60 | struct crypto_queue queue; |
61 | struct device *dev; | ||
61 | 62 | ||
62 | bool rt; | 63 | bool rt; |
63 | 64 | ||
diff --git a/include/crypto/hmac.h b/include/crypto/hmac.h new file mode 100644 index 000000000000..ef09f7938204 --- /dev/null +++ b/include/crypto/hmac.h | |||
@@ -0,0 +1,7 @@ | |||
1 | #ifndef _CRYPTO_HMAC_H | ||
2 | #define _CRYPTO_HMAC_H | ||
3 | |||
4 | #define HMAC_IPAD_VALUE 0x36 | ||
5 | #define HMAC_OPAD_VALUE 0x5c | ||
6 | |||
7 | #endif /* _CRYPTO_HMAC_H */ | ||
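The new <crypto/hmac.h> header centralizes the two pad constants that HMAC implementations across the tree had been open-coding. They are the RFC 2104 values: the key, zero-padded to the hash block size, is XORed with 0x36 for the inner hash and 0x5c for the outer one. A plain C sketch of where they are used; blocksize and the pre-padded key are the caller's responsibility:

#include <crypto/hmac.h>

static void example_hmac_pads(u8 *ipad, u8 *opad, const u8 *key,
			      unsigned int blocksize)
{
	unsigned int i;

	for (i = 0; i < blocksize; i++) {
		ipad[i] = key[i] ^ HMAC_IPAD_VALUE;	/* inner hash: H(ipad || msg)   */
		opad[i] = key[i] ^ HMAC_OPAD_VALUE;	/* outer hash: H(opad || inner) */
	}
}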
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h index ce8e1f79374b..2133d17b7156 100644 --- a/include/crypto/kpp.h +++ b/include/crypto/kpp.h | |||
@@ -53,7 +53,7 @@ struct crypto_kpp { | |||
53 | * | 53 | * |
54 | * @set_secret: Function invokes the protocol specific function to | 54 | * @set_secret: Function invokes the protocol specific function to |
55 | * store the secret private key along with parameters. | 55 | * store the secret private key along with parameters. |
56 | * The implementation knows how to decode thie buffer | 56 | * The implementation knows how to decode the buffer |
57 | * @generate_public_key: Function generate the public key to be sent to the | 57 | * @generate_public_key: Function generate the public key to be sent to the |
58 | * counterpart. In case of error, where output is not big | 58 | * counterpart. In case of error, where output is not big |
59 | * enough req->dst_len will be updated to the size | 59 | * enough req->dst_len will be updated to the size |
@@ -79,7 +79,7 @@ struct kpp_alg { | |||
79 | int (*generate_public_key)(struct kpp_request *req); | 79 | int (*generate_public_key)(struct kpp_request *req); |
80 | int (*compute_shared_secret)(struct kpp_request *req); | 80 | int (*compute_shared_secret)(struct kpp_request *req); |
81 | 81 | ||
82 | int (*max_size)(struct crypto_kpp *tfm); | 82 | unsigned int (*max_size)(struct crypto_kpp *tfm); |
83 | 83 | ||
84 | int (*init)(struct crypto_kpp *tfm); | 84 | int (*init)(struct crypto_kpp *tfm); |
85 | void (*exit)(struct crypto_kpp *tfm); | 85 | void (*exit)(struct crypto_kpp *tfm); |
@@ -102,7 +102,7 @@ struct kpp_alg { | |||
102 | * @mask: specifies the mask for the algorithm | 102 | * @mask: specifies the mask for the algorithm |
103 | * | 103 | * |
104 | * Allocate a handle for kpp algorithm. The returned struct crypto_kpp | 104 | * Allocate a handle for kpp algorithm. The returned struct crypto_kpp |
105 | * is requeried for any following API invocation | 105 | * is required for any following API invocation |
106 | * | 106 | * |
107 | * Return: allocated handle in case of success; IS_ERR() is true in case of | 107 | * Return: allocated handle in case of success; IS_ERR() is true in case of |
108 | * an error, PTR_ERR() returns the error code. | 108 | * an error, PTR_ERR() returns the error code. |
@@ -323,13 +323,14 @@ static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req) | |||
323 | /** | 323 | /** |
324 | * crypto_kpp_maxsize() - Get len for output buffer | 324 | * crypto_kpp_maxsize() - Get len for output buffer |
325 | * | 325 | * |
326 | * Function returns the output buffer size required | 326 | * Function returns the output buffer size required for a given key. |
327 | * Function assumes that the key is already set in the transformation. If this | ||
328 | * function is called without a setkey or with a failed setkey, you will end up | ||
329 | * in a NULL dereference. | ||
327 | * | 330 | * |
328 | * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() | 331 | * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() |
329 | * | ||
330 | * Return: minimum len for output buffer or error code if key hasn't been set | ||
331 | */ | 332 | */ |
332 | static inline int crypto_kpp_maxsize(struct crypto_kpp *tfm) | 333 | static inline unsigned int crypto_kpp_maxsize(struct crypto_kpp *tfm) |
333 | { | 334 | { |
334 | struct kpp_alg *alg = crypto_kpp_alg(tfm); | 335 | struct kpp_alg *alg = crypto_kpp_alg(tfm); |
335 | 336 | ||
diff --git a/include/linux/timeriomem-rng.h b/include/linux/timeriomem-rng.h index 46eb27ddbfab..3e00122bcf88 100644 --- a/include/linux/timeriomem-rng.h +++ b/include/linux/timeriomem-rng.h | |||
@@ -13,4 +13,7 @@ struct timeriomem_rng_data { | |||
13 | 13 | ||
14 | /* measures in usecs */ | 14 | /* measures in usecs */ |
15 | unsigned int period; | 15 | unsigned int period; |
16 | |||
17 | /* bits of entropy per 1024 bits read */ | ||
18 | unsigned int quality; | ||
16 | }; | 19 | }; |
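The new quality field lets platform code tell the hwrng core how much entropy the mapped register actually provides, expressed as bits of entropy per 1024 bits read. A hypothetical board-file fragment; only the two fields visible above are taken from the patch and the values are made up:

static struct timeriomem_rng_data example_rng_pdata = {
	.period  = 1000000,	/* a fresh sample every 1,000,000 us */
	.quality = 700,		/* claim ~700 bits of entropy per 1024 bits read */
};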