diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-11-23 19:18:25 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-11-23 19:18:25 -0500 |
| commit | 26b265cd29dde56bf0901c421eabc7ae815f38c4 (patch) | |
| tree | 83a5418c96ccde8522bda6614063b665fe5e0ec9 | |
| parent | 2e7babfa892a55588467ef03b545002e32f31528 (diff) | |
| parent | f262f0f5cad0c9eca61d1d383e3b67b57dcbe5ea (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
- Made x86 ablk_helper generic for ARM
- Phase out chainiv in favour of eseqiv (affects IPsec)
- Fixed aes-cbc IV corruption on s390
- Added constant-time crypto_memneq which replaces memcmp
- Fixed aes-ctr in omap-aes
- Added OMAP3 ROM RNG support
- Add PRNG support for MSM SoC's
- Add and use Job Ring API in caam
- Misc fixes
[ NOTE! This pull request was sent within the merge window, but Herbert
has some questionable email sending setup that makes him public enemy
#1 as far as gmail is concerned. So most of his emails seem to be
trapped by gmail as spam, resulting in me not seeing them. - Linus ]
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (49 commits)
crypto: s390 - Fix aes-cbc IV corruption
crypto: omap-aes - Fix CTR mode counter length
crypto: omap-sham - Add missing modalias
padata: make the sequence counter an atomic_t
crypto: caam - Modify the interface layers to use JR API's
crypto: caam - Add API's to allocate/free Job Rings
crypto: caam - Add Platform driver for Job Ring
hwrng: msm - Add PRNG support for MSM SoC's
ARM: DT: msm: Add Qualcomm's PRNG driver binding document
crypto: skcipher - Use eseqiv even on UP machines
crypto: talitos - Simplify key parsing
crypto: picoxcell - Simplify and harden key parsing
crypto: ixp4xx - Simplify and harden key parsing
crypto: authencesn - Simplify key parsing
crypto: authenc - Export key parsing helper function
crypto: mv_cesa: remove deprecated IRQF_DISABLED
hwrng: OMAP3 ROM Random Number Generator support
crypto: sha256_ssse3 - also test for BMI2
crypto: mv_cesa - Remove redundant of_match_ptr
crypto: sahara - Remove redundant of_match_ptr
...
59 files changed, 1443 insertions, 640 deletions
diff --git a/Documentation/devicetree/bindings/rng/qcom,prng.txt b/Documentation/devicetree/bindings/rng/qcom,prng.txt new file mode 100644 index 000000000000..8e5853c2879b --- /dev/null +++ b/Documentation/devicetree/bindings/rng/qcom,prng.txt | |||
| @@ -0,0 +1,17 @@ | |||
| 1 | Qualcomm MSM pseudo random number generator. | ||
| 2 | |||
| 3 | Required properties: | ||
| 4 | |||
| 5 | - compatible : should be "qcom,prng" | ||
| 6 | - reg : specifies base physical address and size of the registers map | ||
| 7 | - clocks : phandle to clock-controller plus clock-specifier pair | ||
| 8 | - clock-names : "core" clocks all registers, FIFO and circuits in PRNG IP block | ||
| 9 | |||
| 10 | Example: | ||
| 11 | |||
| 12 | rng@f9bff000 { | ||
| 13 | compatible = "qcom,prng"; | ||
| 14 | reg = <0xf9bff000 0x200>; | ||
| 15 | clocks = <&clock GCC_PRNG_AHB_CLK>; | ||
| 16 | clock-names = "core"; | ||
| 17 | }; | ||
diff --git a/arch/arm/mach-tegra/fuse.c b/arch/arm/mach-tegra/fuse.c index d4639c506622..9a4e910c3796 100644 --- a/arch/arm/mach-tegra/fuse.c +++ b/arch/arm/mach-tegra/fuse.c | |||
| @@ -209,13 +209,3 @@ void __init tegra_init_fuse(void) | |||
| 209 | tegra_sku_id, tegra_cpu_process_id, | 209 | tegra_sku_id, tegra_cpu_process_id, |
| 210 | tegra_core_process_id); | 210 | tegra_core_process_id); |
| 211 | } | 211 | } |
| 212 | |||
| 213 | unsigned long long tegra_chip_uid(void) | ||
| 214 | { | ||
| 215 | unsigned long long lo, hi; | ||
| 216 | |||
| 217 | lo = tegra_fuse_readl(FUSE_UID_LOW); | ||
| 218 | hi = tegra_fuse_readl(FUSE_UID_HIGH); | ||
| 219 | return (hi << 32ull) | lo; | ||
| 220 | } | ||
| 221 | EXPORT_SYMBOL(tegra_chip_uid); | ||
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 46cae138ece2..4363528dc8fd 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c | |||
| @@ -35,7 +35,6 @@ static u8 *ctrblk; | |||
| 35 | static char keylen_flag; | 35 | static char keylen_flag; |
| 36 | 36 | ||
| 37 | struct s390_aes_ctx { | 37 | struct s390_aes_ctx { |
| 38 | u8 iv[AES_BLOCK_SIZE]; | ||
| 39 | u8 key[AES_MAX_KEY_SIZE]; | 38 | u8 key[AES_MAX_KEY_SIZE]; |
| 40 | long enc; | 39 | long enc; |
| 41 | long dec; | 40 | long dec; |
| @@ -441,30 +440,36 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, | |||
| 441 | return aes_set_key(tfm, in_key, key_len); | 440 | return aes_set_key(tfm, in_key, key_len); |
| 442 | } | 441 | } |
| 443 | 442 | ||
| 444 | static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param, | 443 | static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, |
| 445 | struct blkcipher_walk *walk) | 444 | struct blkcipher_walk *walk) |
| 446 | { | 445 | { |
| 446 | struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); | ||
| 447 | int ret = blkcipher_walk_virt(desc, walk); | 447 | int ret = blkcipher_walk_virt(desc, walk); |
| 448 | unsigned int nbytes = walk->nbytes; | 448 | unsigned int nbytes = walk->nbytes; |
| 449 | struct { | ||
| 450 | u8 iv[AES_BLOCK_SIZE]; | ||
| 451 | u8 key[AES_MAX_KEY_SIZE]; | ||
| 452 | } param; | ||
| 449 | 453 | ||
| 450 | if (!nbytes) | 454 | if (!nbytes) |
| 451 | goto out; | 455 | goto out; |
| 452 | 456 | ||
| 453 | memcpy(param, walk->iv, AES_BLOCK_SIZE); | 457 | memcpy(param.iv, walk->iv, AES_BLOCK_SIZE); |
| 458 | memcpy(param.key, sctx->key, sctx->key_len); | ||
| 454 | do { | 459 | do { |
| 455 | /* only use complete blocks */ | 460 | /* only use complete blocks */ |
| 456 | unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1); | 461 | unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1); |
| 457 | u8 *out = walk->dst.virt.addr; | 462 | u8 *out = walk->dst.virt.addr; |
| 458 | u8 *in = walk->src.virt.addr; | 463 | u8 *in = walk->src.virt.addr; |
| 459 | 464 | ||
| 460 | ret = crypt_s390_kmc(func, param, out, in, n); | 465 | ret = crypt_s390_kmc(func, ¶m, out, in, n); |
| 461 | if (ret < 0 || ret != n) | 466 | if (ret < 0 || ret != n) |
| 462 | return -EIO; | 467 | return -EIO; |
| 463 | 468 | ||
| 464 | nbytes &= AES_BLOCK_SIZE - 1; | 469 | nbytes &= AES_BLOCK_SIZE - 1; |
| 465 | ret = blkcipher_walk_done(desc, walk, nbytes); | 470 | ret = blkcipher_walk_done(desc, walk, nbytes); |
| 466 | } while ((nbytes = walk->nbytes)); | 471 | } while ((nbytes = walk->nbytes)); |
| 467 | memcpy(walk->iv, param, AES_BLOCK_SIZE); | 472 | memcpy(walk->iv, param.iv, AES_BLOCK_SIZE); |
| 468 | 473 | ||
| 469 | out: | 474 | out: |
| 470 | return ret; | 475 | return ret; |
| @@ -481,7 +486,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc, | |||
| 481 | return fallback_blk_enc(desc, dst, src, nbytes); | 486 | return fallback_blk_enc(desc, dst, src, nbytes); |
| 482 | 487 | ||
| 483 | blkcipher_walk_init(&walk, dst, src, nbytes); | 488 | blkcipher_walk_init(&walk, dst, src, nbytes); |
| 484 | return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk); | 489 | return cbc_aes_crypt(desc, sctx->enc, &walk); |
| 485 | } | 490 | } |
| 486 | 491 | ||
| 487 | static int cbc_aes_decrypt(struct blkcipher_desc *desc, | 492 | static int cbc_aes_decrypt(struct blkcipher_desc *desc, |
| @@ -495,7 +500,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc, | |||
| 495 | return fallback_blk_dec(desc, dst, src, nbytes); | 500 | return fallback_blk_dec(desc, dst, src, nbytes); |
| 496 | 501 | ||
| 497 | blkcipher_walk_init(&walk, dst, src, nbytes); | 502 | blkcipher_walk_init(&walk, dst, src, nbytes); |
| 498 | return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk); | 503 | return cbc_aes_crypt(desc, sctx->dec, &walk); |
| 499 | } | 504 | } |
| 500 | 505 | ||
| 501 | static struct crypto_alg cbc_aes_alg = { | 506 | static struct crypto_alg cbc_aes_alg = { |
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 7d6ba9db1be9..e0fc24db234a 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile | |||
| @@ -3,8 +3,9 @@ | |||
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no) | 5 | avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no) |
| 6 | avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\ | ||
| 7 | $(comma)4)$(comma)%ymm2,yes,no) | ||
| 6 | 8 | ||
| 7 | obj-$(CONFIG_CRYPTO_ABLK_HELPER_X86) += ablk_helper.o | ||
| 8 | obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o | 9 | obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o |
| 9 | 10 | ||
| 10 | obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o | 11 | obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o |
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index f80e668785c0..835488b745ee 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
| @@ -34,7 +34,7 @@ | |||
| 34 | #include <asm/cpu_device_id.h> | 34 | #include <asm/cpu_device_id.h> |
| 35 | #include <asm/i387.h> | 35 | #include <asm/i387.h> |
| 36 | #include <asm/crypto/aes.h> | 36 | #include <asm/crypto/aes.h> |
| 37 | #include <asm/crypto/ablk_helper.h> | 37 | #include <crypto/ablk_helper.h> |
| 38 | #include <crypto/scatterwalk.h> | 38 | #include <crypto/scatterwalk.h> |
| 39 | #include <crypto/internal/aead.h> | 39 | #include <crypto/internal/aead.h> |
| 40 | #include <linux/workqueue.h> | 40 | #include <linux/workqueue.h> |
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c index 414fe5d7946b..4209a76fcdaa 100644 --- a/arch/x86/crypto/camellia_aesni_avx2_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
| 15 | #include <linux/crypto.h> | 15 | #include <linux/crypto.h> |
| 16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
| 17 | #include <crypto/ablk_helper.h> | ||
| 17 | #include <crypto/algapi.h> | 18 | #include <crypto/algapi.h> |
| 18 | #include <crypto/ctr.h> | 19 | #include <crypto/ctr.h> |
| 19 | #include <crypto/lrw.h> | 20 | #include <crypto/lrw.h> |
| @@ -21,7 +22,6 @@ | |||
| 21 | #include <asm/xcr.h> | 22 | #include <asm/xcr.h> |
| 22 | #include <asm/xsave.h> | 23 | #include <asm/xsave.h> |
| 23 | #include <asm/crypto/camellia.h> | 24 | #include <asm/crypto/camellia.h> |
| 24 | #include <asm/crypto/ablk_helper.h> | ||
| 25 | #include <asm/crypto/glue_helper.h> | 25 | #include <asm/crypto/glue_helper.h> |
| 26 | 26 | ||
| 27 | #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 | 27 | #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 |
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c index 37fd0c0a81ea..87a041a10f4a 100644 --- a/arch/x86/crypto/camellia_aesni_avx_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx_glue.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
| 15 | #include <linux/crypto.h> | 15 | #include <linux/crypto.h> |
| 16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
| 17 | #include <crypto/ablk_helper.h> | ||
| 17 | #include <crypto/algapi.h> | 18 | #include <crypto/algapi.h> |
| 18 | #include <crypto/ctr.h> | 19 | #include <crypto/ctr.h> |
| 19 | #include <crypto/lrw.h> | 20 | #include <crypto/lrw.h> |
| @@ -21,7 +22,6 @@ | |||
| 21 | #include <asm/xcr.h> | 22 | #include <asm/xcr.h> |
| 22 | #include <asm/xsave.h> | 23 | #include <asm/xsave.h> |
| 23 | #include <asm/crypto/camellia.h> | 24 | #include <asm/crypto/camellia.h> |
| 24 | #include <asm/crypto/ablk_helper.h> | ||
| 25 | #include <asm/crypto/glue_helper.h> | 25 | #include <asm/crypto/glue_helper.h> |
| 26 | 26 | ||
| 27 | #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 | 27 | #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 |
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c index c6631813dc11..e6a3700489b9 100644 --- a/arch/x86/crypto/cast5_avx_glue.c +++ b/arch/x86/crypto/cast5_avx_glue.c | |||
| @@ -26,13 +26,13 @@ | |||
| 26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
| 27 | #include <linux/crypto.h> | 27 | #include <linux/crypto.h> |
| 28 | #include <linux/err.h> | 28 | #include <linux/err.h> |
| 29 | #include <crypto/ablk_helper.h> | ||
| 29 | #include <crypto/algapi.h> | 30 | #include <crypto/algapi.h> |
| 30 | #include <crypto/cast5.h> | 31 | #include <crypto/cast5.h> |
| 31 | #include <crypto/cryptd.h> | 32 | #include <crypto/cryptd.h> |
| 32 | #include <crypto/ctr.h> | 33 | #include <crypto/ctr.h> |
| 33 | #include <asm/xcr.h> | 34 | #include <asm/xcr.h> |
| 34 | #include <asm/xsave.h> | 35 | #include <asm/xsave.h> |
| 35 | #include <asm/crypto/ablk_helper.h> | ||
| 36 | #include <asm/crypto/glue_helper.h> | 36 | #include <asm/crypto/glue_helper.h> |
| 37 | 37 | ||
| 38 | #define CAST5_PARALLEL_BLOCKS 16 | 38 | #define CAST5_PARALLEL_BLOCKS 16 |
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c index 8d0dfb86a559..09f3677393e4 100644 --- a/arch/x86/crypto/cast6_avx_glue.c +++ b/arch/x86/crypto/cast6_avx_glue.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
| 29 | #include <linux/crypto.h> | 29 | #include <linux/crypto.h> |
| 30 | #include <linux/err.h> | 30 | #include <linux/err.h> |
| 31 | #include <crypto/ablk_helper.h> | ||
| 31 | #include <crypto/algapi.h> | 32 | #include <crypto/algapi.h> |
| 32 | #include <crypto/cast6.h> | 33 | #include <crypto/cast6.h> |
| 33 | #include <crypto/cryptd.h> | 34 | #include <crypto/cryptd.h> |
| @@ -37,7 +38,6 @@ | |||
| 37 | #include <crypto/xts.h> | 38 | #include <crypto/xts.h> |
| 38 | #include <asm/xcr.h> | 39 | #include <asm/xcr.h> |
| 39 | #include <asm/xsave.h> | 40 | #include <asm/xsave.h> |
| 40 | #include <asm/crypto/ablk_helper.h> | ||
| 41 | #include <asm/crypto/glue_helper.h> | 41 | #include <asm/crypto/glue_helper.h> |
| 42 | 42 | ||
| 43 | #define CAST6_PARALLEL_BLOCKS 8 | 43 | #define CAST6_PARALLEL_BLOCKS 8 |
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c index 23aabc6c20a5..2fae489b1524 100644 --- a/arch/x86/crypto/serpent_avx2_glue.c +++ b/arch/x86/crypto/serpent_avx2_glue.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
| 15 | #include <linux/crypto.h> | 15 | #include <linux/crypto.h> |
| 16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
| 17 | #include <crypto/ablk_helper.h> | ||
| 17 | #include <crypto/algapi.h> | 18 | #include <crypto/algapi.h> |
| 18 | #include <crypto/ctr.h> | 19 | #include <crypto/ctr.h> |
| 19 | #include <crypto/lrw.h> | 20 | #include <crypto/lrw.h> |
| @@ -22,7 +23,6 @@ | |||
| 22 | #include <asm/xcr.h> | 23 | #include <asm/xcr.h> |
| 23 | #include <asm/xsave.h> | 24 | #include <asm/xsave.h> |
| 24 | #include <asm/crypto/serpent-avx.h> | 25 | #include <asm/crypto/serpent-avx.h> |
| 25 | #include <asm/crypto/ablk_helper.h> | ||
| 26 | #include <asm/crypto/glue_helper.h> | 26 | #include <asm/crypto/glue_helper.h> |
| 27 | 27 | ||
| 28 | #define SERPENT_AVX2_PARALLEL_BLOCKS 16 | 28 | #define SERPENT_AVX2_PARALLEL_BLOCKS 16 |
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c index 9ae83cf8d21e..ff4870870972 100644 --- a/arch/x86/crypto/serpent_avx_glue.c +++ b/arch/x86/crypto/serpent_avx_glue.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
| 29 | #include <linux/crypto.h> | 29 | #include <linux/crypto.h> |
| 30 | #include <linux/err.h> | 30 | #include <linux/err.h> |
| 31 | #include <crypto/ablk_helper.h> | ||
| 31 | #include <crypto/algapi.h> | 32 | #include <crypto/algapi.h> |
| 32 | #include <crypto/serpent.h> | 33 | #include <crypto/serpent.h> |
| 33 | #include <crypto/cryptd.h> | 34 | #include <crypto/cryptd.h> |
| @@ -38,7 +39,6 @@ | |||
| 38 | #include <asm/xcr.h> | 39 | #include <asm/xcr.h> |
| 39 | #include <asm/xsave.h> | 40 | #include <asm/xsave.h> |
| 40 | #include <asm/crypto/serpent-avx.h> | 41 | #include <asm/crypto/serpent-avx.h> |
| 41 | #include <asm/crypto/ablk_helper.h> | ||
| 42 | #include <asm/crypto/glue_helper.h> | 42 | #include <asm/crypto/glue_helper.h> |
| 43 | 43 | ||
| 44 | /* 8-way parallel cipher functions */ | 44 | /* 8-way parallel cipher functions */ |
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c index 97a356ece24d..8c95f8637306 100644 --- a/arch/x86/crypto/serpent_sse2_glue.c +++ b/arch/x86/crypto/serpent_sse2_glue.c | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #include <linux/types.h> | 34 | #include <linux/types.h> |
| 35 | #include <linux/crypto.h> | 35 | #include <linux/crypto.h> |
| 36 | #include <linux/err.h> | 36 | #include <linux/err.h> |
| 37 | #include <crypto/ablk_helper.h> | ||
| 37 | #include <crypto/algapi.h> | 38 | #include <crypto/algapi.h> |
| 38 | #include <crypto/serpent.h> | 39 | #include <crypto/serpent.h> |
| 39 | #include <crypto/cryptd.h> | 40 | #include <crypto/cryptd.h> |
| @@ -42,7 +43,6 @@ | |||
| 42 | #include <crypto/lrw.h> | 43 | #include <crypto/lrw.h> |
| 43 | #include <crypto/xts.h> | 44 | #include <crypto/xts.h> |
| 44 | #include <asm/crypto/serpent-sse2.h> | 45 | #include <asm/crypto/serpent-sse2.h> |
| 45 | #include <asm/crypto/ablk_helper.h> | ||
| 46 | #include <asm/crypto/glue_helper.h> | 46 | #include <asm/crypto/glue_helper.h> |
| 47 | 47 | ||
| 48 | static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src) | 48 | static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src) |
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c index 50226c4b86ed..f248546da1ca 100644 --- a/arch/x86/crypto/sha256_ssse3_glue.c +++ b/arch/x86/crypto/sha256_ssse3_glue.c | |||
| @@ -281,7 +281,7 @@ static int __init sha256_ssse3_mod_init(void) | |||
| 281 | /* allow AVX to override SSSE3, it's a little faster */ | 281 | /* allow AVX to override SSSE3, it's a little faster */ |
| 282 | if (avx_usable()) { | 282 | if (avx_usable()) { |
| 283 | #ifdef CONFIG_AS_AVX2 | 283 | #ifdef CONFIG_AS_AVX2 |
| 284 | if (boot_cpu_has(X86_FEATURE_AVX2)) | 284 | if (boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_BMI2)) |
| 285 | sha256_transform_asm = sha256_transform_rorx; | 285 | sha256_transform_asm = sha256_transform_rorx; |
| 286 | else | 286 | else |
| 287 | #endif | 287 | #endif |
| @@ -319,4 +319,4 @@ MODULE_LICENSE("GPL"); | |||
| 319 | MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated"); | 319 | MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated"); |
| 320 | 320 | ||
| 321 | MODULE_ALIAS("sha256"); | 321 | MODULE_ALIAS("sha256"); |
| 322 | MODULE_ALIAS("sha384"); | 322 | MODULE_ALIAS("sha224"); |
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c index a62ba541884e..4e3c665be129 100644 --- a/arch/x86/crypto/twofish_avx_glue.c +++ b/arch/x86/crypto/twofish_avx_glue.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
| 29 | #include <linux/crypto.h> | 29 | #include <linux/crypto.h> |
| 30 | #include <linux/err.h> | 30 | #include <linux/err.h> |
| 31 | #include <crypto/ablk_helper.h> | ||
| 31 | #include <crypto/algapi.h> | 32 | #include <crypto/algapi.h> |
| 32 | #include <crypto/twofish.h> | 33 | #include <crypto/twofish.h> |
| 33 | #include <crypto/cryptd.h> | 34 | #include <crypto/cryptd.h> |
| @@ -39,7 +40,6 @@ | |||
| 39 | #include <asm/xcr.h> | 40 | #include <asm/xcr.h> |
| 40 | #include <asm/xsave.h> | 41 | #include <asm/xsave.h> |
| 41 | #include <asm/crypto/twofish.h> | 42 | #include <asm/crypto/twofish.h> |
| 42 | #include <asm/crypto/ablk_helper.h> | ||
| 43 | #include <asm/crypto/glue_helper.h> | 43 | #include <asm/crypto/glue_helper.h> |
| 44 | #include <crypto/scatterwalk.h> | 44 | #include <crypto/scatterwalk.h> |
| 45 | #include <linux/workqueue.h> | 45 | #include <linux/workqueue.h> |
diff --git a/arch/x86/include/asm/simd.h b/arch/x86/include/asm/simd.h new file mode 100644 index 000000000000..ee80b92f0096 --- /dev/null +++ b/arch/x86/include/asm/simd.h | |||
| @@ -0,0 +1,11 @@ | |||
| 1 | |||
| 2 | #include <asm/i387.h> | ||
| 3 | |||
| 4 | /* | ||
| 5 | * may_use_simd - whether it is allowable at this time to issue SIMD | ||
| 6 | * instructions or access the SIMD register file | ||
| 7 | */ | ||
| 8 | static __must_check inline bool may_use_simd(void) | ||
| 9 | { | ||
| 10 | return irq_fpu_usable(); | ||
| 11 | } | ||
diff --git a/crypto/Kconfig b/crypto/Kconfig index 4ae5734fb473..7bcb70d216e1 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
| @@ -174,9 +174,8 @@ config CRYPTO_TEST | |||
| 174 | help | 174 | help |
| 175 | Quick & dirty crypto test module. | 175 | Quick & dirty crypto test module. |
| 176 | 176 | ||
| 177 | config CRYPTO_ABLK_HELPER_X86 | 177 | config CRYPTO_ABLK_HELPER |
| 178 | tristate | 178 | tristate |
| 179 | depends on X86 | ||
| 180 | select CRYPTO_CRYPTD | 179 | select CRYPTO_CRYPTD |
| 181 | 180 | ||
| 182 | config CRYPTO_GLUE_HELPER_X86 | 181 | config CRYPTO_GLUE_HELPER_X86 |
| @@ -695,7 +694,7 @@ config CRYPTO_AES_NI_INTEL | |||
| 695 | select CRYPTO_AES_X86_64 if 64BIT | 694 | select CRYPTO_AES_X86_64 if 64BIT |
| 696 | select CRYPTO_AES_586 if !64BIT | 695 | select CRYPTO_AES_586 if !64BIT |
| 697 | select CRYPTO_CRYPTD | 696 | select CRYPTO_CRYPTD |
| 698 | select CRYPTO_ABLK_HELPER_X86 | 697 | select CRYPTO_ABLK_HELPER |
| 699 | select CRYPTO_ALGAPI | 698 | select CRYPTO_ALGAPI |
| 700 | select CRYPTO_GLUE_HELPER_X86 if 64BIT | 699 | select CRYPTO_GLUE_HELPER_X86 if 64BIT |
| 701 | select CRYPTO_LRW | 700 | select CRYPTO_LRW |
| @@ -895,7 +894,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64 | |||
| 895 | depends on CRYPTO | 894 | depends on CRYPTO |
| 896 | select CRYPTO_ALGAPI | 895 | select CRYPTO_ALGAPI |
| 897 | select CRYPTO_CRYPTD | 896 | select CRYPTO_CRYPTD |
| 898 | select CRYPTO_ABLK_HELPER_X86 | 897 | select CRYPTO_ABLK_HELPER |
| 899 | select CRYPTO_GLUE_HELPER_X86 | 898 | select CRYPTO_GLUE_HELPER_X86 |
| 900 | select CRYPTO_CAMELLIA_X86_64 | 899 | select CRYPTO_CAMELLIA_X86_64 |
| 901 | select CRYPTO_LRW | 900 | select CRYPTO_LRW |
| @@ -917,7 +916,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 | |||
| 917 | depends on CRYPTO | 916 | depends on CRYPTO |
| 918 | select CRYPTO_ALGAPI | 917 | select CRYPTO_ALGAPI |
| 919 | select CRYPTO_CRYPTD | 918 | select CRYPTO_CRYPTD |
| 920 | select CRYPTO_ABLK_HELPER_X86 | 919 | select CRYPTO_ABLK_HELPER |
| 921 | select CRYPTO_GLUE_HELPER_X86 | 920 | select CRYPTO_GLUE_HELPER_X86 |
| 922 | select CRYPTO_CAMELLIA_X86_64 | 921 | select CRYPTO_CAMELLIA_X86_64 |
| 923 | select CRYPTO_CAMELLIA_AESNI_AVX_X86_64 | 922 | select CRYPTO_CAMELLIA_AESNI_AVX_X86_64 |
| @@ -969,7 +968,7 @@ config CRYPTO_CAST5_AVX_X86_64 | |||
| 969 | depends on X86 && 64BIT | 968 | depends on X86 && 64BIT |
| 970 | select CRYPTO_ALGAPI | 969 | select CRYPTO_ALGAPI |
| 971 | select CRYPTO_CRYPTD | 970 | select CRYPTO_CRYPTD |
| 972 | select CRYPTO_ABLK_HELPER_X86 | 971 | select CRYPTO_ABLK_HELPER |
| 973 | select CRYPTO_CAST_COMMON | 972 | select CRYPTO_CAST_COMMON |
| 974 | select CRYPTO_CAST5 | 973 | select CRYPTO_CAST5 |
| 975 | help | 974 | help |
| @@ -992,7 +991,7 @@ config CRYPTO_CAST6_AVX_X86_64 | |||
| 992 | depends on X86 && 64BIT | 991 | depends on X86 && 64BIT |
| 993 | select CRYPTO_ALGAPI | 992 | select CRYPTO_ALGAPI |
| 994 | select CRYPTO_CRYPTD | 993 | select CRYPTO_CRYPTD |
| 995 | select CRYPTO_ABLK_HELPER_X86 | 994 | select CRYPTO_ABLK_HELPER |
| 996 | select CRYPTO_GLUE_HELPER_X86 | 995 | select CRYPTO_GLUE_HELPER_X86 |
| 997 | select CRYPTO_CAST_COMMON | 996 | select CRYPTO_CAST_COMMON |
| 998 | select CRYPTO_CAST6 | 997 | select CRYPTO_CAST6 |
| @@ -1110,7 +1109,7 @@ config CRYPTO_SERPENT_SSE2_X86_64 | |||
| 1110 | depends on X86 && 64BIT | 1109 | depends on X86 && 64BIT |
| 1111 | select CRYPTO_ALGAPI | 1110 | select CRYPTO_ALGAPI |
| 1112 | select CRYPTO_CRYPTD | 1111 | select CRYPTO_CRYPTD |
| 1113 | select CRYPTO_ABLK_HELPER_X86 | 1112 | select CRYPTO_ABLK_HELPER |
| 1114 | select CRYPTO_GLUE_HELPER_X86 | 1113 | select CRYPTO_GLUE_HELPER_X86 |
| 1115 | select CRYPTO_SERPENT | 1114 | select CRYPTO_SERPENT |
| 1116 | select CRYPTO_LRW | 1115 | select CRYPTO_LRW |
| @@ -1132,7 +1131,7 @@ config CRYPTO_SERPENT_SSE2_586 | |||
| 1132 | depends on X86 && !64BIT | 1131 | depends on X86 && !64BIT |
| 1133 | select CRYPTO_ALGAPI | 1132 | select CRYPTO_ALGAPI |
| 1134 | select CRYPTO_CRYPTD | 1133 | select CRYPTO_CRYPTD |
| 1135 | select CRYPTO_ABLK_HELPER_X86 | 1134 | select CRYPTO_ABLK_HELPER |
| 1136 | select CRYPTO_GLUE_HELPER_X86 | 1135 | select CRYPTO_GLUE_HELPER_X86 |
| 1137 | select CRYPTO_SERPENT | 1136 | select CRYPTO_SERPENT |
| 1138 | select CRYPTO_LRW | 1137 | select CRYPTO_LRW |
| @@ -1154,7 +1153,7 @@ config CRYPTO_SERPENT_AVX_X86_64 | |||
| 1154 | depends on X86 && 64BIT | 1153 | depends on X86 && 64BIT |
| 1155 | select CRYPTO_ALGAPI | 1154 | select CRYPTO_ALGAPI |
| 1156 | select CRYPTO_CRYPTD | 1155 | select CRYPTO_CRYPTD |
| 1157 | select CRYPTO_ABLK_HELPER_X86 | 1156 | select CRYPTO_ABLK_HELPER |
| 1158 | select CRYPTO_GLUE_HELPER_X86 | 1157 | select CRYPTO_GLUE_HELPER_X86 |
| 1159 | select CRYPTO_SERPENT | 1158 | select CRYPTO_SERPENT |
| 1160 | select CRYPTO_LRW | 1159 | select CRYPTO_LRW |
| @@ -1176,7 +1175,7 @@ config CRYPTO_SERPENT_AVX2_X86_64 | |||
| 1176 | depends on X86 && 64BIT | 1175 | depends on X86 && 64BIT |
| 1177 | select CRYPTO_ALGAPI | 1176 | select CRYPTO_ALGAPI |
| 1178 | select CRYPTO_CRYPTD | 1177 | select CRYPTO_CRYPTD |
| 1179 | select CRYPTO_ABLK_HELPER_X86 | 1178 | select CRYPTO_ABLK_HELPER |
| 1180 | select CRYPTO_GLUE_HELPER_X86 | 1179 | select CRYPTO_GLUE_HELPER_X86 |
| 1181 | select CRYPTO_SERPENT | 1180 | select CRYPTO_SERPENT |
| 1182 | select CRYPTO_SERPENT_AVX_X86_64 | 1181 | select CRYPTO_SERPENT_AVX_X86_64 |
| @@ -1292,7 +1291,7 @@ config CRYPTO_TWOFISH_AVX_X86_64 | |||
| 1292 | depends on X86 && 64BIT | 1291 | depends on X86 && 64BIT |
| 1293 | select CRYPTO_ALGAPI | 1292 | select CRYPTO_ALGAPI |
| 1294 | select CRYPTO_CRYPTD | 1293 | select CRYPTO_CRYPTD |
| 1295 | select CRYPTO_ABLK_HELPER_X86 | 1294 | select CRYPTO_ABLK_HELPER |
| 1296 | select CRYPTO_GLUE_HELPER_X86 | 1295 | select CRYPTO_GLUE_HELPER_X86 |
| 1297 | select CRYPTO_TWOFISH_COMMON | 1296 | select CRYPTO_TWOFISH_COMMON |
| 1298 | select CRYPTO_TWOFISH_X86_64 | 1297 | select CRYPTO_TWOFISH_X86_64 |
diff --git a/crypto/Makefile b/crypto/Makefile index b3a7e807e08b..989c510da8cc 100644 --- a/crypto/Makefile +++ b/crypto/Makefile | |||
| @@ -2,8 +2,13 @@ | |||
| 2 | # Cryptographic API | 2 | # Cryptographic API |
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | # memneq MUST be built with -Os or -O0 to prevent early-return optimizations | ||
| 6 | # that will defeat memneq's actual purpose to prevent timing attacks. | ||
| 7 | CFLAGS_REMOVE_memneq.o := -O1 -O2 -O3 | ||
| 8 | CFLAGS_memneq.o := -Os | ||
| 9 | |||
| 5 | obj-$(CONFIG_CRYPTO) += crypto.o | 10 | obj-$(CONFIG_CRYPTO) += crypto.o |
| 6 | crypto-y := api.o cipher.o compress.o | 11 | crypto-y := api.o cipher.o compress.o memneq.o |
| 7 | 12 | ||
| 8 | obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o | 13 | obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o |
| 9 | 14 | ||
| @@ -105,3 +110,4 @@ obj-$(CONFIG_XOR_BLOCKS) += xor.o | |||
| 105 | obj-$(CONFIG_ASYNC_CORE) += async_tx/ | 110 | obj-$(CONFIG_ASYNC_CORE) += async_tx/ |
| 106 | obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/ | 111 | obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/ |
| 107 | obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o | 112 | obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o |
| 113 | obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o | ||
diff --git a/arch/x86/crypto/ablk_helper.c b/crypto/ablk_helper.c index 43282fe04a8b..ffe7278d4bd8 100644 --- a/arch/x86/crypto/ablk_helper.c +++ b/crypto/ablk_helper.c | |||
| @@ -28,10 +28,11 @@ | |||
| 28 | #include <linux/crypto.h> | 28 | #include <linux/crypto.h> |
| 29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
| 30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
| 31 | #include <linux/hardirq.h> | ||
| 31 | #include <crypto/algapi.h> | 32 | #include <crypto/algapi.h> |
| 32 | #include <crypto/cryptd.h> | 33 | #include <crypto/cryptd.h> |
| 33 | #include <asm/i387.h> | 34 | #include <crypto/ablk_helper.h> |
| 34 | #include <asm/crypto/ablk_helper.h> | 35 | #include <asm/simd.h> |
| 35 | 36 | ||
| 36 | int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, | 37 | int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, |
| 37 | unsigned int key_len) | 38 | unsigned int key_len) |
| @@ -70,11 +71,11 @@ int ablk_encrypt(struct ablkcipher_request *req) | |||
| 70 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 71 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
| 71 | struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); | 72 | struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); |
| 72 | 73 | ||
| 73 | if (!irq_fpu_usable()) { | 74 | if (!may_use_simd()) { |
| 74 | struct ablkcipher_request *cryptd_req = | 75 | struct ablkcipher_request *cryptd_req = |
| 75 | ablkcipher_request_ctx(req); | 76 | ablkcipher_request_ctx(req); |
| 76 | 77 | ||
| 77 | memcpy(cryptd_req, req, sizeof(*req)); | 78 | *cryptd_req = *req; |
| 78 | ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); | 79 | ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); |
| 79 | 80 | ||
| 80 | return crypto_ablkcipher_encrypt(cryptd_req); | 81 | return crypto_ablkcipher_encrypt(cryptd_req); |
| @@ -89,11 +90,11 @@ int ablk_decrypt(struct ablkcipher_request *req) | |||
| 89 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | 90 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); |
| 90 | struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); | 91 | struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); |
| 91 | 92 | ||
| 92 | if (!irq_fpu_usable()) { | 93 | if (!may_use_simd()) { |
| 93 | struct ablkcipher_request *cryptd_req = | 94 | struct ablkcipher_request *cryptd_req = |
| 94 | ablkcipher_request_ctx(req); | 95 | ablkcipher_request_ctx(req); |
| 95 | 96 | ||
| 96 | memcpy(cryptd_req, req, sizeof(*req)); | 97 | *cryptd_req = *req; |
| 97 | ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); | 98 | ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); |
| 98 | 99 | ||
| 99 | return crypto_ablkcipher_decrypt(cryptd_req); | 100 | return crypto_ablkcipher_decrypt(cryptd_req); |
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index 7d4a8d28277e..40886c489903 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c | |||
| @@ -16,9 +16,7 @@ | |||
| 16 | #include <crypto/internal/skcipher.h> | 16 | #include <crypto/internal/skcipher.h> |
| 17 | #include <linux/cpumask.h> | 17 | #include <linux/cpumask.h> |
| 18 | #include <linux/err.h> | 18 | #include <linux/err.h> |
| 19 | #include <linux/init.h> | ||
| 20 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
| 21 | #include <linux/module.h> | ||
| 22 | #include <linux/rtnetlink.h> | 20 | #include <linux/rtnetlink.h> |
| 23 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
| 24 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
| @@ -30,8 +28,6 @@ | |||
| 30 | 28 | ||
| 31 | #include "internal.h" | 29 | #include "internal.h" |
| 32 | 30 | ||
| 33 | static const char *skcipher_default_geniv __read_mostly; | ||
| 34 | |||
| 35 | struct ablkcipher_buffer { | 31 | struct ablkcipher_buffer { |
| 36 | struct list_head entry; | 32 | struct list_head entry; |
| 37 | struct scatter_walk dst; | 33 | struct scatter_walk dst; |
| @@ -527,8 +523,7 @@ const char *crypto_default_geniv(const struct crypto_alg *alg) | |||
| 527 | alg->cra_blocksize) | 523 | alg->cra_blocksize) |
| 528 | return "chainiv"; | 524 | return "chainiv"; |
| 529 | 525 | ||
| 530 | return alg->cra_flags & CRYPTO_ALG_ASYNC ? | 526 | return "eseqiv"; |
| 531 | "eseqiv" : skcipher_default_geniv; | ||
| 532 | } | 527 | } |
| 533 | 528 | ||
| 534 | static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) | 529 | static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) |
| @@ -709,17 +704,3 @@ err: | |||
| 709 | return ERR_PTR(err); | 704 | return ERR_PTR(err); |
| 710 | } | 705 | } |
| 711 | EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher); | 706 | EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher); |
| 712 | |||
| 713 | static int __init skcipher_module_init(void) | ||
| 714 | { | ||
| 715 | skcipher_default_geniv = num_possible_cpus() > 1 ? | ||
| 716 | "eseqiv" : "chainiv"; | ||
| 717 | return 0; | ||
| 718 | } | ||
| 719 | |||
| 720 | static void skcipher_module_exit(void) | ||
| 721 | { | ||
| 722 | } | ||
| 723 | |||
| 724 | module_init(skcipher_module_init); | ||
| 725 | module_exit(skcipher_module_exit); | ||
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index c0bb3778f1ae..666f1962a160 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c | |||
| @@ -230,11 +230,11 @@ remainder: | |||
| 230 | */ | 230 | */ |
| 231 | if (byte_count < DEFAULT_BLK_SZ) { | 231 | if (byte_count < DEFAULT_BLK_SZ) { |
| 232 | empty_rbuf: | 232 | empty_rbuf: |
| 233 | for (; ctx->rand_data_valid < DEFAULT_BLK_SZ; | 233 | while (ctx->rand_data_valid < DEFAULT_BLK_SZ) { |
| 234 | ctx->rand_data_valid++) { | ||
| 235 | *ptr = ctx->rand_data[ctx->rand_data_valid]; | 234 | *ptr = ctx->rand_data[ctx->rand_data_valid]; |
| 236 | ptr++; | 235 | ptr++; |
| 237 | byte_count--; | 236 | byte_count--; |
| 237 | ctx->rand_data_valid++; | ||
| 238 | if (byte_count == 0) | 238 | if (byte_count == 0) |
| 239 | goto done; | 239 | goto done; |
| 240 | } | 240 | } |
diff --git a/crypto/asymmetric_keys/rsa.c b/crypto/asymmetric_keys/rsa.c index 90a17f59ba28..459cf97a75e2 100644 --- a/crypto/asymmetric_keys/rsa.c +++ b/crypto/asymmetric_keys/rsa.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
| 14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
| 15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| 16 | #include <crypto/algapi.h> | ||
| 16 | #include "public_key.h" | 17 | #include "public_key.h" |
| 17 | 18 | ||
| 18 | MODULE_LICENSE("GPL"); | 19 | MODULE_LICENSE("GPL"); |
| @@ -189,12 +190,12 @@ static int RSA_verify(const u8 *H, const u8 *EM, size_t k, size_t hash_size, | |||
| 189 | } | 190 | } |
| 190 | } | 191 | } |
| 191 | 192 | ||
| 192 | if (memcmp(asn1_template, EM + T_offset, asn1_size) != 0) { | 193 | if (crypto_memneq(asn1_template, EM + T_offset, asn1_size) != 0) { |
| 193 | kleave(" = -EBADMSG [EM[T] ASN.1 mismatch]"); | 194 | kleave(" = -EBADMSG [EM[T] ASN.1 mismatch]"); |
| 194 | return -EBADMSG; | 195 | return -EBADMSG; |
| 195 | } | 196 | } |
| 196 | 197 | ||
| 197 | if (memcmp(H, EM + T_offset + asn1_size, hash_size) != 0) { | 198 | if (crypto_memneq(H, EM + T_offset + asn1_size, hash_size) != 0) { |
| 198 | kleave(" = -EKEYREJECTED [EM[T] hash mismatch]"); | 199 | kleave(" = -EKEYREJECTED [EM[T] hash mismatch]"); |
| 199 | return -EKEYREJECTED; | 200 | return -EKEYREJECTED; |
| 200 | } | 201 | } |
diff --git a/crypto/authenc.c b/crypto/authenc.c index ffce19de05cf..1875e7026e8f 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c | |||
| @@ -52,40 +52,52 @@ static void authenc_request_complete(struct aead_request *req, int err) | |||
| 52 | aead_request_complete(req, err); | 52 | aead_request_complete(req, err); |
| 53 | } | 53 | } |
| 54 | 54 | ||
| 55 | static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, | 55 | int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, |
| 56 | unsigned int keylen) | 56 | unsigned int keylen) |
| 57 | { | 57 | { |
| 58 | unsigned int authkeylen; | 58 | struct rtattr *rta = (struct rtattr *)key; |
| 59 | unsigned int enckeylen; | ||
| 60 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
| 61 | struct crypto_ahash *auth = ctx->auth; | ||
| 62 | struct crypto_ablkcipher *enc = ctx->enc; | ||
| 63 | struct rtattr *rta = (void *)key; | ||
| 64 | struct crypto_authenc_key_param *param; | 59 | struct crypto_authenc_key_param *param; |
| 65 | int err = -EINVAL; | ||
| 66 | 60 | ||
| 67 | if (!RTA_OK(rta, keylen)) | 61 | if (!RTA_OK(rta, keylen)) |
| 68 | goto badkey; | 62 | return -EINVAL; |
| 69 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) | 63 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) |
| 70 | goto badkey; | 64 | return -EINVAL; |
| 71 | if (RTA_PAYLOAD(rta) < sizeof(*param)) | 65 | if (RTA_PAYLOAD(rta) < sizeof(*param)) |
| 72 | goto badkey; | 66 | return -EINVAL; |
| 73 | 67 | ||
| 74 | param = RTA_DATA(rta); | 68 | param = RTA_DATA(rta); |
| 75 | enckeylen = be32_to_cpu(param->enckeylen); | 69 | keys->enckeylen = be32_to_cpu(param->enckeylen); |
| 76 | 70 | ||
| 77 | key += RTA_ALIGN(rta->rta_len); | 71 | key += RTA_ALIGN(rta->rta_len); |
| 78 | keylen -= RTA_ALIGN(rta->rta_len); | 72 | keylen -= RTA_ALIGN(rta->rta_len); |
| 79 | 73 | ||
| 80 | if (keylen < enckeylen) | 74 | if (keylen < keys->enckeylen) |
| 81 | goto badkey; | 75 | return -EINVAL; |
| 82 | 76 | ||
| 83 | authkeylen = keylen - enckeylen; | 77 | keys->authkeylen = keylen - keys->enckeylen; |
| 78 | keys->authkey = key; | ||
| 79 | keys->enckey = key + keys->authkeylen; | ||
| 80 | |||
| 81 | return 0; | ||
| 82 | } | ||
| 83 | EXPORT_SYMBOL_GPL(crypto_authenc_extractkeys); | ||
| 84 | |||
| 85 | static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, | ||
| 86 | unsigned int keylen) | ||
| 87 | { | ||
| 88 | struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); | ||
| 89 | struct crypto_ahash *auth = ctx->auth; | ||
| 90 | struct crypto_ablkcipher *enc = ctx->enc; | ||
| 91 | struct crypto_authenc_keys keys; | ||
| 92 | int err = -EINVAL; | ||
| 93 | |||
| 94 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) | ||
| 95 | goto badkey; | ||
| 84 | 96 | ||
| 85 | crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); | 97 | crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); |
| 86 | crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) & | 98 | crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) & |
| 87 | CRYPTO_TFM_REQ_MASK); | 99 | CRYPTO_TFM_REQ_MASK); |
| 88 | err = crypto_ahash_setkey(auth, key, authkeylen); | 100 | err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen); |
| 89 | crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) & | 101 | crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) & |
| 90 | CRYPTO_TFM_RES_MASK); | 102 | CRYPTO_TFM_RES_MASK); |
| 91 | 103 | ||
| @@ -95,7 +107,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, | |||
| 95 | crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); | 107 | crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); |
| 96 | crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) & | 108 | crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) & |
| 97 | CRYPTO_TFM_REQ_MASK); | 109 | CRYPTO_TFM_REQ_MASK); |
| 98 | err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen); | 110 | err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen); |
| 99 | crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) & | 111 | crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) & |
| 100 | CRYPTO_TFM_RES_MASK); | 112 | CRYPTO_TFM_RES_MASK); |
| 101 | 113 | ||
| @@ -188,7 +200,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq, | |||
| 188 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | 200 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, |
| 189 | authsize, 0); | 201 | authsize, 0); |
| 190 | 202 | ||
| 191 | err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; | 203 | err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; |
| 192 | if (err) | 204 | if (err) |
| 193 | goto out; | 205 | goto out; |
| 194 | 206 | ||
| @@ -227,7 +239,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq, | |||
| 227 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | 239 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, |
| 228 | authsize, 0); | 240 | authsize, 0); |
| 229 | 241 | ||
| 230 | err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; | 242 | err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; |
| 231 | if (err) | 243 | if (err) |
| 232 | goto out; | 244 | goto out; |
| 233 | 245 | ||
| @@ -462,7 +474,7 @@ static int crypto_authenc_verify(struct aead_request *req, | |||
| 462 | ihash = ohash + authsize; | 474 | ihash = ohash + authsize; |
| 463 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | 475 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, |
| 464 | authsize, 0); | 476 | authsize, 0); |
| 465 | return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0; | 477 | return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0; |
| 466 | } | 478 | } |
| 467 | 479 | ||
| 468 | static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, | 480 | static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, |
diff --git a/crypto/authencesn.c b/crypto/authencesn.c index ab53762fc309..4be0dd4373a9 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c | |||
| @@ -59,37 +59,19 @@ static void authenc_esn_request_complete(struct aead_request *req, int err) | |||
| 59 | static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key, | 59 | static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key, |
| 60 | unsigned int keylen) | 60 | unsigned int keylen) |
| 61 | { | 61 | { |
| 62 | unsigned int authkeylen; | ||
| 63 | unsigned int enckeylen; | ||
| 64 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); | 62 | struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); |
| 65 | struct crypto_ahash *auth = ctx->auth; | 63 | struct crypto_ahash *auth = ctx->auth; |
| 66 | struct crypto_ablkcipher *enc = ctx->enc; | 64 | struct crypto_ablkcipher *enc = ctx->enc; |
| 67 | struct rtattr *rta = (void *)key; | 65 | struct crypto_authenc_keys keys; |
| 68 | struct crypto_authenc_key_param *param; | ||
| 69 | int err = -EINVAL; | 66 | int err = -EINVAL; |
| 70 | 67 | ||
| 71 | if (!RTA_OK(rta, keylen)) | 68 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) |
| 72 | goto badkey; | 69 | goto badkey; |
| 73 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) | ||
| 74 | goto badkey; | ||
| 75 | if (RTA_PAYLOAD(rta) < sizeof(*param)) | ||
| 76 | goto badkey; | ||
| 77 | |||
| 78 | param = RTA_DATA(rta); | ||
| 79 | enckeylen = be32_to_cpu(param->enckeylen); | ||
| 80 | |||
| 81 | key += RTA_ALIGN(rta->rta_len); | ||
| 82 | keylen -= RTA_ALIGN(rta->rta_len); | ||
| 83 | |||
| 84 | if (keylen < enckeylen) | ||
| 85 | goto badkey; | ||
| 86 | |||
| 87 | authkeylen = keylen - enckeylen; | ||
| 88 | 70 | ||
| 89 | crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); | 71 | crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); |
| 90 | crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) & | 72 | crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) & |
| 91 | CRYPTO_TFM_REQ_MASK); | 73 | CRYPTO_TFM_REQ_MASK); |
| 92 | err = crypto_ahash_setkey(auth, key, authkeylen); | 74 | err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen); |
| 93 | crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) & | 75 | crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) & |
| 94 | CRYPTO_TFM_RES_MASK); | 76 | CRYPTO_TFM_RES_MASK); |
| 95 | 77 | ||
| @@ -99,7 +81,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 * | |||
| 99 | crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); | 81 | crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); |
| 100 | crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) & | 82 | crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) & |
| 101 | CRYPTO_TFM_REQ_MASK); | 83 | CRYPTO_TFM_REQ_MASK); |
| 102 | err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen); | 84 | err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen); |
| 103 | crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) & | 85 | crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) & |
| 104 | CRYPTO_TFM_RES_MASK); | 86 | CRYPTO_TFM_RES_MASK); |
| 105 | 87 | ||
| @@ -247,7 +229,7 @@ static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *ar | |||
| 247 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | 229 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, |
| 248 | authsize, 0); | 230 | authsize, 0); |
| 249 | 231 | ||
| 250 | err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; | 232 | err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; |
| 251 | if (err) | 233 | if (err) |
| 252 | goto out; | 234 | goto out; |
| 253 | 235 | ||
| @@ -296,7 +278,7 @@ static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *a | |||
| 296 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | 278 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, |
| 297 | authsize, 0); | 279 | authsize, 0); |
| 298 | 280 | ||
| 299 | err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; | 281 | err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; |
| 300 | if (err) | 282 | if (err) |
| 301 | goto out; | 283 | goto out; |
| 302 | 284 | ||
| @@ -336,7 +318,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, | |||
| 336 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | 318 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, |
| 337 | authsize, 0); | 319 | authsize, 0); |
| 338 | 320 | ||
| 339 | err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; | 321 | err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; |
| 340 | if (err) | 322 | if (err) |
| 341 | goto out; | 323 | goto out; |
| 342 | 324 | ||
| @@ -568,7 +550,7 @@ static int crypto_authenc_esn_verify(struct aead_request *req) | |||
| 568 | ihash = ohash + authsize; | 550 | ihash = ohash + authsize; |
| 569 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, | 551 | scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, |
| 570 | authsize, 0); | 552 | authsize, 0); |
| 571 | return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0; | 553 | return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0; |
| 572 | } | 554 | } |
| 573 | 555 | ||
| 574 | static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv, | 556 | static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv, |
diff --git a/crypto/ccm.c b/crypto/ccm.c index 499c91717d93..3e05499d183a 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c | |||
| @@ -363,7 +363,7 @@ static void crypto_ccm_decrypt_done(struct crypto_async_request *areq, | |||
| 363 | 363 | ||
| 364 | if (!err) { | 364 | if (!err) { |
| 365 | err = crypto_ccm_auth(req, req->dst, cryptlen); | 365 | err = crypto_ccm_auth(req, req->dst, cryptlen); |
| 366 | if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize)) | 366 | if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize)) |
| 367 | err = -EBADMSG; | 367 | err = -EBADMSG; |
| 368 | } | 368 | } |
| 369 | aead_request_complete(req, err); | 369 | aead_request_complete(req, err); |
| @@ -422,7 +422,7 @@ static int crypto_ccm_decrypt(struct aead_request *req) | |||
| 422 | return err; | 422 | return err; |
| 423 | 423 | ||
| 424 | /* verify */ | 424 | /* verify */ |
| 425 | if (memcmp(authtag, odata, authsize)) | 425 | if (crypto_memneq(authtag, odata, authsize)) |
| 426 | return -EBADMSG; | 426 | return -EBADMSG; |
| 427 | 427 | ||
| 428 | return err; | 428 | return err; |
diff --git a/crypto/gcm.c b/crypto/gcm.c index 43e1fb05ea54..b4f017939004 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c | |||
| @@ -582,7 +582,7 @@ static int crypto_gcm_verify(struct aead_request *req, | |||
| 582 | 582 | ||
| 583 | crypto_xor(auth_tag, iauth_tag, 16); | 583 | crypto_xor(auth_tag, iauth_tag, 16); |
| 584 | scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); | 584 | scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); |
| 585 | return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; | 585 | return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; |
| 586 | } | 586 | } |
| 587 | 587 | ||
| 588 | static void gcm_decrypt_done(struct crypto_async_request *areq, int err) | 588 | static void gcm_decrypt_done(struct crypto_async_request *areq, int err) |
diff --git a/crypto/memneq.c b/crypto/memneq.c new file mode 100644 index 000000000000..cd0162221c14 --- /dev/null +++ b/crypto/memneq.c | |||
| @@ -0,0 +1,138 @@ | |||
| 1 | /* | ||
| 2 | * Constant-time equality testing of memory regions. | ||
| 3 | * | ||
| 4 | * Authors: | ||
| 5 | * | ||
| 6 | * James Yonan <james@openvpn.net> | ||
| 7 | * Daniel Borkmann <dborkman@redhat.com> | ||
| 8 | * | ||
| 9 | * This file is provided under a dual BSD/GPLv2 license. When using or | ||
| 10 | * redistributing this file, you may do so under either license. | ||
| 11 | * | ||
| 12 | * GPL LICENSE SUMMARY | ||
| 13 | * | ||
| 14 | * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. | ||
| 15 | * | ||
| 16 | * This program is free software; you can redistribute it and/or modify | ||
| 17 | * it under the terms of version 2 of the GNU General Public License as | ||
| 18 | * published by the Free Software Foundation. | ||
| 19 | * | ||
| 20 | * This program is distributed in the hope that it will be useful, but | ||
| 21 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 22 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 23 | * General Public License for more details. | ||
| 24 | * | ||
| 25 | * You should have received a copy of the GNU General Public License | ||
| 26 | * along with this program; if not, write to the Free Software | ||
| 27 | * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 28 | * The full GNU General Public License is included in this distribution | ||
| 29 | * in the file called LICENSE.GPL. | ||
| 30 | * | ||
| 31 | * BSD LICENSE | ||
| 32 | * | ||
| 33 | * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. | ||
| 34 | * | ||
| 35 | * Redistribution and use in source and binary forms, with or without | ||
| 36 | * modification, are permitted provided that the following conditions | ||
| 37 | * are met: | ||
| 38 | * | ||
| 39 | * * Redistributions of source code must retain the above copyright | ||
| 40 | * notice, this list of conditions and the following disclaimer. | ||
| 41 | * * Redistributions in binary form must reproduce the above copyright | ||
| 42 | * notice, this list of conditions and the following disclaimer in | ||
| 43 | * the documentation and/or other materials provided with the | ||
| 44 | * distribution. | ||
| 45 | * * Neither the name of OpenVPN Technologies nor the names of its | ||
| 46 | * contributors may be used to endorse or promote products derived | ||
| 47 | * from this software without specific prior written permission. | ||
| 48 | * | ||
| 49 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 50 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 51 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
| 52 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
| 53 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
| 54 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
| 55 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
| 56 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
| 57 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 58 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| 59 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 60 | */ | ||
| 61 | |||
| 62 | #include <crypto/algapi.h> | ||
| 63 | |||
| 64 | #ifndef __HAVE_ARCH_CRYPTO_MEMNEQ | ||
| 65 | |||
| 66 | /* Generic path for arbitrary size */ | ||
| 67 | static inline unsigned long | ||
| 68 | __crypto_memneq_generic(const void *a, const void *b, size_t size) | ||
| 69 | { | ||
| 70 | unsigned long neq = 0; | ||
| 71 | |||
| 72 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) | ||
| 73 | while (size >= sizeof(unsigned long)) { | ||
| 74 | neq |= *(unsigned long *)a ^ *(unsigned long *)b; | ||
| 75 | a += sizeof(unsigned long); | ||
| 76 | b += sizeof(unsigned long); | ||
| 77 | size -= sizeof(unsigned long); | ||
| 78 | } | ||
| 79 | #endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ | ||
| 80 | while (size > 0) { | ||
| 81 | neq |= *(unsigned char *)a ^ *(unsigned char *)b; | ||
| 82 | a += 1; | ||
| 83 | b += 1; | ||
| 84 | size -= 1; | ||
| 85 | } | ||
| 86 | return neq; | ||
| 87 | } | ||
| 88 | |||
| 89 | /* Loop-free fast-path for frequently used 16-byte size */ | ||
| 90 | static inline unsigned long __crypto_memneq_16(const void *a, const void *b) | ||
| 91 | { | ||
| 92 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | ||
| 93 | if (sizeof(unsigned long) == 8) | ||
| 94 | return ((*(unsigned long *)(a) ^ *(unsigned long *)(b)) | ||
| 95 | | (*(unsigned long *)(a+8) ^ *(unsigned long *)(b+8))); | ||
| 96 | else if (sizeof(unsigned int) == 4) | ||
| 97 | return ((*(unsigned int *)(a) ^ *(unsigned int *)(b)) | ||
| 98 | | (*(unsigned int *)(a+4) ^ *(unsigned int *)(b+4)) | ||
| 99 | | (*(unsigned int *)(a+8) ^ *(unsigned int *)(b+8)) | ||
| 100 | | (*(unsigned int *)(a+12) ^ *(unsigned int *)(b+12))); | ||
| 101 | else | ||
| 102 | #endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ | ||
| 103 | return ((*(unsigned char *)(a) ^ *(unsigned char *)(b)) | ||
| 104 | | (*(unsigned char *)(a+1) ^ *(unsigned char *)(b+1)) | ||
| 105 | | (*(unsigned char *)(a+2) ^ *(unsigned char *)(b+2)) | ||
| 106 | | (*(unsigned char *)(a+3) ^ *(unsigned char *)(b+3)) | ||
| 107 | | (*(unsigned char *)(a+4) ^ *(unsigned char *)(b+4)) | ||
| 108 | | (*(unsigned char *)(a+5) ^ *(unsigned char *)(b+5)) | ||
| 109 | | (*(unsigned char *)(a+6) ^ *(unsigned char *)(b+6)) | ||
| 110 | | (*(unsigned char *)(a+7) ^ *(unsigned char *)(b+7)) | ||
| 111 | | (*(unsigned char *)(a+8) ^ *(unsigned char *)(b+8)) | ||
| 112 | | (*(unsigned char *)(a+9) ^ *(unsigned char *)(b+9)) | ||
| 113 | | (*(unsigned char *)(a+10) ^ *(unsigned char *)(b+10)) | ||
| 114 | | (*(unsigned char *)(a+11) ^ *(unsigned char *)(b+11)) | ||
| 115 | | (*(unsigned char *)(a+12) ^ *(unsigned char *)(b+12)) | ||
| 116 | | (*(unsigned char *)(a+13) ^ *(unsigned char *)(b+13)) | ||
| 117 | | (*(unsigned char *)(a+14) ^ *(unsigned char *)(b+14)) | ||
| 118 | | (*(unsigned char *)(a+15) ^ *(unsigned char *)(b+15))); | ||
| 119 | } | ||
| 120 | |||
| 121 | /* Compare two areas of memory without leaking timing information, | ||
| 122 | * and with special optimizations for common sizes. Users should | ||
| 123 | * not call this function directly, but should instead use | ||
| 124 | * crypto_memneq defined in crypto/algapi.h. | ||
| 125 | */ | ||
| 126 | noinline unsigned long __crypto_memneq(const void *a, const void *b, | ||
| 127 | size_t size) | ||
| 128 | { | ||
| 129 | switch (size) { | ||
| 130 | case 16: | ||
| 131 | return __crypto_memneq_16(a, b); | ||
| 132 | default: | ||
| 133 | return __crypto_memneq_generic(a, b, size); | ||
| 134 | } | ||
| 135 | } | ||
| 136 | EXPORT_SYMBOL(__crypto_memneq); | ||
| 137 | |||
| 138 | #endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */ | ||
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index c206de2951f2..2f2b08457c67 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig | |||
| @@ -165,6 +165,19 @@ config HW_RANDOM_OMAP | |||
| 165 | 165 | ||
| 166 | If unsure, say Y. | 166 | If unsure, say Y. |
| 167 | 167 | ||
| 168 | config HW_RANDOM_OMAP3_ROM | ||
| 169 | tristate "OMAP3 ROM Random Number Generator support" | ||
| 170 | depends on HW_RANDOM && ARCH_OMAP3 | ||
| 171 | default HW_RANDOM | ||
| 172 | ---help--- | ||
| 173 | This driver provides kernel-side support for the Random Number | ||
| 174 | Generator hardware found on OMAP34xx processors. | ||
| 175 | |||
| 176 | To compile this driver as a module, choose M here: the | ||
| 177 | module will be called omap3-rom-rng. | ||
| 178 | |||
| 179 | If unsure, say Y. | ||
| 180 | |||
| 168 | config HW_RANDOM_OCTEON | 181 | config HW_RANDOM_OCTEON |
| 169 | tristate "Octeon Random Number Generator support" | 182 | tristate "Octeon Random Number Generator support" |
| 170 | depends on HW_RANDOM && CAVIUM_OCTEON_SOC | 183 | depends on HW_RANDOM && CAVIUM_OCTEON_SOC |
| @@ -327,3 +340,15 @@ config HW_RANDOM_TPM | |||
| 327 | module will be called tpm-rng. | 340 | module will be called tpm-rng. |
| 328 | 341 | ||
| 329 | If unsure, say Y. | 342 | If unsure, say Y. |
| 343 | |||
| 344 | config HW_RANDOM_MSM | ||
| 345 | tristate "Qualcomm MSM Random Number Generator support" | ||
| 346 | depends on HW_RANDOM && ARCH_MSM | ||
| 347 | ---help--- | ||
| 348 | This driver provides kernel-side support for the Random Number | ||
| 349 | Generator hardware found on Qualcomm MSM SoCs. | ||
| 350 | |||
| 351 | To compile this driver as a module, choose M here. the | ||
| 352 | module will be called msm-rng. | ||
| 353 | |||
| 354 | If unsure, say Y. | ||
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index d7d2435ff7fa..3ae7755a52e7 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile | |||
| @@ -15,6 +15,7 @@ n2-rng-y := n2-drv.o n2-asm.o | |||
| 15 | obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o | 15 | obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o |
| 16 | obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o | 16 | obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o |
| 17 | obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o | 17 | obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o |
| 18 | obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o | ||
| 18 | obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o | 19 | obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o |
| 19 | obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o | 20 | obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o |
| 20 | obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o | 21 | obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o |
| @@ -28,3 +29,4 @@ obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o | |||
| 28 | obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o | 29 | obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o |
| 29 | obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o | 30 | obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o |
| 30 | obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o | 31 | obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o |
| 32 | obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o | ||
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c new file mode 100644 index 000000000000..148521e51dc6 --- /dev/null +++ b/drivers/char/hw_random/msm-rng.c | |||
| @@ -0,0 +1,197 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License version 2 and | ||
| 6 | * only version 2 as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, | ||
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 11 | * GNU General Public License for more details. | ||
| 12 | * | ||
| 13 | */ | ||
| 14 | #include <linux/clk.h> | ||
| 15 | #include <linux/err.h> | ||
| 16 | #include <linux/hw_random.h> | ||
| 17 | #include <linux/io.h> | ||
| 18 | #include <linux/module.h> | ||
| 19 | #include <linux/of.h> | ||
| 20 | #include <linux/platform_device.h> | ||
| 21 | |||
| 22 | /* Device specific register offsets */ | ||
| 23 | #define PRNG_DATA_OUT 0x0000 | ||
| 24 | #define PRNG_STATUS 0x0004 | ||
| 25 | #define PRNG_LFSR_CFG 0x0100 | ||
| 26 | #define PRNG_CONFIG 0x0104 | ||
| 27 | |||
| 28 | /* Device specific register masks and config values */ | ||
| 29 | #define PRNG_LFSR_CFG_MASK 0x0000ffff | ||
| 30 | #define PRNG_LFSR_CFG_CLOCKS 0x0000dddd | ||
| 31 | #define PRNG_CONFIG_HW_ENABLE BIT(1) | ||
| 32 | #define PRNG_STATUS_DATA_AVAIL BIT(0) | ||
| 33 | |||
| 34 | #define MAX_HW_FIFO_DEPTH 16 | ||
| 35 | #define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4) | ||
| 36 | #define WORD_SZ 4 | ||
| 37 | |||
| 38 | struct msm_rng { | ||
| 39 | void __iomem *base; | ||
| 40 | struct clk *clk; | ||
| 41 | struct hwrng hwrng; | ||
| 42 | }; | ||
| 43 | |||
| 44 | #define to_msm_rng(p) container_of(p, struct msm_rng, hwrng) | ||
| 45 | |||
| 46 | static int msm_rng_enable(struct hwrng *hwrng, int enable) | ||
| 47 | { | ||
| 48 | struct msm_rng *rng = to_msm_rng(hwrng); | ||
| 49 | u32 val; | ||
| 50 | int ret; | ||
| 51 | |||
| 52 | ret = clk_prepare_enable(rng->clk); | ||
| 53 | if (ret) | ||
| 54 | return ret; | ||
| 55 | |||
| 56 | if (enable) { | ||
| 57 | /* Enable PRNG only if it is not already enabled */ | ||
| 58 | val = readl_relaxed(rng->base + PRNG_CONFIG); | ||
| 59 | if (val & PRNG_CONFIG_HW_ENABLE) | ||
| 60 | goto already_enabled; | ||
| 61 | |||
| 62 | val = readl_relaxed(rng->base + PRNG_LFSR_CFG); | ||
| 63 | val &= ~PRNG_LFSR_CFG_MASK; | ||
| 64 | val |= PRNG_LFSR_CFG_CLOCKS; | ||
| 65 | writel(val, rng->base + PRNG_LFSR_CFG); | ||
| 66 | |||
| 67 | val = readl_relaxed(rng->base + PRNG_CONFIG); | ||
| 68 | val |= PRNG_CONFIG_HW_ENABLE; | ||
| 69 | writel(val, rng->base + PRNG_CONFIG); | ||
| 70 | } else { | ||
| 71 | val = readl_relaxed(rng->base + PRNG_CONFIG); | ||
| 72 | val &= ~PRNG_CONFIG_HW_ENABLE; | ||
| 73 | writel(val, rng->base + PRNG_CONFIG); | ||
| 74 | } | ||
| 75 | |||
| 76 | already_enabled: | ||
| 77 | clk_disable_unprepare(rng->clk); | ||
| 78 | return 0; | ||
| 79 | } | ||
| 80 | |||
| 81 | static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait) | ||
| 82 | { | ||
| 83 | struct msm_rng *rng = to_msm_rng(hwrng); | ||
| 84 | size_t currsize = 0; | ||
| 85 | u32 *retdata = data; | ||
| 86 | size_t maxsize; | ||
| 87 | int ret; | ||
| 88 | u32 val; | ||
| 89 | |||
| 90 | /* calculate max size bytes to transfer back to caller */ | ||
| 91 | maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max); | ||
| 92 | |||
| 93 | /* no room for word data */ | ||
| 94 | if (maxsize < WORD_SZ) | ||
| 95 | return 0; | ||
| 96 | |||
| 97 | ret = clk_prepare_enable(rng->clk); | ||
| 98 | if (ret) | ||
| 99 | return ret; | ||
| 100 | |||
| 101 | /* read random data from hardware */ | ||
| 102 | do { | ||
| 103 | val = readl_relaxed(rng->base + PRNG_STATUS); | ||
| 104 | if (!(val & PRNG_STATUS_DATA_AVAIL)) | ||
| 105 | break; | ||
| 106 | |||
| 107 | val = readl_relaxed(rng->base + PRNG_DATA_OUT); | ||
| 108 | if (!val) | ||
| 109 | break; | ||
| 110 | |||
| 111 | *retdata++ = val; | ||
| 112 | currsize += WORD_SZ; | ||
| 113 | |||
| 114 | /* make sure we stay on 32bit boundary */ | ||
| 115 | if ((maxsize - currsize) < WORD_SZ) | ||
| 116 | break; | ||
| 117 | } while (currsize < maxsize); | ||
| 118 | |||
| 119 | clk_disable_unprepare(rng->clk); | ||
| 120 | |||
| 121 | return currsize; | ||
| 122 | } | ||
| 123 | |||
/* hwrng ->init hook: turn the PRNG hardware on (see msm_rng_enable). */
static int msm_rng_init(struct hwrng *hwrng)
{
	return msm_rng_enable(hwrng, 1);
}
| 128 | |||
/* hwrng ->cleanup hook: turn the PRNG hardware off; any error is ignored. */
static void msm_rng_cleanup(struct hwrng *hwrng)
{
	msm_rng_enable(hwrng, 0);
}
| 133 | |||
| 134 | static int msm_rng_probe(struct platform_device *pdev) | ||
| 135 | { | ||
| 136 | struct resource *res; | ||
| 137 | struct msm_rng *rng; | ||
| 138 | int ret; | ||
| 139 | |||
| 140 | rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL); | ||
| 141 | if (!rng) | ||
| 142 | return -ENOMEM; | ||
| 143 | |||
| 144 | platform_set_drvdata(pdev, rng); | ||
| 145 | |||
| 146 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 147 | rng->base = devm_ioremap_resource(&pdev->dev, res); | ||
| 148 | if (IS_ERR(rng->base)) | ||
| 149 | return PTR_ERR(rng->base); | ||
| 150 | |||
| 151 | rng->clk = devm_clk_get(&pdev->dev, "core"); | ||
| 152 | if (IS_ERR(rng->clk)) | ||
| 153 | return PTR_ERR(rng->clk); | ||
| 154 | |||
| 155 | rng->hwrng.name = KBUILD_MODNAME, | ||
| 156 | rng->hwrng.init = msm_rng_init, | ||
| 157 | rng->hwrng.cleanup = msm_rng_cleanup, | ||
| 158 | rng->hwrng.read = msm_rng_read, | ||
| 159 | |||
| 160 | ret = hwrng_register(&rng->hwrng); | ||
| 161 | if (ret) { | ||
| 162 | dev_err(&pdev->dev, "failed to register hwrng\n"); | ||
| 163 | return ret; | ||
| 164 | } | ||
| 165 | |||
| 166 | return 0; | ||
| 167 | } | ||
| 168 | |||
/*
 * msm_rng_remove() - unbind the device.
 * Only the hwrng registration needs explicit teardown; the memory, MMIO
 * mapping and clock were acquired with devm_* helpers in probe.
 */
static int msm_rng_remove(struct platform_device *pdev)
{
	struct msm_rng *rng = platform_get_drvdata(pdev);

	hwrng_unregister(&rng->hwrng);
	return 0;
}
| 176 | |||
/* Device-tree match table: binds against "qcom,prng" nodes. */
static const struct of_device_id msm_rng_of_match[] = {
	{ .compatible = "qcom,prng", },
	{}
};
MODULE_DEVICE_TABLE(of, msm_rng_of_match);

/* Platform driver glue; of_match_ptr compiles the table out without CONFIG_OF. */
static struct platform_driver msm_rng_driver = {
	.probe = msm_rng_probe,
	.remove = msm_rng_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(msm_rng_of_match),
	}
};
module_platform_driver(msm_rng_driver);

MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");
MODULE_DESCRIPTION("Qualcomm MSM random number generator driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c new file mode 100644 index 000000000000..c853e9e68573 --- /dev/null +++ b/drivers/char/hw_random/omap3-rom-rng.c | |||
| @@ -0,0 +1,141 @@ | |||
| 1 | /* | ||
| 2 | * omap3-rom-rng.c - RNG driver for TI OMAP3 CPU family | ||
| 3 | * | ||
| 4 | * Copyright (C) 2009 Nokia Corporation | ||
| 5 | * Author: Juha Yrjola <juha.yrjola@solidboot.com> | ||
| 6 | * | ||
| 7 | * Copyright (C) 2013 Pali Rohár <pali.rohar@gmail.com> | ||
| 8 | * | ||
| 9 | * This file is licensed under the terms of the GNU General Public | ||
| 10 | * License version 2. This program is licensed "as is" without any | ||
| 11 | * warranty of any kind, whether express or implied. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 15 | |||
| 16 | #include <linux/module.h> | ||
| 17 | #include <linux/init.h> | ||
| 18 | #include <linux/random.h> | ||
| 19 | #include <linux/hw_random.h> | ||
| 20 | #include <linux/timer.h> | ||
| 21 | #include <linux/clk.h> | ||
| 22 | #include <linux/err.h> | ||
| 23 | #include <linux/platform_device.h> | ||
| 24 | |||
/* Flag values passed as the third argument to the ROM RNG service call. */
#define RNG_RESET 0x01
#define RNG_GEN_PRNG_HW_INIT 0x02
#define RNG_GEN_HW 0x08

/* ROM code entry point; param1: ptr, param2: count, param3: flag */
static u32 (*omap3_rom_rng_call)(u32, u32, u32);

static struct timer_list idle_timer;	/* delayed return-to-reset timer */
static int rng_idle;			/* 1 while the RNG is held in reset */
static struct clk *rng_clk;		/* RNG interface clock ("ick") */
| 35 | |||
| 36 | static void omap3_rom_rng_idle(unsigned long data) | ||
| 37 | { | ||
| 38 | int r; | ||
| 39 | |||
| 40 | r = omap3_rom_rng_call(0, 0, RNG_RESET); | ||
| 41 | if (r != 0) { | ||
| 42 | pr_err("reset failed: %d\n", r); | ||
| 43 | return; | ||
| 44 | } | ||
| 45 | clk_disable_unprepare(rng_clk); | ||
| 46 | rng_idle = 1; | ||
| 47 | } | ||
| 48 | |||
| 49 | static int omap3_rom_rng_get_random(void *buf, unsigned int count) | ||
| 50 | { | ||
| 51 | u32 r; | ||
| 52 | u32 ptr; | ||
| 53 | |||
| 54 | del_timer_sync(&idle_timer); | ||
| 55 | if (rng_idle) { | ||
| 56 | clk_prepare_enable(rng_clk); | ||
| 57 | r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT); | ||
| 58 | if (r != 0) { | ||
| 59 | clk_disable_unprepare(rng_clk); | ||
| 60 | pr_err("HW init failed: %d\n", r); | ||
| 61 | return -EIO; | ||
| 62 | } | ||
| 63 | rng_idle = 0; | ||
| 64 | } | ||
| 65 | |||
| 66 | ptr = virt_to_phys(buf); | ||
| 67 | r = omap3_rom_rng_call(ptr, count, RNG_GEN_HW); | ||
| 68 | mod_timer(&idle_timer, jiffies + msecs_to_jiffies(500)); | ||
| 69 | if (r != 0) | ||
| 70 | return -EINVAL; | ||
| 71 | return 0; | ||
| 72 | } | ||
| 73 | |||
/* hwrng ->data_present: the ROM generates on demand, so data is always ready. */
static int omap3_rom_rng_data_present(struct hwrng *rng, int wait)
{
	return 1;
}
| 78 | |||
| 79 | static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data) | ||
| 80 | { | ||
| 81 | int r; | ||
| 82 | |||
| 83 | r = omap3_rom_rng_get_random(data, 4); | ||
| 84 | if (r < 0) | ||
| 85 | return r; | ||
| 86 | return 4; | ||
| 87 | } | ||
| 88 | |||
/* hwrng registration record; uses the legacy data_present/data_read pair. */
static struct hwrng omap3_rom_rng_ops = {
	.name		= "omap3-rom",
	.data_present	= omap3_rom_rng_data_present,
	.data_read	= omap3_rom_rng_data_read,
};
| 94 | |||
| 95 | static int omap3_rom_rng_probe(struct platform_device *pdev) | ||
| 96 | { | ||
| 97 | pr_info("initializing\n"); | ||
| 98 | |||
| 99 | omap3_rom_rng_call = pdev->dev.platform_data; | ||
| 100 | if (!omap3_rom_rng_call) { | ||
| 101 | pr_err("omap3_rom_rng_call is NULL\n"); | ||
| 102 | return -EINVAL; | ||
| 103 | } | ||
| 104 | |||
| 105 | setup_timer(&idle_timer, omap3_rom_rng_idle, 0); | ||
| 106 | rng_clk = clk_get(&pdev->dev, "ick"); | ||
| 107 | if (IS_ERR(rng_clk)) { | ||
| 108 | pr_err("unable to get RNG clock\n"); | ||
| 109 | return PTR_ERR(rng_clk); | ||
| 110 | } | ||
| 111 | |||
| 112 | /* Leave the RNG in reset state. */ | ||
| 113 | clk_prepare_enable(rng_clk); | ||
| 114 | omap3_rom_rng_idle(0); | ||
| 115 | |||
| 116 | return hwrng_register(&omap3_rom_rng_ops); | ||
| 117 | } | ||
| 118 | |||
| 119 | static int omap3_rom_rng_remove(struct platform_device *pdev) | ||
| 120 | { | ||
| 121 | hwrng_unregister(&omap3_rom_rng_ops); | ||
| 122 | clk_disable_unprepare(rng_clk); | ||
| 123 | clk_put(rng_clk); | ||
| 124 | return 0; | ||
| 125 | } | ||
| 126 | |||
/* Platform driver glue; matched by name against board-registered devices. */
static struct platform_driver omap3_rom_rng_driver = {
	.driver = {
		.name		= "omap3-rom-rng",
		.owner		= THIS_MODULE,
	},
	.probe		= omap3_rom_rng_probe,
	.remove		= omap3_rom_rng_remove,
};

module_platform_driver(omap3_rom_rng_driver);

MODULE_ALIAS("platform:omap3-rom-rng");
MODULE_AUTHOR("Juha Yrjola");
MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c index b761459a3436..ab7ffdec0ec3 100644 --- a/drivers/char/hw_random/pseries-rng.c +++ b/drivers/char/hw_random/pseries-rng.c | |||
| @@ -24,7 +24,6 @@ | |||
| 24 | #include <linux/hw_random.h> | 24 | #include <linux/hw_random.h> |
| 25 | #include <asm/vio.h> | 25 | #include <asm/vio.h> |
| 26 | 26 | ||
| 27 | #define MODULE_NAME "pseries-rng" | ||
| 28 | 27 | ||
| 29 | static int pseries_rng_data_read(struct hwrng *rng, u32 *data) | 28 | static int pseries_rng_data_read(struct hwrng *rng, u32 *data) |
| 30 | { | 29 | { |
| @@ -55,7 +54,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev) | |||
| 55 | }; | 54 | }; |
| 56 | 55 | ||
| 57 | static struct hwrng pseries_rng = { | 56 | static struct hwrng pseries_rng = { |
| 58 | .name = MODULE_NAME, | 57 | .name = KBUILD_MODNAME, |
| 59 | .data_read = pseries_rng_data_read, | 58 | .data_read = pseries_rng_data_read, |
| 60 | }; | 59 | }; |
| 61 | 60 | ||
| @@ -78,7 +77,7 @@ static struct vio_device_id pseries_rng_driver_ids[] = { | |||
| 78 | MODULE_DEVICE_TABLE(vio, pseries_rng_driver_ids); | 77 | MODULE_DEVICE_TABLE(vio, pseries_rng_driver_ids); |
| 79 | 78 | ||
| 80 | static struct vio_driver pseries_rng_driver = { | 79 | static struct vio_driver pseries_rng_driver = { |
| 81 | .name = MODULE_NAME, | 80 | .name = KBUILD_MODNAME, |
| 82 | .probe = pseries_rng_probe, | 81 | .probe = pseries_rng_probe, |
| 83 | .remove = pseries_rng_remove, | 82 | .remove = pseries_rng_remove, |
| 84 | .get_desired_dma = pseries_rng_get_desired_dma, | 83 | .get_desired_dma = pseries_rng_get_desired_dma, |
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index e737772ad69a..de5a6dcfb3e2 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c | |||
| @@ -221,7 +221,7 @@ static void __exit mod_exit(void) | |||
| 221 | module_init(mod_init); | 221 | module_init(mod_init); |
| 222 | module_exit(mod_exit); | 222 | module_exit(mod_exit); |
| 223 | 223 | ||
| 224 | static struct x86_cpu_id via_rng_cpu_id[] = { | 224 | static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = { |
| 225 | X86_FEATURE_MATCH(X86_FEATURE_XSTORE), | 225 | X86_FEATURE_MATCH(X86_FEATURE_XSTORE), |
| 226 | {} | 226 | {} |
| 227 | }; | 227 | }; |
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index ca89f6b84b06..e7555ff4cafd 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig | |||
| @@ -4,16 +4,29 @@ config CRYPTO_DEV_FSL_CAAM | |||
| 4 | help | 4 | help |
| 5 | Enables the driver module for Freescale's Cryptographic Accelerator | 5 | Enables the driver module for Freescale's Cryptographic Accelerator |
| 6 | and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). | 6 | and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). |
| 7 | This module adds a job ring operation interface, and configures h/w | 7 | This module creates job ring devices, and configures h/w |
| 8 | to operate as a DPAA component automatically, depending | 8 | to operate as a DPAA component automatically, depending |
| 9 | on h/w feature availability. | 9 | on h/w feature availability. |
| 10 | 10 | ||
| 11 | To compile this driver as a module, choose M here: the module | 11 | To compile this driver as a module, choose M here: the module |
| 12 | will be called caam. | 12 | will be called caam. |
| 13 | 13 | ||
| 14 | config CRYPTO_DEV_FSL_CAAM_JR | ||
| 15 | tristate "Freescale CAAM Job Ring driver backend" | ||
| 16 | depends on CRYPTO_DEV_FSL_CAAM | ||
| 17 | default y | ||
| 18 | help | ||
| 19 | Enables the driver module for Job Rings which are part of | ||
| 20 | Freescale's Cryptographic Accelerator | ||
| 21 | and Assurance Module (CAAM). This module adds a job ring operation | ||
| 22 | interface. | ||
| 23 | |||
| 24 | To compile this driver as a module, choose M here: the module | ||
| 25 | will be called caam_jr. | ||
| 26 | |||
| 14 | config CRYPTO_DEV_FSL_CAAM_RINGSIZE | 27 | config CRYPTO_DEV_FSL_CAAM_RINGSIZE |
| 15 | int "Job Ring size" | 28 | int "Job Ring size" |
| 16 | depends on CRYPTO_DEV_FSL_CAAM | 29 | depends on CRYPTO_DEV_FSL_CAAM_JR |
| 17 | range 2 9 | 30 | range 2 9 |
| 18 | default "9" | 31 | default "9" |
| 19 | help | 32 | help |
| @@ -31,7 +44,7 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE | |||
| 31 | 44 | ||
| 32 | config CRYPTO_DEV_FSL_CAAM_INTC | 45 | config CRYPTO_DEV_FSL_CAAM_INTC |
| 33 | bool "Job Ring interrupt coalescing" | 46 | bool "Job Ring interrupt coalescing" |
| 34 | depends on CRYPTO_DEV_FSL_CAAM | 47 | depends on CRYPTO_DEV_FSL_CAAM_JR |
| 35 | default n | 48 | default n |
| 36 | help | 49 | help |
| 37 | Enable the Job Ring's interrupt coalescing feature. | 50 | Enable the Job Ring's interrupt coalescing feature. |
| @@ -62,7 +75,7 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD | |||
| 62 | 75 | ||
| 63 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API | 76 | config CRYPTO_DEV_FSL_CAAM_CRYPTO_API |
| 64 | tristate "Register algorithm implementations with the Crypto API" | 77 | tristate "Register algorithm implementations with the Crypto API" |
| 65 | depends on CRYPTO_DEV_FSL_CAAM | 78 | depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR |
| 66 | default y | 79 | default y |
| 67 | select CRYPTO_ALGAPI | 80 | select CRYPTO_ALGAPI |
| 68 | select CRYPTO_AUTHENC | 81 | select CRYPTO_AUTHENC |
| @@ -76,7 +89,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API | |||
| 76 | 89 | ||
| 77 | config CRYPTO_DEV_FSL_CAAM_AHASH_API | 90 | config CRYPTO_DEV_FSL_CAAM_AHASH_API |
| 78 | tristate "Register hash algorithm implementations with Crypto API" | 91 | tristate "Register hash algorithm implementations with Crypto API" |
| 79 | depends on CRYPTO_DEV_FSL_CAAM | 92 | depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR |
| 80 | default y | 93 | default y |
| 81 | select CRYPTO_HASH | 94 | select CRYPTO_HASH |
| 82 | help | 95 | help |
| @@ -88,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API | |||
| 88 | 101 | ||
| 89 | config CRYPTO_DEV_FSL_CAAM_RNG_API | 102 | config CRYPTO_DEV_FSL_CAAM_RNG_API |
| 90 | tristate "Register caam device for hwrng API" | 103 | tristate "Register caam device for hwrng API" |
| 91 | depends on CRYPTO_DEV_FSL_CAAM | 104 | depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR |
| 92 | default y | 105 | default y |
| 93 | select CRYPTO_RNG | 106 | select CRYPTO_RNG |
| 94 | select HW_RANDOM | 107 | select HW_RANDOM |
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile index d56bd0ec65d8..550758a333e7 100644 --- a/drivers/crypto/caam/Makefile +++ b/drivers/crypto/caam/Makefile | |||
| @@ -6,8 +6,10 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y) | |||
| 6 | endif | 6 | endif |
| 7 | 7 | ||
| 8 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o | 8 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o |
| 9 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o | ||
| 9 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o | 10 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o |
| 10 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o | 11 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o |
| 11 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o | 12 | obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o |
| 12 | 13 | ||
| 13 | caam-objs := ctrl.o jr.o error.o key_gen.o | 14 | caam-objs := ctrl.o |
| 15 | caam_jr-objs := jr.o key_gen.o error.o | ||
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 7c63b72ecd75..4f44b71b9e24 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
| @@ -86,6 +86,7 @@ | |||
| 86 | #else | 86 | #else |
| 87 | #define debug(format, arg...) | 87 | #define debug(format, arg...) |
| 88 | #endif | 88 | #endif |
| 89 | static struct list_head alg_list; | ||
| 89 | 90 | ||
| 90 | /* Set DK bit in class 1 operation if shared */ | 91 | /* Set DK bit in class 1 operation if shared */ |
| 91 | static inline void append_dec_op1(u32 *desc, u32 type) | 92 | static inline void append_dec_op1(u32 *desc, u32 type) |
| @@ -2057,7 +2058,6 @@ static struct caam_alg_template driver_algs[] = { | |||
| 2057 | 2058 | ||
| 2058 | struct caam_crypto_alg { | 2059 | struct caam_crypto_alg { |
| 2059 | struct list_head entry; | 2060 | struct list_head entry; |
| 2060 | struct device *ctrldev; | ||
| 2061 | int class1_alg_type; | 2061 | int class1_alg_type; |
| 2062 | int class2_alg_type; | 2062 | int class2_alg_type; |
| 2063 | int alg_op; | 2063 | int alg_op; |
| @@ -2070,14 +2070,12 @@ static int caam_cra_init(struct crypto_tfm *tfm) | |||
| 2070 | struct caam_crypto_alg *caam_alg = | 2070 | struct caam_crypto_alg *caam_alg = |
| 2071 | container_of(alg, struct caam_crypto_alg, crypto_alg); | 2071 | container_of(alg, struct caam_crypto_alg, crypto_alg); |
| 2072 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); | 2072 | struct caam_ctx *ctx = crypto_tfm_ctx(tfm); |
| 2073 | struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev); | ||
| 2074 | int tgt_jr = atomic_inc_return(&priv->tfm_count); | ||
| 2075 | 2073 | ||
| 2076 | /* | 2074 | ctx->jrdev = caam_jr_alloc(); |
| 2077 | * distribute tfms across job rings to ensure in-order | 2075 | if (IS_ERR(ctx->jrdev)) { |
| 2078 | * crypto request processing per tfm | 2076 | pr_err("Job Ring Device allocation for transform failed\n"); |
| 2079 | */ | 2077 | return PTR_ERR(ctx->jrdev); |
| 2080 | ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs]; | 2078 | } |
| 2081 | 2079 | ||
| 2082 | /* copy descriptor header template value */ | 2080 | /* copy descriptor header template value */ |
| 2083 | ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; | 2081 | ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; |
| @@ -2104,44 +2102,26 @@ static void caam_cra_exit(struct crypto_tfm *tfm) | |||
| 2104 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma, | 2102 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma, |
| 2105 | desc_bytes(ctx->sh_desc_givenc), | 2103 | desc_bytes(ctx->sh_desc_givenc), |
| 2106 | DMA_TO_DEVICE); | 2104 | DMA_TO_DEVICE); |
| 2105 | |||
| 2106 | caam_jr_free(ctx->jrdev); | ||
| 2107 | } | 2107 | } |
| 2108 | 2108 | ||
| 2109 | static void __exit caam_algapi_exit(void) | 2109 | static void __exit caam_algapi_exit(void) |
| 2110 | { | 2110 | { |
| 2111 | 2111 | ||
| 2112 | struct device_node *dev_node; | ||
| 2113 | struct platform_device *pdev; | ||
| 2114 | struct device *ctrldev; | ||
| 2115 | struct caam_drv_private *priv; | ||
| 2116 | struct caam_crypto_alg *t_alg, *n; | 2112 | struct caam_crypto_alg *t_alg, *n; |
| 2117 | 2113 | ||
| 2118 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | 2114 | if (!alg_list.next) |
| 2119 | if (!dev_node) { | ||
| 2120 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
| 2121 | if (!dev_node) | ||
| 2122 | return; | ||
| 2123 | } | ||
| 2124 | |||
| 2125 | pdev = of_find_device_by_node(dev_node); | ||
| 2126 | if (!pdev) | ||
| 2127 | return; | ||
| 2128 | |||
| 2129 | ctrldev = &pdev->dev; | ||
| 2130 | of_node_put(dev_node); | ||
| 2131 | priv = dev_get_drvdata(ctrldev); | ||
| 2132 | |||
| 2133 | if (!priv->alg_list.next) | ||
| 2134 | return; | 2115 | return; |
| 2135 | 2116 | ||
| 2136 | list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { | 2117 | list_for_each_entry_safe(t_alg, n, &alg_list, entry) { |
| 2137 | crypto_unregister_alg(&t_alg->crypto_alg); | 2118 | crypto_unregister_alg(&t_alg->crypto_alg); |
| 2138 | list_del(&t_alg->entry); | 2119 | list_del(&t_alg->entry); |
| 2139 | kfree(t_alg); | 2120 | kfree(t_alg); |
| 2140 | } | 2121 | } |
| 2141 | } | 2122 | } |
| 2142 | 2123 | ||
| 2143 | static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, | 2124 | static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template |
| 2144 | struct caam_alg_template | ||
| 2145 | *template) | 2125 | *template) |
| 2146 | { | 2126 | { |
| 2147 | struct caam_crypto_alg *t_alg; | 2127 | struct caam_crypto_alg *t_alg; |
| @@ -2149,7 +2129,7 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, | |||
| 2149 | 2129 | ||
| 2150 | t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL); | 2130 | t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL); |
| 2151 | if (!t_alg) { | 2131 | if (!t_alg) { |
| 2152 | dev_err(ctrldev, "failed to allocate t_alg\n"); | 2132 | pr_err("failed to allocate t_alg\n"); |
| 2153 | return ERR_PTR(-ENOMEM); | 2133 | return ERR_PTR(-ENOMEM); |
| 2154 | } | 2134 | } |
| 2155 | 2135 | ||
| @@ -2181,62 +2161,39 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, | |||
| 2181 | t_alg->class1_alg_type = template->class1_alg_type; | 2161 | t_alg->class1_alg_type = template->class1_alg_type; |
| 2182 | t_alg->class2_alg_type = template->class2_alg_type; | 2162 | t_alg->class2_alg_type = template->class2_alg_type; |
| 2183 | t_alg->alg_op = template->alg_op; | 2163 | t_alg->alg_op = template->alg_op; |
| 2184 | t_alg->ctrldev = ctrldev; | ||
| 2185 | 2164 | ||
| 2186 | return t_alg; | 2165 | return t_alg; |
| 2187 | } | 2166 | } |
| 2188 | 2167 | ||
| 2189 | static int __init caam_algapi_init(void) | 2168 | static int __init caam_algapi_init(void) |
| 2190 | { | 2169 | { |
| 2191 | struct device_node *dev_node; | ||
| 2192 | struct platform_device *pdev; | ||
| 2193 | struct device *ctrldev; | ||
| 2194 | struct caam_drv_private *priv; | ||
| 2195 | int i = 0, err = 0; | 2170 | int i = 0, err = 0; |
| 2196 | 2171 | ||
| 2197 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | 2172 | INIT_LIST_HEAD(&alg_list); |
| 2198 | if (!dev_node) { | ||
| 2199 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
| 2200 | if (!dev_node) | ||
| 2201 | return -ENODEV; | ||
| 2202 | } | ||
| 2203 | |||
| 2204 | pdev = of_find_device_by_node(dev_node); | ||
| 2205 | if (!pdev) | ||
| 2206 | return -ENODEV; | ||
| 2207 | |||
| 2208 | ctrldev = &pdev->dev; | ||
| 2209 | priv = dev_get_drvdata(ctrldev); | ||
| 2210 | of_node_put(dev_node); | ||
| 2211 | |||
| 2212 | INIT_LIST_HEAD(&priv->alg_list); | ||
| 2213 | |||
| 2214 | atomic_set(&priv->tfm_count, -1); | ||
| 2215 | 2173 | ||
| 2216 | /* register crypto algorithms the device supports */ | 2174 | /* register crypto algorithms the device supports */ |
| 2217 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { | 2175 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { |
| 2218 | /* TODO: check if h/w supports alg */ | 2176 | /* TODO: check if h/w supports alg */ |
| 2219 | struct caam_crypto_alg *t_alg; | 2177 | struct caam_crypto_alg *t_alg; |
| 2220 | 2178 | ||
| 2221 | t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]); | 2179 | t_alg = caam_alg_alloc(&driver_algs[i]); |
| 2222 | if (IS_ERR(t_alg)) { | 2180 | if (IS_ERR(t_alg)) { |
| 2223 | err = PTR_ERR(t_alg); | 2181 | err = PTR_ERR(t_alg); |
| 2224 | dev_warn(ctrldev, "%s alg allocation failed\n", | 2182 | pr_warn("%s alg allocation failed\n", |
| 2225 | driver_algs[i].driver_name); | 2183 | driver_algs[i].driver_name); |
| 2226 | continue; | 2184 | continue; |
| 2227 | } | 2185 | } |
| 2228 | 2186 | ||
| 2229 | err = crypto_register_alg(&t_alg->crypto_alg); | 2187 | err = crypto_register_alg(&t_alg->crypto_alg); |
| 2230 | if (err) { | 2188 | if (err) { |
| 2231 | dev_warn(ctrldev, "%s alg registration failed\n", | 2189 | pr_warn("%s alg registration failed\n", |
| 2232 | t_alg->crypto_alg.cra_driver_name); | 2190 | t_alg->crypto_alg.cra_driver_name); |
| 2233 | kfree(t_alg); | 2191 | kfree(t_alg); |
| 2234 | } else | 2192 | } else |
| 2235 | list_add_tail(&t_alg->entry, &priv->alg_list); | 2193 | list_add_tail(&t_alg->entry, &alg_list); |
| 2236 | } | 2194 | } |
| 2237 | if (!list_empty(&priv->alg_list)) | 2195 | if (!list_empty(&alg_list)) |
| 2238 | dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n", | 2196 | pr_info("caam algorithms registered in /proc/crypto\n"); |
| 2239 | (char *)of_get_property(dev_node, "compatible", NULL)); | ||
| 2240 | 2197 | ||
| 2241 | return err; | 2198 | return err; |
| 2242 | } | 2199 | } |
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index e732bd962e98..0378328f47a7 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
| @@ -94,6 +94,9 @@ | |||
| 94 | #define debug(format, arg...) | 94 | #define debug(format, arg...) |
| 95 | #endif | 95 | #endif |
| 96 | 96 | ||
| 97 | |||
| 98 | static struct list_head hash_list; | ||
| 99 | |||
| 97 | /* ahash per-session context */ | 100 | /* ahash per-session context */ |
| 98 | struct caam_hash_ctx { | 101 | struct caam_hash_ctx { |
| 99 | struct device *jrdev; | 102 | struct device *jrdev; |
| @@ -1653,7 +1656,6 @@ static struct caam_hash_template driver_hash[] = { | |||
| 1653 | 1656 | ||
| 1654 | struct caam_hash_alg { | 1657 | struct caam_hash_alg { |
| 1655 | struct list_head entry; | 1658 | struct list_head entry; |
| 1656 | struct device *ctrldev; | ||
| 1657 | int alg_type; | 1659 | int alg_type; |
| 1658 | int alg_op; | 1660 | int alg_op; |
| 1659 | struct ahash_alg ahash_alg; | 1661 | struct ahash_alg ahash_alg; |
| @@ -1670,7 +1672,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) | |||
| 1670 | struct caam_hash_alg *caam_hash = | 1672 | struct caam_hash_alg *caam_hash = |
| 1671 | container_of(alg, struct caam_hash_alg, ahash_alg); | 1673 | container_of(alg, struct caam_hash_alg, ahash_alg); |
| 1672 | struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 1674 | struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); |
| 1673 | struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev); | ||
| 1674 | /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ | 1675 | /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ |
| 1675 | static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, | 1676 | static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, |
| 1676 | HASH_MSG_LEN + SHA1_DIGEST_SIZE, | 1677 | HASH_MSG_LEN + SHA1_DIGEST_SIZE, |
| @@ -1678,15 +1679,17 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) | |||
| 1678 | HASH_MSG_LEN + SHA256_DIGEST_SIZE, | 1679 | HASH_MSG_LEN + SHA256_DIGEST_SIZE, |
| 1679 | HASH_MSG_LEN + 64, | 1680 | HASH_MSG_LEN + 64, |
| 1680 | HASH_MSG_LEN + SHA512_DIGEST_SIZE }; | 1681 | HASH_MSG_LEN + SHA512_DIGEST_SIZE }; |
| 1681 | int tgt_jr = atomic_inc_return(&priv->tfm_count); | ||
| 1682 | int ret = 0; | 1682 | int ret = 0; |
| 1683 | 1683 | ||
| 1684 | /* | 1684 | /* |
| 1685 | * distribute tfms across job rings to ensure in-order | 1685 | * Get a Job ring from Job Ring driver to ensure in-order |
| 1686 | * crypto request processing per tfm | 1686 | * crypto request processing per tfm |
| 1687 | */ | 1687 | */ |
| 1688 | ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs]; | 1688 | ctx->jrdev = caam_jr_alloc(); |
| 1689 | 1689 | if (IS_ERR(ctx->jrdev)) { | |
| 1690 | pr_err("Job Ring Device allocation for transform failed\n"); | ||
| 1691 | return PTR_ERR(ctx->jrdev); | ||
| 1692 | } | ||
| 1690 | /* copy descriptor header template value */ | 1693 | /* copy descriptor header template value */ |
| 1691 | ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; | 1694 | ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; |
| 1692 | ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op; | 1695 | ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op; |
| @@ -1729,35 +1732,18 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm) | |||
| 1729 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma)) | 1732 | !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma)) |
| 1730 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma, | 1733 | dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma, |
| 1731 | desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE); | 1734 | desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE); |
| 1735 | |||
| 1736 | caam_jr_free(ctx->jrdev); | ||
| 1732 | } | 1737 | } |
| 1733 | 1738 | ||
| 1734 | static void __exit caam_algapi_hash_exit(void) | 1739 | static void __exit caam_algapi_hash_exit(void) |
| 1735 | { | 1740 | { |
| 1736 | struct device_node *dev_node; | ||
| 1737 | struct platform_device *pdev; | ||
| 1738 | struct device *ctrldev; | ||
| 1739 | struct caam_drv_private *priv; | ||
| 1740 | struct caam_hash_alg *t_alg, *n; | 1741 | struct caam_hash_alg *t_alg, *n; |
| 1741 | 1742 | ||
| 1742 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | 1743 | if (!hash_list.next) |
| 1743 | if (!dev_node) { | ||
| 1744 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
| 1745 | if (!dev_node) | ||
| 1746 | return; | ||
| 1747 | } | ||
| 1748 | |||
| 1749 | pdev = of_find_device_by_node(dev_node); | ||
| 1750 | if (!pdev) | ||
| 1751 | return; | 1744 | return; |
| 1752 | 1745 | ||
| 1753 | ctrldev = &pdev->dev; | 1746 | list_for_each_entry_safe(t_alg, n, &hash_list, entry) { |
| 1754 | of_node_put(dev_node); | ||
| 1755 | priv = dev_get_drvdata(ctrldev); | ||
| 1756 | |||
| 1757 | if (!priv->hash_list.next) | ||
| 1758 | return; | ||
| 1759 | |||
| 1760 | list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) { | ||
| 1761 | crypto_unregister_ahash(&t_alg->ahash_alg); | 1747 | crypto_unregister_ahash(&t_alg->ahash_alg); |
| 1762 | list_del(&t_alg->entry); | 1748 | list_del(&t_alg->entry); |
| 1763 | kfree(t_alg); | 1749 | kfree(t_alg); |
| @@ -1765,7 +1751,7 @@ static void __exit caam_algapi_hash_exit(void) | |||
| 1765 | } | 1751 | } |
| 1766 | 1752 | ||
| 1767 | static struct caam_hash_alg * | 1753 | static struct caam_hash_alg * |
| 1768 | caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template, | 1754 | caam_hash_alloc(struct caam_hash_template *template, |
| 1769 | bool keyed) | 1755 | bool keyed) |
| 1770 | { | 1756 | { |
| 1771 | struct caam_hash_alg *t_alg; | 1757 | struct caam_hash_alg *t_alg; |
| @@ -1774,7 +1760,7 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template, | |||
| 1774 | 1760 | ||
| 1775 | t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL); | 1761 | t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL); |
| 1776 | if (!t_alg) { | 1762 | if (!t_alg) { |
| 1777 | dev_err(ctrldev, "failed to allocate t_alg\n"); | 1763 | pr_err("failed to allocate t_alg\n"); |
| 1778 | return ERR_PTR(-ENOMEM); | 1764 | return ERR_PTR(-ENOMEM); |
| 1779 | } | 1765 | } |
| 1780 | 1766 | ||
| @@ -1805,37 +1791,15 @@ caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template, | |||
| 1805 | 1791 | ||
| 1806 | t_alg->alg_type = template->alg_type; | 1792 | t_alg->alg_type = template->alg_type; |
| 1807 | t_alg->alg_op = template->alg_op; | 1793 | t_alg->alg_op = template->alg_op; |
| 1808 | t_alg->ctrldev = ctrldev; | ||
| 1809 | 1794 | ||
| 1810 | return t_alg; | 1795 | return t_alg; |
| 1811 | } | 1796 | } |
| 1812 | 1797 | ||
| 1813 | static int __init caam_algapi_hash_init(void) | 1798 | static int __init caam_algapi_hash_init(void) |
| 1814 | { | 1799 | { |
| 1815 | struct device_node *dev_node; | ||
| 1816 | struct platform_device *pdev; | ||
| 1817 | struct device *ctrldev; | ||
| 1818 | struct caam_drv_private *priv; | ||
| 1819 | int i = 0, err = 0; | 1800 | int i = 0, err = 0; |
| 1820 | 1801 | ||
| 1821 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | 1802 | INIT_LIST_HEAD(&hash_list); |
| 1822 | if (!dev_node) { | ||
| 1823 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
| 1824 | if (!dev_node) | ||
| 1825 | return -ENODEV; | ||
| 1826 | } | ||
| 1827 | |||
| 1828 | pdev = of_find_device_by_node(dev_node); | ||
| 1829 | if (!pdev) | ||
| 1830 | return -ENODEV; | ||
| 1831 | |||
| 1832 | ctrldev = &pdev->dev; | ||
| 1833 | priv = dev_get_drvdata(ctrldev); | ||
| 1834 | of_node_put(dev_node); | ||
| 1835 | |||
| 1836 | INIT_LIST_HEAD(&priv->hash_list); | ||
| 1837 | |||
| 1838 | atomic_set(&priv->tfm_count, -1); | ||
| 1839 | 1803 | ||
| 1840 | /* register crypto algorithms the device supports */ | 1804 | /* register crypto algorithms the device supports */ |
| 1841 | for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { | 1805 | for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { |
| @@ -1843,38 +1807,38 @@ static int __init caam_algapi_hash_init(void) | |||
| 1843 | struct caam_hash_alg *t_alg; | 1807 | struct caam_hash_alg *t_alg; |
| 1844 | 1808 | ||
| 1845 | /* register hmac version */ | 1809 | /* register hmac version */ |
| 1846 | t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true); | 1810 | t_alg = caam_hash_alloc(&driver_hash[i], true); |
| 1847 | if (IS_ERR(t_alg)) { | 1811 | if (IS_ERR(t_alg)) { |
| 1848 | err = PTR_ERR(t_alg); | 1812 | err = PTR_ERR(t_alg); |
| 1849 | dev_warn(ctrldev, "%s alg allocation failed\n", | 1813 | pr_warn("%s alg allocation failed\n", |
| 1850 | driver_hash[i].driver_name); | 1814 | driver_hash[i].driver_name); |
| 1851 | continue; | 1815 | continue; |
| 1852 | } | 1816 | } |
| 1853 | 1817 | ||
| 1854 | err = crypto_register_ahash(&t_alg->ahash_alg); | 1818 | err = crypto_register_ahash(&t_alg->ahash_alg); |
| 1855 | if (err) { | 1819 | if (err) { |
| 1856 | dev_warn(ctrldev, "%s alg registration failed\n", | 1820 | pr_warn("%s alg registration failed\n", |
| 1857 | t_alg->ahash_alg.halg.base.cra_driver_name); | 1821 | t_alg->ahash_alg.halg.base.cra_driver_name); |
| 1858 | kfree(t_alg); | 1822 | kfree(t_alg); |
| 1859 | } else | 1823 | } else |
| 1860 | list_add_tail(&t_alg->entry, &priv->hash_list); | 1824 | list_add_tail(&t_alg->entry, &hash_list); |
| 1861 | 1825 | ||
| 1862 | /* register unkeyed version */ | 1826 | /* register unkeyed version */ |
| 1863 | t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false); | 1827 | t_alg = caam_hash_alloc(&driver_hash[i], false); |
| 1864 | if (IS_ERR(t_alg)) { | 1828 | if (IS_ERR(t_alg)) { |
| 1865 | err = PTR_ERR(t_alg); | 1829 | err = PTR_ERR(t_alg); |
| 1866 | dev_warn(ctrldev, "%s alg allocation failed\n", | 1830 | pr_warn("%s alg allocation failed\n", |
| 1867 | driver_hash[i].driver_name); | 1831 | driver_hash[i].driver_name); |
| 1868 | continue; | 1832 | continue; |
| 1869 | } | 1833 | } |
| 1870 | 1834 | ||
| 1871 | err = crypto_register_ahash(&t_alg->ahash_alg); | 1835 | err = crypto_register_ahash(&t_alg->ahash_alg); |
| 1872 | if (err) { | 1836 | if (err) { |
| 1873 | dev_warn(ctrldev, "%s alg registration failed\n", | 1837 | pr_warn("%s alg registration failed\n", |
| 1874 | t_alg->ahash_alg.halg.base.cra_driver_name); | 1838 | t_alg->ahash_alg.halg.base.cra_driver_name); |
| 1875 | kfree(t_alg); | 1839 | kfree(t_alg); |
| 1876 | } else | 1840 | } else |
| 1877 | list_add_tail(&t_alg->entry, &priv->hash_list); | 1841 | list_add_tail(&t_alg->entry, &hash_list); |
| 1878 | } | 1842 | } |
| 1879 | 1843 | ||
| 1880 | return err; | 1844 | return err; |
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index d1939a9539c0..28486b19fc36 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c | |||
| @@ -273,34 +273,23 @@ static struct hwrng caam_rng = { | |||
| 273 | 273 | ||
| 274 | static void __exit caam_rng_exit(void) | 274 | static void __exit caam_rng_exit(void) |
| 275 | { | 275 | { |
| 276 | caam_jr_free(rng_ctx.jrdev); | ||
| 276 | hwrng_unregister(&caam_rng); | 277 | hwrng_unregister(&caam_rng); |
| 277 | } | 278 | } |
| 278 | 279 | ||
| 279 | static int __init caam_rng_init(void) | 280 | static int __init caam_rng_init(void) |
| 280 | { | 281 | { |
| 281 | struct device_node *dev_node; | 282 | struct device *dev; |
| 282 | struct platform_device *pdev; | ||
| 283 | struct device *ctrldev; | ||
| 284 | struct caam_drv_private *priv; | ||
| 285 | |||
| 286 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); | ||
| 287 | if (!dev_node) { | ||
| 288 | dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); | ||
| 289 | if (!dev_node) | ||
| 290 | return -ENODEV; | ||
| 291 | } | ||
| 292 | |||
| 293 | pdev = of_find_device_by_node(dev_node); | ||
| 294 | if (!pdev) | ||
| 295 | return -ENODEV; | ||
| 296 | 283 | ||
| 297 | ctrldev = &pdev->dev; | 284 | dev = caam_jr_alloc(); |
| 298 | priv = dev_get_drvdata(ctrldev); | 285 | if (IS_ERR(dev)) { |
| 299 | of_node_put(dev_node); | 286 | pr_err("Job Ring Device allocation for transform failed\n"); |
| 287 | return PTR_ERR(dev); | ||
| 288 | } | ||
| 300 | 289 | ||
| 301 | caam_init_rng(&rng_ctx, priv->jrdev[0]); | 290 | caam_init_rng(&rng_ctx, dev); |
| 302 | 291 | ||
| 303 | dev_info(priv->jrdev[0], "registering rng-caam\n"); | 292 | dev_info(dev, "registering rng-caam\n"); |
| 304 | return hwrng_register(&caam_rng); | 293 | return hwrng_register(&caam_rng); |
| 305 | } | 294 | } |
| 306 | 295 | ||
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index bc6d820812b6..63fb1af2c431 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c | |||
| @@ -16,82 +16,75 @@ | |||
| 16 | #include "error.h" | 16 | #include "error.h" |
| 17 | #include "ctrl.h" | 17 | #include "ctrl.h" |
| 18 | 18 | ||
| 19 | static int caam_remove(struct platform_device *pdev) | ||
| 20 | { | ||
| 21 | struct device *ctrldev; | ||
| 22 | struct caam_drv_private *ctrlpriv; | ||
| 23 | struct caam_drv_private_jr *jrpriv; | ||
| 24 | struct caam_full __iomem *topregs; | ||
| 25 | int ring, ret = 0; | ||
| 26 | |||
| 27 | ctrldev = &pdev->dev; | ||
| 28 | ctrlpriv = dev_get_drvdata(ctrldev); | ||
| 29 | topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; | ||
| 30 | |||
| 31 | /* shut down JobRs */ | ||
| 32 | for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { | ||
| 33 | ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]); | ||
| 34 | jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]); | ||
| 35 | irq_dispose_mapping(jrpriv->irq); | ||
| 36 | } | ||
| 37 | |||
| 38 | /* Shut down debug views */ | ||
| 39 | #ifdef CONFIG_DEBUG_FS | ||
| 40 | debugfs_remove_recursive(ctrlpriv->dfs_root); | ||
| 41 | #endif | ||
| 42 | |||
| 43 | /* Unmap controller region */ | ||
| 44 | iounmap(&topregs->ctrl); | ||
| 45 | |||
| 46 | kfree(ctrlpriv->jrdev); | ||
| 47 | kfree(ctrlpriv); | ||
| 48 | |||
| 49 | return ret; | ||
| 50 | } | ||
| 51 | |||
| 52 | /* | 19 | /* |
| 53 | * Descriptor to instantiate RNG State Handle 0 in normal mode and | 20 | * Descriptor to instantiate RNG State Handle 0 in normal mode and |
| 54 | * load the JDKEK, TDKEK and TDSK registers | 21 | * load the JDKEK, TDKEK and TDSK registers |
| 55 | */ | 22 | */ |
| 56 | static void build_instantiation_desc(u32 *desc) | 23 | static void build_instantiation_desc(u32 *desc, int handle, int do_sk) |
| 57 | { | 24 | { |
| 58 | u32 *jump_cmd; | 25 | u32 *jump_cmd, op_flags; |
| 59 | 26 | ||
| 60 | init_job_desc(desc, 0); | 27 | init_job_desc(desc, 0); |
| 61 | 28 | ||
| 29 | op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | | ||
| 30 | (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT; | ||
| 31 | |||
| 62 | /* INIT RNG in non-test mode */ | 32 | /* INIT RNG in non-test mode */ |
| 63 | append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | | 33 | append_operation(desc, op_flags); |
| 64 | OP_ALG_AS_INIT); | 34 | |
| 35 | if (!handle && do_sk) { | ||
| 36 | /* | ||
| 37 | * For SH0, Secure Keys must be generated as well | ||
| 38 | */ | ||
| 39 | |||
| 40 | /* wait for done */ | ||
| 41 | jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1); | ||
| 42 | set_jump_tgt_here(desc, jump_cmd); | ||
| 43 | |||
| 44 | /* | ||
| 45 | * load 1 to clear written reg: | ||
| 46 | * resets the done interrrupt and returns the RNG to idle. | ||
| 47 | */ | ||
| 48 | append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW); | ||
| 49 | |||
| 50 | /* Initialize State Handle */ | ||
| 51 | append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | | ||
| 52 | OP_ALG_AAI_RNG4_SK); | ||
| 53 | } | ||
| 65 | 54 | ||
| 66 | /* wait for done */ | 55 | append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT); |
| 67 | jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1); | 56 | } |
| 68 | set_jump_tgt_here(desc, jump_cmd); | ||
| 69 | 57 | ||
| 70 | /* | 58 | /* Descriptor for deinstantiation of State Handle 0 of the RNG block. */ |
| 71 | * load 1 to clear written reg: | 59 | static void build_deinstantiation_desc(u32 *desc, int handle) |
| 72 | * resets the done interrupt and returns the RNG to idle. | 60 | { |
| 73 | */ | 61 | init_job_desc(desc, 0); |
| 74 | append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW); | ||
| 75 | 62 | ||
| 76 | /* generate secure keys (non-test) */ | 63 | /* Uninstantiate State Handle 0 */ |
| 77 | append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | | 64 | append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | |
| 78 | OP_ALG_RNG4_SK); | 65 | (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL); |
| 66 | |||
| 67 | append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT); | ||
| 79 | } | 68 | } |
| 80 | 69 | ||
| 81 | static int instantiate_rng(struct device *ctrldev) | 70 | /* |
| 71 | * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of | ||
| 72 | * the software (no JR/QI used). | ||
| 73 | * @ctrldev - pointer to device | ||
| 74 | * @status - descriptor status, after being run | ||
| 75 | * | ||
| 76 | * Return: - 0 if no error occurred | ||
| 77 | * - -ENODEV if the DECO couldn't be acquired | ||
| 78 | * - -EAGAIN if an error occurred while executing the descriptor | ||
| 79 | */ | ||
| 80 | static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, | ||
| 81 | u32 *status) | ||
| 82 | { | 82 | { |
| 83 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); | 83 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); |
| 84 | struct caam_full __iomem *topregs; | 84 | struct caam_full __iomem *topregs; |
| 85 | unsigned int timeout = 100000; | 85 | unsigned int timeout = 100000; |
| 86 | u32 *desc; | 86 | u32 deco_dbg_reg, flags; |
| 87 | int i, ret = 0; | 87 | int i; |
| 88 | |||
| 89 | desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA); | ||
| 90 | if (!desc) { | ||
| 91 | dev_err(ctrldev, "can't allocate RNG init descriptor memory\n"); | ||
| 92 | return -ENOMEM; | ||
| 93 | } | ||
| 94 | build_instantiation_desc(desc); | ||
| 95 | 88 | ||
| 96 | /* Set the bit to request direct access to DECO0 */ | 89 | /* Set the bit to request direct access to DECO0 */ |
| 97 | topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; | 90 | topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; |
| @@ -103,36 +96,219 @@ static int instantiate_rng(struct device *ctrldev) | |||
| 103 | 96 | ||
| 104 | if (!timeout) { | 97 | if (!timeout) { |
| 105 | dev_err(ctrldev, "failed to acquire DECO 0\n"); | 98 | dev_err(ctrldev, "failed to acquire DECO 0\n"); |
| 106 | ret = -EIO; | 99 | clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); |
| 107 | goto out; | 100 | return -ENODEV; |
| 108 | } | 101 | } |
| 109 | 102 | ||
| 110 | for (i = 0; i < desc_len(desc); i++) | 103 | for (i = 0; i < desc_len(desc); i++) |
| 111 | topregs->deco.descbuf[i] = *(desc + i); | 104 | wr_reg32(&topregs->deco.descbuf[i], *(desc + i)); |
| 105 | |||
| 106 | flags = DECO_JQCR_WHL; | ||
| 107 | /* | ||
| 108 | * If the descriptor length is longer than 4 words, then the | ||
| 109 | * FOUR bit in JRCTRL register must be set. | ||
| 110 | */ | ||
| 111 | if (desc_len(desc) >= 4) | ||
| 112 | flags |= DECO_JQCR_FOUR; | ||
| 112 | 113 | ||
| 113 | wr_reg32(&topregs->deco.jr_ctl_hi, DECO_JQCR_WHL | DECO_JQCR_FOUR); | 114 | /* Instruct the DECO to execute it */ |
| 115 | wr_reg32(&topregs->deco.jr_ctl_hi, flags); | ||
| 114 | 116 | ||
| 115 | timeout = 10000000; | 117 | timeout = 10000000; |
| 116 | while ((rd_reg32(&topregs->deco.desc_dbg) & DECO_DBG_VALID) && | 118 | do { |
| 117 | --timeout) | 119 | deco_dbg_reg = rd_reg32(&topregs->deco.desc_dbg); |
| 120 | /* | ||
| 121 | * If an error occured in the descriptor, then | ||
| 122 | * the DECO status field will be set to 0x0D | ||
| 123 | */ | ||
| 124 | if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) == | ||
| 125 | DESC_DBG_DECO_STAT_HOST_ERR) | ||
| 126 | break; | ||
| 118 | cpu_relax(); | 127 | cpu_relax(); |
| 128 | } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout); | ||
| 119 | 129 | ||
| 120 | if (!timeout) { | 130 | *status = rd_reg32(&topregs->deco.op_status_hi) & |
| 121 | dev_err(ctrldev, "failed to instantiate RNG\n"); | 131 | DECO_OP_STATUS_HI_ERR_MASK; |
| 122 | ret = -EIO; | ||
| 123 | } | ||
| 124 | 132 | ||
| 133 | /* Mark the DECO as free */ | ||
| 125 | clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); | 134 | clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); |
| 126 | out: | 135 | |
| 136 | if (!timeout) | ||
| 137 | return -EAGAIN; | ||
| 138 | |||
| 139 | return 0; | ||
| 140 | } | ||
| 141 | |||
| 142 | /* | ||
| 143 | * instantiate_rng - builds and executes a descriptor on DECO0, | ||
| 144 | * which initializes the RNG block. | ||
| 145 | * @ctrldev - pointer to device | ||
| 146 | * @state_handle_mask - bitmask containing the instantiation status | ||
| 147 | * for the RNG4 state handles which exist in | ||
| 148 | * the RNG4 block: 1 if it's been instantiated | ||
| 149 | * by an external entry, 0 otherwise. | ||
| 150 | * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK; | ||
| 151 | * Caution: this can be done only once; if the keys need to be | ||
| 152 | * regenerated, a POR is required | ||
| 153 | * | ||
| 154 | * Return: - 0 if no error occurred | ||
| 155 | * - -ENOMEM if there isn't enough memory to allocate the descriptor | ||
| 156 | * - -ENODEV if DECO0 couldn't be acquired | ||
| 157 | * - -EAGAIN if an error occurred when executing the descriptor | ||
| 158 | * f.i. there was a RNG hardware error due to not "good enough" | ||
| 159 | * entropy being aquired. | ||
| 160 | */ | ||
| 161 | static int instantiate_rng(struct device *ctrldev, int state_handle_mask, | ||
| 162 | int gen_sk) | ||
| 163 | { | ||
| 164 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); | ||
| 165 | struct caam_full __iomem *topregs; | ||
| 166 | struct rng4tst __iomem *r4tst; | ||
| 167 | u32 *desc, status, rdsta_val; | ||
| 168 | int ret = 0, sh_idx; | ||
| 169 | |||
| 170 | topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; | ||
| 171 | r4tst = &topregs->ctrl.r4tst[0]; | ||
| 172 | |||
| 173 | desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL); | ||
| 174 | if (!desc) | ||
| 175 | return -ENOMEM; | ||
| 176 | |||
| 177 | for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) { | ||
| 178 | /* | ||
| 179 | * If the corresponding bit is set, this state handle | ||
| 180 | * was initialized by somebody else, so it's left alone. | ||
| 181 | */ | ||
| 182 | if ((1 << sh_idx) & state_handle_mask) | ||
| 183 | continue; | ||
| 184 | |||
| 185 | /* Create the descriptor for instantiating RNG State Handle */ | ||
| 186 | build_instantiation_desc(desc, sh_idx, gen_sk); | ||
| 187 | |||
| 188 | /* Try to run it through DECO0 */ | ||
| 189 | ret = run_descriptor_deco0(ctrldev, desc, &status); | ||
| 190 | |||
| 191 | /* | ||
| 192 | * If ret is not 0, or descriptor status is not 0, then | ||
| 193 | * something went wrong. No need to try the next state | ||
| 194 | * handle (if available), bail out here. | ||
| 195 | * Also, if for some reason, the State Handle didn't get | ||
| 196 | * instantiated although the descriptor has finished | ||
| 197 | * without any error (HW optimizations for later | ||
| 198 | * CAAM eras), then try again. | ||
| 199 | */ | ||
| 200 | rdsta_val = | ||
| 201 | rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IFMASK; | ||
| 202 | if (status || !(rdsta_val & (1 << sh_idx))) | ||
| 203 | ret = -EAGAIN; | ||
| 204 | if (ret) | ||
| 205 | break; | ||
| 206 | |||
| 207 | dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx); | ||
| 208 | /* Clear the contents before recreating the descriptor */ | ||
| 209 | memset(desc, 0x00, CAAM_CMD_SZ * 7); | ||
| 210 | } | ||
| 211 | |||
| 127 | kfree(desc); | 212 | kfree(desc); |
| 213 | |||
| 128 | return ret; | 214 | return ret; |
| 129 | } | 215 | } |
| 130 | 216 | ||
| 131 | /* | 217 | /* |
| 132 | * By default, the TRNG runs for 200 clocks per sample; | 218 | * deinstantiate_rng - builds and executes a descriptor on DECO0, |
| 133 | * 1600 clocks per sample generates better entropy. | 219 | * which deinitializes the RNG block. |
| 220 | * @ctrldev - pointer to device | ||
| 221 | * @state_handle_mask - bitmask containing the instantiation status | ||
| 222 | * for the RNG4 state handles which exist in | ||
| 223 | * the RNG4 block: 1 if it's been instantiated | ||
| 224 | * | ||
| 225 | * Return: - 0 if no error occurred | ||
| 226 | * - -ENOMEM if there isn't enough memory to allocate the descriptor | ||
| 227 | * - -ENODEV if DECO0 couldn't be acquired | ||
| 228 | * - -EAGAIN if an error occurred when executing the descriptor | ||
| 134 | */ | 229 | */ |
| 135 | static void kick_trng(struct platform_device *pdev) | 230 | static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask) |
| 231 | { | ||
| 232 | u32 *desc, status; | ||
| 233 | int sh_idx, ret = 0; | ||
| 234 | |||
| 235 | desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL); | ||
| 236 | if (!desc) | ||
| 237 | return -ENOMEM; | ||
| 238 | |||
| 239 | for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) { | ||
| 240 | /* | ||
| 241 | * If the corresponding bit is set, then it means the state | ||
| 242 | * handle was initialized by us, and thus it needs to be | ||
| 243 | * deintialized as well | ||
| 244 | */ | ||
| 245 | if ((1 << sh_idx) & state_handle_mask) { | ||
| 246 | /* | ||
| 247 | * Create the descriptor for deinstantating this state | ||
| 248 | * handle | ||
| 249 | */ | ||
| 250 | build_deinstantiation_desc(desc, sh_idx); | ||
| 251 | |||
| 252 | /* Try to run it through DECO0 */ | ||
| 253 | ret = run_descriptor_deco0(ctrldev, desc, &status); | ||
| 254 | |||
| 255 | if (ret || status) { | ||
| 256 | dev_err(ctrldev, | ||
| 257 | "Failed to deinstantiate RNG4 SH%d\n", | ||
| 258 | sh_idx); | ||
| 259 | break; | ||
| 260 | } | ||
| 261 | dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx); | ||
| 262 | } | ||
| 263 | } | ||
| 264 | |||
| 265 | kfree(desc); | ||
| 266 | |||
| 267 | return ret; | ||
| 268 | } | ||
| 269 | |||
| 270 | static int caam_remove(struct platform_device *pdev) | ||
| 271 | { | ||
| 272 | struct device *ctrldev; | ||
| 273 | struct caam_drv_private *ctrlpriv; | ||
| 274 | struct caam_full __iomem *topregs; | ||
| 275 | int ring, ret = 0; | ||
| 276 | |||
| 277 | ctrldev = &pdev->dev; | ||
| 278 | ctrlpriv = dev_get_drvdata(ctrldev); | ||
| 279 | topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; | ||
| 280 | |||
| 281 | /* Remove platform devices for JobRs */ | ||
| 282 | for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { | ||
| 283 | if (ctrlpriv->jrpdev[ring]) | ||
| 284 | of_device_unregister(ctrlpriv->jrpdev[ring]); | ||
| 285 | } | ||
| 286 | |||
| 287 | /* De-initialize RNG state handles initialized by this driver. */ | ||
| 288 | if (ctrlpriv->rng4_sh_init) | ||
| 289 | deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init); | ||
| 290 | |||
| 291 | /* Shut down debug views */ | ||
| 292 | #ifdef CONFIG_DEBUG_FS | ||
| 293 | debugfs_remove_recursive(ctrlpriv->dfs_root); | ||
| 294 | #endif | ||
| 295 | |||
| 296 | /* Unmap controller region */ | ||
| 297 | iounmap(&topregs->ctrl); | ||
| 298 | |||
| 299 | kfree(ctrlpriv->jrpdev); | ||
| 300 | kfree(ctrlpriv); | ||
| 301 | |||
| 302 | return ret; | ||
| 303 | } | ||
| 304 | |||
| 305 | /* | ||
| 306 | * kick_trng - sets the various parameters for enabling the initialization | ||
| 307 | * of the RNG4 block in CAAM | ||
| 308 | * @pdev - pointer to the platform device | ||
| 309 | * @ent_delay - Defines the length (in system clocks) of each entropy sample. | ||
| 310 | */ | ||
| 311 | static void kick_trng(struct platform_device *pdev, int ent_delay) | ||
| 136 | { | 312 | { |
| 137 | struct device *ctrldev = &pdev->dev; | 313 | struct device *ctrldev = &pdev->dev; |
| 138 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); | 314 | struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); |
| @@ -145,14 +321,31 @@ static void kick_trng(struct platform_device *pdev) | |||
| 145 | 321 | ||
| 146 | /* put RNG4 into program mode */ | 322 | /* put RNG4 into program mode */ |
| 147 | setbits32(&r4tst->rtmctl, RTMCTL_PRGM); | 323 | setbits32(&r4tst->rtmctl, RTMCTL_PRGM); |
| 148 | /* 1600 clocks per sample */ | 324 | |
| 325 | /* | ||
| 326 | * Performance-wise, it does not make sense to | ||
| 327 | * set the delay to a value that is lower | ||
| 328 | * than the last one that worked (i.e. the state handles | ||
| 329 | * were instantiated properly. Thus, instead of wasting | ||
| 330 | * time trying to set the values controlling the sample | ||
| 331 | * frequency, the function simply returns. | ||
| 332 | */ | ||
| 333 | val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK) | ||
| 334 | >> RTSDCTL_ENT_DLY_SHIFT; | ||
| 335 | if (ent_delay <= val) { | ||
| 336 | /* put RNG4 into run mode */ | ||
| 337 | clrbits32(&r4tst->rtmctl, RTMCTL_PRGM); | ||
| 338 | return; | ||
| 339 | } | ||
| 340 | |||
| 149 | val = rd_reg32(&r4tst->rtsdctl); | 341 | val = rd_reg32(&r4tst->rtsdctl); |
| 150 | val = (val & ~RTSDCTL_ENT_DLY_MASK) | (1600 << RTSDCTL_ENT_DLY_SHIFT); | 342 | val = (val & ~RTSDCTL_ENT_DLY_MASK) | |
| 343 | (ent_delay << RTSDCTL_ENT_DLY_SHIFT); | ||
| 151 | wr_reg32(&r4tst->rtsdctl, val); | 344 | wr_reg32(&r4tst->rtsdctl, val); |
| 152 | /* min. freq. count */ | 345 | /* min. freq. count, equal to 1/4 of the entropy sample length */ |
| 153 | wr_reg32(&r4tst->rtfrqmin, 400); | 346 | wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2); |
| 154 | /* max. freq. count */ | 347 | /* max. freq. count, equal to 8 times the entropy sample length */ |
| 155 | wr_reg32(&r4tst->rtfrqmax, 6400); | 348 | wr_reg32(&r4tst->rtfrqmax, ent_delay << 3); |
| 156 | /* put RNG4 into run mode */ | 349 | /* put RNG4 into run mode */ |
| 157 | clrbits32(&r4tst->rtmctl, RTMCTL_PRGM); | 350 | clrbits32(&r4tst->rtmctl, RTMCTL_PRGM); |
| 158 | } | 351 | } |
| @@ -193,7 +386,7 @@ EXPORT_SYMBOL(caam_get_era); | |||
| 193 | /* Probe routine for CAAM top (controller) level */ | 386 | /* Probe routine for CAAM top (controller) level */ |
| 194 | static int caam_probe(struct platform_device *pdev) | 387 | static int caam_probe(struct platform_device *pdev) |
| 195 | { | 388 | { |
| 196 | int ret, ring, rspec; | 389 | int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; |
| 197 | u64 caam_id; | 390 | u64 caam_id; |
| 198 | struct device *dev; | 391 | struct device *dev; |
| 199 | struct device_node *nprop, *np; | 392 | struct device_node *nprop, *np; |
| @@ -258,8 +451,9 @@ static int caam_probe(struct platform_device *pdev) | |||
| 258 | rspec++; | 451 | rspec++; |
| 259 | } | 452 | } |
| 260 | 453 | ||
| 261 | ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL); | 454 | ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec, |
| 262 | if (ctrlpriv->jrdev == NULL) { | 455 | GFP_KERNEL); |
| 456 | if (ctrlpriv->jrpdev == NULL) { | ||
| 263 | iounmap(&topregs->ctrl); | 457 | iounmap(&topregs->ctrl); |
| 264 | return -ENOMEM; | 458 | return -ENOMEM; |
| 265 | } | 459 | } |
| @@ -267,13 +461,24 @@ static int caam_probe(struct platform_device *pdev) | |||
| 267 | ring = 0; | 461 | ring = 0; |
| 268 | ctrlpriv->total_jobrs = 0; | 462 | ctrlpriv->total_jobrs = 0; |
| 269 | for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") { | 463 | for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") { |
| 270 | caam_jr_probe(pdev, np, ring); | 464 | ctrlpriv->jrpdev[ring] = |
| 465 | of_platform_device_create(np, NULL, dev); | ||
| 466 | if (!ctrlpriv->jrpdev[ring]) { | ||
| 467 | pr_warn("JR%d Platform device creation error\n", ring); | ||
| 468 | continue; | ||
| 469 | } | ||
| 271 | ctrlpriv->total_jobrs++; | 470 | ctrlpriv->total_jobrs++; |
| 272 | ring++; | 471 | ring++; |
| 273 | } | 472 | } |
| 274 | if (!ring) { | 473 | if (!ring) { |
| 275 | for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") { | 474 | for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") { |
| 276 | caam_jr_probe(pdev, np, ring); | 475 | ctrlpriv->jrpdev[ring] = |
| 476 | of_platform_device_create(np, NULL, dev); | ||
| 477 | if (!ctrlpriv->jrpdev[ring]) { | ||
| 478 | pr_warn("JR%d Platform device creation error\n", | ||
| 479 | ring); | ||
| 480 | continue; | ||
| 481 | } | ||
| 277 | ctrlpriv->total_jobrs++; | 482 | ctrlpriv->total_jobrs++; |
| 278 | ring++; | 483 | ring++; |
| 279 | } | 484 | } |
| @@ -299,16 +504,55 @@ static int caam_probe(struct platform_device *pdev) | |||
| 299 | 504 | ||
| 300 | /* | 505 | /* |
| 301 | * If SEC has RNG version >= 4 and RNG state handle has not been | 506 | * If SEC has RNG version >= 4 and RNG state handle has not been |
| 302 | * already instantiated ,do RNG instantiation | 507 | * already instantiated, do RNG instantiation |
| 303 | */ | 508 | */ |
| 304 | if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 && | 509 | if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) { |
| 305 | !(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) { | 510 | ctrlpriv->rng4_sh_init = |
| 306 | kick_trng(pdev); | 511 | rd_reg32(&topregs->ctrl.r4tst[0].rdsta); |
| 307 | ret = instantiate_rng(dev); | 512 | /* |
| 513 | * If the secure keys (TDKEK, JDKEK, TDSK), were already | ||
| 514 | * generated, signal this to the function that is instantiating | ||
| 515 | * the state handles. An error would occur if RNG4 attempts | ||
| 516 | * to regenerate these keys before the next POR. | ||
| 517 | */ | ||
| 518 | gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1; | ||
| 519 | ctrlpriv->rng4_sh_init &= RDSTA_IFMASK; | ||
| 520 | do { | ||
| 521 | int inst_handles = | ||
| 522 | rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & | ||
| 523 | RDSTA_IFMASK; | ||
| 524 | /* | ||
| 525 | * If either SH were instantiated by somebody else | ||
| 526 | * (e.g. u-boot) then it is assumed that the entropy | ||
| 527 | * parameters are properly set and thus the function | ||
| 528 | * setting these (kick_trng(...)) is skipped. | ||
| 529 | * Also, if a handle was instantiated, do not change | ||
| 530 | * the TRNG parameters. | ||
| 531 | */ | ||
| 532 | if (!(ctrlpriv->rng4_sh_init || inst_handles)) { | ||
| 533 | kick_trng(pdev, ent_delay); | ||
| 534 | ent_delay += 400; | ||
| 535 | } | ||
| 536 | /* | ||
| 537 | * if instantiate_rng(...) fails, the loop will rerun | ||
| 538 | * and the kick_trng(...) function will modfiy the | ||
| 539 | * upper and lower limits of the entropy sampling | ||
| 540 | * interval, leading to a sucessful initialization of | ||
| 541 | * the RNG. | ||
| 542 | */ | ||
| 543 | ret = instantiate_rng(dev, inst_handles, | ||
| 544 | gen_sk); | ||
| 545 | } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX)); | ||
| 308 | if (ret) { | 546 | if (ret) { |
| 547 | dev_err(dev, "failed to instantiate RNG"); | ||
| 309 | caam_remove(pdev); | 548 | caam_remove(pdev); |
| 310 | return ret; | 549 | return ret; |
| 311 | } | 550 | } |
| 551 | /* | ||
| 552 | * Set handles init'ed by this module as the complement of the | ||
| 553 | * already initialized ones | ||
| 554 | */ | ||
| 555 | ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK; | ||
| 312 | 556 | ||
| 313 | /* Enable RDB bit so that RNG works faster */ | 557 | /* Enable RDB bit so that RNG works faster */ |
| 314 | setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE); | 558 | setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE); |
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index 53b296f78b0d..7e4500f18df6 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h | |||
| @@ -1155,8 +1155,15 @@ struct sec4_sg_entry { | |||
| 1155 | 1155 | ||
| 1156 | /* randomizer AAI set */ | 1156 | /* randomizer AAI set */ |
| 1157 | #define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT) | 1157 | #define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT) |
| 1158 | #define OP_ALG_AAI_RNG_NOZERO (0x10 << OP_ALG_AAI_SHIFT) | 1158 | #define OP_ALG_AAI_RNG_NZB (0x10 << OP_ALG_AAI_SHIFT) |
| 1159 | #define OP_ALG_AAI_RNG_ODD (0x20 << OP_ALG_AAI_SHIFT) | 1159 | #define OP_ALG_AAI_RNG_OBP (0x20 << OP_ALG_AAI_SHIFT) |
| 1160 | |||
| 1161 | /* RNG4 AAI set */ | ||
| 1162 | #define OP_ALG_AAI_RNG4_SH_0 (0x00 << OP_ALG_AAI_SHIFT) | ||
| 1163 | #define OP_ALG_AAI_RNG4_SH_1 (0x01 << OP_ALG_AAI_SHIFT) | ||
| 1164 | #define OP_ALG_AAI_RNG4_PS (0x40 << OP_ALG_AAI_SHIFT) | ||
| 1165 | #define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT) | ||
| 1166 | #define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT) | ||
| 1160 | 1167 | ||
| 1161 | /* hmac/smac AAI set */ | 1168 | /* hmac/smac AAI set */ |
| 1162 | #define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT) | 1169 | #define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT) |
| @@ -1178,12 +1185,6 @@ struct sec4_sg_entry { | |||
| 1178 | #define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT) | 1185 | #define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT) |
| 1179 | #define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT) | 1186 | #define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT) |
| 1180 | 1187 | ||
| 1181 | /* RNG4 set */ | ||
| 1182 | #define OP_ALG_RNG4_SHIFT 4 | ||
| 1183 | #define OP_ALG_RNG4_MASK (0x1f3 << OP_ALG_RNG4_SHIFT) | ||
| 1184 | |||
| 1185 | #define OP_ALG_RNG4_SK (0x100 << OP_ALG_RNG4_SHIFT) | ||
| 1186 | |||
| 1187 | #define OP_ALG_AS_SHIFT 2 | 1188 | #define OP_ALG_AS_SHIFT 2 |
| 1188 | #define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT) | 1189 | #define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT) |
| 1189 | #define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT) | 1190 | #define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT) |
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index 34c4b9f7fbfa..6d85fcc5bd0a 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h | |||
| @@ -37,13 +37,16 @@ struct caam_jrentry_info { | |||
| 37 | 37 | ||
| 38 | /* Private sub-storage for a single JobR */ | 38 | /* Private sub-storage for a single JobR */ |
| 39 | struct caam_drv_private_jr { | 39 | struct caam_drv_private_jr { |
| 40 | struct device *parentdev; /* points back to controller dev */ | 40 | struct list_head list_node; /* Job Ring device list */ |
| 41 | struct platform_device *jr_pdev;/* points to platform device for JR */ | 41 | struct device *dev; |
| 42 | int ridx; | 42 | int ridx; |
| 43 | struct caam_job_ring __iomem *rregs; /* JobR's register space */ | 43 | struct caam_job_ring __iomem *rregs; /* JobR's register space */ |
| 44 | struct tasklet_struct irqtask; | 44 | struct tasklet_struct irqtask; |
| 45 | int irq; /* One per queue */ | 45 | int irq; /* One per queue */ |
| 46 | 46 | ||
| 47 | /* Number of scatterlist crypt transforms active on the JobR */ | ||
| 48 | atomic_t tfm_count ____cacheline_aligned; | ||
| 49 | |||
| 47 | /* Job ring info */ | 50 | /* Job ring info */ |
| 48 | int ringsize; /* Size of rings (assume input = output) */ | 51 | int ringsize; /* Size of rings (assume input = output) */ |
| 49 | struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */ | 52 | struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */ |
| @@ -63,7 +66,7 @@ struct caam_drv_private_jr { | |||
| 63 | struct caam_drv_private { | 66 | struct caam_drv_private { |
| 64 | 67 | ||
| 65 | struct device *dev; | 68 | struct device *dev; |
| 66 | struct device **jrdev; /* Alloc'ed array per sub-device */ | 69 | struct platform_device **jrpdev; /* Alloc'ed array per sub-device */ |
| 67 | struct platform_device *pdev; | 70 | struct platform_device *pdev; |
| 68 | 71 | ||
| 69 | /* Physical-presence section */ | 72 | /* Physical-presence section */ |
| @@ -80,12 +83,11 @@ struct caam_drv_private { | |||
| 80 | u8 qi_present; /* Nonzero if QI present in device */ | 83 | u8 qi_present; /* Nonzero if QI present in device */ |
| 81 | int secvio_irq; /* Security violation interrupt number */ | 84 | int secvio_irq; /* Security violation interrupt number */ |
| 82 | 85 | ||
| 83 | /* which jr allocated to scatterlist crypto */ | 86 | #define RNG4_MAX_HANDLES 2 |
| 84 | atomic_t tfm_count ____cacheline_aligned; | 87 | /* RNG4 block */ |
| 85 | /* list of registered crypto algorithms (mk generic context handle?) */ | 88 | u32 rng4_sh_init; /* This bitmap shows which of the State |
| 86 | struct list_head alg_list; | 89 | Handles of the RNG4 block are initialized |
| 87 | /* list of registered hash algorithms (mk generic context handle?) */ | 90 | by this driver */ |
| 88 | struct list_head hash_list; | ||
| 89 | 91 | ||
| 90 | /* | 92 | /* |
| 91 | * debugfs entries for developer view into driver/device | 93 | * debugfs entries for developer view into driver/device |
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index bdb786d5a5e5..d23356d20e1c 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c | |||
| @@ -13,6 +13,113 @@ | |||
| 13 | #include "desc.h" | 13 | #include "desc.h" |
| 14 | #include "intern.h" | 14 | #include "intern.h" |
| 15 | 15 | ||
| 16 | struct jr_driver_data { | ||
| 17 | /* List of Physical JobR's with the Driver */ | ||
| 18 | struct list_head jr_list; | ||
| 19 | spinlock_t jr_alloc_lock; /* jr_list lock */ | ||
| 20 | } ____cacheline_aligned; | ||
| 21 | |||
| 22 | static struct jr_driver_data driver_data; | ||
| 23 | |||
| 24 | static int caam_reset_hw_jr(struct device *dev) | ||
| 25 | { | ||
| 26 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); | ||
| 27 | unsigned int timeout = 100000; | ||
| 28 | |||
| 29 | /* | ||
| 30 | * mask interrupts since we are going to poll | ||
| 31 | * for reset completion status | ||
| 32 | */ | ||
| 33 | setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); | ||
| 34 | |||
| 35 | /* initiate flush (required prior to reset) */ | ||
| 36 | wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); | ||
| 37 | while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) == | ||
| 38 | JRINT_ERR_HALT_INPROGRESS) && --timeout) | ||
| 39 | cpu_relax(); | ||
| 40 | |||
| 41 | if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) != | ||
| 42 | JRINT_ERR_HALT_COMPLETE || timeout == 0) { | ||
| 43 | dev_err(dev, "failed to flush job ring %d\n", jrp->ridx); | ||
| 44 | return -EIO; | ||
| 45 | } | ||
| 46 | |||
| 47 | /* initiate reset */ | ||
| 48 | timeout = 100000; | ||
| 49 | wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); | ||
| 50 | while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout) | ||
| 51 | cpu_relax(); | ||
| 52 | |||
| 53 | if (timeout == 0) { | ||
| 54 | dev_err(dev, "failed to reset job ring %d\n", jrp->ridx); | ||
| 55 | return -EIO; | ||
| 56 | } | ||
| 57 | |||
| 58 | /* unmask interrupts */ | ||
| 59 | clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); | ||
| 60 | |||
| 61 | return 0; | ||
| 62 | } | ||
| 63 | |||
| 64 | /* | ||
| 65 | * Shutdown JobR independent of platform property code | ||
| 66 | */ | ||
| 67 | int caam_jr_shutdown(struct device *dev) | ||
| 68 | { | ||
| 69 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); | ||
| 70 | dma_addr_t inpbusaddr, outbusaddr; | ||
| 71 | int ret; | ||
| 72 | |||
| 73 | ret = caam_reset_hw_jr(dev); | ||
| 74 | |||
| 75 | tasklet_kill(&jrp->irqtask); | ||
| 76 | |||
| 77 | /* Release interrupt */ | ||
| 78 | free_irq(jrp->irq, dev); | ||
| 79 | |||
| 80 | /* Free rings */ | ||
| 81 | inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); | ||
| 82 | outbusaddr = rd_reg64(&jrp->rregs->outring_base); | ||
| 83 | dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, | ||
| 84 | jrp->inpring, inpbusaddr); | ||
| 85 | dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, | ||
| 86 | jrp->outring, outbusaddr); | ||
| 87 | kfree(jrp->entinfo); | ||
| 88 | |||
| 89 | return ret; | ||
| 90 | } | ||
| 91 | |||
| 92 | static int caam_jr_remove(struct platform_device *pdev) | ||
| 93 | { | ||
| 94 | int ret; | ||
| 95 | struct device *jrdev; | ||
| 96 | struct caam_drv_private_jr *jrpriv; | ||
| 97 | |||
| 98 | jrdev = &pdev->dev; | ||
| 99 | jrpriv = dev_get_drvdata(jrdev); | ||
| 100 | |||
| 101 | /* | ||
| 102 | * Return EBUSY if job ring already allocated. | ||
| 103 | */ | ||
| 104 | if (atomic_read(&jrpriv->tfm_count)) { | ||
| 105 | dev_err(jrdev, "Device is busy\n"); | ||
| 106 | return -EBUSY; | ||
| 107 | } | ||
| 108 | |||
| 109 | /* Remove the node from Physical JobR list maintained by driver */ | ||
| 110 | spin_lock(&driver_data.jr_alloc_lock); | ||
| 111 | list_del(&jrpriv->list_node); | ||
| 112 | spin_unlock(&driver_data.jr_alloc_lock); | ||
| 113 | |||
| 114 | /* Release ring */ | ||
| 115 | ret = caam_jr_shutdown(jrdev); | ||
| 116 | if (ret) | ||
| 117 | dev_err(jrdev, "Failed to shut down job ring\n"); | ||
| 118 | irq_dispose_mapping(jrpriv->irq); | ||
| 119 | |||
| 120 | return ret; | ||
| 121 | } | ||
| 122 | |||
| 16 | /* Main per-ring interrupt handler */ | 123 | /* Main per-ring interrupt handler */ |
| 17 | static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) | 124 | static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) |
| 18 | { | 125 | { |
| @@ -128,6 +235,59 @@ static void caam_jr_dequeue(unsigned long devarg) | |||
| 128 | } | 235 | } |
| 129 | 236 | ||
| 130 | /** | 237 | /** |
| 238 | * caam_jr_alloc() - Alloc a job ring for someone to use as needed. | ||
| 239 | * | ||
| 240 | * returns : pointer to the newly allocated physical | ||
| 241 | * JobR dev can be written to if successful. | ||
| 242 | **/ | ||
| 243 | struct device *caam_jr_alloc(void) | ||
| 244 | { | ||
| 245 | struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL; | ||
| 246 | struct device *dev = NULL; | ||
| 247 | int min_tfm_cnt = INT_MAX; | ||
| 248 | int tfm_cnt; | ||
| 249 | |||
| 250 | spin_lock(&driver_data.jr_alloc_lock); | ||
| 251 | |||
| 252 | if (list_empty(&driver_data.jr_list)) { | ||
| 253 | spin_unlock(&driver_data.jr_alloc_lock); | ||
| 254 | return ERR_PTR(-ENODEV); | ||
| 255 | } | ||
| 256 | |||
| 257 | list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) { | ||
| 258 | tfm_cnt = atomic_read(&jrpriv->tfm_count); | ||
| 259 | if (tfm_cnt < min_tfm_cnt) { | ||
| 260 | min_tfm_cnt = tfm_cnt; | ||
| 261 | min_jrpriv = jrpriv; | ||
| 262 | } | ||
| 263 | if (!min_tfm_cnt) | ||
| 264 | break; | ||
| 265 | } | ||
| 266 | |||
| 267 | if (min_jrpriv) { | ||
| 268 | atomic_inc(&min_jrpriv->tfm_count); | ||
| 269 | dev = min_jrpriv->dev; | ||
| 270 | } | ||
| 271 | spin_unlock(&driver_data.jr_alloc_lock); | ||
| 272 | |||
| 273 | return dev; | ||
| 274 | } | ||
| 275 | EXPORT_SYMBOL(caam_jr_alloc); | ||
| 276 | |||
| 277 | /** | ||
| 278 | * caam_jr_free() - Free the Job Ring | ||
| 279 | * @rdev - points to the dev that identifies the Job ring to | ||
| 280 | * be released. | ||
| 281 | **/ | ||
| 282 | void caam_jr_free(struct device *rdev) | ||
| 283 | { | ||
| 284 | struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev); | ||
| 285 | |||
| 286 | atomic_dec(&jrpriv->tfm_count); | ||
| 287 | } | ||
| 288 | EXPORT_SYMBOL(caam_jr_free); | ||
| 289 | |||
| 290 | /** | ||
| 131 | * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK, | 291 | * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK, |
| 132 | * -EBUSY if the queue is full, -EIO if it cannot map the caller's | 292 | * -EBUSY if the queue is full, -EIO if it cannot map the caller's |
| 133 | * descriptor. | 293 | * descriptor. |
| @@ -207,46 +367,6 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, | |||
| 207 | } | 367 | } |
| 208 | EXPORT_SYMBOL(caam_jr_enqueue); | 368 | EXPORT_SYMBOL(caam_jr_enqueue); |
| 209 | 369 | ||
| 210 | static int caam_reset_hw_jr(struct device *dev) | ||
| 211 | { | ||
| 212 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); | ||
| 213 | unsigned int timeout = 100000; | ||
| 214 | |||
| 215 | /* | ||
| 216 | * mask interrupts since we are going to poll | ||
| 217 | * for reset completion status | ||
| 218 | */ | ||
| 219 | setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); | ||
| 220 | |||
| 221 | /* initiate flush (required prior to reset) */ | ||
| 222 | wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); | ||
| 223 | while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) == | ||
| 224 | JRINT_ERR_HALT_INPROGRESS) && --timeout) | ||
| 225 | cpu_relax(); | ||
| 226 | |||
| 227 | if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) != | ||
| 228 | JRINT_ERR_HALT_COMPLETE || timeout == 0) { | ||
| 229 | dev_err(dev, "failed to flush job ring %d\n", jrp->ridx); | ||
| 230 | return -EIO; | ||
| 231 | } | ||
| 232 | |||
| 233 | /* initiate reset */ | ||
| 234 | timeout = 100000; | ||
| 235 | wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); | ||
| 236 | while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout) | ||
| 237 | cpu_relax(); | ||
| 238 | |||
| 239 | if (timeout == 0) { | ||
| 240 | dev_err(dev, "failed to reset job ring %d\n", jrp->ridx); | ||
| 241 | return -EIO; | ||
| 242 | } | ||
| 243 | |||
| 244 | /* unmask interrupts */ | ||
| 245 | clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); | ||
| 246 | |||
| 247 | return 0; | ||
| 248 | } | ||
| 249 | |||
| 250 | /* | 370 | /* |
| 251 | * Init JobR independent of platform property detection | 371 | * Init JobR independent of platform property detection |
| 252 | */ | 372 | */ |
| @@ -262,7 +382,7 @@ static int caam_jr_init(struct device *dev) | |||
| 262 | 382 | ||
| 263 | /* Connect job ring interrupt handler. */ | 383 | /* Connect job ring interrupt handler. */ |
| 264 | error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, | 384 | error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, |
| 265 | "caam-jobr", dev); | 385 | dev_name(dev), dev); |
| 266 | if (error) { | 386 | if (error) { |
| 267 | dev_err(dev, "can't connect JobR %d interrupt (%d)\n", | 387 | dev_err(dev, "can't connect JobR %d interrupt (%d)\n", |
| 268 | jrp->ridx, jrp->irq); | 388 | jrp->ridx, jrp->irq); |
| @@ -318,86 +438,43 @@ static int caam_jr_init(struct device *dev) | |||
| 318 | return 0; | 438 | return 0; |
| 319 | } | 439 | } |
| 320 | 440 | ||
| 321 | /* | ||
| 322 | * Shutdown JobR independent of platform property code | ||
| 323 | */ | ||
| 324 | int caam_jr_shutdown(struct device *dev) | ||
| 325 | { | ||
| 326 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); | ||
| 327 | dma_addr_t inpbusaddr, outbusaddr; | ||
| 328 | int ret; | ||
| 329 | |||
| 330 | ret = caam_reset_hw_jr(dev); | ||
| 331 | |||
| 332 | tasklet_kill(&jrp->irqtask); | ||
| 333 | |||
| 334 | /* Release interrupt */ | ||
| 335 | free_irq(jrp->irq, dev); | ||
| 336 | |||
| 337 | /* Free rings */ | ||
| 338 | inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); | ||
| 339 | outbusaddr = rd_reg64(&jrp->rregs->outring_base); | ||
| 340 | dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, | ||
| 341 | jrp->inpring, inpbusaddr); | ||
| 342 | dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, | ||
| 343 | jrp->outring, outbusaddr); | ||
| 344 | kfree(jrp->entinfo); | ||
| 345 | of_device_unregister(jrp->jr_pdev); | ||
| 346 | |||
| 347 | return ret; | ||
| 348 | } | ||
| 349 | 441 | ||
| 350 | /* | 442 | /* |
| 351 | * Probe routine for each detected JobR subsystem. It assumes that | 443 | * Probe routine for each detected JobR subsystem. |
| 352 | * property detection was picked up externally. | ||
| 353 | */ | 444 | */ |
| 354 | int caam_jr_probe(struct platform_device *pdev, struct device_node *np, | 445 | static int caam_jr_probe(struct platform_device *pdev) |
| 355 | int ring) | ||
| 356 | { | 446 | { |
| 357 | struct device *ctrldev, *jrdev; | 447 | struct device *jrdev; |
| 358 | struct platform_device *jr_pdev; | 448 | struct device_node *nprop; |
| 359 | struct caam_drv_private *ctrlpriv; | 449 | struct caam_job_ring __iomem *ctrl; |
| 360 | struct caam_drv_private_jr *jrpriv; | 450 | struct caam_drv_private_jr *jrpriv; |
| 361 | u32 *jroffset; | 451 | static int total_jobrs; |
| 362 | int error; | 452 | int error; |
| 363 | 453 | ||
| 364 | ctrldev = &pdev->dev; | 454 | jrdev = &pdev->dev; |
| 365 | ctrlpriv = dev_get_drvdata(ctrldev); | ||
| 366 | |||
| 367 | jrpriv = kmalloc(sizeof(struct caam_drv_private_jr), | 455 | jrpriv = kmalloc(sizeof(struct caam_drv_private_jr), |
| 368 | GFP_KERNEL); | 456 | GFP_KERNEL); |
| 369 | if (jrpriv == NULL) { | 457 | if (!jrpriv) |
| 370 | dev_err(ctrldev, "can't alloc private mem for job ring %d\n", | ||
| 371 | ring); | ||
| 372 | return -ENOMEM; | 458 | return -ENOMEM; |
| 373 | } | ||
| 374 | jrpriv->parentdev = ctrldev; /* point back to parent */ | ||
| 375 | jrpriv->ridx = ring; /* save ring identity relative to detection */ | ||
| 376 | 459 | ||
| 377 | /* | 460 | dev_set_drvdata(jrdev, jrpriv); |
| 378 | * Derive a pointer to the detected JobRs regs | ||
| 379 | * Driver has already iomapped the entire space, we just | ||
| 380 | * need to add in the offset to this JobR. Don't know if I | ||
| 381 | * like this long-term, but it'll run | ||
| 382 | */ | ||
| 383 | jroffset = (u32 *)of_get_property(np, "reg", NULL); | ||
| 384 | jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl | ||
| 385 | + *jroffset); | ||
| 386 | 461 | ||
| 387 | /* Build a local dev for each detected queue */ | 462 | /* save ring identity relative to detection */ |
| 388 | jr_pdev = of_platform_device_create(np, NULL, ctrldev); | 463 | jrpriv->ridx = total_jobrs++; |
| 389 | if (jr_pdev == NULL) { | 464 | |
| 390 | kfree(jrpriv); | 465 | nprop = pdev->dev.of_node; |
| 391 | return -EINVAL; | 466 | /* Get configuration properties from device tree */ |
| 467 | /* First, get register page */ | ||
| 468 | ctrl = of_iomap(nprop, 0); | ||
| 469 | if (!ctrl) { | ||
| 470 | dev_err(jrdev, "of_iomap() failed\n"); | ||
| 471 | return -ENOMEM; | ||
| 392 | } | 472 | } |
| 393 | 473 | ||
| 394 | jrpriv->jr_pdev = jr_pdev; | 474 | jrpriv->rregs = (struct caam_job_ring __force *)ctrl; |
| 395 | jrdev = &jr_pdev->dev; | ||
| 396 | dev_set_drvdata(jrdev, jrpriv); | ||
| 397 | ctrlpriv->jrdev[ring] = jrdev; | ||
| 398 | 475 | ||
| 399 | if (sizeof(dma_addr_t) == sizeof(u64)) | 476 | if (sizeof(dma_addr_t) == sizeof(u64)) |
| 400 | if (of_device_is_compatible(np, "fsl,sec-v5.0-job-ring")) | 477 | if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring")) |
| 401 | dma_set_mask(jrdev, DMA_BIT_MASK(40)); | 478 | dma_set_mask(jrdev, DMA_BIT_MASK(40)); |
| 402 | else | 479 | else |
| 403 | dma_set_mask(jrdev, DMA_BIT_MASK(36)); | 480 | dma_set_mask(jrdev, DMA_BIT_MASK(36)); |
| @@ -405,15 +482,61 @@ int caam_jr_probe(struct platform_device *pdev, struct device_node *np, | |||
| 405 | dma_set_mask(jrdev, DMA_BIT_MASK(32)); | 482 | dma_set_mask(jrdev, DMA_BIT_MASK(32)); |
| 406 | 483 | ||
| 407 | /* Identify the interrupt */ | 484 | /* Identify the interrupt */ |
| 408 | jrpriv->irq = irq_of_parse_and_map(np, 0); | 485 | jrpriv->irq = irq_of_parse_and_map(nprop, 0); |
| 409 | 486 | ||
| 410 | /* Now do the platform independent part */ | 487 | /* Now do the platform independent part */ |
| 411 | error = caam_jr_init(jrdev); /* now turn on hardware */ | 488 | error = caam_jr_init(jrdev); /* now turn on hardware */ |
| 412 | if (error) { | 489 | if (error) { |
| 413 | of_device_unregister(jr_pdev); | ||
| 414 | kfree(jrpriv); | 490 | kfree(jrpriv); |
| 415 | return error; | 491 | return error; |
| 416 | } | 492 | } |
| 417 | 493 | ||
| 418 | return error; | 494 | jrpriv->dev = jrdev; |
| 495 | spin_lock(&driver_data.jr_alloc_lock); | ||
| 496 | list_add_tail(&jrpriv->list_node, &driver_data.jr_list); | ||
| 497 | spin_unlock(&driver_data.jr_alloc_lock); | ||
| 498 | |||
| 499 | atomic_set(&jrpriv->tfm_count, 0); | ||
| 500 | |||
| 501 | return 0; | ||
| 502 | } | ||
| 503 | |||
| 504 | static struct of_device_id caam_jr_match[] = { | ||
| 505 | { | ||
| 506 | .compatible = "fsl,sec-v4.0-job-ring", | ||
| 507 | }, | ||
| 508 | { | ||
| 509 | .compatible = "fsl,sec4.0-job-ring", | ||
| 510 | }, | ||
| 511 | {}, | ||
| 512 | }; | ||
| 513 | MODULE_DEVICE_TABLE(of, caam_jr_match); | ||
| 514 | |||
| 515 | static struct platform_driver caam_jr_driver = { | ||
| 516 | .driver = { | ||
| 517 | .name = "caam_jr", | ||
| 518 | .owner = THIS_MODULE, | ||
| 519 | .of_match_table = caam_jr_match, | ||
| 520 | }, | ||
| 521 | .probe = caam_jr_probe, | ||
| 522 | .remove = caam_jr_remove, | ||
| 523 | }; | ||
| 524 | |||
| 525 | static int __init jr_driver_init(void) | ||
| 526 | { | ||
| 527 | spin_lock_init(&driver_data.jr_alloc_lock); | ||
| 528 | INIT_LIST_HEAD(&driver_data.jr_list); | ||
| 529 | return platform_driver_register(&caam_jr_driver); | ||
| 530 | } | ||
| 531 | |||
| 532 | static void __exit jr_driver_exit(void) | ||
| 533 | { | ||
| 534 | platform_driver_unregister(&caam_jr_driver); | ||
| 419 | } | 535 | } |
| 536 | |||
| 537 | module_init(jr_driver_init); | ||
| 538 | module_exit(jr_driver_exit); | ||
| 539 | |||
| 540 | MODULE_LICENSE("GPL"); | ||
| 541 | MODULE_DESCRIPTION("FSL CAAM JR request backend"); | ||
| 542 | MODULE_AUTHOR("Freescale Semiconductor - NMG/STC"); | ||
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h index 9d8741a59037..97113a6d6c58 100644 --- a/drivers/crypto/caam/jr.h +++ b/drivers/crypto/caam/jr.h | |||
| @@ -8,12 +8,11 @@ | |||
| 8 | #define JR_H | 8 | #define JR_H |
| 9 | 9 | ||
| 10 | /* Prototypes for backend-level services exposed to APIs */ | 10 | /* Prototypes for backend-level services exposed to APIs */ |
| 11 | struct device *caam_jr_alloc(void); | ||
| 12 | void caam_jr_free(struct device *rdev); | ||
| 11 | int caam_jr_enqueue(struct device *dev, u32 *desc, | 13 | int caam_jr_enqueue(struct device *dev, u32 *desc, |
| 12 | void (*cbk)(struct device *dev, u32 *desc, u32 status, | 14 | void (*cbk)(struct device *dev, u32 *desc, u32 status, |
| 13 | void *areq), | 15 | void *areq), |
| 14 | void *areq); | 16 | void *areq); |
| 15 | 17 | ||
| 16 | extern int caam_jr_probe(struct platform_device *pdev, struct device_node *np, | ||
| 17 | int ring); | ||
| 18 | extern int caam_jr_shutdown(struct device *dev); | ||
| 19 | #endif /* JR_H */ | 18 | #endif /* JR_H */ |
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 4455396918de..d50174f45b21 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h | |||
| @@ -245,7 +245,7 @@ struct rngtst { | |||
| 245 | 245 | ||
| 246 | /* RNG4 TRNG test registers */ | 246 | /* RNG4 TRNG test registers */ |
| 247 | struct rng4tst { | 247 | struct rng4tst { |
| 248 | #define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */ | 248 | #define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */ |
| 249 | u32 rtmctl; /* misc. control register */ | 249 | u32 rtmctl; /* misc. control register */ |
| 250 | u32 rtscmisc; /* statistical check misc. register */ | 250 | u32 rtscmisc; /* statistical check misc. register */ |
| 251 | u32 rtpkrrng; /* poker range register */ | 251 | u32 rtpkrrng; /* poker range register */ |
| @@ -255,6 +255,8 @@ struct rng4tst { | |||
| 255 | }; | 255 | }; |
| 256 | #define RTSDCTL_ENT_DLY_SHIFT 16 | 256 | #define RTSDCTL_ENT_DLY_SHIFT 16 |
| 257 | #define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT) | 257 | #define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT) |
| 258 | #define RTSDCTL_ENT_DLY_MIN 1200 | ||
| 259 | #define RTSDCTL_ENT_DLY_MAX 12800 | ||
| 258 | u32 rtsdctl; /* seed control register */ | 260 | u32 rtsdctl; /* seed control register */ |
| 259 | union { | 261 | union { |
| 260 | u32 rtsblim; /* PRGM=1: sparse bit limit register */ | 262 | u32 rtsblim; /* PRGM=1: sparse bit limit register */ |
| @@ -266,7 +268,11 @@ struct rng4tst { | |||
| 266 | u32 rtfrqcnt; /* PRGM=0: freq. count register */ | 268 | u32 rtfrqcnt; /* PRGM=0: freq. count register */ |
| 267 | }; | 269 | }; |
| 268 | u32 rsvd1[40]; | 270 | u32 rsvd1[40]; |
| 271 | #define RDSTA_SKVT 0x80000000 | ||
| 272 | #define RDSTA_SKVN 0x40000000 | ||
| 269 | #define RDSTA_IF0 0x00000001 | 273 | #define RDSTA_IF0 0x00000001 |
| 274 | #define RDSTA_IF1 0x00000002 | ||
| 275 | #define RDSTA_IFMASK (RDSTA_IF1 | RDSTA_IF0) | ||
| 270 | u32 rdsta; | 276 | u32 rdsta; |
| 271 | u32 rsvd2[15]; | 277 | u32 rsvd2[15]; |
| 272 | }; | 278 | }; |
| @@ -692,6 +698,7 @@ struct caam_deco { | |||
| 692 | u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */ | 698 | u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */ |
| 693 | u32 jr_ctl_lo; | 699 | u32 jr_ctl_lo; |
| 694 | u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */ | 700 | u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */ |
| 701 | #define DECO_OP_STATUS_HI_ERR_MASK 0xF00000FF | ||
| 695 | u32 op_status_hi; /* DxOPSTA - DECO Operation Status */ | 702 | u32 op_status_hi; /* DxOPSTA - DECO Operation Status */ |
| 696 | u32 op_status_lo; | 703 | u32 op_status_lo; |
| 697 | u32 rsvd24[2]; | 704 | u32 rsvd24[2]; |
| @@ -706,12 +713,13 @@ struct caam_deco { | |||
| 706 | u32 rsvd29[48]; | 713 | u32 rsvd29[48]; |
| 707 | u32 descbuf[64]; /* DxDESB - Descriptor buffer */ | 714 | u32 descbuf[64]; /* DxDESB - Descriptor buffer */ |
| 708 | u32 rscvd30[193]; | 715 | u32 rscvd30[193]; |
| 716 | #define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000 | ||
| 717 | #define DESC_DBG_DECO_STAT_VALID 0x80000000 | ||
| 718 | #define DESC_DBG_DECO_STAT_MASK 0x00F00000 | ||
| 709 | u32 desc_dbg; /* DxDDR - DECO Debug Register */ | 719 | u32 desc_dbg; /* DxDDR - DECO Debug Register */ |
| 710 | u32 rsvd31[126]; | 720 | u32 rsvd31[126]; |
| 711 | }; | 721 | }; |
| 712 | 722 | ||
| 713 | /* DECO DBG Register Valid Bit*/ | ||
| 714 | #define DECO_DBG_VALID 0x80000000 | ||
| 715 | #define DECO_JQCR_WHL 0x20000000 | 723 | #define DECO_JQCR_WHL 0x20000000 |
| 716 | #define DECO_JQCR_FOUR 0x10000000 | 724 | #define DECO_JQCR_FOUR 0x10000000 |
| 717 | 725 | ||
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h index e0037c8ee243..b12ff85f4241 100644 --- a/drivers/crypto/caam/sg_sw_sec4.h +++ b/drivers/crypto/caam/sg_sw_sec4.h | |||
| @@ -117,6 +117,21 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg, | |||
| 117 | return nents; | 117 | return nents; |
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | /* Map SG page in kernel virtual address space and copy */ | ||
| 121 | static inline void sg_map_copy(u8 *dest, struct scatterlist *sg, | ||
| 122 | int len, int offset) | ||
| 123 | { | ||
| 124 | u8 *mapped_addr; | ||
| 125 | |||
| 126 | /* | ||
| 127 | * Page here can be user-space pinned using get_user_pages | ||
| 128 | * Same must be kmapped before use and kunmapped subsequently | ||
| 129 | */ | ||
| 130 | mapped_addr = kmap_atomic(sg_page(sg)); | ||
| 131 | memcpy(dest, mapped_addr + offset, len); | ||
| 132 | kunmap_atomic(mapped_addr); | ||
| 133 | } | ||
| 134 | |||
| 120 | /* Copy from len bytes of sg to dest, starting from beginning */ | 135 | /* Copy from len bytes of sg to dest, starting from beginning */ |
| 121 | static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len) | 136 | static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len) |
| 122 | { | 137 | { |
| @@ -124,15 +139,15 @@ static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len) | |||
| 124 | int cpy_index = 0, next_cpy_index = current_sg->length; | 139 | int cpy_index = 0, next_cpy_index = current_sg->length; |
| 125 | 140 | ||
| 126 | while (next_cpy_index < len) { | 141 | while (next_cpy_index < len) { |
| 127 | memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg), | 142 | sg_map_copy(dest + cpy_index, current_sg, current_sg->length, |
| 128 | current_sg->length); | 143 | current_sg->offset); |
| 129 | current_sg = scatterwalk_sg_next(current_sg); | 144 | current_sg = scatterwalk_sg_next(current_sg); |
| 130 | cpy_index = next_cpy_index; | 145 | cpy_index = next_cpy_index; |
| 131 | next_cpy_index += current_sg->length; | 146 | next_cpy_index += current_sg->length; |
| 132 | } | 147 | } |
| 133 | if (cpy_index < len) | 148 | if (cpy_index < len) |
| 134 | memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg), | 149 | sg_map_copy(dest + cpy_index, current_sg, len-cpy_index, |
| 135 | len - cpy_index); | 150 | current_sg->offset); |
| 136 | } | 151 | } |
| 137 | 152 | ||
| 138 | /* Copy sg data, from to_skip to end, to dest */ | 153 | /* Copy sg data, from to_skip to end, to dest */ |
| @@ -140,7 +155,7 @@ static inline void sg_copy_part(u8 *dest, struct scatterlist *sg, | |||
| 140 | int to_skip, unsigned int end) | 155 | int to_skip, unsigned int end) |
| 141 | { | 156 | { |
| 142 | struct scatterlist *current_sg = sg; | 157 | struct scatterlist *current_sg = sg; |
| 143 | int sg_index, cpy_index; | 158 | int sg_index, cpy_index, offset; |
| 144 | 159 | ||
| 145 | sg_index = current_sg->length; | 160 | sg_index = current_sg->length; |
| 146 | while (sg_index <= to_skip) { | 161 | while (sg_index <= to_skip) { |
| @@ -148,9 +163,10 @@ static inline void sg_copy_part(u8 *dest, struct scatterlist *sg, | |||
| 148 | sg_index += current_sg->length; | 163 | sg_index += current_sg->length; |
| 149 | } | 164 | } |
| 150 | cpy_index = sg_index - to_skip; | 165 | cpy_index = sg_index - to_skip; |
| 151 | memcpy(dest, (u8 *) sg_virt(current_sg) + | 166 | offset = current_sg->offset + current_sg->length - cpy_index; |
| 152 | current_sg->length - cpy_index, cpy_index); | 167 | sg_map_copy(dest, current_sg, cpy_index, offset); |
| 153 | current_sg = scatterwalk_sg_next(current_sg); | 168 | if (end - sg_index) { |
| 154 | if (end - sg_index) | 169 | current_sg = scatterwalk_sg_next(current_sg); |
| 155 | sg_copy(dest + cpy_index, current_sg, end - sg_index); | 170 | sg_copy(dest + cpy_index, current_sg, end - sg_index); |
| 171 | } | ||
| 156 | } | 172 | } |
diff --git a/drivers/crypto/dcp.c b/drivers/crypto/dcp.c index a8a7dd4b0d25..247ab8048f5b 100644 --- a/drivers/crypto/dcp.c +++ b/drivers/crypto/dcp.c | |||
| @@ -733,12 +733,9 @@ static int dcp_probe(struct platform_device *pdev) | |||
| 733 | platform_set_drvdata(pdev, dev); | 733 | platform_set_drvdata(pdev, dev); |
| 734 | 734 | ||
| 735 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 735 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 736 | if (!r) { | 736 | dev->dcp_regs_base = devm_ioremap_resource(&pdev->dev, r); |
| 737 | dev_err(&pdev->dev, "failed to get IORESOURCE_MEM\n"); | 737 | if (IS_ERR(dev->dcp_regs_base)) |
| 738 | return -ENXIO; | 738 | return PTR_ERR(dev->dcp_regs_base); |
| 739 | } | ||
| 740 | dev->dcp_regs_base = devm_ioremap(&pdev->dev, r->start, | ||
| 741 | resource_size(r)); | ||
| 742 | 739 | ||
| 743 | dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL); | 740 | dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL); |
| 744 | udelay(10); | 741 | udelay(10); |
| @@ -762,7 +759,8 @@ static int dcp_probe(struct platform_device *pdev) | |||
| 762 | return -EIO; | 759 | return -EIO; |
| 763 | } | 760 | } |
| 764 | dev->dcp_vmi_irq = r->start; | 761 | dev->dcp_vmi_irq = r->start; |
| 765 | ret = request_irq(dev->dcp_vmi_irq, dcp_vmi_irq, 0, "dcp", dev); | 762 | ret = devm_request_irq(&pdev->dev, dev->dcp_vmi_irq, dcp_vmi_irq, 0, |
| 763 | "dcp", dev); | ||
| 766 | if (ret != 0) { | 764 | if (ret != 0) { |
| 767 | dev_err(&pdev->dev, "can't request_irq (0)\n"); | 765 | dev_err(&pdev->dev, "can't request_irq (0)\n"); |
| 768 | return -EIO; | 766 | return -EIO; |
| @@ -771,15 +769,14 @@ static int dcp_probe(struct platform_device *pdev) | |||
| 771 | r = platform_get_resource(pdev, IORESOURCE_IRQ, 1); | 769 | r = platform_get_resource(pdev, IORESOURCE_IRQ, 1); |
| 772 | if (!r) { | 770 | if (!r) { |
| 773 | dev_err(&pdev->dev, "can't get IRQ resource (1)\n"); | 771 | dev_err(&pdev->dev, "can't get IRQ resource (1)\n"); |
| 774 | ret = -EIO; | 772 | return -EIO; |
| 775 | goto err_free_irq0; | ||
| 776 | } | 773 | } |
| 777 | dev->dcp_irq = r->start; | 774 | dev->dcp_irq = r->start; |
| 778 | ret = request_irq(dev->dcp_irq, dcp_irq, 0, "dcp", dev); | 775 | ret = devm_request_irq(&pdev->dev, dev->dcp_irq, dcp_irq, 0, "dcp", |
| 776 | dev); | ||
| 779 | if (ret != 0) { | 777 | if (ret != 0) { |
| 780 | dev_err(&pdev->dev, "can't request_irq (1)\n"); | 778 | dev_err(&pdev->dev, "can't request_irq (1)\n"); |
| 781 | ret = -EIO; | 779 | return -EIO; |
| 782 | goto err_free_irq0; | ||
| 783 | } | 780 | } |
| 784 | 781 | ||
| 785 | dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev, | 782 | dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev, |
| @@ -788,8 +785,7 @@ static int dcp_probe(struct platform_device *pdev) | |||
| 788 | GFP_KERNEL); | 785 | GFP_KERNEL); |
| 789 | if (!dev->hw_pkg[0]) { | 786 | if (!dev->hw_pkg[0]) { |
| 790 | dev_err(&pdev->dev, "Could not allocate hw descriptors\n"); | 787 | dev_err(&pdev->dev, "Could not allocate hw descriptors\n"); |
| 791 | ret = -ENOMEM; | 788 | return -ENOMEM; |
| 792 | goto err_free_irq1; | ||
| 793 | } | 789 | } |
| 794 | 790 | ||
| 795 | for (i = 1; i < DCP_MAX_PKG; i++) { | 791 | for (i = 1; i < DCP_MAX_PKG; i++) { |
| @@ -848,16 +844,14 @@ err_unregister: | |||
| 848 | for (j = 0; j < i; j++) | 844 | for (j = 0; j < i; j++) |
| 849 | crypto_unregister_alg(&algs[j]); | 845 | crypto_unregister_alg(&algs[j]); |
| 850 | err_free_key_iv: | 846 | err_free_key_iv: |
| 847 | tasklet_kill(&dev->done_task); | ||
| 848 | tasklet_kill(&dev->queue_task); | ||
| 851 | dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base, | 849 | dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base, |
| 852 | dev->payload_base_dma); | 850 | dev->payload_base_dma); |
| 853 | err_free_hw_packet: | 851 | err_free_hw_packet: |
| 854 | dma_free_coherent(&pdev->dev, DCP_MAX_PKG * | 852 | dma_free_coherent(&pdev->dev, DCP_MAX_PKG * |
| 855 | sizeof(struct dcp_hw_packet), dev->hw_pkg[0], | 853 | sizeof(struct dcp_hw_packet), dev->hw_pkg[0], |
| 856 | dev->hw_phys_pkg); | 854 | dev->hw_phys_pkg); |
| 857 | err_free_irq1: | ||
| 858 | free_irq(dev->dcp_irq, dev); | ||
| 859 | err_free_irq0: | ||
| 860 | free_irq(dev->dcp_vmi_irq, dev); | ||
| 861 | 855 | ||
| 862 | return ret; | 856 | return ret; |
| 863 | } | 857 | } |
| @@ -868,23 +862,20 @@ static int dcp_remove(struct platform_device *pdev) | |||
| 868 | int j; | 862 | int j; |
| 869 | dev = platform_get_drvdata(pdev); | 863 | dev = platform_get_drvdata(pdev); |
| 870 | 864 | ||
| 871 | dma_free_coherent(&pdev->dev, | 865 | misc_deregister(&dev->dcp_bootstream_misc); |
| 872 | DCP_MAX_PKG * sizeof(struct dcp_hw_packet), | ||
| 873 | dev->hw_pkg[0], dev->hw_phys_pkg); | ||
| 874 | |||
| 875 | dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base, | ||
| 876 | dev->payload_base_dma); | ||
| 877 | 866 | ||
| 878 | free_irq(dev->dcp_irq, dev); | 867 | for (j = 0; j < ARRAY_SIZE(algs); j++) |
| 879 | free_irq(dev->dcp_vmi_irq, dev); | 868 | crypto_unregister_alg(&algs[j]); |
| 880 | 869 | ||
| 881 | tasklet_kill(&dev->done_task); | 870 | tasklet_kill(&dev->done_task); |
| 882 | tasklet_kill(&dev->queue_task); | 871 | tasklet_kill(&dev->queue_task); |
| 883 | 872 | ||
| 884 | for (j = 0; j < ARRAY_SIZE(algs); j++) | 873 | dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base, |
| 885 | crypto_unregister_alg(&algs[j]); | 874 | dev->payload_base_dma); |
| 886 | 875 | ||
| 887 | misc_deregister(&dev->dcp_bootstream_misc); | 876 | dma_free_coherent(&pdev->dev, |
| 877 | DCP_MAX_PKG * sizeof(struct dcp_hw_packet), | ||
| 878 | dev->hw_pkg[0], dev->hw_phys_pkg); | ||
| 888 | 879 | ||
| 889 | return 0; | 880 | return 0; |
| 890 | } | 881 | } |
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 214357e12dc0..9dd6e01eac33 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c | |||
| @@ -1149,32 +1149,24 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key, | |||
| 1149 | unsigned int keylen) | 1149 | unsigned int keylen) |
| 1150 | { | 1150 | { |
| 1151 | struct ixp_ctx *ctx = crypto_aead_ctx(tfm); | 1151 | struct ixp_ctx *ctx = crypto_aead_ctx(tfm); |
| 1152 | struct rtattr *rta = (struct rtattr *)key; | 1152 | struct crypto_authenc_keys keys; |
| 1153 | struct crypto_authenc_key_param *param; | ||
| 1154 | 1153 | ||
| 1155 | if (!RTA_OK(rta, keylen)) | 1154 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) |
| 1156 | goto badkey; | ||
| 1157 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) | ||
| 1158 | goto badkey; | ||
| 1159 | if (RTA_PAYLOAD(rta) < sizeof(*param)) | ||
| 1160 | goto badkey; | 1155 | goto badkey; |
| 1161 | 1156 | ||
| 1162 | param = RTA_DATA(rta); | 1157 | if (keys.authkeylen > sizeof(ctx->authkey)) |
| 1163 | ctx->enckey_len = be32_to_cpu(param->enckeylen); | 1158 | goto badkey; |
| 1164 | |||
| 1165 | key += RTA_ALIGN(rta->rta_len); | ||
| 1166 | keylen -= RTA_ALIGN(rta->rta_len); | ||
| 1167 | 1159 | ||
| 1168 | if (keylen < ctx->enckey_len) | 1160 | if (keys.enckeylen > sizeof(ctx->enckey)) |
| 1169 | goto badkey; | 1161 | goto badkey; |
| 1170 | 1162 | ||
| 1171 | ctx->authkey_len = keylen - ctx->enckey_len; | 1163 | memcpy(ctx->authkey, keys.authkey, keys.authkeylen); |
| 1172 | memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len); | 1164 | memcpy(ctx->enckey, keys.enckey, keys.enckeylen); |
| 1173 | memcpy(ctx->authkey, key, ctx->authkey_len); | 1165 | ctx->authkey_len = keys.authkeylen; |
| 1166 | ctx->enckey_len = keys.enckeylen; | ||
| 1174 | 1167 | ||
| 1175 | return aead_setup(tfm, crypto_aead_authsize(tfm)); | 1168 | return aead_setup(tfm, crypto_aead_authsize(tfm)); |
| 1176 | badkey: | 1169 | badkey: |
| 1177 | ctx->enckey_len = 0; | ||
| 1178 | crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | 1170 | crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
| 1179 | return -EINVAL; | 1171 | return -EINVAL; |
| 1180 | } | 1172 | } |
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index 3374a3ebe4c7..8d1e6f8e9e9c 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c | |||
| @@ -907,7 +907,7 @@ static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm) | |||
| 907 | return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE); | 907 | return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE); |
| 908 | } | 908 | } |
| 909 | 909 | ||
| 910 | irqreturn_t crypto_int(int irq, void *priv) | 910 | static irqreturn_t crypto_int(int irq, void *priv) |
| 911 | { | 911 | { |
| 912 | u32 val; | 912 | u32 val; |
| 913 | 913 | ||
| @@ -928,7 +928,7 @@ irqreturn_t crypto_int(int irq, void *priv) | |||
| 928 | return IRQ_HANDLED; | 928 | return IRQ_HANDLED; |
| 929 | } | 929 | } |
| 930 | 930 | ||
| 931 | struct crypto_alg mv_aes_alg_ecb = { | 931 | static struct crypto_alg mv_aes_alg_ecb = { |
| 932 | .cra_name = "ecb(aes)", | 932 | .cra_name = "ecb(aes)", |
| 933 | .cra_driver_name = "mv-ecb-aes", | 933 | .cra_driver_name = "mv-ecb-aes", |
| 934 | .cra_priority = 300, | 934 | .cra_priority = 300, |
| @@ -951,7 +951,7 @@ struct crypto_alg mv_aes_alg_ecb = { | |||
| 951 | }, | 951 | }, |
| 952 | }; | 952 | }; |
| 953 | 953 | ||
| 954 | struct crypto_alg mv_aes_alg_cbc = { | 954 | static struct crypto_alg mv_aes_alg_cbc = { |
| 955 | .cra_name = "cbc(aes)", | 955 | .cra_name = "cbc(aes)", |
| 956 | .cra_driver_name = "mv-cbc-aes", | 956 | .cra_driver_name = "mv-cbc-aes", |
| 957 | .cra_priority = 300, | 957 | .cra_priority = 300, |
| @@ -975,7 +975,7 @@ struct crypto_alg mv_aes_alg_cbc = { | |||
| 975 | }, | 975 | }, |
| 976 | }; | 976 | }; |
| 977 | 977 | ||
| 978 | struct ahash_alg mv_sha1_alg = { | 978 | static struct ahash_alg mv_sha1_alg = { |
| 979 | .init = mv_hash_init, | 979 | .init = mv_hash_init, |
| 980 | .update = mv_hash_update, | 980 | .update = mv_hash_update, |
| 981 | .final = mv_hash_final, | 981 | .final = mv_hash_final, |
| @@ -999,7 +999,7 @@ struct ahash_alg mv_sha1_alg = { | |||
| 999 | } | 999 | } |
| 1000 | }; | 1000 | }; |
| 1001 | 1001 | ||
| 1002 | struct ahash_alg mv_hmac_sha1_alg = { | 1002 | static struct ahash_alg mv_hmac_sha1_alg = { |
| 1003 | .init = mv_hash_init, | 1003 | .init = mv_hash_init, |
| 1004 | .update = mv_hash_update, | 1004 | .update = mv_hash_update, |
| 1005 | .final = mv_hash_final, | 1005 | .final = mv_hash_final, |
| @@ -1084,7 +1084,7 @@ static int mv_probe(struct platform_device *pdev) | |||
| 1084 | goto err_unmap_sram; | 1084 | goto err_unmap_sram; |
| 1085 | } | 1085 | } |
| 1086 | 1086 | ||
| 1087 | ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev), | 1087 | ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev), |
| 1088 | cp); | 1088 | cp); |
| 1089 | if (ret) | 1089 | if (ret) |
| 1090 | goto err_thread; | 1090 | goto err_thread; |
| @@ -1187,7 +1187,7 @@ static struct platform_driver marvell_crypto = { | |||
| 1187 | .driver = { | 1187 | .driver = { |
| 1188 | .owner = THIS_MODULE, | 1188 | .owner = THIS_MODULE, |
| 1189 | .name = "mv_crypto", | 1189 | .name = "mv_crypto", |
| 1190 | .of_match_table = of_match_ptr(mv_cesa_of_match_table), | 1190 | .of_match_table = mv_cesa_of_match_table, |
| 1191 | }, | 1191 | }, |
| 1192 | }; | 1192 | }; |
| 1193 | MODULE_ALIAS("platform:mv_crypto"); | 1193 | MODULE_ALIAS("platform:mv_crypto"); |
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index ce791c2f81f7..a9ccbf14096e 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c | |||
| @@ -275,7 +275,7 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd) | |||
| 275 | if (dd->flags & FLAGS_CBC) | 275 | if (dd->flags & FLAGS_CBC) |
| 276 | val |= AES_REG_CTRL_CBC; | 276 | val |= AES_REG_CTRL_CBC; |
| 277 | if (dd->flags & FLAGS_CTR) { | 277 | if (dd->flags & FLAGS_CTR) { |
| 278 | val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_32; | 278 | val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128; |
| 279 | mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK; | 279 | mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK; |
| 280 | } | 280 | } |
| 281 | if (dd->flags & FLAGS_ENCRYPT) | 281 | if (dd->flags & FLAGS_ENCRYPT) |
| @@ -554,7 +554,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) | |||
| 554 | return err; | 554 | return err; |
| 555 | } | 555 | } |
| 556 | 556 | ||
| 557 | int omap_aes_check_aligned(struct scatterlist *sg) | 557 | static int omap_aes_check_aligned(struct scatterlist *sg) |
| 558 | { | 558 | { |
| 559 | while (sg) { | 559 | while (sg) { |
| 560 | if (!IS_ALIGNED(sg->offset, 4)) | 560 | if (!IS_ALIGNED(sg->offset, 4)) |
| @@ -566,7 +566,7 @@ int omap_aes_check_aligned(struct scatterlist *sg) | |||
| 566 | return 0; | 566 | return 0; |
| 567 | } | 567 | } |
| 568 | 568 | ||
| 569 | int omap_aes_copy_sgs(struct omap_aes_dev *dd) | 569 | static int omap_aes_copy_sgs(struct omap_aes_dev *dd) |
| 570 | { | 570 | { |
| 571 | void *buf_in, *buf_out; | 571 | void *buf_in, *buf_out; |
| 572 | int pages; | 572 | int pages; |
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index e28104b4aab0..e45aaaf0db30 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c | |||
| @@ -2033,3 +2033,4 @@ module_platform_driver(omap_sham_driver); | |||
| 2033 | MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support."); | 2033 | MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support."); |
| 2034 | MODULE_LICENSE("GPL v2"); | 2034 | MODULE_LICENSE("GPL v2"); |
| 2035 | MODULE_AUTHOR("Dmitry Kasatkin"); | 2035 | MODULE_AUTHOR("Dmitry Kasatkin"); |
| 2036 | MODULE_ALIAS("platform:omap-sham"); | ||
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 888f7f4a6d3f..a6175ba6d238 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c | |||
| @@ -495,45 +495,29 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key, | |||
| 495 | { | 495 | { |
| 496 | struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); | 496 | struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); |
| 497 | struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg); | 497 | struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg); |
| 498 | struct rtattr *rta = (void *)key; | 498 | struct crypto_authenc_keys keys; |
| 499 | struct crypto_authenc_key_param *param; | ||
| 500 | unsigned int authkeylen, enckeylen; | ||
| 501 | int err = -EINVAL; | 499 | int err = -EINVAL; |
| 502 | 500 | ||
| 503 | if (!RTA_OK(rta, keylen)) | 501 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) |
| 504 | goto badkey; | 502 | goto badkey; |
| 505 | 503 | ||
| 506 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) | 504 | if (keys.enckeylen > AES_MAX_KEY_SIZE) |
| 507 | goto badkey; | 505 | goto badkey; |
| 508 | 506 | ||
| 509 | if (RTA_PAYLOAD(rta) < sizeof(*param)) | 507 | if (keys.authkeylen > sizeof(ctx->hash_ctx)) |
| 510 | goto badkey; | ||
| 511 | |||
| 512 | param = RTA_DATA(rta); | ||
| 513 | enckeylen = be32_to_cpu(param->enckeylen); | ||
| 514 | |||
| 515 | key += RTA_ALIGN(rta->rta_len); | ||
| 516 | keylen -= RTA_ALIGN(rta->rta_len); | ||
| 517 | |||
| 518 | if (keylen < enckeylen) | ||
| 519 | goto badkey; | ||
| 520 | |||
| 521 | authkeylen = keylen - enckeylen; | ||
| 522 | |||
| 523 | if (enckeylen > AES_MAX_KEY_SIZE) | ||
| 524 | goto badkey; | 508 | goto badkey; |
| 525 | 509 | ||
| 526 | if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) == | 510 | if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) == |
| 527 | SPA_CTRL_CIPH_ALG_AES) | 511 | SPA_CTRL_CIPH_ALG_AES) |
| 528 | err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen); | 512 | err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen); |
| 529 | else | 513 | else |
| 530 | err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen); | 514 | err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen); |
| 531 | 515 | ||
| 532 | if (err) | 516 | if (err) |
| 533 | goto badkey; | 517 | goto badkey; |
| 534 | 518 | ||
| 535 | memcpy(ctx->hash_ctx, key, authkeylen); | 519 | memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen); |
| 536 | ctx->hash_key_len = authkeylen; | 520 | ctx->hash_key_len = keys.authkeylen; |
| 537 | 521 | ||
| 538 | return 0; | 522 | return 0; |
| 539 | 523 | ||
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index d7bb8bac36e9..785a9ded7bdf 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c | |||
| @@ -1058,7 +1058,7 @@ static struct platform_driver sahara_driver = { | |||
| 1058 | .driver = { | 1058 | .driver = { |
| 1059 | .name = SAHARA_NAME, | 1059 | .name = SAHARA_NAME, |
| 1060 | .owner = THIS_MODULE, | 1060 | .owner = THIS_MODULE, |
| 1061 | .of_match_table = of_match_ptr(sahara_dt_ids), | 1061 | .of_match_table = sahara_dt_ids, |
| 1062 | }, | 1062 | }, |
| 1063 | .id_table = sahara_platform_ids, | 1063 | .id_table = sahara_platform_ids, |
| 1064 | }; | 1064 | }; |
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 6cd0e6038583..905de4427e7c 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
| @@ -673,39 +673,20 @@ static int aead_setkey(struct crypto_aead *authenc, | |||
| 673 | const u8 *key, unsigned int keylen) | 673 | const u8 *key, unsigned int keylen) |
| 674 | { | 674 | { |
| 675 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); | 675 | struct talitos_ctx *ctx = crypto_aead_ctx(authenc); |
| 676 | struct rtattr *rta = (void *)key; | 676 | struct crypto_authenc_keys keys; |
| 677 | struct crypto_authenc_key_param *param; | ||
| 678 | unsigned int authkeylen; | ||
| 679 | unsigned int enckeylen; | ||
| 680 | |||
| 681 | if (!RTA_OK(rta, keylen)) | ||
| 682 | goto badkey; | ||
| 683 | 677 | ||
| 684 | if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) | 678 | if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) |
| 685 | goto badkey; | 679 | goto badkey; |
| 686 | 680 | ||
| 687 | if (RTA_PAYLOAD(rta) < sizeof(*param)) | 681 | if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE) |
| 688 | goto badkey; | 682 | goto badkey; |
| 689 | 683 | ||
| 690 | param = RTA_DATA(rta); | 684 | memcpy(ctx->key, keys.authkey, keys.authkeylen); |
| 691 | enckeylen = be32_to_cpu(param->enckeylen); | 685 | memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen); |
| 692 | |||
| 693 | key += RTA_ALIGN(rta->rta_len); | ||
| 694 | keylen -= RTA_ALIGN(rta->rta_len); | ||
| 695 | |||
| 696 | if (keylen < enckeylen) | ||
| 697 | goto badkey; | ||
| 698 | 686 | ||
| 699 | authkeylen = keylen - enckeylen; | 687 | ctx->keylen = keys.authkeylen + keys.enckeylen; |
| 700 | 688 | ctx->enckeylen = keys.enckeylen; | |
| 701 | if (keylen > TALITOS_MAX_KEY_SIZE) | 689 | ctx->authkeylen = keys.authkeylen; |
| 702 | goto badkey; | ||
| 703 | |||
| 704 | memcpy(&ctx->key, key, keylen); | ||
| 705 | |||
| 706 | ctx->keylen = keylen; | ||
| 707 | ctx->enckeylen = enckeylen; | ||
| 708 | ctx->authkeylen = authkeylen; | ||
| 709 | 690 | ||
| 710 | return 0; | 691 | return 0; |
| 711 | 692 | ||
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c index fa05e3c329bd..060eecc5dbc3 100644 --- a/drivers/crypto/tegra-aes.c +++ b/drivers/crypto/tegra-aes.c | |||
| @@ -27,6 +27,8 @@ | |||
| 27 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | 27 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
| 28 | */ | 28 | */ |
| 29 | 29 | ||
| 30 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 31 | |||
| 30 | #include <linux/module.h> | 32 | #include <linux/module.h> |
| 31 | #include <linux/init.h> | 33 | #include <linux/init.h> |
| 32 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
| @@ -199,8 +201,6 @@ static void aes_workqueue_handler(struct work_struct *work); | |||
| 199 | static DECLARE_WORK(aes_work, aes_workqueue_handler); | 201 | static DECLARE_WORK(aes_work, aes_workqueue_handler); |
| 200 | static struct workqueue_struct *aes_wq; | 202 | static struct workqueue_struct *aes_wq; |
| 201 | 203 | ||
| 202 | extern unsigned long long tegra_chip_uid(void); | ||
| 203 | |||
| 204 | static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset) | 204 | static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset) |
| 205 | { | 205 | { |
| 206 | return readl(dd->io_base + offset); | 206 | return readl(dd->io_base + offset); |
| @@ -713,13 +713,12 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed, | |||
| 713 | struct tegra_aes_dev *dd = aes_dev; | 713 | struct tegra_aes_dev *dd = aes_dev; |
| 714 | struct tegra_aes_ctx *ctx = &rng_ctx; | 714 | struct tegra_aes_ctx *ctx = &rng_ctx; |
| 715 | struct tegra_aes_slot *key_slot; | 715 | struct tegra_aes_slot *key_slot; |
| 716 | struct timespec ts; | ||
| 717 | int ret = 0; | 716 | int ret = 0; |
| 718 | u64 nsec, tmp[2]; | 717 | u8 tmp[16]; /* 16 bytes = 128 bits of entropy */ |
| 719 | u8 *dt; | 718 | u8 *dt; |
| 720 | 719 | ||
| 721 | if (!ctx || !dd) { | 720 | if (!ctx || !dd) { |
| 722 | dev_err(dd->dev, "ctx=0x%x, dd=0x%x\n", | 721 | pr_err("ctx=0x%x, dd=0x%x\n", |
| 723 | (unsigned int)ctx, (unsigned int)dd); | 722 | (unsigned int)ctx, (unsigned int)dd); |
| 724 | return -EINVAL; | 723 | return -EINVAL; |
| 725 | } | 724 | } |
| @@ -778,14 +777,8 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed, | |||
| 778 | if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) { | 777 | if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) { |
| 779 | dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128; | 778 | dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128; |
| 780 | } else { | 779 | } else { |
| 781 | getnstimeofday(&ts); | 780 | get_random_bytes(tmp, sizeof(tmp)); |
| 782 | nsec = timespec_to_ns(&ts); | 781 | dt = tmp; |
| 783 | do_div(nsec, 1000); | ||
| 784 | nsec ^= dd->ctr << 56; | ||
| 785 | dd->ctr++; | ||
| 786 | tmp[0] = nsec; | ||
| 787 | tmp[1] = tegra_chip_uid(); | ||
| 788 | dt = (u8 *)tmp; | ||
| 789 | } | 782 | } |
| 790 | memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ); | 783 | memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ); |
| 791 | 784 | ||
| @@ -804,7 +797,7 @@ static int tegra_aes_cra_init(struct crypto_tfm *tfm) | |||
| 804 | return 0; | 797 | return 0; |
| 805 | } | 798 | } |
| 806 | 799 | ||
| 807 | void tegra_aes_cra_exit(struct crypto_tfm *tfm) | 800 | static void tegra_aes_cra_exit(struct crypto_tfm *tfm) |
| 808 | { | 801 | { |
| 809 | struct tegra_aes_ctx *ctx = | 802 | struct tegra_aes_ctx *ctx = |
| 810 | crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm); | 803 | crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm); |
| @@ -924,7 +917,7 @@ static int tegra_aes_probe(struct platform_device *pdev) | |||
| 924 | } | 917 | } |
| 925 | 918 | ||
| 926 | /* Initialize the vde clock */ | 919 | /* Initialize the vde clock */ |
| 927 | dd->aes_clk = clk_get(dev, "vde"); | 920 | dd->aes_clk = devm_clk_get(dev, "vde"); |
| 928 | if (IS_ERR(dd->aes_clk)) { | 921 | if (IS_ERR(dd->aes_clk)) { |
| 929 | dev_err(dev, "iclock intialization failed.\n"); | 922 | dev_err(dev, "iclock intialization failed.\n"); |
| 930 | err = -ENODEV; | 923 | err = -ENODEV; |
| @@ -1033,8 +1026,6 @@ out: | |||
| 1033 | if (dd->buf_out) | 1026 | if (dd->buf_out) |
| 1034 | dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, | 1027 | dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, |
| 1035 | dd->buf_out, dd->dma_buf_out); | 1028 | dd->buf_out, dd->dma_buf_out); |
| 1036 | if (!IS_ERR(dd->aes_clk)) | ||
| 1037 | clk_put(dd->aes_clk); | ||
| 1038 | if (aes_wq) | 1029 | if (aes_wq) |
| 1039 | destroy_workqueue(aes_wq); | 1030 | destroy_workqueue(aes_wq); |
| 1040 | spin_lock(&list_lock); | 1031 | spin_lock(&list_lock); |
| @@ -1068,7 +1059,6 @@ static int tegra_aes_remove(struct platform_device *pdev) | |||
| 1068 | dd->buf_in, dd->dma_buf_in); | 1059 | dd->buf_in, dd->dma_buf_in); |
| 1069 | dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, | 1060 | dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, |
| 1070 | dd->buf_out, dd->dma_buf_out); | 1061 | dd->buf_out, dd->dma_buf_out); |
| 1071 | clk_put(dd->aes_clk); | ||
| 1072 | aes_dev = NULL; | 1062 | aes_dev = NULL; |
| 1073 | 1063 | ||
| 1074 | return 0; | 1064 | return 0; |
diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h new file mode 100644 index 000000000000..f57eb7b5c23b --- /dev/null +++ b/include/asm-generic/simd.h | |||
| @@ -0,0 +1,14 @@ | |||
| 1 | |||
| 2 | #include <linux/hardirq.h> | ||
| 3 | |||
| 4 | /* | ||
| 5 | * may_use_simd - whether it is allowable at this time to issue SIMD | ||
| 6 | * instructions or access the SIMD register file | ||
| 7 | * | ||
| 8 | * As architectures typically don't preserve the SIMD register file when | ||
| 9 | * taking an interrupt, !in_interrupt() should be a reasonable default. | ||
| 10 | */ | ||
| 11 | static __must_check inline bool may_use_simd(void) | ||
| 12 | { | ||
| 13 | return !in_interrupt(); | ||
| 14 | } | ||
diff --git a/arch/x86/include/asm/crypto/ablk_helper.h b/include/crypto/ablk_helper.h index 4f93df50c23e..4f93df50c23e 100644 --- a/arch/x86/include/asm/crypto/ablk_helper.h +++ b/include/crypto/ablk_helper.h | |||
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 418d270e1806..e73c19e90e38 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h | |||
| @@ -386,5 +386,21 @@ static inline int crypto_requires_sync(u32 type, u32 mask) | |||
| 386 | return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC; | 386 | return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC; |
| 387 | } | 387 | } |
| 388 | 388 | ||
| 389 | #endif /* _CRYPTO_ALGAPI_H */ | 389 | noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size); |
| 390 | |||
| 391 | /** | ||
| 392 | * crypto_memneq - Compare two areas of memory without leaking | ||
| 393 | * timing information. | ||
| 394 | * | ||
| 395 | * @a: One area of memory | ||
| 396 | * @b: Another area of memory | ||
| 397 | * @size: The size of the area. | ||
| 398 | * | ||
| 399 | * Returns 0 when data is equal, 1 otherwise. | ||
| 400 | */ | ||
| 401 | static inline int crypto_memneq(const void *a, const void *b, size_t size) | ||
| 402 | { | ||
| 403 | return __crypto_memneq(a, b, size) != 0UL ? 1 : 0; | ||
| 404 | } | ||
| 390 | 405 | ||
| 406 | #endif /* _CRYPTO_ALGAPI_H */ | ||
diff --git a/include/crypto/authenc.h b/include/crypto/authenc.h index e47b044929a8..6775059539b5 100644 --- a/include/crypto/authenc.h +++ b/include/crypto/authenc.h | |||
| @@ -23,5 +23,15 @@ struct crypto_authenc_key_param { | |||
| 23 | __be32 enckeylen; | 23 | __be32 enckeylen; |
| 24 | }; | 24 | }; |
| 25 | 25 | ||
| 26 | #endif /* _CRYPTO_AUTHENC_H */ | 26 | struct crypto_authenc_keys { |
| 27 | const u8 *authkey; | ||
| 28 | const u8 *enckey; | ||
| 29 | |||
| 30 | unsigned int authkeylen; | ||
| 31 | unsigned int enckeylen; | ||
| 32 | }; | ||
| 27 | 33 | ||
| 34 | int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, | ||
| 35 | unsigned int keylen); | ||
| 36 | |||
| 37 | #endif /* _CRYPTO_AUTHENC_H */ | ||
diff --git a/include/linux/padata.h b/include/linux/padata.h index 86292beebfe2..438694650471 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h | |||
| @@ -129,10 +129,9 @@ struct parallel_data { | |||
| 129 | struct padata_serial_queue __percpu *squeue; | 129 | struct padata_serial_queue __percpu *squeue; |
| 130 | atomic_t reorder_objects; | 130 | atomic_t reorder_objects; |
| 131 | atomic_t refcnt; | 131 | atomic_t refcnt; |
| 132 | atomic_t seq_nr; | ||
| 132 | struct padata_cpumask cpumask; | 133 | struct padata_cpumask cpumask; |
| 133 | spinlock_t lock ____cacheline_aligned; | 134 | spinlock_t lock ____cacheline_aligned; |
| 134 | spinlock_t seq_lock; | ||
| 135 | unsigned int seq_nr; | ||
| 136 | unsigned int processed; | 135 | unsigned int processed; |
| 137 | struct timer_list timer; | 136 | struct timer_list timer; |
| 138 | }; | 137 | }; |
diff --git a/kernel/padata.c b/kernel/padata.c index 07af2c95dcfe..2abd25d79cc8 100644 --- a/kernel/padata.c +++ b/kernel/padata.c | |||
| @@ -46,6 +46,7 @@ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) | |||
| 46 | 46 | ||
| 47 | static int padata_cpu_hash(struct parallel_data *pd) | 47 | static int padata_cpu_hash(struct parallel_data *pd) |
| 48 | { | 48 | { |
| 49 | unsigned int seq_nr; | ||
| 49 | int cpu_index; | 50 | int cpu_index; |
| 50 | 51 | ||
| 51 | /* | 52 | /* |
| @@ -53,10 +54,8 @@ static int padata_cpu_hash(struct parallel_data *pd) | |||
| 53 | * seq_nr mod. number of cpus in use. | 54 | * seq_nr mod. number of cpus in use. |
| 54 | */ | 55 | */ |
| 55 | 56 | ||
| 56 | spin_lock(&pd->seq_lock); | 57 | seq_nr = atomic_inc_return(&pd->seq_nr); |
| 57 | cpu_index = pd->seq_nr % cpumask_weight(pd->cpumask.pcpu); | 58 | cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu); |
| 58 | pd->seq_nr++; | ||
| 59 | spin_unlock(&pd->seq_lock); | ||
| 60 | 59 | ||
| 61 | return padata_index_to_cpu(pd, cpu_index); | 60 | return padata_index_to_cpu(pd, cpu_index); |
| 62 | } | 61 | } |
| @@ -429,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst, | |||
| 429 | padata_init_pqueues(pd); | 428 | padata_init_pqueues(pd); |
| 430 | padata_init_squeues(pd); | 429 | padata_init_squeues(pd); |
| 431 | setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); | 430 | setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); |
| 432 | pd->seq_nr = 0; | 431 | atomic_set(&pd->seq_nr, -1); |
| 433 | atomic_set(&pd->reorder_objects, 0); | 432 | atomic_set(&pd->reorder_objects, 0); |
| 434 | atomic_set(&pd->refcnt, 0); | 433 | atomic_set(&pd->refcnt, 0); |
| 435 | pd->pinst = pinst; | 434 | pd->pinst = pinst; |
