diff options
author | Herbert Xu <herbert@gondor.apana.org.au> | 2016-10-09 23:19:47 -0400 |
---|---|---|
committer | Herbert Xu <herbert@gondor.apana.org.au> | 2016-10-09 23:19:47 -0400 |
commit | c3afafa47898e34eb49828ec4ac92bcdc81c8f0c (patch) | |
tree | 055f603e131de9c4d36dafb1856fe69c737152de | |
parent | f97581cfa6e7db9818520597b8a44f8268d75013 (diff) | |
parent | 80da44c29d997e28c4442825f35f4ac339813877 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Merge the crypto tree to pull in vmx ghash fix.
-rw-r--r-- | arch/arm/crypto/aes-ce-glue.c | 2 | ||||
-rw-r--r-- | arch/arm64/crypto/aes-glue.c | 2 | ||||
-rw-r--r-- | arch/x86/crypto/sha256-mb/sha256_mb.c | 4 | ||||
-rw-r--r-- | arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S | 7 | ||||
-rw-r--r-- | arch/x86/crypto/sha512-mb/sha512_mb.c | 4 | ||||
-rw-r--r-- | crypto/blkcipher.c | 3 | ||||
-rw-r--r-- | crypto/cryptd.c | 12 | ||||
-rw-r--r-- | crypto/echainiv.c | 115 | ||||
-rw-r--r-- | crypto/ghash-generic.c | 13 | ||||
-rw-r--r-- | crypto/rsa-pkcs1pad.c | 41 | ||||
-rw-r--r-- | drivers/char/hw_random/Kconfig | 2 | ||||
-rw-r--r-- | drivers/crypto/caam/caamalg.c | 77 | ||||
-rw-r--r-- | drivers/crypto/qat/qat_common/qat_algs.c | 4 | ||||
-rw-r--r-- | drivers/crypto/vmx/aes_xts.c | 2 | ||||
-rw-r--r-- | drivers/crypto/vmx/ghash.c | 31 | ||||
-rw-r--r-- | include/crypto/ghash.h | 23 | ||||
-rw-r--r-- | security/keys/encrypted-keys/encrypted.c | 11 |
17 files changed, 157 insertions(+), 196 deletions(-)
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c index da3c0428507b..aef022a87c53 100644 --- a/arch/arm/crypto/aes-ce-glue.c +++ b/arch/arm/crypto/aes-ce-glue.c | |||
@@ -284,7 +284,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | |||
284 | err = blkcipher_walk_done(desc, &walk, | 284 | err = blkcipher_walk_done(desc, &walk, |
285 | walk.nbytes % AES_BLOCK_SIZE); | 285 | walk.nbytes % AES_BLOCK_SIZE); |
286 | } | 286 | } |
287 | if (nbytes) { | 287 | if (walk.nbytes % AES_BLOCK_SIZE) { |
288 | u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE; | 288 | u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE; |
289 | u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE; | 289 | u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE; |
290 | u8 __aligned(8) tail[AES_BLOCK_SIZE]; | 290 | u8 __aligned(8) tail[AES_BLOCK_SIZE]; |
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c index 5c888049d061..6b2aa0fd6cd0 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c | |||
@@ -216,7 +216,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | |||
216 | err = blkcipher_walk_done(desc, &walk, | 216 | err = blkcipher_walk_done(desc, &walk, |
217 | walk.nbytes % AES_BLOCK_SIZE); | 217 | walk.nbytes % AES_BLOCK_SIZE); |
218 | } | 218 | } |
219 | if (nbytes) { | 219 | if (walk.nbytes % AES_BLOCK_SIZE) { |
220 | u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE; | 220 | u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE; |
221 | u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE; | 221 | u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE; |
222 | u8 __aligned(8) tail[AES_BLOCK_SIZE]; | 222 | u8 __aligned(8) tail[AES_BLOCK_SIZE]; |
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c index 89fa85e8b10c..6f97fb33ae21 100644 --- a/arch/x86/crypto/sha256-mb/sha256_mb.c +++ b/arch/x86/crypto/sha256-mb/sha256_mb.c | |||
@@ -485,10 +485,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx, | |||
485 | 485 | ||
486 | req = cast_mcryptd_ctx_to_req(req_ctx); | 486 | req = cast_mcryptd_ctx_to_req(req_ctx); |
487 | if (irqs_disabled()) | 487 | if (irqs_disabled()) |
488 | rctx->complete(&req->base, ret); | 488 | req_ctx->complete(&req->base, ret); |
489 | else { | 489 | else { |
490 | local_bh_disable(); | 490 | local_bh_disable(); |
491 | rctx->complete(&req->base, ret); | 491 | req_ctx->complete(&req->base, ret); |
492 | local_bh_enable(); | 492 | local_bh_enable(); |
493 | } | 493 | } |
494 | } | 494 | } |
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S index b691da981cd9..a78a0694ddef 100644 --- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S +++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S | |||
@@ -265,13 +265,14 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2) | |||
265 | vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0 | 265 | vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0 |
266 | vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0 | 266 | vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0 |
267 | vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0 | 267 | vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0 |
268 | movl _args_digest+4*32(state, idx, 4), tmp2_w | 268 | vmovd _args_digest(state , idx, 4) , %xmm0 |
269 | vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1 | 269 | vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1 |
270 | vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1 | 270 | vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1 |
271 | vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1 | 271 | vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1 |
272 | 272 | ||
273 | vmovdqu %xmm0, _result_digest(job_rax) | 273 | vmovdqu %xmm0, _result_digest(job_rax) |
274 | movl tmp2_w, _result_digest+1*16(job_rax) | 274 | offset = (_result_digest + 1*16) |
275 | vmovdqu %xmm1, offset(job_rax) | ||
275 | 276 | ||
276 | pop %rbx | 277 | pop %rbx |
277 | 278 | ||
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c index f4cf5b78fd36..d210174a52b0 100644 --- a/arch/x86/crypto/sha512-mb/sha512_mb.c +++ b/arch/x86/crypto/sha512-mb/sha512_mb.c | |||
@@ -497,10 +497,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx, | |||
497 | 497 | ||
498 | req = cast_mcryptd_ctx_to_req(req_ctx); | 498 | req = cast_mcryptd_ctx_to_req(req_ctx); |
499 | if (irqs_disabled()) | 499 | if (irqs_disabled()) |
500 | rctx->complete(&req->base, ret); | 500 | req_ctx->complete(&req->base, ret); |
501 | else { | 501 | else { |
502 | local_bh_disable(); | 502 | local_bh_disable(); |
503 | rctx->complete(&req->base, ret); | 503 | req_ctx->complete(&req->base, ret); |
504 | local_bh_enable(); | 504 | local_bh_enable(); |
505 | } | 505 | } |
506 | } | 506 | } |
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 369999530108..a832426820e8 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c | |||
@@ -233,6 +233,8 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc, | |||
233 | return blkcipher_walk_done(desc, walk, -EINVAL); | 233 | return blkcipher_walk_done(desc, walk, -EINVAL); |
234 | } | 234 | } |
235 | 235 | ||
236 | bsize = min(walk->walk_blocksize, n); | ||
237 | |||
236 | walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY | | 238 | walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY | |
237 | BLKCIPHER_WALK_DIFF); | 239 | BLKCIPHER_WALK_DIFF); |
238 | if (!scatterwalk_aligned(&walk->in, walk->alignmask) || | 240 | if (!scatterwalk_aligned(&walk->in, walk->alignmask) || |
@@ -245,7 +247,6 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc, | |||
245 | } | 247 | } |
246 | } | 248 | } |
247 | 249 | ||
248 | bsize = min(walk->walk_blocksize, n); | ||
249 | n = scatterwalk_clamp(&walk->in, n); | 250 | n = scatterwalk_clamp(&walk->in, n); |
250 | n = scatterwalk_clamp(&walk->out, n); | 251 | n = scatterwalk_clamp(&walk->out, n); |
251 | 252 | ||
diff --git a/crypto/cryptd.c b/crypto/cryptd.c index cf8037a87b2d..0c654e59f215 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c | |||
@@ -631,9 +631,14 @@ static int cryptd_hash_export(struct ahash_request *req, void *out) | |||
631 | 631 | ||
632 | static int cryptd_hash_import(struct ahash_request *req, const void *in) | 632 | static int cryptd_hash_import(struct ahash_request *req, const void *in) |
633 | { | 633 | { |
634 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | 634 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
635 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm); | ||
636 | struct shash_desc *desc = cryptd_shash_desc(req); | ||
637 | |||
638 | desc->tfm = ctx->child; | ||
639 | desc->flags = req->base.flags; | ||
635 | 640 | ||
636 | return crypto_shash_import(&rctx->desc, in); | 641 | return crypto_shash_import(desc, in); |
637 | } | 642 | } |
638 | 643 | ||
639 | static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, | 644 | static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, |
@@ -733,13 +738,14 @@ static void cryptd_aead_crypt(struct aead_request *req, | |||
733 | rctx = aead_request_ctx(req); | 738 | rctx = aead_request_ctx(req); |
734 | compl = rctx->complete; | 739 | compl = rctx->complete; |
735 | 740 | ||
741 | tfm = crypto_aead_reqtfm(req); | ||
742 | |||
736 | if (unlikely(err == -EINPROGRESS)) | 743 | if (unlikely(err == -EINPROGRESS)) |
737 | goto out; | 744 | goto out; |
738 | aead_request_set_tfm(req, child); | 745 | aead_request_set_tfm(req, child); |
739 | err = crypt( req ); | 746 | err = crypt( req ); |
740 | 747 | ||
741 | out: | 748 | out: |
742 | tfm = crypto_aead_reqtfm(req); | ||
743 | ctx = crypto_aead_ctx(tfm); | 749 | ctx = crypto_aead_ctx(tfm); |
744 | refcnt = atomic_read(&ctx->refcnt); | 750 | refcnt = atomic_read(&ctx->refcnt); |
745 | 751 | ||
diff --git a/crypto/echainiv.c b/crypto/echainiv.c index 1b01fe98e91f..e3d889b122e0 100644 --- a/crypto/echainiv.c +++ b/crypto/echainiv.c | |||
@@ -1,8 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * echainiv: Encrypted Chain IV Generator | 2 | * echainiv: Encrypted Chain IV Generator |
3 | * | 3 | * |
4 | * This generator generates an IV based on a sequence number by xoring it | 4 | * This generator generates an IV based on a sequence number by multiplying |
5 | * with a salt and then encrypting it with the same key as used to encrypt | 5 | * it with a salt and then encrypting it with the same key as used to encrypt |
6 | * the plain text. This algorithm requires that the block size be equal | 6 | * the plain text. This algorithm requires that the block size be equal |
7 | * to the IV size. It is mainly useful for CBC. | 7 | * to the IV size. It is mainly useful for CBC. |
8 | * | 8 | * |
@@ -24,81 +24,17 @@ | |||
24 | #include <linux/err.h> | 24 | #include <linux/err.h> |
25 | #include <linux/init.h> | 25 | #include <linux/init.h> |
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/mm.h> | ||
28 | #include <linux/module.h> | 27 | #include <linux/module.h> |
29 | #include <linux/percpu.h> | 28 | #include <linux/slab.h> |
30 | #include <linux/spinlock.h> | ||
31 | #include <linux/string.h> | 29 | #include <linux/string.h> |
32 | 30 | ||
33 | #define MAX_IV_SIZE 16 | ||
34 | |||
35 | static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv); | ||
36 | |||
37 | /* We don't care if we get preempted and read/write IVs from the next CPU. */ | ||
38 | static void echainiv_read_iv(u8 *dst, unsigned size) | ||
39 | { | ||
40 | u32 *a = (u32 *)dst; | ||
41 | u32 __percpu *b = echainiv_iv; | ||
42 | |||
43 | for (; size >= 4; size -= 4) { | ||
44 | *a++ = this_cpu_read(*b); | ||
45 | b++; | ||
46 | } | ||
47 | } | ||
48 | |||
49 | static void echainiv_write_iv(const u8 *src, unsigned size) | ||
50 | { | ||
51 | const u32 *a = (const u32 *)src; | ||
52 | u32 __percpu *b = echainiv_iv; | ||
53 | |||
54 | for (; size >= 4; size -= 4) { | ||
55 | this_cpu_write(*b, *a); | ||
56 | a++; | ||
57 | b++; | ||
58 | } | ||
59 | } | ||
60 | |||
61 | static void echainiv_encrypt_complete2(struct aead_request *req, int err) | ||
62 | { | ||
63 | struct aead_request *subreq = aead_request_ctx(req); | ||
64 | struct crypto_aead *geniv; | ||
65 | unsigned int ivsize; | ||
66 | |||
67 | if (err == -EINPROGRESS) | ||
68 | return; | ||
69 | |||
70 | if (err) | ||
71 | goto out; | ||
72 | |||
73 | geniv = crypto_aead_reqtfm(req); | ||
74 | ivsize = crypto_aead_ivsize(geniv); | ||
75 | |||
76 | echainiv_write_iv(subreq->iv, ivsize); | ||
77 | |||
78 | if (req->iv != subreq->iv) | ||
79 | memcpy(req->iv, subreq->iv, ivsize); | ||
80 | |||
81 | out: | ||
82 | if (req->iv != subreq->iv) | ||
83 | kzfree(subreq->iv); | ||
84 | } | ||
85 | |||
86 | static void echainiv_encrypt_complete(struct crypto_async_request *base, | ||
87 | int err) | ||
88 | { | ||
89 | struct aead_request *req = base->data; | ||
90 | |||
91 | echainiv_encrypt_complete2(req, err); | ||
92 | aead_request_complete(req, err); | ||
93 | } | ||
94 | |||
95 | static int echainiv_encrypt(struct aead_request *req) | 31 | static int echainiv_encrypt(struct aead_request *req) |
96 | { | 32 | { |
97 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); | 33 | struct crypto_aead *geniv = crypto_aead_reqtfm(req); |
98 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); | 34 | struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv); |
99 | struct aead_request *subreq = aead_request_ctx(req); | 35 | struct aead_request *subreq = aead_request_ctx(req); |
100 | crypto_completion_t compl; | 36 | __be64 nseqno; |
101 | void *data; | 37 | u64 seqno; |
102 | u8 *info; | 38 | u8 *info; |
103 | unsigned int ivsize = crypto_aead_ivsize(geniv); | 39 | unsigned int ivsize = crypto_aead_ivsize(geniv); |
104 | int err; | 40 | int err; |
@@ -108,8 +44,6 @@ static int echainiv_encrypt(struct aead_request *req) | |||
108 | 44 | ||
109 | aead_request_set_tfm(subreq, ctx->child); | 45 | aead_request_set_tfm(subreq, ctx->child); |
110 | 46 | ||
111 | compl = echainiv_encrypt_complete; | ||
112 | data = req; | ||
113 | info = req->iv; | 47 | info = req->iv; |
114 | 48 | ||
115 | if (req->src != req->dst) { | 49 | if (req->src != req->dst) { |
@@ -127,29 +61,30 @@ static int echainiv_encrypt(struct aead_request *req) | |||
127 | return err; | 61 | return err; |
128 | } | 62 | } |
129 | 63 | ||
130 | if (unlikely(!IS_ALIGNED((unsigned long)info, | 64 | aead_request_set_callback(subreq, req->base.flags, |
131 | crypto_aead_alignmask(geniv) + 1))) { | 65 | req->base.complete, req->base.data); |
132 | info = kmalloc(ivsize, req->base.flags & | ||
133 | CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL: | ||
134 | GFP_ATOMIC); | ||
135 | if (!info) | ||
136 | return -ENOMEM; | ||
137 | |||
138 | memcpy(info, req->iv, ivsize); | ||
139 | } | ||
140 | |||
141 | aead_request_set_callback(subreq, req->base.flags, compl, data); | ||
142 | aead_request_set_crypt(subreq, req->dst, req->dst, | 66 | aead_request_set_crypt(subreq, req->dst, req->dst, |
143 | req->cryptlen, info); | 67 | req->cryptlen, info); |
144 | aead_request_set_ad(subreq, req->assoclen); | 68 | aead_request_set_ad(subreq, req->assoclen); |
145 | 69 | ||
146 | crypto_xor(info, ctx->salt, ivsize); | 70 | memcpy(&nseqno, info + ivsize - 8, 8); |
71 | seqno = be64_to_cpu(nseqno); | ||
72 | memset(info, 0, ivsize); | ||
73 | |||
147 | scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1); | 74 | scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1); |
148 | echainiv_read_iv(info, ivsize); | ||
149 | 75 | ||
150 | err = crypto_aead_encrypt(subreq); | 76 | do { |
151 | echainiv_encrypt_complete2(req, err); | 77 | u64 a; |
152 | return err; | 78 | |
79 | memcpy(&a, ctx->salt + ivsize - 8, 8); | ||
80 | |||
81 | a |= 1; | ||
82 | a *= seqno; | ||
83 | |||
84 | memcpy(info + ivsize - 8, &a, 8); | ||
85 | } while ((ivsize -= 8)); | ||
86 | |||
87 | return crypto_aead_encrypt(subreq); | ||
153 | } | 88 | } |
154 | 89 | ||
155 | static int echainiv_decrypt(struct aead_request *req) | 90 | static int echainiv_decrypt(struct aead_request *req) |
@@ -196,8 +131,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl, | |||
196 | alg = crypto_spawn_aead_alg(spawn); | 131 | alg = crypto_spawn_aead_alg(spawn); |
197 | 132 | ||
198 | err = -EINVAL; | 133 | err = -EINVAL; |
199 | if (inst->alg.ivsize & (sizeof(u32) - 1) || | 134 | if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize) |
200 | inst->alg.ivsize > MAX_IV_SIZE) | ||
201 | goto free_inst; | 135 | goto free_inst; |
202 | 136 | ||
203 | inst->alg.encrypt = echainiv_encrypt; | 137 | inst->alg.encrypt = echainiv_encrypt; |
@@ -206,7 +140,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl, | |||
206 | inst->alg.init = aead_init_geniv; | 140 | inst->alg.init = aead_init_geniv; |
207 | inst->alg.exit = aead_exit_geniv; | 141 | inst->alg.exit = aead_exit_geniv; |
208 | 142 | ||
209 | inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; | ||
210 | inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx); | 143 | inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx); |
211 | inst->alg.base.cra_ctxsize += inst->alg.ivsize; | 144 | inst->alg.base.cra_ctxsize += inst->alg.ivsize; |
212 | 145 | ||
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c index bac70995e064..12ad3e3a84e3 100644 --- a/crypto/ghash-generic.c +++ b/crypto/ghash-generic.c | |||
@@ -14,24 +14,13 @@ | |||
14 | 14 | ||
15 | #include <crypto/algapi.h> | 15 | #include <crypto/algapi.h> |
16 | #include <crypto/gf128mul.h> | 16 | #include <crypto/gf128mul.h> |
17 | #include <crypto/ghash.h> | ||
17 | #include <crypto/internal/hash.h> | 18 | #include <crypto/internal/hash.h> |
18 | #include <linux/crypto.h> | 19 | #include <linux/crypto.h> |
19 | #include <linux/init.h> | 20 | #include <linux/init.h> |
20 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
21 | #include <linux/module.h> | 22 | #include <linux/module.h> |
22 | 23 | ||
23 | #define GHASH_BLOCK_SIZE 16 | ||
24 | #define GHASH_DIGEST_SIZE 16 | ||
25 | |||
26 | struct ghash_ctx { | ||
27 | struct gf128mul_4k *gf128; | ||
28 | }; | ||
29 | |||
30 | struct ghash_desc_ctx { | ||
31 | u8 buffer[GHASH_BLOCK_SIZE]; | ||
32 | u32 bytes; | ||
33 | }; | ||
34 | |||
35 | static int ghash_init(struct shash_desc *desc) | 24 | static int ghash_init(struct shash_desc *desc) |
36 | { | 25 | { |
37 | struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); | 26 | struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); |
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index 877019a6d3ea..8baab4307f7b 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c | |||
@@ -298,41 +298,48 @@ static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err) | |||
298 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | 298 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); |
299 | struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); | 299 | struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); |
300 | struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); | 300 | struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); |
301 | unsigned int dst_len; | ||
301 | unsigned int pos; | 302 | unsigned int pos; |
302 | 303 | u8 *out_buf; | |
303 | if (err == -EOVERFLOW) | ||
304 | /* Decrypted value had no leading 0 byte */ | ||
305 | err = -EINVAL; | ||
306 | 304 | ||
307 | if (err) | 305 | if (err) |
308 | goto done; | 306 | goto done; |
309 | 307 | ||
310 | if (req_ctx->child_req.dst_len != ctx->key_size - 1) { | 308 | err = -EINVAL; |
311 | err = -EINVAL; | 309 | dst_len = req_ctx->child_req.dst_len; |
310 | if (dst_len < ctx->key_size - 1) | ||
312 | goto done; | 311 | goto done; |
312 | |||
313 | out_buf = req_ctx->out_buf; | ||
314 | if (dst_len == ctx->key_size) { | ||
315 | if (out_buf[0] != 0x00) | ||
316 | /* Decrypted value had no leading 0 byte */ | ||
317 | goto done; | ||
318 | |||
319 | dst_len--; | ||
320 | out_buf++; | ||
313 | } | 321 | } |
314 | 322 | ||
315 | if (req_ctx->out_buf[0] != 0x02) { | 323 | if (out_buf[0] != 0x02) |
316 | err = -EINVAL; | ||
317 | goto done; | 324 | goto done; |
318 | } | 325 | |
319 | for (pos = 1; pos < req_ctx->child_req.dst_len; pos++) | 326 | for (pos = 1; pos < dst_len; pos++) |
320 | if (req_ctx->out_buf[pos] == 0x00) | 327 | if (out_buf[pos] == 0x00) |
321 | break; | 328 | break; |
322 | if (pos < 9 || pos == req_ctx->child_req.dst_len) { | 329 | if (pos < 9 || pos == dst_len) |
323 | err = -EINVAL; | ||
324 | goto done; | 330 | goto done; |
325 | } | ||
326 | pos++; | 331 | pos++; |
327 | 332 | ||
328 | if (req->dst_len < req_ctx->child_req.dst_len - pos) | 333 | err = 0; |
334 | |||
335 | if (req->dst_len < dst_len - pos) | ||
329 | err = -EOVERFLOW; | 336 | err = -EOVERFLOW; |
330 | req->dst_len = req_ctx->child_req.dst_len - pos; | 337 | req->dst_len = dst_len - pos; |
331 | 338 | ||
332 | if (!err) | 339 | if (!err) |
333 | sg_copy_from_buffer(req->dst, | 340 | sg_copy_from_buffer(req->dst, |
334 | sg_nents_for_len(req->dst, req->dst_len), | 341 | sg_nents_for_len(req->dst, req->dst_len), |
335 | req_ctx->out_buf + pos, req->dst_len); | 342 | out_buf + pos, req->dst_len); |
336 | 343 | ||
337 | done: | 344 | done: |
338 | kzfree(req_ctx->out_buf); | 345 | kzfree(req_ctx->out_buf); |
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index fb9c7adfdb3a..200dab5136a7 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig | |||
@@ -244,7 +244,7 @@ config HW_RANDOM_TX4939 | |||
244 | 244 | ||
245 | config HW_RANDOM_MXC_RNGA | 245 | config HW_RANDOM_MXC_RNGA |
246 | tristate "Freescale i.MX RNGA Random Number Generator" | 246 | tristate "Freescale i.MX RNGA Random Number Generator" |
247 | depends on ARCH_HAS_RNGA | 247 | depends on SOC_IMX31 |
248 | default HW_RANDOM | 248 | default HW_RANDOM |
249 | ---help--- | 249 | ---help--- |
250 | This driver provides kernel-side support for the Random Number | 250 | This driver provides kernel-side support for the Random Number |
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index eb97562414d2..156aad167cd6 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
@@ -592,7 +592,10 @@ skip_enc: | |||
592 | 592 | ||
593 | /* Read and write assoclen bytes */ | 593 | /* Read and write assoclen bytes */ |
594 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | 594 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
595 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | 595 | if (alg->caam.geniv) |
596 | append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize); | ||
597 | else | ||
598 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | ||
596 | 599 | ||
597 | /* Skip assoc data */ | 600 | /* Skip assoc data */ |
598 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | 601 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); |
@@ -601,6 +604,14 @@ skip_enc: | |||
601 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | | 604 | append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | |
602 | KEY_VLF); | 605 | KEY_VLF); |
603 | 606 | ||
607 | if (alg->caam.geniv) { | ||
608 | append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | | ||
609 | LDST_SRCDST_BYTE_CONTEXT | | ||
610 | (ctx1_iv_off << LDST_OFFSET_SHIFT)); | ||
611 | append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | | ||
612 | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize); | ||
613 | } | ||
614 | |||
604 | /* Load Counter into CONTEXT1 reg */ | 615 | /* Load Counter into CONTEXT1 reg */ |
605 | if (is_rfc3686) | 616 | if (is_rfc3686) |
606 | append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | | 617 | append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | |
@@ -2184,7 +2195,7 @@ static void init_authenc_job(struct aead_request *req, | |||
2184 | 2195 | ||
2185 | init_aead_job(req, edesc, all_contig, encrypt); | 2196 | init_aead_job(req, edesc, all_contig, encrypt); |
2186 | 2197 | ||
2187 | if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt))) | 2198 | if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv)) |
2188 | append_load_as_imm(desc, req->iv, ivsize, | 2199 | append_load_as_imm(desc, req->iv, ivsize, |
2189 | LDST_CLASS_1_CCB | | 2200 | LDST_CLASS_1_CCB | |
2190 | LDST_SRCDST_BYTE_CONTEXT | | 2201 | LDST_SRCDST_BYTE_CONTEXT | |
@@ -2578,20 +2589,6 @@ static int aead_decrypt(struct aead_request *req) | |||
2578 | return ret; | 2589 | return ret; |
2579 | } | 2590 | } |
2580 | 2591 | ||
2581 | static int aead_givdecrypt(struct aead_request *req) | ||
2582 | { | ||
2583 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | ||
2584 | unsigned int ivsize = crypto_aead_ivsize(aead); | ||
2585 | |||
2586 | if (req->cryptlen < ivsize) | ||
2587 | return -EINVAL; | ||
2588 | |||
2589 | req->cryptlen -= ivsize; | ||
2590 | req->assoclen += ivsize; | ||
2591 | |||
2592 | return aead_decrypt(req); | ||
2593 | } | ||
2594 | |||
2595 | /* | 2592 | /* |
2596 | * allocate and map the ablkcipher extended descriptor for ablkcipher | 2593 | * allocate and map the ablkcipher extended descriptor for ablkcipher |
2597 | */ | 2594 | */ |
@@ -3251,7 +3248,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3251 | .setkey = aead_setkey, | 3248 | .setkey = aead_setkey, |
3252 | .setauthsize = aead_setauthsize, | 3249 | .setauthsize = aead_setauthsize, |
3253 | .encrypt = aead_encrypt, | 3250 | .encrypt = aead_encrypt, |
3254 | .decrypt = aead_givdecrypt, | 3251 | .decrypt = aead_decrypt, |
3255 | .ivsize = AES_BLOCK_SIZE, | 3252 | .ivsize = AES_BLOCK_SIZE, |
3256 | .maxauthsize = MD5_DIGEST_SIZE, | 3253 | .maxauthsize = MD5_DIGEST_SIZE, |
3257 | }, | 3254 | }, |
@@ -3297,7 +3294,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3297 | .setkey = aead_setkey, | 3294 | .setkey = aead_setkey, |
3298 | .setauthsize = aead_setauthsize, | 3295 | .setauthsize = aead_setauthsize, |
3299 | .encrypt = aead_encrypt, | 3296 | .encrypt = aead_encrypt, |
3300 | .decrypt = aead_givdecrypt, | 3297 | .decrypt = aead_decrypt, |
3301 | .ivsize = AES_BLOCK_SIZE, | 3298 | .ivsize = AES_BLOCK_SIZE, |
3302 | .maxauthsize = SHA1_DIGEST_SIZE, | 3299 | .maxauthsize = SHA1_DIGEST_SIZE, |
3303 | }, | 3300 | }, |
@@ -3343,7 +3340,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3343 | .setkey = aead_setkey, | 3340 | .setkey = aead_setkey, |
3344 | .setauthsize = aead_setauthsize, | 3341 | .setauthsize = aead_setauthsize, |
3345 | .encrypt = aead_encrypt, | 3342 | .encrypt = aead_encrypt, |
3346 | .decrypt = aead_givdecrypt, | 3343 | .decrypt = aead_decrypt, |
3347 | .ivsize = AES_BLOCK_SIZE, | 3344 | .ivsize = AES_BLOCK_SIZE, |
3348 | .maxauthsize = SHA224_DIGEST_SIZE, | 3345 | .maxauthsize = SHA224_DIGEST_SIZE, |
3349 | }, | 3346 | }, |
@@ -3389,7 +3386,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3389 | .setkey = aead_setkey, | 3386 | .setkey = aead_setkey, |
3390 | .setauthsize = aead_setauthsize, | 3387 | .setauthsize = aead_setauthsize, |
3391 | .encrypt = aead_encrypt, | 3388 | .encrypt = aead_encrypt, |
3392 | .decrypt = aead_givdecrypt, | 3389 | .decrypt = aead_decrypt, |
3393 | .ivsize = AES_BLOCK_SIZE, | 3390 | .ivsize = AES_BLOCK_SIZE, |
3394 | .maxauthsize = SHA256_DIGEST_SIZE, | 3391 | .maxauthsize = SHA256_DIGEST_SIZE, |
3395 | }, | 3392 | }, |
@@ -3435,7 +3432,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3435 | .setkey = aead_setkey, | 3432 | .setkey = aead_setkey, |
3436 | .setauthsize = aead_setauthsize, | 3433 | .setauthsize = aead_setauthsize, |
3437 | .encrypt = aead_encrypt, | 3434 | .encrypt = aead_encrypt, |
3438 | .decrypt = aead_givdecrypt, | 3435 | .decrypt = aead_decrypt, |
3439 | .ivsize = AES_BLOCK_SIZE, | 3436 | .ivsize = AES_BLOCK_SIZE, |
3440 | .maxauthsize = SHA384_DIGEST_SIZE, | 3437 | .maxauthsize = SHA384_DIGEST_SIZE, |
3441 | }, | 3438 | }, |
@@ -3481,7 +3478,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3481 | .setkey = aead_setkey, | 3478 | .setkey = aead_setkey, |
3482 | .setauthsize = aead_setauthsize, | 3479 | .setauthsize = aead_setauthsize, |
3483 | .encrypt = aead_encrypt, | 3480 | .encrypt = aead_encrypt, |
3484 | .decrypt = aead_givdecrypt, | 3481 | .decrypt = aead_decrypt, |
3485 | .ivsize = AES_BLOCK_SIZE, | 3482 | .ivsize = AES_BLOCK_SIZE, |
3486 | .maxauthsize = SHA512_DIGEST_SIZE, | 3483 | .maxauthsize = SHA512_DIGEST_SIZE, |
3487 | }, | 3484 | }, |
@@ -3527,7 +3524,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3527 | .setkey = aead_setkey, | 3524 | .setkey = aead_setkey, |
3528 | .setauthsize = aead_setauthsize, | 3525 | .setauthsize = aead_setauthsize, |
3529 | .encrypt = aead_encrypt, | 3526 | .encrypt = aead_encrypt, |
3530 | .decrypt = aead_givdecrypt, | 3527 | .decrypt = aead_decrypt, |
3531 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3528 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3532 | .maxauthsize = MD5_DIGEST_SIZE, | 3529 | .maxauthsize = MD5_DIGEST_SIZE, |
3533 | }, | 3530 | }, |
@@ -3575,7 +3572,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3575 | .setkey = aead_setkey, | 3572 | .setkey = aead_setkey, |
3576 | .setauthsize = aead_setauthsize, | 3573 | .setauthsize = aead_setauthsize, |
3577 | .encrypt = aead_encrypt, | 3574 | .encrypt = aead_encrypt, |
3578 | .decrypt = aead_givdecrypt, | 3575 | .decrypt = aead_decrypt, |
3579 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3576 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3580 | .maxauthsize = SHA1_DIGEST_SIZE, | 3577 | .maxauthsize = SHA1_DIGEST_SIZE, |
3581 | }, | 3578 | }, |
@@ -3623,7 +3620,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3623 | .setkey = aead_setkey, | 3620 | .setkey = aead_setkey, |
3624 | .setauthsize = aead_setauthsize, | 3621 | .setauthsize = aead_setauthsize, |
3625 | .encrypt = aead_encrypt, | 3622 | .encrypt = aead_encrypt, |
3626 | .decrypt = aead_givdecrypt, | 3623 | .decrypt = aead_decrypt, |
3627 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3624 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3628 | .maxauthsize = SHA224_DIGEST_SIZE, | 3625 | .maxauthsize = SHA224_DIGEST_SIZE, |
3629 | }, | 3626 | }, |
@@ -3671,7 +3668,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3671 | .setkey = aead_setkey, | 3668 | .setkey = aead_setkey, |
3672 | .setauthsize = aead_setauthsize, | 3669 | .setauthsize = aead_setauthsize, |
3673 | .encrypt = aead_encrypt, | 3670 | .encrypt = aead_encrypt, |
3674 | .decrypt = aead_givdecrypt, | 3671 | .decrypt = aead_decrypt, |
3675 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3672 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3676 | .maxauthsize = SHA256_DIGEST_SIZE, | 3673 | .maxauthsize = SHA256_DIGEST_SIZE, |
3677 | }, | 3674 | }, |
@@ -3719,7 +3716,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3719 | .setkey = aead_setkey, | 3716 | .setkey = aead_setkey, |
3720 | .setauthsize = aead_setauthsize, | 3717 | .setauthsize = aead_setauthsize, |
3721 | .encrypt = aead_encrypt, | 3718 | .encrypt = aead_encrypt, |
3722 | .decrypt = aead_givdecrypt, | 3719 | .decrypt = aead_decrypt, |
3723 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3720 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3724 | .maxauthsize = SHA384_DIGEST_SIZE, | 3721 | .maxauthsize = SHA384_DIGEST_SIZE, |
3725 | }, | 3722 | }, |
@@ -3767,7 +3764,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3767 | .setkey = aead_setkey, | 3764 | .setkey = aead_setkey, |
3768 | .setauthsize = aead_setauthsize, | 3765 | .setauthsize = aead_setauthsize, |
3769 | .encrypt = aead_encrypt, | 3766 | .encrypt = aead_encrypt, |
3770 | .decrypt = aead_givdecrypt, | 3767 | .decrypt = aead_decrypt, |
3771 | .ivsize = DES3_EDE_BLOCK_SIZE, | 3768 | .ivsize = DES3_EDE_BLOCK_SIZE, |
3772 | .maxauthsize = SHA512_DIGEST_SIZE, | 3769 | .maxauthsize = SHA512_DIGEST_SIZE, |
3773 | }, | 3770 | }, |
@@ -3813,7 +3810,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3813 | .setkey = aead_setkey, | 3810 | .setkey = aead_setkey, |
3814 | .setauthsize = aead_setauthsize, | 3811 | .setauthsize = aead_setauthsize, |
3815 | .encrypt = aead_encrypt, | 3812 | .encrypt = aead_encrypt, |
3816 | .decrypt = aead_givdecrypt, | 3813 | .decrypt = aead_decrypt, |
3817 | .ivsize = DES_BLOCK_SIZE, | 3814 | .ivsize = DES_BLOCK_SIZE, |
3818 | .maxauthsize = MD5_DIGEST_SIZE, | 3815 | .maxauthsize = MD5_DIGEST_SIZE, |
3819 | }, | 3816 | }, |
@@ -3859,7 +3856,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3859 | .setkey = aead_setkey, | 3856 | .setkey = aead_setkey, |
3860 | .setauthsize = aead_setauthsize, | 3857 | .setauthsize = aead_setauthsize, |
3861 | .encrypt = aead_encrypt, | 3858 | .encrypt = aead_encrypt, |
3862 | .decrypt = aead_givdecrypt, | 3859 | .decrypt = aead_decrypt, |
3863 | .ivsize = DES_BLOCK_SIZE, | 3860 | .ivsize = DES_BLOCK_SIZE, |
3864 | .maxauthsize = SHA1_DIGEST_SIZE, | 3861 | .maxauthsize = SHA1_DIGEST_SIZE, |
3865 | }, | 3862 | }, |
@@ -3905,7 +3902,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3905 | .setkey = aead_setkey, | 3902 | .setkey = aead_setkey, |
3906 | .setauthsize = aead_setauthsize, | 3903 | .setauthsize = aead_setauthsize, |
3907 | .encrypt = aead_encrypt, | 3904 | .encrypt = aead_encrypt, |
3908 | .decrypt = aead_givdecrypt, | 3905 | .decrypt = aead_decrypt, |
3909 | .ivsize = DES_BLOCK_SIZE, | 3906 | .ivsize = DES_BLOCK_SIZE, |
3910 | .maxauthsize = SHA224_DIGEST_SIZE, | 3907 | .maxauthsize = SHA224_DIGEST_SIZE, |
3911 | }, | 3908 | }, |
@@ -3951,7 +3948,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3951 | .setkey = aead_setkey, | 3948 | .setkey = aead_setkey, |
3952 | .setauthsize = aead_setauthsize, | 3949 | .setauthsize = aead_setauthsize, |
3953 | .encrypt = aead_encrypt, | 3950 | .encrypt = aead_encrypt, |
3954 | .decrypt = aead_givdecrypt, | 3951 | .decrypt = aead_decrypt, |
3955 | .ivsize = DES_BLOCK_SIZE, | 3952 | .ivsize = DES_BLOCK_SIZE, |
3956 | .maxauthsize = SHA256_DIGEST_SIZE, | 3953 | .maxauthsize = SHA256_DIGEST_SIZE, |
3957 | }, | 3954 | }, |
@@ -3997,7 +3994,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
3997 | .setkey = aead_setkey, | 3994 | .setkey = aead_setkey, |
3998 | .setauthsize = aead_setauthsize, | 3995 | .setauthsize = aead_setauthsize, |
3999 | .encrypt = aead_encrypt, | 3996 | .encrypt = aead_encrypt, |
4000 | .decrypt = aead_givdecrypt, | 3997 | .decrypt = aead_decrypt, |
4001 | .ivsize = DES_BLOCK_SIZE, | 3998 | .ivsize = DES_BLOCK_SIZE, |
4002 | .maxauthsize = SHA384_DIGEST_SIZE, | 3999 | .maxauthsize = SHA384_DIGEST_SIZE, |
4003 | }, | 4000 | }, |
@@ -4043,7 +4040,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
4043 | .setkey = aead_setkey, | 4040 | .setkey = aead_setkey, |
4044 | .setauthsize = aead_setauthsize, | 4041 | .setauthsize = aead_setauthsize, |
4045 | .encrypt = aead_encrypt, | 4042 | .encrypt = aead_encrypt, |
4046 | .decrypt = aead_givdecrypt, | 4043 | .decrypt = aead_decrypt, |
4047 | .ivsize = DES_BLOCK_SIZE, | 4044 | .ivsize = DES_BLOCK_SIZE, |
4048 | .maxauthsize = SHA512_DIGEST_SIZE, | 4045 | .maxauthsize = SHA512_DIGEST_SIZE, |
4049 | }, | 4046 | }, |
@@ -4092,7 +4089,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
4092 | .setkey = aead_setkey, | 4089 | .setkey = aead_setkey, |
4093 | .setauthsize = aead_setauthsize, | 4090 | .setauthsize = aead_setauthsize, |
4094 | .encrypt = aead_encrypt, | 4091 | .encrypt = aead_encrypt, |
4095 | .decrypt = aead_givdecrypt, | 4092 | .decrypt = aead_decrypt, |
4096 | .ivsize = CTR_RFC3686_IV_SIZE, | 4093 | .ivsize = CTR_RFC3686_IV_SIZE, |
4097 | .maxauthsize = MD5_DIGEST_SIZE, | 4094 | .maxauthsize = MD5_DIGEST_SIZE, |
4098 | }, | 4095 | }, |
@@ -4143,7 +4140,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
4143 | .setkey = aead_setkey, | 4140 | .setkey = aead_setkey, |
4144 | .setauthsize = aead_setauthsize, | 4141 | .setauthsize = aead_setauthsize, |
4145 | .encrypt = aead_encrypt, | 4142 | .encrypt = aead_encrypt, |
4146 | .decrypt = aead_givdecrypt, | 4143 | .decrypt = aead_decrypt, |
4147 | .ivsize = CTR_RFC3686_IV_SIZE, | 4144 | .ivsize = CTR_RFC3686_IV_SIZE, |
4148 | .maxauthsize = SHA1_DIGEST_SIZE, | 4145 | .maxauthsize = SHA1_DIGEST_SIZE, |
4149 | }, | 4146 | }, |
@@ -4194,7 +4191,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
4194 | .setkey = aead_setkey, | 4191 | .setkey = aead_setkey, |
4195 | .setauthsize = aead_setauthsize, | 4192 | .setauthsize = aead_setauthsize, |
4196 | .encrypt = aead_encrypt, | 4193 | .encrypt = aead_encrypt, |
4197 | .decrypt = aead_givdecrypt, | 4194 | .decrypt = aead_decrypt, |
4198 | .ivsize = CTR_RFC3686_IV_SIZE, | 4195 | .ivsize = CTR_RFC3686_IV_SIZE, |
4199 | .maxauthsize = SHA224_DIGEST_SIZE, | 4196 | .maxauthsize = SHA224_DIGEST_SIZE, |
4200 | }, | 4197 | }, |
@@ -4245,7 +4242,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
4245 | .setkey = aead_setkey, | 4242 | .setkey = aead_setkey, |
4246 | .setauthsize = aead_setauthsize, | 4243 | .setauthsize = aead_setauthsize, |
4247 | .encrypt = aead_encrypt, | 4244 | .encrypt = aead_encrypt, |
4248 | .decrypt = aead_givdecrypt, | 4245 | .decrypt = aead_decrypt, |
4249 | .ivsize = CTR_RFC3686_IV_SIZE, | 4246 | .ivsize = CTR_RFC3686_IV_SIZE, |
4250 | .maxauthsize = SHA256_DIGEST_SIZE, | 4247 | .maxauthsize = SHA256_DIGEST_SIZE, |
4251 | }, | 4248 | }, |
@@ -4296,7 +4293,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
4296 | .setkey = aead_setkey, | 4293 | .setkey = aead_setkey, |
4297 | .setauthsize = aead_setauthsize, | 4294 | .setauthsize = aead_setauthsize, |
4298 | .encrypt = aead_encrypt, | 4295 | .encrypt = aead_encrypt, |
4299 | .decrypt = aead_givdecrypt, | 4296 | .decrypt = aead_decrypt, |
4300 | .ivsize = CTR_RFC3686_IV_SIZE, | 4297 | .ivsize = CTR_RFC3686_IV_SIZE, |
4301 | .maxauthsize = SHA384_DIGEST_SIZE, | 4298 | .maxauthsize = SHA384_DIGEST_SIZE, |
4302 | }, | 4299 | }, |
@@ -4347,7 +4344,7 @@ static struct caam_aead_alg driver_aeads[] = { | |||
4347 | .setkey = aead_setkey, | 4344 | .setkey = aead_setkey, |
4348 | .setauthsize = aead_setauthsize, | 4345 | .setauthsize = aead_setauthsize, |
4349 | .encrypt = aead_encrypt, | 4346 | .encrypt = aead_encrypt, |
4350 | .decrypt = aead_givdecrypt, | 4347 | .decrypt = aead_decrypt, |
4351 | .ivsize = CTR_RFC3686_IV_SIZE, | 4348 | .ivsize = CTR_RFC3686_IV_SIZE, |
4352 | .maxauthsize = SHA512_DIGEST_SIZE, | 4349 | .maxauthsize = SHA512_DIGEST_SIZE, |
4353 | }, | 4350 | }, |
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 769148dbaeb3..20f35df8a01f 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c | |||
@@ -1260,8 +1260,8 @@ static struct crypto_alg qat_algs[] = { { | |||
1260 | .setkey = qat_alg_ablkcipher_xts_setkey, | 1260 | .setkey = qat_alg_ablkcipher_xts_setkey, |
1261 | .decrypt = qat_alg_ablkcipher_decrypt, | 1261 | .decrypt = qat_alg_ablkcipher_decrypt, |
1262 | .encrypt = qat_alg_ablkcipher_encrypt, | 1262 | .encrypt = qat_alg_ablkcipher_encrypt, |
1263 | .min_keysize = AES_MIN_KEY_SIZE, | 1263 | .min_keysize = 2 * AES_MIN_KEY_SIZE, |
1264 | .max_keysize = AES_MAX_KEY_SIZE, | 1264 | .max_keysize = 2 * AES_MAX_KEY_SIZE, |
1265 | .ivsize = AES_BLOCK_SIZE, | 1265 | .ivsize = AES_BLOCK_SIZE, |
1266 | }, | 1266 | }, |
1267 | }, | 1267 | }, |
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index cfb25413917c..24353ec336c5 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c | |||
@@ -129,8 +129,8 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc, | |||
129 | 129 | ||
130 | blkcipher_walk_init(&walk, dst, src, nbytes); | 130 | blkcipher_walk_init(&walk, dst, src, nbytes); |
131 | 131 | ||
132 | iv = (u8 *)walk.iv; | ||
133 | ret = blkcipher_walk_virt(desc, &walk); | 132 | ret = blkcipher_walk_virt(desc, &walk); |
133 | iv = walk.iv; | ||
134 | memset(tweak, 0, AES_BLOCK_SIZE); | 134 | memset(tweak, 0, AES_BLOCK_SIZE); |
135 | aes_p8_encrypt(iv, tweak, &ctx->tweak_key); | 135 | aes_p8_encrypt(iv, tweak, &ctx->tweak_key); |
136 | 136 | ||
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c index 6c999cb01b80..27a94a119009 100644 --- a/drivers/crypto/vmx/ghash.c +++ b/drivers/crypto/vmx/ghash.c | |||
@@ -26,16 +26,13 @@ | |||
26 | #include <linux/hardirq.h> | 26 | #include <linux/hardirq.h> |
27 | #include <asm/switch_to.h> | 27 | #include <asm/switch_to.h> |
28 | #include <crypto/aes.h> | 28 | #include <crypto/aes.h> |
29 | #include <crypto/ghash.h> | ||
29 | #include <crypto/scatterwalk.h> | 30 | #include <crypto/scatterwalk.h> |
30 | #include <crypto/internal/hash.h> | 31 | #include <crypto/internal/hash.h> |
31 | #include <crypto/b128ops.h> | 32 | #include <crypto/b128ops.h> |
32 | 33 | ||
33 | #define IN_INTERRUPT in_interrupt() | 34 | #define IN_INTERRUPT in_interrupt() |
34 | 35 | ||
35 | #define GHASH_BLOCK_SIZE (16) | ||
36 | #define GHASH_DIGEST_SIZE (16) | ||
37 | #define GHASH_KEY_LEN (16) | ||
38 | |||
39 | void gcm_init_p8(u128 htable[16], const u64 Xi[2]); | 36 | void gcm_init_p8(u128 htable[16], const u64 Xi[2]); |
40 | void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]); | 37 | void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]); |
41 | void gcm_ghash_p8(u64 Xi[2], const u128 htable[16], | 38 | void gcm_ghash_p8(u64 Xi[2], const u128 htable[16], |
@@ -55,16 +52,11 @@ struct p8_ghash_desc_ctx { | |||
55 | 52 | ||
56 | static int p8_ghash_init_tfm(struct crypto_tfm *tfm) | 53 | static int p8_ghash_init_tfm(struct crypto_tfm *tfm) |
57 | { | 54 | { |
58 | const char *alg; | 55 | const char *alg = "ghash-generic"; |
59 | struct crypto_shash *fallback; | 56 | struct crypto_shash *fallback; |
60 | struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm); | 57 | struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm); |
61 | struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); | 58 | struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); |
62 | 59 | ||
63 | if (!(alg = crypto_tfm_alg_name(tfm))) { | ||
64 | printk(KERN_ERR "Failed to get algorithm name.\n"); | ||
65 | return -ENOENT; | ||
66 | } | ||
67 | |||
68 | fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK); | 60 | fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK); |
69 | if (IS_ERR(fallback)) { | 61 | if (IS_ERR(fallback)) { |
70 | printk(KERN_ERR | 62 | printk(KERN_ERR |
@@ -78,10 +70,18 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm) | |||
78 | crypto_shash_set_flags(fallback, | 70 | crypto_shash_set_flags(fallback, |
79 | crypto_shash_get_flags((struct crypto_shash | 71 | crypto_shash_get_flags((struct crypto_shash |
80 | *) tfm)); | 72 | *) tfm)); |
81 | ctx->fallback = fallback; | ||
82 | 73 | ||
83 | shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx) | 74 | /* Check if the descsize defined in the algorithm is still enough. */ |
84 | + crypto_shash_descsize(fallback); | 75 | if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx) |
76 | + crypto_shash_descsize(fallback)) { | ||
77 | printk(KERN_ERR | ||
78 | "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n", | ||
79 | alg, | ||
80 | shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx), | ||
81 | crypto_shash_descsize(fallback)); | ||
82 | return -EINVAL; | ||
83 | } | ||
84 | ctx->fallback = fallback; | ||
85 | 85 | ||
86 | return 0; | 86 | return 0; |
87 | } | 87 | } |
@@ -113,7 +113,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, | |||
113 | { | 113 | { |
114 | struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm)); | 114 | struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm)); |
115 | 115 | ||
116 | if (keylen != GHASH_KEY_LEN) | 116 | if (keylen != GHASH_BLOCK_SIZE) |
117 | return -EINVAL; | 117 | return -EINVAL; |
118 | 118 | ||
119 | preempt_disable(); | 119 | preempt_disable(); |
@@ -211,7 +211,8 @@ struct shash_alg p8_ghash_alg = { | |||
211 | .update = p8_ghash_update, | 211 | .update = p8_ghash_update, |
212 | .final = p8_ghash_final, | 212 | .final = p8_ghash_final, |
213 | .setkey = p8_ghash_setkey, | 213 | .setkey = p8_ghash_setkey, |
214 | .descsize = sizeof(struct p8_ghash_desc_ctx), | 214 | .descsize = sizeof(struct p8_ghash_desc_ctx) |
215 | + sizeof(struct ghash_desc_ctx), | ||
215 | .base = { | 216 | .base = { |
216 | .cra_name = "ghash", | 217 | .cra_name = "ghash", |
217 | .cra_driver_name = "p8_ghash", | 218 | .cra_driver_name = "p8_ghash", |
diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h new file mode 100644 index 000000000000..2a61c9bbab8f --- /dev/null +++ b/include/crypto/ghash.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * Common values for GHASH algorithms | ||
3 | */ | ||
4 | |||
5 | #ifndef __CRYPTO_GHASH_H__ | ||
6 | #define __CRYPTO_GHASH_H__ | ||
7 | |||
8 | #include <linux/types.h> | ||
9 | #include <crypto/gf128mul.h> | ||
10 | |||
11 | #define GHASH_BLOCK_SIZE 16 | ||
12 | #define GHASH_DIGEST_SIZE 16 | ||
13 | |||
14 | struct ghash_ctx { | ||
15 | struct gf128mul_4k *gf128; | ||
16 | }; | ||
17 | |||
18 | struct ghash_desc_ctx { | ||
19 | u8 buffer[GHASH_BLOCK_SIZE]; | ||
20 | u32 bytes; | ||
21 | }; | ||
22 | |||
23 | #endif | ||
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c index 5adbfc32242f..17a06105ccb6 100644 --- a/security/keys/encrypted-keys/encrypted.c +++ b/security/keys/encrypted-keys/encrypted.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/rcupdate.h> | 29 | #include <linux/rcupdate.h> |
30 | #include <linux/scatterlist.h> | 30 | #include <linux/scatterlist.h> |
31 | #include <linux/ctype.h> | 31 | #include <linux/ctype.h> |
32 | #include <crypto/aes.h> | ||
32 | #include <crypto/hash.h> | 33 | #include <crypto/hash.h> |
33 | #include <crypto/sha.h> | 34 | #include <crypto/sha.h> |
34 | #include <crypto/skcipher.h> | 35 | #include <crypto/skcipher.h> |
@@ -478,6 +479,7 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload, | |||
478 | struct crypto_skcipher *tfm; | 479 | struct crypto_skcipher *tfm; |
479 | struct skcipher_request *req; | 480 | struct skcipher_request *req; |
480 | unsigned int encrypted_datalen; | 481 | unsigned int encrypted_datalen; |
482 | u8 iv[AES_BLOCK_SIZE]; | ||
481 | unsigned int padlen; | 483 | unsigned int padlen; |
482 | char pad[16]; | 484 | char pad[16]; |
483 | int ret; | 485 | int ret; |
@@ -500,8 +502,8 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload, | |||
500 | sg_init_table(sg_out, 1); | 502 | sg_init_table(sg_out, 1); |
501 | sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen); | 503 | sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen); |
502 | 504 | ||
503 | skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, | 505 | memcpy(iv, epayload->iv, sizeof(iv)); |
504 | epayload->iv); | 506 | skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv); |
505 | ret = crypto_skcipher_encrypt(req); | 507 | ret = crypto_skcipher_encrypt(req); |
506 | tfm = crypto_skcipher_reqtfm(req); | 508 | tfm = crypto_skcipher_reqtfm(req); |
507 | skcipher_request_free(req); | 509 | skcipher_request_free(req); |
@@ -581,6 +583,7 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload, | |||
581 | struct crypto_skcipher *tfm; | 583 | struct crypto_skcipher *tfm; |
582 | struct skcipher_request *req; | 584 | struct skcipher_request *req; |
583 | unsigned int encrypted_datalen; | 585 | unsigned int encrypted_datalen; |
586 | u8 iv[AES_BLOCK_SIZE]; | ||
584 | char pad[16]; | 587 | char pad[16]; |
585 | int ret; | 588 | int ret; |
586 | 589 | ||
@@ -599,8 +602,8 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload, | |||
599 | epayload->decrypted_datalen); | 602 | epayload->decrypted_datalen); |
600 | sg_set_buf(&sg_out[1], pad, sizeof pad); | 603 | sg_set_buf(&sg_out[1], pad, sizeof pad); |
601 | 604 | ||
602 | skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, | 605 | memcpy(iv, epayload->iv, sizeof(iv)); |
603 | epayload->iv); | 606 | skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv); |
604 | ret = crypto_skcipher_decrypt(req); | 607 | ret = crypto_skcipher_decrypt(req); |
605 | tfm = crypto_skcipher_reqtfm(req); | 608 | tfm = crypto_skcipher_reqtfm(req); |
606 | skcipher_request_free(req); | 609 | skcipher_request_free(req); |