path: root/crypto/chainiv.c
author     Herbert Xu <herbert@gondor.apana.org.au>  2015-06-03 02:49:23 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>  2015-06-04 03:04:59 -0400
commit     65fe6740d472aee158275fd1103586dee2ffc5cb (patch)
tree       2479627ae9dc0dae8edaf8027d7a8e33279f2b23 /crypto/chainiv.c
parent     3154de71258a32040214fda174e67b975b0810ef (diff)
crypto: chainiv - Move IV seeding into init function
We currently do the IV seeding on the first givencrypt call in order to conserve entropy. However, this does not work with DRBG, which cannot be called from interrupt context. In fact, with DRBG we don't need to conserve entropy anyway. So this patch moves the seeding into the init function.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
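For context, here is a minimal userspace sketch of the pattern the patch adopts: seed the chained IV once when the context is initialised, so the per-request path only chains IVs and never has to call the RNG. This is not the kernel code itself; the names (iv_ctx, rng_get_bytes, ivgen_init, ivgen_next) are hypothetical, and rand() merely stands in for crypto_rng_get_bytes() on the default RNG.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define IVSIZE 16

struct iv_ctx {
	unsigned char iv[IVSIZE];
};

/* Stand-in for crypto_rng_get_bytes(); NOT cryptographically secure.
 * Returns 0 on success, matching kernel convention. */
static int rng_get_bytes(unsigned char *buf, size_t len)
{
	for (size_t i = 0; i < len; i++)
		buf[i] = (unsigned char)rand();
	return 0;
}

/* Seeding now happens here, once, in process context ... */
static int ivgen_init(struct iv_ctx *ctx)
{
	return rng_get_bytes(ctx->iv, IVSIZE);
}

/* ... so the per-request path only chains, never seeds. */
static void ivgen_next(struct iv_ctx *ctx, unsigned char out[IVSIZE])
{
	memcpy(out, ctx->iv, IVSIZE);
	/* In the real driver the next IV is derived from the request;
	 * a simple big-endian counter increment stands in for that here. */
	for (int i = IVSIZE - 1; i >= 0 && ++ctx->iv[i] == 0; i--)
		;
}

int main(void)
{
	struct iv_ctx ctx;
	unsigned char iv[IVSIZE];

	if (ivgen_init(&ctx))
		return 1;
	for (int n = 0; n < 3; n++) {
		ivgen_next(&ctx, iv);
		for (int i = 0; i < IVSIZE; i++)
			printf("%02x", iv[i]);
		putchar('\n');
	}
	return 0;
}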
Diffstat (limited to 'crypto/chainiv.c')
-rw-r--r--  crypto/chainiv.c  66
1 file changed, 9 insertions(+), 57 deletions(-)
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
index 63c17d5992f7..be0bd521c46f 100644
--- a/crypto/chainiv.c
+++ b/crypto/chainiv.c
@@ -80,35 +80,15 @@ unlock:
 	return err;
 }
 
-static int chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
+static int chainiv_init_common(struct crypto_tfm *tfm, char iv[])
 {
-	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-	int err = 0;
-
-	spin_lock_bh(&ctx->lock);
-	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
-	    chainiv_givencrypt_first)
-		goto unlock;
-
-	crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
-	err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv,
-				   crypto_ablkcipher_ivsize(geniv));
-
-unlock:
-	spin_unlock_bh(&ctx->lock);
+	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
 
-	if (err)
-		return err;
-
-	return chainiv_givencrypt(req);
-}
-
-static int chainiv_init_common(struct crypto_tfm *tfm)
-{
 	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
 
-	return skcipher_geniv_init(tfm);
+	return crypto_rng_get_bytes(crypto_default_rng, iv,
+				    crypto_ablkcipher_ivsize(geniv)) ?:
+	       skcipher_geniv_init(tfm);
 }
 
 static int chainiv_init(struct crypto_tfm *tfm)
@@ -117,7 +97,7 @@ static int chainiv_init(struct crypto_tfm *tfm)
 
 	spin_lock_init(&ctx->lock);
 
-	return chainiv_init_common(tfm);
+	return chainiv_init_common(tfm, ctx->iv);
 }
 
 static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
@@ -205,33 +185,6 @@ postpone:
 	return async_chainiv_postpone_request(req);
 }
 
-static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
-{
-	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-	int err = 0;
-
-	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
-		goto out;
-
-	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
-	    async_chainiv_givencrypt_first)
-		goto unlock;
-
-	crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt;
-	err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv,
-				   crypto_ablkcipher_ivsize(geniv));
-
-unlock:
-	clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
-
-	if (err)
-		return err;
-
-out:
-	return async_chainiv_givencrypt(req);
-}
-
 static void async_chainiv_do_postponed(struct work_struct *work)
 {
 	struct async_chainiv_ctx *ctx = container_of(work,
@@ -270,7 +223,7 @@ static int async_chainiv_init(struct crypto_tfm *tfm)
 	crypto_init_queue(&ctx->queue, 100);
 	INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);
 
-	return chainiv_init_common(tfm);
+	return chainiv_init_common(tfm, ctx->iv);
 }
 
 static void async_chainiv_exit(struct crypto_tfm *tfm)
@@ -302,7 +255,7 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
 	if (IS_ERR(inst))
 		goto put_rng;
 
-	inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt_first;
+	inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt;
 
 	inst->alg.cra_init = chainiv_init;
 	inst->alg.cra_exit = skcipher_geniv_exit;
@@ -312,8 +265,7 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
 	if (!crypto_requires_sync(algt->type, algt->mask)) {
 		inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;
 
-		inst->alg.cra_ablkcipher.givencrypt =
-			async_chainiv_givencrypt_first;
+		inst->alg.cra_ablkcipher.givencrypt = async_chainiv_givencrypt;
 
 		inst->alg.cra_init = async_chainiv_init;
 		inst->alg.cra_exit = async_chainiv_exit;
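A note on the new chainiv_init_common(): it uses the GNU C conditional-operator extension "x ?: y", which yields x when x is nonzero and y otherwise, evaluating x only once. A minimal standalone sketch of that idiom follows (step_a and step_b are made-up names; build with gcc or clang, since "?:" with an omitted middle operand is a GNU extension):

#include <stdio.h>

static int step_a(void) { return 0; }	/* 0 means success, as in kernel code */
static int step_b(void) { return 0; }

static int init_both(void)
{
	/* Equivalent to: int err = step_a(); return err ? err : step_b();
	 * i.e. stop at the first step that fails. */
	return step_a() ?: step_b();
}

int main(void)
{
	printf("init_both() = %d\n", init_both());
	return 0;
}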