Diffstat (limited to 'crypto')
-rw-r--r--   crypto/Kconfig  |   1
-rw-r--r--   crypto/cryptd.c | 243
2 files changed, 244 insertions, 0 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 5963a9566fe2..795e31c8aec2 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -65,6 +65,7 @@ config CRYPTO_NULL
 config CRYPTO_CRYPTD
         tristate "Software async crypto daemon"
         select CRYPTO_BLKCIPHER
+        select CRYPTO_HASH
         select CRYPTO_MANAGER
         help
           This is a generic software asynchronous crypto daemon that
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index f38e1473b724..d3ecd7e73b7e 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -45,6 +45,13 @@ struct cryptd_blkcipher_request_ctx {
         crypto_completion_t complete;
 };
 
+struct cryptd_hash_ctx {
+        struct crypto_hash *child;
+};
+
+struct cryptd_hash_request_ctx {
+        crypto_completion_t complete;
+};
 
 static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
 {
@@ -260,6 +267,240 @@ out_put_alg:
         return inst;
 }
 
+static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
+{
+        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
+        struct crypto_spawn *spawn = &ictx->spawn;
+        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+        struct crypto_hash *cipher;
+
+        cipher = crypto_spawn_hash(spawn);
+        if (IS_ERR(cipher))
+                return PTR_ERR(cipher);
+
+        ctx->child = cipher;
+        tfm->crt_ahash.reqsize =
+                sizeof(struct cryptd_hash_request_ctx);
+        return 0;
+}
+
+static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
+{
+        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+        struct cryptd_state *state = cryptd_get_state(tfm);
+        int active;
+
+        mutex_lock(&state->mutex);
+        active = ahash_tfm_in_queue(&state->queue,
+                                    __crypto_ahash_cast(tfm));
+        mutex_unlock(&state->mutex);
+
+        BUG_ON(active);
+
+        crypto_free_hash(ctx->child);
+}
+
+static int cryptd_hash_setkey(struct crypto_ahash *parent,
+                              const u8 *key, unsigned int keylen)
+{
+        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
+        struct crypto_hash *child = ctx->child;
+        int err;
+
+        crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+        crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) &
+                              CRYPTO_TFM_REQ_MASK);
+        err = crypto_hash_setkey(child, key, keylen);
+        crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) &
+                               CRYPTO_TFM_RES_MASK);
+        return err;
+}
+
+static int cryptd_hash_enqueue(struct ahash_request *req,
+                               crypto_completion_t complete)
+{
+        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+        struct cryptd_state *state =
+                cryptd_get_state(crypto_ahash_tfm(tfm));
+        int err;
+
+        rctx->complete = req->base.complete;
+        req->base.complete = complete;
+
+        spin_lock_bh(&state->lock);
+        err = ahash_enqueue_request(&state->queue, req);
+        spin_unlock_bh(&state->lock);
+
+        wake_up_process(state->task);
+        return err;
+}
+
+static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
+{
+        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+        struct crypto_hash *child = ctx->child;
+        struct ahash_request *req = ahash_request_cast(req_async);
+        struct cryptd_hash_request_ctx *rctx;
+        struct hash_desc desc;
+
+        rctx = ahash_request_ctx(req);
+
+        if (unlikely(err == -EINPROGRESS))
+                goto out;
+
+        desc.tfm = child;
+        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+        err = crypto_hash_crt(child)->init(&desc);
+
+        req->base.complete = rctx->complete;
+
+out:
+        local_bh_disable();
+        rctx->complete(&req->base, err);
+        local_bh_enable();
+}
+
+static int cryptd_hash_init_enqueue(struct ahash_request *req)
+{
+        return cryptd_hash_enqueue(req, cryptd_hash_init);
+}
+
+static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
+{
+        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+        struct crypto_hash *child = ctx->child;
+        struct ahash_request *req = ahash_request_cast(req_async);
+        struct cryptd_hash_request_ctx *rctx;
+        struct hash_desc desc;
+
+        rctx = ahash_request_ctx(req);
+
+        if (unlikely(err == -EINPROGRESS))
+                goto out;
+
+        desc.tfm = child;
+        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+        err = crypto_hash_crt(child)->update(&desc,
+                                             req->src,
+                                             req->nbytes);
+
+        req->base.complete = rctx->complete;
+
+out:
+        local_bh_disable();
+        rctx->complete(&req->base, err);
+        local_bh_enable();
+}
+
+static int cryptd_hash_update_enqueue(struct ahash_request *req)
+{
+        return cryptd_hash_enqueue(req, cryptd_hash_update);
+}
+
+static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
+{
+        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+        struct crypto_hash *child = ctx->child;
+        struct ahash_request *req = ahash_request_cast(req_async);
+        struct cryptd_hash_request_ctx *rctx;
+        struct hash_desc desc;
+
+        rctx = ahash_request_ctx(req);
+
+        if (unlikely(err == -EINPROGRESS))
+                goto out;
+
+        desc.tfm = child;
+        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+        err = crypto_hash_crt(child)->final(&desc, req->result);
+
+        req->base.complete = rctx->complete;
+
+out:
+        local_bh_disable();
+        rctx->complete(&req->base, err);
+        local_bh_enable();
+}
+
+static int cryptd_hash_final_enqueue(struct ahash_request *req)
+{
+        return cryptd_hash_enqueue(req, cryptd_hash_final);
+}
+
+static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
+{
+        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+        struct crypto_hash *child = ctx->child;
+        struct ahash_request *req = ahash_request_cast(req_async);
+        struct cryptd_hash_request_ctx *rctx;
+        struct hash_desc desc;
+
+        rctx = ahash_request_ctx(req);
+
+        if (unlikely(err == -EINPROGRESS))
+                goto out;
+
+        desc.tfm = child;
+        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+        err = crypto_hash_crt(child)->digest(&desc,
+                                             req->src,
+                                             req->nbytes,
+                                             req->result);
+
+        req->base.complete = rctx->complete;
+
+out:
+        local_bh_disable();
+        rctx->complete(&req->base, err);
+        local_bh_enable();
+}
+
+static int cryptd_hash_digest_enqueue(struct ahash_request *req)
+{
+        return cryptd_hash_enqueue(req, cryptd_hash_digest);
+}
+
+static struct crypto_instance *cryptd_alloc_hash(
+        struct rtattr **tb, struct cryptd_state *state)
+{
+        struct crypto_instance *inst;
+        struct crypto_alg *alg;
+
+        alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
+                                  CRYPTO_ALG_TYPE_HASH_MASK);
+        if (IS_ERR(alg))
+                return ERR_PTR(PTR_ERR(alg));
+
+        inst = cryptd_alloc_instance(alg, state);
+        if (IS_ERR(inst))
+                goto out_put_alg;
+
+        inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
+        inst->alg.cra_type = &crypto_ahash_type;
+
+        inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize;
+        inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
+
+        inst->alg.cra_init = cryptd_hash_init_tfm;
+        inst->alg.cra_exit = cryptd_hash_exit_tfm;
+
+        inst->alg.cra_ahash.init = cryptd_hash_init_enqueue;
+        inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
+        inst->alg.cra_ahash.final = cryptd_hash_final_enqueue;
+        inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
+        inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;
+
+out_put_alg:
+        crypto_mod_put(alg);
+        return inst;
+}
+
 static struct cryptd_state state;
 
 static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
@@ -273,6 +514,8 @@ static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
         switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
         case CRYPTO_ALG_TYPE_BLKCIPHER:
                 return cryptd_alloc_blkcipher(tb, &state);
+        case CRYPTO_ALG_TYPE_DIGEST:
+                return cryptd_alloc_hash(tb, &state);
         }
 
         return ERR_PTR(-EINVAL);
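
For context on how this addition is exercised: a caller reaches the queued hash path through the regular asynchronous hash (ahash) API by requesting a "cryptd(...)" instance, and the completion routine saved by cryptd_hash_enqueue() is invoked from the cryptd worker thread once the wrapped synchronous hash has run. The fragment below is a minimal illustrative sketch, not part of the patch; the "cryptd(sha1)" name, the cryptd_demo_* identifiers, and the completion plumbing are assumptions chosen for the example, and error handling is abbreviated.

/*
 * Illustrative sketch only (not part of the patch): drive a cryptd-wrapped
 * hash through the asynchronous ahash interface and wait for the daemon
 * to finish.  All cryptd_demo_* names are hypothetical.
 */
#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct cryptd_demo_result {
        struct completion done;
        int err;
};

/* Completion callback; for cryptd it runs once the daemon has hashed the data. */
static void cryptd_demo_complete(struct crypto_async_request *req, int err)
{
        struct cryptd_demo_result *res = req->data;

        if (err == -EINPROGRESS)
                return;         /* backlogged request was only moved to the queue */

        res->err = err;
        complete(&res->done);
}

static int cryptd_demo_digest(const u8 *buf, unsigned int len, u8 *out)
{
        struct cryptd_demo_result res;
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        int err;

        /* Ask the crypto API for the async wrapper around software sha1. */
        tfm = crypto_alloc_ahash("cryptd(sha1)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        init_completion(&res.done);
        sg_init_one(&sg, buf, len);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   cryptd_demo_complete, &res);
        ahash_request_set_crypt(req, &sg, out, len);

        /* cryptd queues the request and normally returns -EINPROGRESS. */
        err = crypto_ahash_digest(req);
        if (err == -EINPROGRESS || err == -EBUSY) {
                wait_for_completion(&res.done);
                err = res.err;
        }

        ahash_request_free(req);
out_free_tfm:
        crypto_free_ahash(tfm);
        return err;
}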