diff options
author | Tadeusz Struk <tadeusz.struk@intel.com> | 2016-03-11 14:50:33 -0500 |
---|---|---|
committer | Herbert Xu <herbert@gondor.apana.org.au> | 2016-04-05 08:35:41 -0400 |
commit | 83094e5e9e49f893403d6fad20c9c06c980c2d1b (patch) | |
tree | 99040fa5f985828f748b609c60d26863f709854f /crypto/algif_aead.c | |
parent | 47cd30608f3fc3dbb4fdf37300baca911e2dde34 (diff) |
crypto: af_alg - add async support to algif_aead
Following the async change for algif_skcipher
this patch adds similar async read to algif_aead.
changes in v3:
- add call to aead_reset_ctx directly from aead_put_sgl instead of calling
them separately one after the other
- remove wait from aead_sock_destruct function as it is not needed
when sock_hold is used
changes in v2:
- change internal data structures from fixed size arrays, limited to
RSGL_MAX_ENTRIES, to linked list model with no artificial limitation.
- use sock_kmalloc instead of kmalloc for memory allocation
- use sock_hold instead of separate atomic ctr to wait for outstanding
request
Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Diffstat (limited to 'crypto/algif_aead.c')
-rw-r--r-- | crypto/algif_aead.c | 268 |
1 file changed, 237 insertions(+), 31 deletions(-)
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 147069c9afd0..80a0f1a78551 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c | |||
@@ -13,7 +13,7 @@ | |||
13 | * any later version. | 13 | * any later version. |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <crypto/aead.h> | 16 | #include <crypto/internal/aead.h> |
17 | #include <crypto/scatterwalk.h> | 17 | #include <crypto/scatterwalk.h> |
18 | #include <crypto/if_alg.h> | 18 | #include <crypto/if_alg.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
@@ -29,15 +29,24 @@ struct aead_sg_list { | |||
29 | struct scatterlist sg[ALG_MAX_PAGES]; | 29 | struct scatterlist sg[ALG_MAX_PAGES]; |
30 | }; | 30 | }; |
31 | 31 | ||
32 | struct aead_async_rsgl { | ||
33 | struct af_alg_sgl sgl; | ||
34 | struct list_head list; | ||
35 | }; | ||
36 | |||
37 | struct aead_async_req { | ||
38 | struct scatterlist *tsgl; | ||
39 | struct aead_async_rsgl first_rsgl; | ||
40 | struct list_head list; | ||
41 | struct kiocb *iocb; | ||
42 | unsigned int tsgls; | ||
43 | char iv[]; | ||
44 | }; | ||
45 | |||
32 | struct aead_ctx { | 46 | struct aead_ctx { |
33 | struct aead_sg_list tsgl; | 47 | struct aead_sg_list tsgl; |
34 | /* | 48 | struct aead_async_rsgl first_rsgl; |
35 | * RSGL_MAX_ENTRIES is an artificial limit where user space at maximum | 49 | struct list_head list; |
36 | * can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES | ||
37 | * pages | ||
38 | */ | ||
39 | #define RSGL_MAX_ENTRIES ALG_MAX_PAGES | ||
40 | struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES]; | ||
41 | 50 | ||
42 | void *iv; | 51 | void *iv; |
43 | 52 | ||
@@ -75,6 +84,17 @@ static inline bool aead_sufficient_data(struct aead_ctx *ctx) | |||
75 | return ctx->used >= ctx->aead_assoclen + as; | 84 | return ctx->used >= ctx->aead_assoclen + as; |
76 | } | 85 | } |
77 | 86 | ||
87 | static void aead_reset_ctx(struct aead_ctx *ctx) | ||
88 | { | ||
89 | struct aead_sg_list *sgl = &ctx->tsgl; | ||
90 | |||
91 | sg_init_table(sgl->sg, ALG_MAX_PAGES); | ||
92 | sgl->cur = 0; | ||
93 | ctx->used = 0; | ||
94 | ctx->more = 0; | ||
95 | ctx->merge = 0; | ||
96 | } | ||
97 | |||
78 | static void aead_put_sgl(struct sock *sk) | 98 | static void aead_put_sgl(struct sock *sk) |
79 | { | 99 | { |
80 | struct alg_sock *ask = alg_sk(sk); | 100 | struct alg_sock *ask = alg_sk(sk); |
@@ -90,11 +110,7 @@ static void aead_put_sgl(struct sock *sk) | |||
90 | put_page(sg_page(sg + i)); | 110 | put_page(sg_page(sg + i)); |
91 | sg_assign_page(sg + i, NULL); | 111 | sg_assign_page(sg + i, NULL); |
92 | } | 112 | } |
93 | sg_init_table(sg, ALG_MAX_PAGES); | 113 | aead_reset_ctx(ctx); |
94 | sgl->cur = 0; | ||
95 | ctx->used = 0; | ||
96 | ctx->more = 0; | ||
97 | ctx->merge = 0; | ||
98 | } | 114 | } |
99 | 115 | ||
100 | static void aead_wmem_wakeup(struct sock *sk) | 116 | static void aead_wmem_wakeup(struct sock *sk) |
@@ -349,23 +365,188 @@ unlock: | |||
349 | return err ?: size; | 365 | return err ?: size; |
350 | } | 366 | } |
351 | 367 | ||
352 | static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, int flags) | 368 | #define GET_ASYM_REQ(req, tfm) (struct aead_async_req *) \ |
369 | ((char *)req + sizeof(struct aead_request) + \ | ||
370 | crypto_aead_reqsize(tfm)) | ||
371 | |||
372 | #define GET_REQ_SIZE(tfm) sizeof(struct aead_async_req) + \ | ||
373 | crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \ | ||
374 | sizeof(struct aead_request) | ||
375 | |||
376 | static void aead_async_cb(struct crypto_async_request *_req, int err) | ||
377 | { | ||
378 | struct sock *sk = _req->data; | ||
379 | struct alg_sock *ask = alg_sk(sk); | ||
380 | struct aead_ctx *ctx = ask->private; | ||
381 | struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req); | ||
382 | struct aead_request *req = aead_request_cast(_req); | ||
383 | struct aead_async_req *areq = GET_ASYM_REQ(req, tfm); | ||
384 | struct scatterlist *sg = areq->tsgl; | ||
385 | struct aead_async_rsgl *rsgl; | ||
386 | struct kiocb *iocb = areq->iocb; | ||
387 | unsigned int i, reqlen = GET_REQ_SIZE(tfm); | ||
388 | |||
389 | list_for_each_entry(rsgl, &areq->list, list) { | ||
390 | af_alg_free_sg(&rsgl->sgl); | ||
391 | if (rsgl != &areq->first_rsgl) | ||
392 | sock_kfree_s(sk, rsgl, sizeof(*rsgl)); | ||
393 | } | ||
394 | |||
395 | for (i = 0; i < areq->tsgls; i++) | ||
396 | put_page(sg_page(sg + i)); | ||
397 | |||
398 | sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls); | ||
399 | sock_kfree_s(sk, req, reqlen); | ||
400 | __sock_put(sk); | ||
401 | iocb->ki_complete(iocb, err, err); | ||
402 | } | ||
403 | |||
404 | static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg, | ||
405 | int flags) | ||
406 | { | ||
407 | struct sock *sk = sock->sk; | ||
408 | struct alg_sock *ask = alg_sk(sk); | ||
409 | struct aead_ctx *ctx = ask->private; | ||
410 | struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req); | ||
411 | struct aead_async_req *areq; | ||
412 | struct aead_request *req = NULL; | ||
413 | struct aead_sg_list *sgl = &ctx->tsgl; | ||
414 | struct aead_async_rsgl *last_rsgl = NULL, *rsgl; | ||
415 | unsigned int as = crypto_aead_authsize(tfm); | ||
416 | unsigned int i, reqlen = GET_REQ_SIZE(tfm); | ||
417 | int err = -ENOMEM; | ||
418 | unsigned long used; | ||
419 | size_t outlen; | ||
420 | size_t usedpages = 0; | ||
421 | |||
422 | lock_sock(sk); | ||
423 | if (ctx->more) { | ||
424 | err = aead_wait_for_data(sk, flags); | ||
425 | if (err) | ||
426 | goto unlock; | ||
427 | } | ||
428 | |||
429 | used = ctx->used; | ||
430 | outlen = used; | ||
431 | |||
432 | if (!aead_sufficient_data(ctx)) | ||
433 | goto unlock; | ||
434 | |||
435 | req = sock_kmalloc(sk, reqlen, GFP_KERNEL); | ||
436 | if (unlikely(!req)) | ||
437 | goto unlock; | ||
438 | |||
439 | areq = GET_ASYM_REQ(req, tfm); | ||
440 | memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl)); | ||
441 | INIT_LIST_HEAD(&areq->list); | ||
442 | areq->iocb = msg->msg_iocb; | ||
443 | memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm)); | ||
444 | aead_request_set_tfm(req, tfm); | ||
445 | aead_request_set_ad(req, ctx->aead_assoclen); | ||
446 | aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, | ||
447 | aead_async_cb, sk); | ||
448 | used -= ctx->aead_assoclen + (ctx->enc ? as : 0); | ||
449 | |||
450 | /* take over all tx sgls from ctx */ | ||
451 | areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur, | ||
452 | GFP_KERNEL); | ||
453 | if (unlikely(!areq->tsgl)) | ||
454 | goto free; | ||
455 | |||
456 | sg_init_table(areq->tsgl, sgl->cur); | ||
457 | for (i = 0; i < sgl->cur; i++) | ||
458 | sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]), | ||
459 | sgl->sg[i].length, sgl->sg[i].offset); | ||
460 | |||
461 | areq->tsgls = sgl->cur; | ||
462 | |||
463 | /* create rx sgls */ | ||
464 | while (iov_iter_count(&msg->msg_iter)) { | ||
465 | size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter), | ||
466 | (outlen - usedpages)); | ||
467 | |||
468 | if (list_empty(&areq->list)) { | ||
469 | rsgl = &areq->first_rsgl; | ||
470 | |||
471 | } else { | ||
472 | rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL); | ||
473 | if (unlikely(!rsgl)) { | ||
474 | err = -ENOMEM; | ||
475 | goto free; | ||
476 | } | ||
477 | } | ||
478 | rsgl->sgl.npages = 0; | ||
479 | list_add_tail(&rsgl->list, &areq->list); | ||
480 | |||
481 | /* make one iovec available as scatterlist */ | ||
482 | err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen); | ||
483 | if (err < 0) | ||
484 | goto free; | ||
485 | |||
486 | usedpages += err; | ||
487 | |||
488 | /* chain the new scatterlist with previous one */ | ||
489 | if (last_rsgl) | ||
490 | af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl); | ||
491 | |||
492 | last_rsgl = rsgl; | ||
493 | |||
494 | /* we do not need more iovecs as we have sufficient memory */ | ||
495 | if (outlen <= usedpages) | ||
496 | break; | ||
497 | |||
498 | iov_iter_advance(&msg->msg_iter, err); | ||
499 | } | ||
500 | err = -EINVAL; | ||
501 | /* ensure output buffer is sufficiently large */ | ||
502 | if (usedpages < outlen) | ||
503 | goto free; | ||
504 | |||
505 | aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used, | ||
506 | areq->iv); | ||
507 | err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); | ||
508 | if (err) { | ||
509 | if (err == -EINPROGRESS) { | ||
510 | sock_hold(sk); | ||
511 | err = -EIOCBQUEUED; | ||
512 | aead_reset_ctx(ctx); | ||
513 | goto unlock; | ||
514 | } else if (err == -EBADMSG) { | ||
515 | aead_put_sgl(sk); | ||
516 | } | ||
517 | goto free; | ||
518 | } | ||
519 | aead_put_sgl(sk); | ||
520 | |||
521 | free: | ||
522 | list_for_each_entry(rsgl, &areq->list, list) { | ||
523 | af_alg_free_sg(&rsgl->sgl); | ||
524 | if (rsgl != &areq->first_rsgl) | ||
525 | sock_kfree_s(sk, rsgl, sizeof(*rsgl)); | ||
526 | } | ||
527 | if (areq->tsgl) | ||
528 | sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls); | ||
529 | if (req) | ||
530 | sock_kfree_s(sk, req, reqlen); | ||
531 | unlock: | ||
532 | aead_wmem_wakeup(sk); | ||
533 | release_sock(sk); | ||
534 | return err ? err : outlen; | ||
535 | } | ||
536 | |||
537 | static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags) | ||
353 | { | 538 | { |
354 | struct sock *sk = sock->sk; | 539 | struct sock *sk = sock->sk; |
355 | struct alg_sock *ask = alg_sk(sk); | 540 | struct alg_sock *ask = alg_sk(sk); |
356 | struct aead_ctx *ctx = ask->private; | 541 | struct aead_ctx *ctx = ask->private; |
357 | unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req)); | 542 | unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req)); |
358 | struct aead_sg_list *sgl = &ctx->tsgl; | 543 | struct aead_sg_list *sgl = &ctx->tsgl; |
359 | unsigned int i = 0; | 544 | struct aead_async_rsgl *last_rsgl = NULL; |
545 | struct aead_async_rsgl *rsgl, *tmp; | ||
360 | int err = -EINVAL; | 546 | int err = -EINVAL; |
361 | unsigned long used = 0; | 547 | unsigned long used = 0; |
362 | size_t outlen = 0; | 548 | size_t outlen = 0; |
363 | size_t usedpages = 0; | 549 | size_t usedpages = 0; |
364 | unsigned int cnt = 0; | ||
365 | |||
366 | /* Limit number of IOV blocks to be accessed below */ | ||
367 | if (msg->msg_iter.nr_segs > RSGL_MAX_ENTRIES) | ||
368 | return -ENOMSG; | ||
369 | 550 | ||
370 | lock_sock(sk); | 551 | lock_sock(sk); |
371 | 552 | ||
@@ -417,21 +598,33 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, | |||
417 | size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter), | 598 | size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter), |
418 | (outlen - usedpages)); | 599 | (outlen - usedpages)); |
419 | 600 | ||
601 | if (list_empty(&ctx->list)) { | ||
602 | rsgl = &ctx->first_rsgl; | ||
603 | } else { | ||
604 | rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL); | ||
605 | if (unlikely(!rsgl)) { | ||
606 | err = -ENOMEM; | ||
607 | goto unlock; | ||
608 | } | ||
609 | } | ||
610 | rsgl->sgl.npages = 0; | ||
611 | list_add_tail(&rsgl->list, &ctx->list); | ||
612 | |||
420 | /* make one iovec available as scatterlist */ | 613 | /* make one iovec available as scatterlist */ |
421 | err = af_alg_make_sg(&ctx->rsgl[cnt], &msg->msg_iter, | 614 | err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen); |
422 | seglen); | ||
423 | if (err < 0) | 615 | if (err < 0) |
424 | goto unlock; | 616 | goto unlock; |
425 | usedpages += err; | 617 | usedpages += err; |
426 | /* chain the new scatterlist with previous one */ | 618 | /* chain the new scatterlist with previous one */ |
427 | if (cnt) | 619 | if (last_rsgl) |
428 | af_alg_link_sg(&ctx->rsgl[cnt-1], &ctx->rsgl[cnt]); | 620 | af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl); |
621 | |||
622 | last_rsgl = rsgl; | ||
429 | 623 | ||
430 | /* we do not need more iovecs as we have sufficient memory */ | 624 | /* we do not need more iovecs as we have sufficient memory */ |
431 | if (outlen <= usedpages) | 625 | if (outlen <= usedpages) |
432 | break; | 626 | break; |
433 | iov_iter_advance(&msg->msg_iter, err); | 627 | iov_iter_advance(&msg->msg_iter, err); |
434 | cnt++; | ||
435 | } | 628 | } |
436 | 629 | ||
437 | err = -EINVAL; | 630 | err = -EINVAL; |
@@ -440,8 +633,7 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, | |||
440 | goto unlock; | 633 | goto unlock; |
441 | 634 | ||
442 | sg_mark_end(sgl->sg + sgl->cur - 1); | 635 | sg_mark_end(sgl->sg + sgl->cur - 1); |
443 | 636 | aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg, | |
444 | aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->rsgl[0].sg, | ||
445 | used, ctx->iv); | 637 | used, ctx->iv); |
446 | aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen); | 638 | aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen); |
447 | 639 | ||
@@ -454,23 +646,35 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, | |||
454 | /* EBADMSG implies a valid cipher operation took place */ | 646 | /* EBADMSG implies a valid cipher operation took place */ |
455 | if (err == -EBADMSG) | 647 | if (err == -EBADMSG) |
456 | aead_put_sgl(sk); | 648 | aead_put_sgl(sk); |
649 | |||
457 | goto unlock; | 650 | goto unlock; |
458 | } | 651 | } |
459 | 652 | ||
460 | aead_put_sgl(sk); | 653 | aead_put_sgl(sk); |
461 | |||
462 | err = 0; | 654 | err = 0; |
463 | 655 | ||
464 | unlock: | 656 | unlock: |
465 | for (i = 0; i < cnt; i++) | 657 | list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) { |
466 | af_alg_free_sg(&ctx->rsgl[i]); | 658 | af_alg_free_sg(&rsgl->sgl); |
467 | 659 | if (rsgl != &ctx->first_rsgl) | |
660 | sock_kfree_s(sk, rsgl, sizeof(*rsgl)); | ||
661 | list_del(&rsgl->list); | ||
662 | } | ||
663 | INIT_LIST_HEAD(&ctx->list); | ||
468 | aead_wmem_wakeup(sk); | 664 | aead_wmem_wakeup(sk); |
469 | release_sock(sk); | 665 | release_sock(sk); |
470 | 666 | ||
471 | return err ? err : outlen; | 667 | return err ? err : outlen; |
472 | } | 668 | } |
473 | 669 | ||
670 | static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, | ||
671 | int flags) | ||
672 | { | ||
673 | return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ? | ||
674 | aead_recvmsg_async(sock, msg, flags) : | ||
675 | aead_recvmsg_sync(sock, msg, flags); | ||
676 | } | ||
677 | |||
474 | static unsigned int aead_poll(struct file *file, struct socket *sock, | 678 | static unsigned int aead_poll(struct file *file, struct socket *sock, |
475 | poll_table *wait) | 679 | poll_table *wait) |
476 | { | 680 | { |
@@ -540,6 +744,7 @@ static void aead_sock_destruct(struct sock *sk) | |||
540 | unsigned int ivlen = crypto_aead_ivsize( | 744 | unsigned int ivlen = crypto_aead_ivsize( |
541 | crypto_aead_reqtfm(&ctx->aead_req)); | 745 | crypto_aead_reqtfm(&ctx->aead_req)); |
542 | 746 | ||
747 | WARN_ON(atomic_read(&sk->sk_refcnt) != 0); | ||
543 | aead_put_sgl(sk); | 748 | aead_put_sgl(sk); |
544 | sock_kzfree_s(sk, ctx->iv, ivlen); | 749 | sock_kzfree_s(sk, ctx->iv, ivlen); |
545 | sock_kfree_s(sk, ctx, ctx->len); | 750 | sock_kfree_s(sk, ctx, ctx->len); |
@@ -574,6 +779,7 @@ static int aead_accept_parent(void *private, struct sock *sk) | |||
574 | ctx->aead_assoclen = 0; | 779 | ctx->aead_assoclen = 0; |
575 | af_alg_init_completion(&ctx->completion); | 780 | af_alg_init_completion(&ctx->completion); |
576 | sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES); | 781 | sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES); |
782 | INIT_LIST_HEAD(&ctx->list); | ||
577 | 783 | ||
578 | ask->private = ctx; | 784 | ask->private = ctx; |
579 | 785 | ||