author     Stephan Mueller <smueller@chronox.de>     2017-08-02 01:56:19 -0400
committer  Herbert Xu <herbert@gondor.apana.org.au>  2017-08-09 08:18:32 -0400
commit     2d97591ef43d0587be22ad1b0d758d6df4999a0b (patch)
tree       e43adcbf67987557e3d1955f3496666b9799a486
parent     a92f7af3854ce6b80a4cd7e3df6148663f15671b (diff)
crypto: af_alg - consolidation of duplicate code
Consolidate the following data structures:

  skcipher_async_req, aead_async_req -> af_alg_async_req
  skcipher_rsgl,      aead_rsgl      -> af_alg_rsgl
  skcipher_tsgl,      aead_tsgl      -> af_alg_tsgl
  skcipher_ctx,       aead_ctx       -> af_alg_ctx

Consolidate the following functions:

  skcipher_sndbuf,     aead_sndbuf     -> af_alg_sndbuf
  skcipher_writable,   aead_writable   -> af_alg_writable
  skcipher_rcvbuf,     aead_rcvbuf     -> af_alg_rcvbuf
  skcipher_readable,   aead_readable   -> af_alg_readable
  aead_alloc_tsgl,     skcipher_alloc_tsgl     -> af_alg_alloc_tsgl
  aead_count_tsgl,     skcipher_count_tsgl     -> af_alg_count_tsgl
  aead_pull_tsgl,      skcipher_pull_tsgl      -> af_alg_pull_tsgl
  aead_free_areq_sgls, skcipher_free_areq_sgls -> af_alg_free_areq_sgls
  aead_wait_for_wmem,  skcipher_wait_for_wmem  -> af_alg_wait_for_wmem
  aead_wmem_wakeup,    skcipher_wmem_wakeup    -> af_alg_wmem_wakeup
  aead_wait_for_data,  skcipher_wait_for_data  -> af_alg_wait_for_data
  aead_data_wakeup,    skcipher_data_wakeup    -> af_alg_data_wakeup
  aead_sendmsg,        skcipher_sendmsg        -> af_alg_sendmsg
  aead_sendpage,       skcipher_sendpage       -> af_alg_sendpage
  aead_async_cb,       skcipher_async_cb       -> af_alg_async_cb
  aead_poll,           skcipher_poll           -> af_alg_poll

Split out the following common code from recvmsg:

  af_alg_alloc_areq: allocation of the request data structure for the
  cipher operation

  af_alg_get_rsgl: creation of the RX SGL anchored in the request data
  structure

The following implementation changes, which do not affect functionality,
were applied to synchronize the slightly different code bases of
algif_skcipher and algif_aead:

The wakeup in af_alg_wait_for_data is triggered when either more data is
received or the indicator that more data is to be expected is released.
The first is triggered by user space, the second by the kernel upon
finishing the processing of data (i.e. the kernel is ready for more).

af_alg_sendmsg uses size_t in the min_t calculation that obtains len. The
return code determination is consistent with algif_skcipher. The scope of
the variable i is reduced and its type switched from int to unsigned int,
both to match algif_aead.

af_alg_sendpage does not contain the superfluous err = 0 from
aead_sendpage.

af_alg_async_cb requires the number of output bytes to be stored in
areq->outlen before the AIO callback is triggered.

POLLIN / POLLRDNORM is now set when either no more data is expected or
the kernel has been supplied with data. This is consistent with the
wakeup from sleep when the kernel waits for data.

The request data structure is extended by the field last_rsgl, which
points to the last RX SGL list entry. This helps the recvmsg
implementations chain the RX SGL to other SG(L)s if needed. It is
currently used by algif_aead, which chains the tag SGL to the RX SGL
during decryption.

Signed-off-by: Stephan Mueller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
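For orientation, the consolidated handlers sit behind the user-facing
AF_ALG socket API. A minimal user-space sketch of the flow they implement
(bind a transform, push data through sendmsg(), read the result back),
assuming a kernel with algif_skcipher and cbc(aes) available; error
handling is omitted and the all-zero key is for illustration only:

    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    #ifndef SOL_ALG
    #define SOL_ALG 279
    #endif

    int main(void)
    {
        struct sockaddr_alg sa = {
            .salg_family = AF_ALG,
            .salg_type   = "skcipher",   /* served by algif_skcipher */
            .salg_name   = "cbc(aes)",
        };
        char key[16] = { 0 };            /* demo key, never use */
        char cbuf[CMSG_SPACE(4) +
                  CMSG_SPACE(sizeof(struct af_alg_iv) + 16)] = { 0 };
        char pt[16] = "0123456789abcdef", ct[16];
        struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
        struct msghdr msg = {
            .msg_control    = cbuf,
            .msg_controllen = sizeof(cbuf),
            .msg_iov        = &iov,
            .msg_iovlen     = 1,
        };
        struct cmsghdr *c;
        int tfmfd, opfd;

        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
        setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
        opfd = accept(tfmfd, NULL, 0);   /* ctx socket: af_alg_sendmsg/recvmsg */

        c = CMSG_FIRSTHDR(&msg);         /* parsed by af_alg_cmsg_send() */
        c->cmsg_level = SOL_ALG;
        c->cmsg_type  = ALG_SET_OP;
        c->cmsg_len   = CMSG_LEN(4);
        *(__u32 *)CMSG_DATA(c) = ALG_OP_ENCRYPT;

        c = CMSG_NXTHDR(&msg, c);        /* ivlen is checked against ivsize */
        c->cmsg_level = SOL_ALG;
        c->cmsg_type  = ALG_SET_IV;
        c->cmsg_len   = CMSG_LEN(sizeof(struct af_alg_iv) + 16);
        ((struct af_alg_iv *)CMSG_DATA(c))->ivlen = 16;

        sendmsg(opfd, &msg, 0);          /* fills ctx->tsgl_list */
        read(opfd, ct, sizeof(ct));      /* runs the cipher, frees the TX SGL */

        close(opfd);
        close(tfmfd);
        return 0;
    }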
-rw-r--r--  crypto/af_alg.c          | 693
-rw-r--r--  crypto/algif_aead.c      | 701
-rw-r--r--  crypto/algif_skcipher.c  | 638
-rw-r--r--  include/crypto/if_alg.h  | 170
4 files changed, 940 insertions(+), 1262 deletions(-)
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 92a3d540d920..d6936c0e08d9 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -21,6 +21,7 @@
21#include <linux/module.h>
22#include <linux/net.h>
23#include <linux/rwsem.h>
24#include <linux/sched/signal.h>
25#include <linux/security.h>
26
27struct alg_type_list {
@@ -507,6 +508,698 @@ void af_alg_complete(struct crypto_async_request *req, int err)
508}
509EXPORT_SYMBOL_GPL(af_alg_complete);
510
511/**
512 * af_alg_alloc_tsgl - allocate the TX SGL
513 *
514 * @sk socket of connection to user space
515 * @return: 0 upon success, < 0 upon error
516 */
517int af_alg_alloc_tsgl(struct sock *sk)
518{
519 struct alg_sock *ask = alg_sk(sk);
520 struct af_alg_ctx *ctx = ask->private;
521 struct af_alg_tsgl *sgl;
522 struct scatterlist *sg = NULL;
523
524 sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list);
525 if (!list_empty(&ctx->tsgl_list))
526 sg = sgl->sg;
527
528 if (!sg || sgl->cur >= MAX_SGL_ENTS) {
529 sgl = sock_kmalloc(sk, sizeof(*sgl) +
530 sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
531 GFP_KERNEL);
532 if (!sgl)
533 return -ENOMEM;
534
535 sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
536 sgl->cur = 0;
537
538 if (sg)
539 sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
540
541 list_add_tail(&sgl->list, &ctx->tsgl_list);
542 }
543
544 return 0;
545}
546EXPORT_SYMBOL_GPL(af_alg_alloc_tsgl);
547
548/**
549 * af_alg_count_tsgl - Count number of TX SG entries
550 *
551 * The counting starts from the beginning of the SGL to @bytes. If
552 * an offset is provided, the counting of the SG entries starts at the offset.
553 *
554 * @sk socket of connection to user space
555 * @bytes Count the number of SG entries holding given number of bytes.
556 * @offset Start the counting of SG entries from the given offset.
557 * @return Number of TX SG entries found given the constraints
558 */
559unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
560{
561 struct alg_sock *ask = alg_sk(sk);
562 struct af_alg_ctx *ctx = ask->private;
563 struct af_alg_tsgl *sgl, *tmp;
564 unsigned int i;
565 unsigned int sgl_count = 0;
566
567 if (!bytes)
568 return 0;
569
570 list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) {
571 struct scatterlist *sg = sgl->sg;
572
573 for (i = 0; i < sgl->cur; i++) {
574 size_t bytes_count;
575
576 /* Skip offset */
577 if (offset >= sg[i].length) {
578 offset -= sg[i].length;
579 bytes -= sg[i].length;
580 continue;
581 }
582
583 bytes_count = sg[i].length - offset;
584
585 offset = 0;
586 sgl_count++;
587
588 /* If we have seen requested number of bytes, stop */
589 if (bytes_count >= bytes)
590 return sgl_count;
591
592 bytes -= bytes_count;
593 }
594 }
595
596 return sgl_count;
597}
598EXPORT_SYMBOL_GPL(af_alg_count_tsgl);
599
600/**
601 * af_alg_pull_tsgl - Release the specified buffers from TX SGL
602 *
603 * If @dst is non-null, reassign the pages to dst. The caller must release
604 * the pages. If @dst_offset is given only reassign the pages to @dst starting
605 * at the @dst_offset (byte). The caller must ensure that @dst is large
606 * enough (e.g. by using af_alg_count_tsgl with the same offset).
607 *
608 * @sk socket of connection to user space
609 * @used Number of bytes to pull from TX SGL
610 * @dst If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
611 * caller must release the buffers in dst.
612 * @dst_offset Reassign the TX SGL from the given offset. All buffers before
613 * reaching the offset are released.
614 */
615void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
616 size_t dst_offset)
617{
618 struct alg_sock *ask = alg_sk(sk);
619 struct af_alg_ctx *ctx = ask->private;
620 struct af_alg_tsgl *sgl;
621 struct scatterlist *sg;
622 unsigned int i, j;
623
624 while (!list_empty(&ctx->tsgl_list)) {
625 sgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl,
626 list);
627 sg = sgl->sg;
628
629 for (i = 0, j = 0; i < sgl->cur; i++) {
630 size_t plen = min_t(size_t, used, sg[i].length);
631 struct page *page = sg_page(sg + i);
632
633 if (!page)
634 continue;
635
636 /*
637 * Assumption: caller created af_alg_count_tsgl(len)
638 * SG entries in dst.
639 */
640 if (dst) {
641 if (dst_offset >= plen) {
642 /* discard page before offset */
643 dst_offset -= plen;
644 put_page(page);
645 } else {
646 /* reassign page to dst after offset */
647 sg_set_page(dst + j, page,
648 plen - dst_offset,
649 sg[i].offset + dst_offset);
650 dst_offset = 0;
651 j++;
652 }
653 }
654
655 sg[i].length -= plen;
656 sg[i].offset += plen;
657
658 used -= plen;
659 ctx->used -= plen;
660
661 if (sg[i].length)
662 return;
663
664 if (!dst)
665 put_page(page);
666
667 sg_assign_page(sg + i, NULL);
668 }
669
670 list_del(&sgl->list);
671 sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) *
672 (MAX_SGL_ENTS + 1));
673 }
674
675 if (!ctx->used)
676 ctx->merge = 0;
677}
678EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);
679
680/**
681 * af_alg_free_areq_sgls - Release TX and RX SGLs of the request
682 *
683 * @areq Request holding the TX and RX SGL
684 */
685void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
686{
687 struct sock *sk = areq->sk;
688 struct alg_sock *ask = alg_sk(sk);
689 struct af_alg_ctx *ctx = ask->private;
690 struct af_alg_rsgl *rsgl, *tmp;
691 struct scatterlist *tsgl;
692 struct scatterlist *sg;
693 unsigned int i;
694
695 list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
696 ctx->rcvused -= rsgl->sg_num_bytes;
697 af_alg_free_sg(&rsgl->sgl);
698 list_del(&rsgl->list);
699 if (rsgl != &areq->first_rsgl)
700 sock_kfree_s(sk, rsgl, sizeof(*rsgl));
701 }
702
703 tsgl = areq->tsgl;
704 for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
705 if (!sg_page(sg))
706 continue;
707 put_page(sg_page(sg));
708 }
709
710 if (areq->tsgl && areq->tsgl_entries)
711 sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
712}
713EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls);
714
715/**
716 * af_alg_wait_for_wmem - wait for availability of writable memory
717 *
718 * @sk socket of connection to user space
719 * @flags If MSG_DONTWAIT is set, then only report if function would sleep
720 * @return 0 when writable memory is available, < 0 upon error
721 */
722int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
723{
724 DEFINE_WAIT_FUNC(wait, woken_wake_function);
725 int err = -ERESTARTSYS;
726 long timeout;
727
728 if (flags & MSG_DONTWAIT)
729 return -EAGAIN;
730
731 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
732
733 add_wait_queue(sk_sleep(sk), &wait);
734 for (;;) {
735 if (signal_pending(current))
736 break;
737 timeout = MAX_SCHEDULE_TIMEOUT;
738 if (sk_wait_event(sk, &timeout, af_alg_writable(sk), &wait)) {
739 err = 0;
740 break;
741 }
742 }
743 remove_wait_queue(sk_sleep(sk), &wait);
744
745 return err;
746}
747EXPORT_SYMBOL_GPL(af_alg_wait_for_wmem);
748
749/**
750 * af_alg_wmem_wakeup - wakeup caller when writable memory is available
751 *
752 * @sk socket of connection to user space
753 */
754void af_alg_wmem_wakeup(struct sock *sk)
755{
756 struct socket_wq *wq;
757
758 if (!af_alg_writable(sk))
759 return;
760
761 rcu_read_lock();
762 wq = rcu_dereference(sk->sk_wq);
763 if (skwq_has_sleeper(wq))
764 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
765 POLLRDNORM |
766 POLLRDBAND);
767 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
768 rcu_read_unlock();
769}
770EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);
771
772/**
773 * af_alg_wait_for_data - wait for availability of TX data
774 *
775 * @sk socket of connection to user space
776 * @flags If MSG_DONTWAIT is set, then only report if function would sleep
777 * @return 0 when TX data is available, < 0 upon error
778 */
779int af_alg_wait_for_data(struct sock *sk, unsigned flags)
780{
781 DEFINE_WAIT_FUNC(wait, woken_wake_function);
782 struct alg_sock *ask = alg_sk(sk);
783 struct af_alg_ctx *ctx = ask->private;
784 long timeout;
785 int err = -ERESTARTSYS;
786
787 if (flags & MSG_DONTWAIT)
788 return -EAGAIN;
789
790 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
791
792 add_wait_queue(sk_sleep(sk), &wait);
793 for (;;) {
794 if (signal_pending(current))
795 break;
796 timeout = MAX_SCHEDULE_TIMEOUT;
797 if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more),
798 &wait)) {
799 err = 0;
800 break;
801 }
802 }
803 remove_wait_queue(sk_sleep(sk), &wait);
804
805 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
806
807 return err;
808}
809EXPORT_SYMBOL_GPL(af_alg_wait_for_data);
810
811/**
812 * af_alg_data_wakeup - wakeup caller when new data can be sent to kernel
813 *
814 * @sk socket of connection to user space
815 */
817void af_alg_data_wakeup(struct sock *sk)
818{
819 struct alg_sock *ask = alg_sk(sk);
820 struct af_alg_ctx *ctx = ask->private;
821 struct socket_wq *wq;
822
823 if (!ctx->used)
824 return;
825
826 rcu_read_lock();
827 wq = rcu_dereference(sk->sk_wq);
828 if (skwq_has_sleeper(wq))
829 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
830 POLLRDNORM |
831 POLLRDBAND);
832 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
833 rcu_read_unlock();
834}
835EXPORT_SYMBOL_GPL(af_alg_data_wakeup);
836
837/**
838 * af_alg_sendmsg - implementation of sendmsg system call handler
839 *
840 * The sendmsg system call handler obtains the user data and stores it
841 * in ctx->tsgl_list. This implies allocation of the required numbers of
842 * struct af_alg_tsgl.
843 *
844 * In addition, the ctx is filled with the information sent via CMSG.
845 *
846 * @sock socket of connection to user space
847 * @msg message from user space
848 * @size size of message from user space
849 * @ivsize the size of the IV for the cipher operation to verify that the
850 * user-space-provided IV has the right size
851 * @return number of bytes copied upon success, < 0 upon error
852 */
853int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
854 unsigned int ivsize)
855{
856 struct sock *sk = sock->sk;
857 struct alg_sock *ask = alg_sk(sk);
858 struct af_alg_ctx *ctx = ask->private;
859 struct af_alg_tsgl *sgl;
860 struct af_alg_control con = {};
861 long copied = 0;
862 bool enc = 0;
863 bool init = 0;
864 int err = 0;
865
866 if (msg->msg_controllen) {
867 err = af_alg_cmsg_send(msg, &con);
868 if (err)
869 return err;
870
871 init = 1;
872 switch (con.op) {
873 case ALG_OP_ENCRYPT:
874 enc = 1;
875 break;
876 case ALG_OP_DECRYPT:
877 enc = 0;
878 break;
879 default:
880 return -EINVAL;
881 }
882
883 if (con.iv && con.iv->ivlen != ivsize)
884 return -EINVAL;
885 }
886
887 lock_sock(sk);
888 if (!ctx->more && ctx->used) {
889 err = -EINVAL;
890 goto unlock;
891 }
892
893 if (init) {
894 ctx->enc = enc;
895 if (con.iv)
896 memcpy(ctx->iv, con.iv->iv, ivsize);
897
898 ctx->aead_assoclen = con.aead_assoclen;
899 }
900
901 while (size) {
902 struct scatterlist *sg;
903 size_t len = size;
904 size_t plen;
905
906 /* use the existing memory in an allocated page */
907 if (ctx->merge) {
908 sgl = list_entry(ctx->tsgl_list.prev,
909 struct af_alg_tsgl, list);
910 sg = sgl->sg + sgl->cur - 1;
911 len = min_t(size_t, len,
912 PAGE_SIZE - sg->offset - sg->length);
913
914 err = memcpy_from_msg(page_address(sg_page(sg)) +
915 sg->offset + sg->length,
916 msg, len);
917 if (err)
918 goto unlock;
919
920 sg->length += len;
921 ctx->merge = (sg->offset + sg->length) &
922 (PAGE_SIZE - 1);
923
924 ctx->used += len;
925 copied += len;
926 size -= len;
927 continue;
928 }
929
930 if (!af_alg_writable(sk)) {
931 err = af_alg_wait_for_wmem(sk, msg->msg_flags);
932 if (err)
933 goto unlock;
934 }
935
936 /* allocate a new page */
937 len = min_t(unsigned long, len, af_alg_sndbuf(sk));
938
939 err = af_alg_alloc_tsgl(sk);
940 if (err)
941 goto unlock;
942
943 sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl,
944 list);
945 sg = sgl->sg;
946 if (sgl->cur)
947 sg_unmark_end(sg + sgl->cur - 1);
948
949 do {
950 unsigned int i = sgl->cur;
951
952 plen = min_t(size_t, len, PAGE_SIZE);
953
954 sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
955 if (!sg_page(sg + i)) {
956 err = -ENOMEM;
957 goto unlock;
958 }
959
960 err = memcpy_from_msg(page_address(sg_page(sg + i)),
961 msg, plen);
962 if (err) {
963 __free_page(sg_page(sg + i));
964 sg_assign_page(sg + i, NULL);
965 goto unlock;
966 }
967
968 sg[i].length = plen;
969 len -= plen;
970 ctx->used += plen;
971 copied += plen;
972 size -= plen;
973 sgl->cur++;
974 } while (len && sgl->cur < MAX_SGL_ENTS);
975
976 if (!size)
977 sg_mark_end(sg + sgl->cur - 1);
978
979 ctx->merge = plen & (PAGE_SIZE - 1);
980 }
981
982 err = 0;
983
984 ctx->more = msg->msg_flags & MSG_MORE;
985
986unlock:
987 af_alg_data_wakeup(sk);
988 release_sock(sk);
989
990 return copied ?: err;
991}
992EXPORT_SYMBOL_GPL(af_alg_sendmsg);
993
994/**
995 * af_alg_sendpage - sendpage system call handler
996 *
997 * This is a generic implementation of sendpage to fill ctx->tsgl_list.
998 */
999ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
1000 int offset, size_t size, int flags)
1001{
1002 struct sock *sk = sock->sk;
1003 struct alg_sock *ask = alg_sk(sk);
1004 struct af_alg_ctx *ctx = ask->private;
1005 struct af_alg_tsgl *sgl;
1006 int err = -EINVAL;
1007
1008 if (flags & MSG_SENDPAGE_NOTLAST)
1009 flags |= MSG_MORE;
1010
1011 lock_sock(sk);
1012 if (!ctx->more && ctx->used)
1013 goto unlock;
1014
1015 if (!size)
1016 goto done;
1017
1018 if (!af_alg_writable(sk)) {
1019 err = af_alg_wait_for_wmem(sk, flags);
1020 if (err)
1021 goto unlock;
1022 }
1023
1024 err = af_alg_alloc_tsgl(sk);
1025 if (err)
1026 goto unlock;
1027
1028 ctx->merge = 0;
1029 sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list);
1030
1031 if (sgl->cur)
1032 sg_unmark_end(sgl->sg + sgl->cur - 1);
1033
1034 sg_mark_end(sgl->sg + sgl->cur);
1035
1036 get_page(page);
1037 sg_set_page(sgl->sg + sgl->cur, page, size, offset);
1038 sgl->cur++;
1039 ctx->used += size;
1040
1041done:
1042 ctx->more = flags & MSG_MORE;
1043
1044unlock:
1045 af_alg_data_wakeup(sk);
1046 release_sock(sk);
1047
1048 return err ?: size;
1049}
1050EXPORT_SYMBOL_GPL(af_alg_sendpage);
1051
1052/**
1053 * af_alg_async_cb - AIO callback handler
1054 *
1055 * This handler cleans up the struct af_alg_async_req upon completion of the
1056 * AIO operation.
1057 *
1058 * The number of bytes to be generated with the AIO operation must be set
1059 * in areq->outlen before the AIO callback handler is invoked.
1060 */
1061void af_alg_async_cb(struct crypto_async_request *_req, int err)
1062{
1063 struct af_alg_async_req *areq = _req->data;
1064 struct sock *sk = areq->sk;
1065 struct kiocb *iocb = areq->iocb;
1066 unsigned int resultlen;
1067
1068 lock_sock(sk);
1069
1070 /* Buffer size written by crypto operation. */
1071 resultlen = areq->outlen;
1072
1073 af_alg_free_areq_sgls(areq);
1074 sock_kfree_s(sk, areq, areq->areqlen);
1075 __sock_put(sk);
1076
1077 iocb->ki_complete(iocb, err ? err : resultlen, 0);
1078
1079 release_sock(sk);
1080}
1081EXPORT_SYMBOL_GPL(af_alg_async_cb);
1082
1083/**
1084 * af_alg_poll - poll system call handler
1085 */
1086unsigned int af_alg_poll(struct file *file, struct socket *sock,
1087 poll_table *wait)
1088{
1089 struct sock *sk = sock->sk;
1090 struct alg_sock *ask = alg_sk(sk);
1091 struct af_alg_ctx *ctx = ask->private;
1092 unsigned int mask;
1093
1094 sock_poll_wait(file, sk_sleep(sk), wait);
1095 mask = 0;
1096
1097 if (!ctx->more || ctx->used)
1098 mask |= POLLIN | POLLRDNORM;
1099
1100 if (af_alg_writable(sk))
1101 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1102
1103 return mask;
1104}
1105EXPORT_SYMBOL_GPL(af_alg_poll);
1106
1107/**
1108 * af_alg_alloc_areq - allocate struct af_alg_async_req
1109 *
1110 * @sk socket of connection to user space
1111 * @areqlen size of struct af_alg_async_req + crypto_*_reqsize
1112 * @return allocated data structure or ERR_PTR upon error
1113 */
1114struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
1115 unsigned int areqlen)
1116{
1117 struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
1118
1119 if (unlikely(!areq))
1120 return ERR_PTR(-ENOMEM);
1121
1122 areq->areqlen = areqlen;
1123 areq->sk = sk;
1124 areq->last_rsgl = NULL;
1125 INIT_LIST_HEAD(&areq->rsgl_list);
1126 areq->tsgl = NULL;
1127 areq->tsgl_entries = 0;
1128
1129 return areq;
1130}
1131EXPORT_SYMBOL_GPL(af_alg_alloc_areq);
1132
1133/**
1134 * af_alg_get_rsgl - create the RX SGL for the output data from the crypto
1135 * operation
1136 *
1137 * @sk socket of connection to user space
1138 * @msg user space message
1139 * @flags flags used to invoke recvmsg with
1140 * @areq instance of the cryptographic request that will hold the RX SGL
1141 * @maxsize maximum number of bytes to be pulled from user space
1142 * @outlen number of bytes in the RX SGL
1143 * @return 0 on success, < 0 upon error
1144 */
1145int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
1146 struct af_alg_async_req *areq, size_t maxsize,
1147 size_t *outlen)
1148{
1149 struct alg_sock *ask = alg_sk(sk);
1150 struct af_alg_ctx *ctx = ask->private;
1151 size_t len = 0;
1152
1153 while (maxsize > len && msg_data_left(msg)) {
1154 struct af_alg_rsgl *rsgl;
1155 size_t seglen;
1156 int err;
1157
1158 /* limit the amount of readable buffers */
1159 if (!af_alg_readable(sk))
1160 break;
1161
1162 if (!ctx->used) {
1163 err = af_alg_wait_for_data(sk, flags);
1164 if (err)
1165 return err;
1166 }
1167
1168 seglen = min_t(size_t, (maxsize - len),
1169 msg_data_left(msg));
1170
1171 if (list_empty(&areq->rsgl_list)) {
1172 rsgl = &areq->first_rsgl;
1173 } else {
1174 rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
1175 if (unlikely(!rsgl))
1176 return -ENOMEM;
1177 }
1178
1179 rsgl->sgl.npages = 0;
1180 list_add_tail(&rsgl->list, &areq->rsgl_list);
1181
1182 /* make one iovec available as scatterlist */
1183 err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
1184 if (err < 0)
1185 return err;
1186
1187 /* chain the new scatterlist with previous one */
1188 if (areq->last_rsgl)
1189 af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl);
1190
1191 areq->last_rsgl = rsgl;
1192 len += err;
1193 ctx->rcvused += err;
1194 rsgl->sg_num_bytes = err;
1195 iov_iter_advance(&msg->msg_iter, err);
1196 }
1197
1198 *outlen = len;
1199 return 0;
1200}
1201EXPORT_SYMBOL_GPL(af_alg_get_rsgl);
1202
1203static int __init af_alg_init(void)
1204{
1205 int err = proto_register(&alg_proto, 0);
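The exported helpers above compose into a common recvmsg skeleton. A
condensed sketch of that pattern (not verbatim kernel code; the flow and
names follow the two call sites converted below, with the cipher setup
and error paths abbreviated):

    /* Shape of the recvmsg path shared by algif_aead/algif_skcipher. */
    static int alg_recvmsg_skeleton(struct sock *sk, struct msghdr *msg,
                                    int flags, unsigned int crypto_reqsize)
    {
        struct af_alg_async_req *areq;
        size_t outlen = 0;
        int err;

        /* af_alg_async_req plus trailing transform request context */
        areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
                                     crypto_reqsize);
        if (IS_ERR(areq))
            return PTR_ERR(areq);

        /* turn the user iovecs into the RX SGL anchored in areq */
        err = af_alg_get_rsgl(sk, msg, flags, areq, msg_data_left(msg),
                              &outlen);
        if (err)
            goto free;

        /*
         * ... set up the cipher request over ctx->tsgl_list and run it,
         * synchronously or with af_alg_async_cb as the AIO callback ...
         */

        /* drop the consumed TX buffers */
        af_alg_pull_tsgl(sk, outlen, NULL, 0);

    free:
        af_alg_free_areq_sgls(areq);
        sock_kfree_s(sk, areq, areq->areqlen);
        return err ? err : outlen;
    }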
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 1f0696dd64f4..48d46e74ed0d 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -35,101 +35,23 @@
35#include <linux/init.h>
36#include <linux/list.h>
37#include <linux/kernel.h>
38#include <linux/sched/signal.h>
38#include <linux/mm.h>
39#include <linux/module.h>
40#include <linux/net.h>
41#include <net/sock.h>
42
44struct aead_tsgl {
45 struct list_head list;
46 unsigned int cur; /* Last processed SG entry */
47 struct scatterlist sg[0]; /* Array of SGs forming the SGL */
48};
49
50struct aead_rsgl {
51 struct af_alg_sgl sgl;
52 struct list_head list;
53 size_t sg_num_bytes; /* Bytes of data in that SGL */
54};
55
56struct aead_async_req {
57 struct kiocb *iocb;
58 struct sock *sk;
59
60 struct aead_rsgl first_rsgl; /* First RX SG */
61 struct list_head rsgl_list; /* Track RX SGs */
62
63 struct scatterlist *tsgl; /* priv. TX SGL of buffers to process */
64 unsigned int tsgl_entries; /* number of entries in priv. TX SGL */
65
66 unsigned int outlen; /* Filled output buf length */
67
68 unsigned int areqlen; /* Length of this data struct */
69 struct aead_request aead_req; /* req ctx trails this struct */
70};
71
43struct aead_tfm {
44 struct crypto_aead *aead;
45 bool has_key;
46 struct crypto_skcipher *null_tfm;
47};
48
78struct aead_ctx {
79 struct list_head tsgl_list; /* Link to TX SGL */
80
81 void *iv;
82 size_t aead_assoclen;
83
84 struct af_alg_completion completion; /* sync work queue */
85
86 size_t used; /* TX bytes sent to kernel */
87 size_t rcvused; /* total RX bytes to be processed by kernel */
88
89 bool more; /* More data to be expected? */
90 bool merge; /* Merge new data into existing SG */
91 bool enc; /* Crypto operation: enc, dec */
92
93 unsigned int len; /* Length of allocated memory for this struct */
94};
95
96#define MAX_SGL_ENTS ((4096 - sizeof(struct aead_tsgl)) / \
97 sizeof(struct scatterlist) - 1)
98
99static inline int aead_sndbuf(struct sock *sk)
100{
101 struct alg_sock *ask = alg_sk(sk);
102 struct aead_ctx *ctx = ask->private;
103
104 return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
105 ctx->used, 0);
106}
107
108static inline bool aead_writable(struct sock *sk)
109{
110 return PAGE_SIZE <= aead_sndbuf(sk);
111}
112
113static inline int aead_rcvbuf(struct sock *sk)
114{
115 struct alg_sock *ask = alg_sk(sk);
116 struct aead_ctx *ctx = ask->private;
117
118 return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
119 ctx->rcvused, 0);
120}
121
122static inline bool aead_readable(struct sock *sk)
123{
124 return PAGE_SIZE <= aead_rcvbuf(sk);
125}
126
49static inline bool aead_sufficient_data(struct sock *sk)
50{
51 struct alg_sock *ask = alg_sk(sk);
52 struct sock *psk = ask->parent;
53 struct alg_sock *pask = alg_sk(psk);
132 struct aead_ctx *ctx = ask->private;
54 struct af_alg_ctx *ctx = ask->private;
55 struct aead_tfm *aeadc = pask->private;
56 struct crypto_aead *tfm = aeadc->aead;
57 unsigned int as = crypto_aead_authsize(tfm);
@@ -141,490 +63,17 @@ static inline bool aead_sufficient_data(struct sock *sk)
63 return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
64}
65
144static int aead_alloc_tsgl(struct sock *sk)
145{
146 struct alg_sock *ask = alg_sk(sk);
147 struct aead_ctx *ctx = ask->private;
148 struct aead_tsgl *sgl;
149 struct scatterlist *sg = NULL;
150
151 sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl, list);
152 if (!list_empty(&ctx->tsgl_list))
153 sg = sgl->sg;
154
155 if (!sg || sgl->cur >= MAX_SGL_ENTS) {
156 sgl = sock_kmalloc(sk, sizeof(*sgl) +
157 sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
158 GFP_KERNEL);
159 if (!sgl)
160 return -ENOMEM;
161
162 sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
163 sgl->cur = 0;
164
165 if (sg)
166 sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
167
168 list_add_tail(&sgl->list, &ctx->tsgl_list);
169 }
170
171 return 0;
172}
173
174/**
175 * Count number of SG entries from the beginning of the SGL to @bytes. If
176 * an offset is provided, the counting of the SG entries starts at the offset.
177 */
178static unsigned int aead_count_tsgl(struct sock *sk, size_t bytes,
179 size_t offset)
180{
181 struct alg_sock *ask = alg_sk(sk);
182 struct aead_ctx *ctx = ask->private;
183 struct aead_tsgl *sgl, *tmp;
184 unsigned int i;
185 unsigned int sgl_count = 0;
186
187 if (!bytes)
188 return 0;
189
190 list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) {
191 struct scatterlist *sg = sgl->sg;
192
193 for (i = 0; i < sgl->cur; i++) {
194 size_t bytes_count;
195
196 /* Skip offset */
197 if (offset >= sg[i].length) {
198 offset -= sg[i].length;
199 bytes -= sg[i].length;
200 continue;
201 }
202
203 bytes_count = sg[i].length - offset;
204
205 offset = 0;
206 sgl_count++;
207
208 /* If we have seen requested number of bytes, stop */
209 if (bytes_count >= bytes)
210 return sgl_count;
211
212 bytes -= bytes_count;
213 }
214 }
215
216 return sgl_count;
217}
218
219/**
220 * Release the specified buffers from TX SGL pointed to by ctx->tsgl_list for
221 * @used bytes.
222 *
223 * If @dst is non-null, reassign the pages to dst. The caller must release
224 * the pages. If @dst_offset is given only reassign the pages to @dst starting
225 * at the @dst_offset (byte). The caller must ensure that @dst is large
226 * enough (e.g. by using aead_count_tsgl with the same offset).
227 */
228static void aead_pull_tsgl(struct sock *sk, size_t used,
229 struct scatterlist *dst, size_t dst_offset)
230{
231 struct alg_sock *ask = alg_sk(sk);
232 struct aead_ctx *ctx = ask->private;
233 struct aead_tsgl *sgl;
234 struct scatterlist *sg;
235 unsigned int i, j;
236
237 while (!list_empty(&ctx->tsgl_list)) {
238 sgl = list_first_entry(&ctx->tsgl_list, struct aead_tsgl,
239 list);
240 sg = sgl->sg;
241
242 for (i = 0, j = 0; i < sgl->cur; i++) {
243 size_t plen = min_t(size_t, used, sg[i].length);
244 struct page *page = sg_page(sg + i);
245
246 if (!page)
247 continue;
248
249 /*
250 * Assumption: caller created aead_count_tsgl(len)
251 * SG entries in dst.
252 */
253 if (dst) {
254 if (dst_offset >= plen) {
255 /* discard page before offset */
256 dst_offset -= plen;
257 put_page(page);
258 } else {
259 /* reassign page to dst after offset */
260 sg_set_page(dst + j, page,
261 plen - dst_offset,
262 sg[i].offset + dst_offset);
263 dst_offset = 0;
264 j++;
265 }
266 }
267
268 sg[i].length -= plen;
269 sg[i].offset += plen;
270
271 used -= plen;
272 ctx->used -= plen;
273
274 if (sg[i].length)
275 return;
276
277 if (!dst)
278 put_page(page);
279
280 sg_assign_page(sg + i, NULL);
281 }
282
283 list_del(&sgl->list);
284 sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) *
285 (MAX_SGL_ENTS + 1));
286 }
287
288 if (!ctx->used)
289 ctx->merge = 0;
290}
291
292static void aead_free_areq_sgls(struct aead_async_req *areq)
293{
294 struct sock *sk = areq->sk;
295 struct alg_sock *ask = alg_sk(sk);
296 struct aead_ctx *ctx = ask->private;
297 struct aead_rsgl *rsgl, *tmp;
298 struct scatterlist *tsgl;
299 struct scatterlist *sg;
300 unsigned int i;
301
302 list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
303 ctx->rcvused -= rsgl->sg_num_bytes;
304 af_alg_free_sg(&rsgl->sgl);
305 list_del(&rsgl->list);
306 if (rsgl != &areq->first_rsgl)
307 sock_kfree_s(sk, rsgl, sizeof(*rsgl));
308 }
309
310 tsgl = areq->tsgl;
311 for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
312 if (!sg_page(sg))
313 continue;
314 put_page(sg_page(sg));
315 }
316
317 if (areq->tsgl && areq->tsgl_entries)
318 sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
319}
320
321static int aead_wait_for_wmem(struct sock *sk, unsigned int flags)
322{
323 DEFINE_WAIT_FUNC(wait, woken_wake_function);
324 int err = -ERESTARTSYS;
325 long timeout;
326
327 if (flags & MSG_DONTWAIT)
328 return -EAGAIN;
329
330 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
331
332 add_wait_queue(sk_sleep(sk), &wait);
333 for (;;) {
334 if (signal_pending(current))
335 break;
336 timeout = MAX_SCHEDULE_TIMEOUT;
337 if (sk_wait_event(sk, &timeout, aead_writable(sk), &wait)) {
338 err = 0;
339 break;
340 }
341 }
342 remove_wait_queue(sk_sleep(sk), &wait);
343
344 return err;
345}
346
347static void aead_wmem_wakeup(struct sock *sk)
348{
349 struct socket_wq *wq;
350
351 if (!aead_writable(sk))
352 return;
353
354 rcu_read_lock();
355 wq = rcu_dereference(sk->sk_wq);
356 if (skwq_has_sleeper(wq))
357 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
358 POLLRDNORM |
359 POLLRDBAND);
360 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
361 rcu_read_unlock();
362}
363
364static int aead_wait_for_data(struct sock *sk, unsigned flags)
365{
366 DEFINE_WAIT_FUNC(wait, woken_wake_function);
367 struct alg_sock *ask = alg_sk(sk);
368 struct aead_ctx *ctx = ask->private;
369 long timeout;
370 int err = -ERESTARTSYS;
371
372 if (flags & MSG_DONTWAIT)
373 return -EAGAIN;
374
375 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
376
377 add_wait_queue(sk_sleep(sk), &wait);
378 for (;;) {
379 if (signal_pending(current))
380 break;
381 timeout = MAX_SCHEDULE_TIMEOUT;
382 if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
383 err = 0;
384 break;
385 }
386 }
387 remove_wait_queue(sk_sleep(sk), &wait);
388
389 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
390
391 return err;
392}
393
394static void aead_data_wakeup(struct sock *sk)
395{
396 struct alg_sock *ask = alg_sk(sk);
397 struct aead_ctx *ctx = ask->private;
398 struct socket_wq *wq;
399
400 if (!ctx->used)
401 return;
402
403 rcu_read_lock();
404 wq = rcu_dereference(sk->sk_wq);
405 if (skwq_has_sleeper(wq))
406 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
407 POLLRDNORM |
408 POLLRDBAND);
409 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
410 rcu_read_unlock();
411}
412
66static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
67{
68 struct sock *sk = sock->sk;
69 struct alg_sock *ask = alg_sk(sk);
70 struct sock *psk = ask->parent;
71 struct alg_sock *pask = alg_sk(psk);
419 struct aead_ctx *ctx = ask->private;
72 struct aead_tfm *aeadc = pask->private;
73 struct crypto_aead *tfm = aeadc->aead;
74 unsigned int ivsize = crypto_aead_ivsize(tfm);
423 struct aead_tsgl *sgl;
424 struct af_alg_control con = {};
425 long copied = 0;
426 bool enc = 0;
427 bool init = 0;
428 int err = 0;
429
430 if (msg->msg_controllen) {
431 err = af_alg_cmsg_send(msg, &con);
432 if (err)
433 return err;
434
435 init = 1;
436 switch (con.op) {
437 case ALG_OP_ENCRYPT:
438 enc = 1;
439 break;
440 case ALG_OP_DECRYPT:
441 enc = 0;
442 break;
443 default:
444 return -EINVAL;
445 }
446
447 if (con.iv && con.iv->ivlen != ivsize)
448 return -EINVAL;
449 }
450
451 lock_sock(sk);
452 if (!ctx->more && ctx->used) {
453 err = -EINVAL;
454 goto unlock;
455 }
456
457 if (init) {
458 ctx->enc = enc;
459 if (con.iv)
460 memcpy(ctx->iv, con.iv->iv, ivsize);
461
462 ctx->aead_assoclen = con.aead_assoclen;
463 }
464
465 while (size) {
466 struct scatterlist *sg;
467 size_t len = size;
468 size_t plen;
469
470 /* use the existing memory in an allocated page */
471 if (ctx->merge) {
472 sgl = list_entry(ctx->tsgl_list.prev,
473 struct aead_tsgl, list);
474 sg = sgl->sg + sgl->cur - 1;
475 len = min_t(unsigned long, len,
476 PAGE_SIZE - sg->offset - sg->length);
477 err = memcpy_from_msg(page_address(sg_page(sg)) +
478 sg->offset + sg->length,
479 msg, len);
480 if (err)
481 goto unlock;
482
483 sg->length += len;
484 ctx->merge = (sg->offset + sg->length) &
485 (PAGE_SIZE - 1);
486
487 ctx->used += len;
488 copied += len;
489 size -= len;
490 continue;
491 }
492
493 if (!aead_writable(sk)) {
494 err = aead_wait_for_wmem(sk, msg->msg_flags);
495 if (err)
496 goto unlock;
497 }
498
499 /* allocate a new page */
500 len = min_t(unsigned long, size, aead_sndbuf(sk));
501
502 err = aead_alloc_tsgl(sk);
503 if (err)
504 goto unlock;
505
506 sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl,
507 list);
508 sg = sgl->sg;
509 if (sgl->cur)
510 sg_unmark_end(sg + sgl->cur - 1);
511
512 do {
513 unsigned int i = sgl->cur;
514
515 plen = min_t(size_t, len, PAGE_SIZE);
516
517 sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
518 if (!sg_page(sg + i)) {
519 err = -ENOMEM;
520 goto unlock;
521 }
522
523 err = memcpy_from_msg(page_address(sg_page(sg + i)),
524 msg, plen);
525 if (err) {
526 __free_page(sg_page(sg + i));
527 sg_assign_page(sg + i, NULL);
528 goto unlock;
529 }
530
531 sg[i].length = plen;
532 len -= plen;
533 ctx->used += plen;
534 copied += plen;
535 size -= plen;
536 sgl->cur++;
537 } while (len && sgl->cur < MAX_SGL_ENTS);
538
539 if (!size)
540 sg_mark_end(sg + sgl->cur - 1);
541
542 ctx->merge = plen & (PAGE_SIZE - 1);
543 }
544
545 err = 0;
546
547 ctx->more = msg->msg_flags & MSG_MORE;
548
549unlock:
550 aead_data_wakeup(sk);
551 release_sock(sk);
552
553 return err ?: copied;
554}
555
556static ssize_t aead_sendpage(struct socket *sock, struct page *page,
557 int offset, size_t size, int flags)
558{
559 struct sock *sk = sock->sk;
560 struct alg_sock *ask = alg_sk(sk);
561 struct aead_ctx *ctx = ask->private;
562 struct aead_tsgl *sgl;
563 int err = -EINVAL;
564
565 if (flags & MSG_SENDPAGE_NOTLAST)
566 flags |= MSG_MORE;
567
568 lock_sock(sk);
569 if (!ctx->more && ctx->used)
570 goto unlock;
571
572 if (!size)
573 goto done;
574
575 if (!aead_writable(sk)) {
576 err = aead_wait_for_wmem(sk, flags);
577 if (err)
578 goto unlock;
579 }
580
581 err = aead_alloc_tsgl(sk);
582 if (err)
583 goto unlock;
584
585 ctx->merge = 0;
586 sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl, list);
587
588 if (sgl->cur)
589 sg_unmark_end(sgl->sg + sgl->cur - 1);
590
591 sg_mark_end(sgl->sg + sgl->cur);
592
593 get_page(page);
594 sg_set_page(sgl->sg + sgl->cur, page, size, offset);
595 sgl->cur++;
596 ctx->used += size;
597
598 err = 0;
599
600done:
75
76 return af_alg_sendmsg(sock, msg, size, ivsize);
601 ctx->more = flags & MSG_MORE;
602unlock:
603 aead_data_wakeup(sk);
604 release_sock(sk);
605
606 return err ?: size;
607}
608
609static void aead_async_cb(struct crypto_async_request *_req, int err)
610{
611 struct aead_async_req *areq = _req->data;
612 struct sock *sk = areq->sk;
613 struct kiocb *iocb = areq->iocb;
614 unsigned int resultlen;
615
616 lock_sock(sk);
617
618 /* Buffer size written by crypto operation. */
619 resultlen = areq->outlen;
620
621 aead_free_areq_sgls(areq);
622 sock_kfree_s(sk, areq, areq->areqlen);
623 __sock_put(sk);
624
625 iocb->ki_complete(iocb, err ? err : resultlen, 0);
626
627 release_sock(sk);
77}
78
79static int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm,
@@ -648,16 +97,13 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
97 struct alg_sock *ask = alg_sk(sk);
98 struct sock *psk = ask->parent;
99 struct alg_sock *pask = alg_sk(psk);
651 struct aead_ctx *ctx = ask->private;
100 struct af_alg_ctx *ctx = ask->private;
101 struct aead_tfm *aeadc = pask->private;
102 struct crypto_aead *tfm = aeadc->aead;
103 struct crypto_skcipher *null_tfm = aeadc->null_tfm;
104 unsigned int as = crypto_aead_authsize(tfm);
105 struct af_alg_async_req *areq;
106 struct af_alg_tsgl *tsgl;
656 unsigned int areqlen =
657 sizeof(struct aead_async_req) + crypto_aead_reqsize(tfm);
658 struct aead_async_req *areq;
659 struct aead_rsgl *last_rsgl = NULL;
660 struct aead_tsgl *tsgl;
107 struct scatterlist *src;
108 int err = 0;
109 size_t used = 0; /* [in] TX bufs to be en/decrypted */
@@ -703,61 +149,15 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
149 used -= ctx->aead_assoclen;
150
151 /* Allocate cipher request for current operation. */
152 areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
153 crypto_aead_reqsize(tfm));
154 if (IS_ERR(areq))
155 return PTR_ERR(areq);
706 areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
707 if (unlikely(!areq))
708 return -ENOMEM;
709 areq->areqlen = areqlen;
710 areq->sk = sk;
711 INIT_LIST_HEAD(&areq->rsgl_list);
712 areq->tsgl = NULL;
713 areq->tsgl_entries = 0;
156
157 /* convert iovecs of output buffers into RX SGL */
158 err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages);
159 if (err)
160 goto free;
716 while (outlen > usedpages && msg_data_left(msg)) {
717 struct aead_rsgl *rsgl;
718 size_t seglen;
719
720 /* limit the amount of readable buffers */
721 if (!aead_readable(sk))
722 break;
723
724 if (!ctx->used) {
725 err = aead_wait_for_data(sk, flags);
726 if (err)
727 goto free;
728 }
729
730 seglen = min_t(size_t, (outlen - usedpages),
731 msg_data_left(msg));
732
733 if (list_empty(&areq->rsgl_list)) {
734 rsgl = &areq->first_rsgl;
735 } else {
736 rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
737 if (unlikely(!rsgl)) {
738 err = -ENOMEM;
739 goto free;
740 }
741 }
742
743 rsgl->sgl.npages = 0;
744 list_add_tail(&rsgl->list, &areq->rsgl_list);
745
746 /* make one iovec available as scatterlist */
747 err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
748 if (err < 0)
749 goto free;
750
751 /* chain the new scatterlist with previous one */
752 if (last_rsgl)
753 af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
754
755 last_rsgl = rsgl;
756 usedpages += err;
757 ctx->rcvused += err;
758 rsgl->sg_num_bytes = err;
759 iov_iter_advance(&msg->msg_iter, err);
760 }
161
162 /*
163 * Ensure output buffer is sufficiently large. If the caller provides
@@ -778,7 +178,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
178 }
179
180 processed = used + ctx->aead_assoclen;
781 tsgl = list_first_entry(&ctx->tsgl_list, struct aead_tsgl, list);
181 tsgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list);
182
183 /*
184 * Copy of AAD from source to destination
@@ -811,7 +211,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
211 areq->first_rsgl.sgl.sg, processed);
212 if (err)
213 goto free;
814 aead_pull_tsgl(sk, processed, NULL, 0);
214 af_alg_pull_tsgl(sk, processed, NULL, 0);
215 } else {
216 /*
217 * Decryption operation - To achieve an in-place cipher
@@ -831,8 +231,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
231 goto free;
232
233 /* Create TX SGL for tag and chain it to RX SGL. */
834 areq->tsgl_entries = aead_count_tsgl(sk, processed,
835 processed - as);
234 areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
235 processed - as);
236 if (!areq->tsgl_entries)
237 areq->tsgl_entries = 1;
238 areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) *
@@ -845,12 +245,12 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
245 sg_init_table(areq->tsgl, areq->tsgl_entries);
246
247 /* Release TX SGL, except for tag data and reassign tag data. */
848 aead_pull_tsgl(sk, processed, areq->tsgl, processed - as);
248 af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);
249
250 /* chain the areq TX SGL holding the tag with RX SGL */
851 if (last_rsgl) {
251 if (usedpages) {
252 /* RX SGL present */
853 struct af_alg_sgl *sgl_prev = &last_rsgl->sgl;
253 struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
254
255 sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
256 sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
@@ -861,28 +261,28 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
261 }
262
263 /* Initialize the crypto operation */
864 aead_request_set_crypt(&areq->aead_req, src,
264 aead_request_set_crypt(&areq->cra_u.aead_req, src,
265 areq->first_rsgl.sgl.sg, used, ctx->iv);
866 aead_request_set_ad(&areq->aead_req, ctx->aead_assoclen);
266 aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
867 aead_request_set_tfm(&areq->aead_req, tfm);
267 aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
268
269 if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
270 /* AIO operation */
271 areq->iocb = msg->msg_iocb;
872 aead_request_set_callback(&areq->aead_req,
272 aead_request_set_callback(&areq->cra_u.aead_req,
273 CRYPTO_TFM_REQ_MAY_BACKLOG,
874 aead_async_cb, areq);
274 af_alg_async_cb, areq);
875 err = ctx->enc ? crypto_aead_encrypt(&areq->aead_req) :
876 crypto_aead_decrypt(&areq->aead_req);
275 err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
276 crypto_aead_decrypt(&areq->cra_u.aead_req);
277 } else {
278 /* Synchronous operation */
879 aead_request_set_callback(&areq->aead_req,
279 aead_request_set_callback(&areq->cra_u.aead_req,
280 CRYPTO_TFM_REQ_MAY_BACKLOG,
281 af_alg_complete, &ctx->completion);
282 err = af_alg_wait_for_completion(ctx->enc ?
883 crypto_aead_encrypt(&areq->aead_req) :
884 crypto_aead_decrypt(&areq->aead_req),
283 crypto_aead_encrypt(&areq->cra_u.aead_req) :
284 crypto_aead_decrypt(&areq->cra_u.aead_req),
285 &ctx->completion);
286 }
287
288 /* AIO operation in progress */
@@ -896,9 +296,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
296 }
297
298free:
899 aead_free_areq_sgls(areq);
299 af_alg_free_areq_sgls(areq);
900 if (areq)
901 sock_kfree_s(sk, areq, areqlen);
300 sock_kfree_s(sk, areq, areq->areqlen);
301
302 return err ? err : outlen;
303}
@@ -931,31 +330,11 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg,
330 }
331
332out:
934 aead_wmem_wakeup(sk);
333 af_alg_wmem_wakeup(sk);
334 release_sock(sk);
335 return ret;
336}
337
939static unsigned int aead_poll(struct file *file, struct socket *sock,
940 poll_table *wait)
941{
942 struct sock *sk = sock->sk;
943 struct alg_sock *ask = alg_sk(sk);
944 struct aead_ctx *ctx = ask->private;
945 unsigned int mask;
946
947 sock_poll_wait(file, sk_sleep(sk), wait);
948 mask = 0;
949
950 if (!ctx->more)
951 mask |= POLLIN | POLLRDNORM;
952
953 if (aead_writable(sk))
954 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
955
956 return mask;
957}
958
338static struct proto_ops algif_aead_ops = {
339 .family = PF_ALG,
340
@@ -973,9 +352,9 @@ static struct proto_ops algif_aead_ops = {
352
353 .release = af_alg_release,
354 .sendmsg = aead_sendmsg,
976 .sendpage = aead_sendpage,
355 .sendpage = af_alg_sendpage,
356 .recvmsg = aead_recvmsg,
978 .poll = aead_poll,
357 .poll = af_alg_poll,
358};
359
360static int aead_check_key(struct socket *sock)
@@ -1037,7 +416,7 @@ static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
416 if (err)
417 return err;
418
1040 return aead_sendpage(sock, page, offset, size, flags);
419 return af_alg_sendpage(sock, page, offset, size, flags);
420}
421
422static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
@@ -1071,7 +450,7 @@ static struct proto_ops algif_aead_ops_nokey = {
450 .sendmsg = aead_sendmsg_nokey,
451 .sendpage = aead_sendpage_nokey,
452 .recvmsg = aead_recvmsg_nokey,
1074 .poll = aead_poll,
453 .poll = af_alg_poll,
454};
455
1077static void *aead_bind(const char *name, u32 type, u32 mask) 456static void *aead_bind(const char *name, u32 type, u32 mask)
@@ -1132,14 +511,14 @@ static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
511static void aead_sock_destruct(struct sock *sk)
512{
513 struct alg_sock *ask = alg_sk(sk);
1135 struct aead_ctx *ctx = ask->private;
514 struct af_alg_ctx *ctx = ask->private;
515 struct sock *psk = ask->parent;
516 struct alg_sock *pask = alg_sk(psk);
517 struct aead_tfm *aeadc = pask->private;
518 struct crypto_aead *tfm = aeadc->aead;
519 unsigned int ivlen = crypto_aead_ivsize(tfm);
520
1142 aead_pull_tsgl(sk, ctx->used, NULL, 0);
521 af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
522 crypto_put_default_null_skcipher2();
523 sock_kzfree_s(sk, ctx->iv, ivlen);
524 sock_kfree_s(sk, ctx, ctx->len);
@@ -1148,7 +527,7 @@ static void aead_sock_destruct(struct sock *sk)
527
528static int aead_accept_parent_nokey(void *private, struct sock *sk)
529{
1151 struct aead_ctx *ctx;
530 struct af_alg_ctx *ctx;
531 struct alg_sock *ask = alg_sk(sk);
532 struct aead_tfm *tfm = private;
533 struct crypto_aead *aead = tfm->aead;
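The decryption branch above is the one consumer of the dst/dst_offset
parameters of af_alg_count_tsgl() and af_alg_pull_tsgl(). A worked
example with hypothetical sizes may help:

    /*
     * Hypothetical AEAD decryption: assoclen = 16, 64 bytes of
     * ciphertext, authsize as = 16. User space sent AAD || CT || tag:
     *
     *   ctx->used = 16 + 64 + 16 = 96    (AAD || CT || tag in TX SGL)
     *   used      = 96 - assoclen = 80   (CT || tag handed to the cipher)
     *   processed = used + assoclen = 96 (all TX bytes consumed)
     *   tag bytes = [processed - as, processed) = [80, 96)
     *
     * af_alg_count_tsgl(sk, 96, 80) counts only the SG entries holding
     * the 16 tag bytes; af_alg_pull_tsgl(sk, 96, areq->tsgl, 80)
     * releases bytes [0, 80) and reassigns the tag pages to areq->tsgl,
     * which is then chained behind the RX SGL (areq->last_rsgl) so that
     * the in-place decrypt reads CT || tag.
     */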
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index ce3b5fba2279..8ae4170aaeb4 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -33,320 +33,16 @@
33#include <linux/init.h>
34#include <linux/list.h>
35#include <linux/kernel.h>
36#include <linux/sched/signal.h>
36#include <linux/mm.h>
37#include <linux/module.h>
38#include <linux/net.h>
39#include <net/sock.h>
40
42struct skcipher_tsgl {
43 struct list_head list;
44 int cur;
45 struct scatterlist sg[0];
46};
47
48struct skcipher_rsgl {
49 struct af_alg_sgl sgl;
50 struct list_head list;
51 size_t sg_num_bytes;
52};
53
54struct skcipher_async_req {
55 struct kiocb *iocb;
56 struct sock *sk;
57
58 struct skcipher_rsgl first_sgl;
59 struct list_head rsgl_list;
60
61 struct scatterlist *tsgl;
62 unsigned int tsgl_entries;
63
64 unsigned int areqlen;
65 struct skcipher_request req;
66};
67
41struct skcipher_tfm {
42 struct crypto_skcipher *skcipher;
43 bool has_key;
44};
45
73struct skcipher_ctx {
74 struct list_head tsgl_list;
75
76 void *iv;
77
78 struct af_alg_completion completion;
79
80 size_t used;
81 size_t rcvused;
82
83 bool more;
84 bool merge;
85 bool enc;
86
87 unsigned int len;
88};
89
90#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_tsgl)) / \
91 sizeof(struct scatterlist) - 1)
92
93static inline int skcipher_sndbuf(struct sock *sk)
94{
95 struct alg_sock *ask = alg_sk(sk);
96 struct skcipher_ctx *ctx = ask->private;
97
98 return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
99 ctx->used, 0);
100}
101
102static inline bool skcipher_writable(struct sock *sk)
103{
104 return PAGE_SIZE <= skcipher_sndbuf(sk);
105}
106
107static inline int skcipher_rcvbuf(struct sock *sk)
108{
109 struct alg_sock *ask = alg_sk(sk);
110 struct skcipher_ctx *ctx = ask->private;
111
112 return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
113 ctx->rcvused, 0);
114}
115
116static inline bool skcipher_readable(struct sock *sk)
117{
118 return PAGE_SIZE <= skcipher_rcvbuf(sk);
119}
120
121static int skcipher_alloc_tsgl(struct sock *sk)
122{
123 struct alg_sock *ask = alg_sk(sk);
124 struct skcipher_ctx *ctx = ask->private;
125 struct skcipher_tsgl *sgl;
126 struct scatterlist *sg = NULL;
127
128 sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl, list);
129 if (!list_empty(&ctx->tsgl_list))
130 sg = sgl->sg;
131
132 if (!sg || sgl->cur >= MAX_SGL_ENTS) {
133 sgl = sock_kmalloc(sk, sizeof(*sgl) +
134 sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
135 GFP_KERNEL);
136 if (!sgl)
137 return -ENOMEM;
138
139 sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
140 sgl->cur = 0;
141
142 if (sg)
143 sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
144
145 list_add_tail(&sgl->list, &ctx->tsgl_list);
146 }
147
148 return 0;
149}
150
151static unsigned int skcipher_count_tsgl(struct sock *sk, size_t bytes)
152{
153 struct alg_sock *ask = alg_sk(sk);
154 struct skcipher_ctx *ctx = ask->private;
155 struct skcipher_tsgl *sgl, *tmp;
156 unsigned int i;
157 unsigned int sgl_count = 0;
158
159 if (!bytes)
160 return 0;
161
-	list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) {
-		struct scatterlist *sg = sgl->sg;
-
-		for (i = 0; i < sgl->cur; i++) {
-			sgl_count++;
-			if (sg[i].length >= bytes)
-				return sgl_count;
-
-			bytes -= sg[i].length;
-		}
-	}
-
-	return sgl_count;
-}
-
-static void skcipher_pull_tsgl(struct sock *sk, size_t used,
-			       struct scatterlist *dst)
-{
-	struct alg_sock *ask = alg_sk(sk);
-	struct skcipher_ctx *ctx = ask->private;
-	struct skcipher_tsgl *sgl;
-	struct scatterlist *sg;
-	unsigned int i;
-
-	while (!list_empty(&ctx->tsgl_list)) {
-		sgl = list_first_entry(&ctx->tsgl_list, struct skcipher_tsgl,
-				       list);
-		sg = sgl->sg;
-
-		for (i = 0; i < sgl->cur; i++) {
-			size_t plen = min_t(size_t, used, sg[i].length);
-			struct page *page = sg_page(sg + i);
-
-			if (!page)
-				continue;
-
-			/*
-			 * Assumption: caller created skcipher_count_tsgl(len)
-			 * SG entries in dst.
-			 */
-			if (dst)
-				sg_set_page(dst + i, page, plen, sg[i].offset);
-
-			sg[i].length -= plen;
-			sg[i].offset += plen;
-
-			used -= plen;
-			ctx->used -= plen;
-
-			if (sg[i].length)
-				return;
-
-			if (!dst)
-				put_page(page);
-			sg_assign_page(sg + i, NULL);
-		}
-
-		list_del(&sgl->list);
-		sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) *
-					(MAX_SGL_ENTS + 1));
-	}
-
-	if (!ctx->used)
-		ctx->merge = 0;
-}
-
-static void skcipher_free_areq_sgls(struct skcipher_async_req *areq)
-{
-	struct sock *sk = areq->sk;
-	struct alg_sock *ask = alg_sk(sk);
-	struct skcipher_ctx *ctx = ask->private;
-	struct skcipher_rsgl *rsgl, *tmp;
-	struct scatterlist *tsgl;
-	struct scatterlist *sg;
-	unsigned int i;
-
-	list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
-		ctx->rcvused -= rsgl->sg_num_bytes;
-		af_alg_free_sg(&rsgl->sgl);
-		list_del(&rsgl->list);
-		if (rsgl != &areq->first_sgl)
-			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
-	}
-
-	tsgl = areq->tsgl;
-	for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
-		if (!sg_page(sg))
-			continue;
-		put_page(sg_page(sg));
-	}
-
-	if (areq->tsgl && areq->tsgl_entries)
-		sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
-}
-
-static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
-{
-	DEFINE_WAIT_FUNC(wait, woken_wake_function);
-	int err = -ERESTARTSYS;
-	long timeout;
-
-	if (flags & MSG_DONTWAIT)
-		return -EAGAIN;
-
-	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
-
-	add_wait_queue(sk_sleep(sk), &wait);
-	for (;;) {
-		if (signal_pending(current))
-			break;
-		timeout = MAX_SCHEDULE_TIMEOUT;
-		if (sk_wait_event(sk, &timeout, skcipher_writable(sk), &wait)) {
-			err = 0;
-			break;
-		}
-	}
-	remove_wait_queue(sk_sleep(sk), &wait);
-
-	return err;
-}
-
-static void skcipher_wmem_wakeup(struct sock *sk)
-{
-	struct socket_wq *wq;
-
-	if (!skcipher_writable(sk))
-		return;
-
-	rcu_read_lock();
-	wq = rcu_dereference(sk->sk_wq);
-	if (skwq_has_sleeper(wq))
-		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
-							   POLLRDNORM |
-							   POLLRDBAND);
-	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
-	rcu_read_unlock();
-}
-
-static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
-{
-	DEFINE_WAIT_FUNC(wait, woken_wake_function);
-	struct alg_sock *ask = alg_sk(sk);
-	struct skcipher_ctx *ctx = ask->private;
-	long timeout;
-	int err = -ERESTARTSYS;
-
-	if (flags & MSG_DONTWAIT) {
-		return -EAGAIN;
-	}
-
-	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
-
-	add_wait_queue(sk_sleep(sk), &wait);
-	for (;;) {
-		if (signal_pending(current))
-			break;
-		timeout = MAX_SCHEDULE_TIMEOUT;
-		if (sk_wait_event(sk, &timeout, ctx->used, &wait)) {
-			err = 0;
-			break;
-		}
-	}
-	remove_wait_queue(sk_sleep(sk), &wait);
-
-	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
-
-	return err;
-}
-
-static void skcipher_data_wakeup(struct sock *sk)
-{
-	struct alg_sock *ask = alg_sk(sk);
-	struct skcipher_ctx *ctx = ask->private;
-	struct socket_wq *wq;
-
-	if (!ctx->used)
-		return;
-
-	rcu_read_lock();
-	wq = rcu_dereference(sk->sk_wq);
-	if (skwq_has_sleeper(wq))
-		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
-							   POLLRDNORM |
-							   POLLRDBAND);
-	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
-	rcu_read_unlock();
-}
-
 static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
			     size_t size)
 {
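
The TX SGL bookkeeping removed above survives unchanged as af_alg_count_tsgl() and af_alg_pull_tsgl(). Callers keep the same count-then-pull pattern that _skcipher_recvmsg() uses below: size a private TX SGL, allocate it, then drain the per-socket list into it. A minimal sketch of that pattern against the new helpers; example_detach_tx_data() is a hypothetical caller and error unwinding is elided:

/*
 * Minimal sketch of the count-then-pull pattern using the new
 * af_alg_* helpers; example_detach_tx_data() is hypothetical.
 */
static int example_detach_tx_data(struct sock *sk,
				  struct af_alg_async_req *areq, size_t len)
{
	/* Number of SG entries covering "len" bytes, starting at offset 0 */
	areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
	if (!areq->tsgl_entries)
		areq->tsgl_entries = 1;

	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
				  GFP_KERNEL);
	if (!areq->tsgl)
		return -ENOMEM;
	sg_init_table(areq->tsgl, areq->tsgl_entries);

	/* Transfer the page references out of the socket's TX list */
	af_alg_pull_tsgl(sk, len, areq->tsgl, 0);
	return 0;
}
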
@@ -354,208 +50,11 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
 	struct alg_sock *ask = alg_sk(sk);
 	struct sock *psk = ask->parent;
 	struct alg_sock *pask = alg_sk(psk);
-	struct skcipher_ctx *ctx = ask->private;
 	struct skcipher_tfm *skc = pask->private;
 	struct crypto_skcipher *tfm = skc->skcipher;
 	unsigned ivsize = crypto_skcipher_ivsize(tfm);
-	struct skcipher_tsgl *sgl;
-	struct af_alg_control con = {};
-	long copied = 0;
-	bool enc = 0;
-	bool init = 0;
-	int err;
-	int i;
-
-	if (msg->msg_controllen) {
-		err = af_alg_cmsg_send(msg, &con);
-		if (err)
-			return err;
-
-		init = 1;
-		switch (con.op) {
-		case ALG_OP_ENCRYPT:
-			enc = 1;
-			break;
-		case ALG_OP_DECRYPT:
-			enc = 0;
-			break;
-		default:
-			return -EINVAL;
-		}
-
-		if (con.iv && con.iv->ivlen != ivsize)
-			return -EINVAL;
-	}
-
-	err = -EINVAL;
-
-	lock_sock(sk);
-	if (!ctx->more && ctx->used)
-		goto unlock;
-
-	if (init) {
-		ctx->enc = enc;
-		if (con.iv)
-			memcpy(ctx->iv, con.iv->iv, ivsize);
-	}
-
-	while (size) {
-		struct scatterlist *sg;
-		unsigned long len = size;
-		size_t plen;
-
-		if (ctx->merge) {
-			sgl = list_entry(ctx->tsgl_list.prev,
-					 struct skcipher_tsgl, list);
-			sg = sgl->sg + sgl->cur - 1;
-			len = min_t(unsigned long, len,
-				    PAGE_SIZE - sg->offset - sg->length);
-
-			err = memcpy_from_msg(page_address(sg_page(sg)) +
-					      sg->offset + sg->length,
-					      msg, len);
-			if (err)
-				goto unlock;
-
-			sg->length += len;
-			ctx->merge = (sg->offset + sg->length) &
-				     (PAGE_SIZE - 1);
-
-			ctx->used += len;
-			copied += len;
-			size -= len;
-			continue;
-		}
-
-		if (!skcipher_writable(sk)) {
-			err = skcipher_wait_for_wmem(sk, msg->msg_flags);
-			if (err)
-				goto unlock;
-		}
-
-		len = min_t(unsigned long, len, skcipher_sndbuf(sk));
-
-		err = skcipher_alloc_tsgl(sk);
-		if (err)
-			goto unlock;
-
-		sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl,
-				 list);
-		sg = sgl->sg;
-		if (sgl->cur)
-			sg_unmark_end(sg + sgl->cur - 1);
-		do {
-			i = sgl->cur;
-			plen = min_t(size_t, len, PAGE_SIZE);
-
-			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
-			err = -ENOMEM;
-			if (!sg_page(sg + i))
-				goto unlock;
-
-			err = memcpy_from_msg(page_address(sg_page(sg + i)),
-					      msg, plen);
-			if (err) {
-				__free_page(sg_page(sg + i));
-				sg_assign_page(sg + i, NULL);
-				goto unlock;
-			}
-
-			sg[i].length = plen;
-			len -= plen;
-			ctx->used += plen;
-			copied += plen;
-			size -= plen;
-			sgl->cur++;
-		} while (len && sgl->cur < MAX_SGL_ENTS);
-
-		if (!size)
-			sg_mark_end(sg + sgl->cur - 1);
-
-		ctx->merge = plen & (PAGE_SIZE - 1);
-	}
-
-	err = 0;
-
-	ctx->more = msg->msg_flags & MSG_MORE;
-
-unlock:
-	skcipher_data_wakeup(sk);
-	release_sock(sk);
 
-	return copied ?: err;
-}
-
-static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
-				 int offset, size_t size, int flags)
-{
-	struct sock *sk = sock->sk;
-	struct alg_sock *ask = alg_sk(sk);
-	struct skcipher_ctx *ctx = ask->private;
-	struct skcipher_tsgl *sgl;
-	int err = -EINVAL;
-
-	if (flags & MSG_SENDPAGE_NOTLAST)
-		flags |= MSG_MORE;
-
-	lock_sock(sk);
-	if (!ctx->more && ctx->used)
-		goto unlock;
-
-	if (!size)
-		goto done;
-
-	if (!skcipher_writable(sk)) {
-		err = skcipher_wait_for_wmem(sk, flags);
-		if (err)
-			goto unlock;
-	}
-
-	err = skcipher_alloc_tsgl(sk);
-	if (err)
-		goto unlock;
-
-	ctx->merge = 0;
-	sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl, list);
-
-	if (sgl->cur)
-		sg_unmark_end(sgl->sg + sgl->cur - 1);
-
-	sg_mark_end(sgl->sg + sgl->cur);
-	get_page(page);
-	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
-	sgl->cur++;
-	ctx->used += size;
-
-done:
-	ctx->more = flags & MSG_MORE;
-
-unlock:
-	skcipher_data_wakeup(sk);
-	release_sock(sk);
-
-	return err ?: size;
-}
-
-static void skcipher_async_cb(struct crypto_async_request *req, int err)
-{
-	struct skcipher_async_req *areq = req->data;
-	struct sock *sk = areq->sk;
-	struct kiocb *iocb = areq->iocb;
-	unsigned int resultlen;
-
-	lock_sock(sk);
-
-	/* Buffer size written by crypto operation. */
-	resultlen = areq->req.cryptlen;
-
-	skcipher_free_areq_sgls(areq);
-	sock_kfree_s(sk, areq, areq->areqlen);
-	__sock_put(sk);
-
-	iocb->ki_complete(iocb, err ? err : resultlen, 0);
-
-	release_sock(sk);
+	return af_alg_sendmsg(sock, msg, size, ivsize);
 }
 
 static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
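
For context, the kernel-side sendmsg/recvmsg handlers above and below serve the user-space AF_ALG socket API. An illustrative (not authoritative) user-space round trip for cbc(aes) looks roughly like this; error checking is omitted and the key and IV are zero-filled demo values:

/* Illustrative AF_ALG user-space client: encrypt one AES-CBC block. */
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "cbc(aes)",
	};
	char key[16] = { 0 };			/* demo key only */
	char pt[16] = "0123456789abcdef";	/* exactly one AES block */
	char ct[16];
	char cbuf[CMSG_SPACE(sizeof(__u32)) +
		  CMSG_SPACE(offsetof(struct af_alg_iv, iv) + 16)] = { 0 };
	struct msghdr msg = { 0 };
	struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
	struct cmsghdr *cmsg;
	struct af_alg_iv *ivp;
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);	/* operation socket */

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	cmsg = CMSG_FIRSTHDR(&msg);	/* select encryption */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);	/* 16-byte zero IV */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(offsetof(struct af_alg_iv, iv) + 16);
	ivp = (struct af_alg_iv *)CMSG_DATA(cmsg);
	ivp->ivlen = 16;
	memset(ivp->iv, 0, 16);

	sendmsg(opfd, &msg, 0);		/* lands in af_alg_sendmsg() */
	read(opfd, ct, sizeof(ct));	/* lands in skcipher_recvmsg() */

	close(opfd);
	close(tfmfd);
	return 0;
}
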
@@ -565,72 +64,24 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 	struct alg_sock *ask = alg_sk(sk);
 	struct sock *psk = ask->parent;
 	struct alg_sock *pask = alg_sk(psk);
-	struct skcipher_ctx *ctx = ask->private;
+	struct af_alg_ctx *ctx = ask->private;
 	struct skcipher_tfm *skc = pask->private;
 	struct crypto_skcipher *tfm = skc->skcipher;
 	unsigned int bs = crypto_skcipher_blocksize(tfm);
-	unsigned int areqlen = sizeof(struct skcipher_async_req) +
-			       crypto_skcipher_reqsize(tfm);
-	struct skcipher_async_req *areq;
-	struct skcipher_rsgl *last_rsgl = NULL;
+	struct af_alg_async_req *areq;
 	int err = 0;
 	size_t len = 0;
 
 	/* Allocate cipher request for current operation. */
-	areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
-	if (unlikely(!areq))
-		return -ENOMEM;
-	areq->areqlen = areqlen;
-	areq->sk = sk;
-	INIT_LIST_HEAD(&areq->rsgl_list);
-	areq->tsgl = NULL;
-	areq->tsgl_entries = 0;
+	areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
+				     crypto_skcipher_reqsize(tfm));
+	if (IS_ERR(areq))
+		return PTR_ERR(areq);
 
 	/* convert iovecs of output buffers into RX SGL */
-	while (msg_data_left(msg)) {
-		struct skcipher_rsgl *rsgl;
-		size_t seglen;
-
-		/* limit the amount of readable buffers */
-		if (!skcipher_readable(sk))
-			break;
-
-		if (!ctx->used) {
-			err = skcipher_wait_for_data(sk, flags);
-			if (err)
-				goto free;
-		}
-
-		seglen = min_t(size_t, ctx->used, msg_data_left(msg));
-
-		if (list_empty(&areq->rsgl_list)) {
-			rsgl = &areq->first_sgl;
-		} else {
-			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
-			if (!rsgl) {
-				err = -ENOMEM;
-				goto free;
-			}
-		}
-
-		rsgl->sgl.npages = 0;
-		list_add_tail(&rsgl->list, &areq->rsgl_list);
-
-		/* make one iovec available as scatterlist */
-		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
-		if (err < 0)
-			goto free;
-
-		/* chain the new scatterlist with previous one */
-		if (last_rsgl)
-			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
-
-		last_rsgl = rsgl;
-		len += err;
-		ctx->rcvused += err;
-		rsgl->sg_num_bytes = err;
-		iov_iter_advance(&msg->msg_iter, err);
-	}
+	err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len);
+	if (err)
+		goto free;
 
 	/* Process only as much RX buffers for which we have TX data */
 	if (len > ctx->used)
@@ -647,7 +98,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 	 * Create a per request TX SGL for this request which tracks the
 	 * SG entries from the global TX SGL.
 	 */
-	areq->tsgl_entries = skcipher_count_tsgl(sk, len);
+	areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
 	if (!areq->tsgl_entries)
 		areq->tsgl_entries = 1;
 	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
@@ -657,44 +108,48 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 		goto free;
 	}
 	sg_init_table(areq->tsgl, areq->tsgl_entries);
-	skcipher_pull_tsgl(sk, len, areq->tsgl);
+	af_alg_pull_tsgl(sk, len, areq->tsgl, 0);
 
 	/* Initialize the crypto operation */
-	skcipher_request_set_tfm(&areq->req, tfm);
-	skcipher_request_set_crypt(&areq->req, areq->tsgl,
-				   areq->first_sgl.sgl.sg, len, ctx->iv);
+	skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
+	skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl,
+				   areq->first_rsgl.sgl.sg, len, ctx->iv);
 
 	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
 		/* AIO operation */
 		areq->iocb = msg->msg_iocb;
-		skcipher_request_set_callback(&areq->req,
+		skcipher_request_set_callback(&areq->cra_u.skcipher_req,
 					      CRYPTO_TFM_REQ_MAY_SLEEP,
-					      skcipher_async_cb, areq);
-		err = ctx->enc ? crypto_skcipher_encrypt(&areq->req) :
-				 crypto_skcipher_decrypt(&areq->req);
+					      af_alg_async_cb, areq);
+		err = ctx->enc ?
+			crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
+			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
 	} else {
 		/* Synchronous operation */
-		skcipher_request_set_callback(&areq->req,
+		skcipher_request_set_callback(&areq->cra_u.skcipher_req,
 					      CRYPTO_TFM_REQ_MAY_SLEEP |
 					      CRYPTO_TFM_REQ_MAY_BACKLOG,
 					      af_alg_complete,
 					      &ctx->completion);
 		err = af_alg_wait_for_completion(ctx->enc ?
-			crypto_skcipher_encrypt(&areq->req) :
-			crypto_skcipher_decrypt(&areq->req),
+			crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
+			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
 					&ctx->completion);
 	}
 
 	/* AIO operation in progress */
 	if (err == -EINPROGRESS) {
 		sock_hold(sk);
+
+		/* Remember output size that will be generated. */
+		areq->outlen = len;
+
 		return -EIOCBQUEUED;
 	}
 
 free:
-	skcipher_free_areq_sgls(areq);
-	if (areq)
-		sock_kfree_s(sk, areq, areqlen);
+	af_alg_free_areq_sgls(areq);
+	sock_kfree_s(sk, areq, areq->areqlen);
 
 	return err ? err : len;
 }
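
The -EIOCBQUEUED branch above is why the commit message insists on storing areq->outlen before the request is queued: once the operation completes asynchronously, the common callback only has the request structure to work with. A rough sketch of the consolidated callback follows; the actual body lives in crypto/af_alg.c, which this hunk does not show:

/*
 * Rough sketch of af_alg_async_cb() after consolidation. Unlike the
 * removed skcipher_async_cb(), which read areq->req.cryptlen, the
 * common callback reports the byte count stashed in areq->outlen
 * before -EIOCBQUEUED was returned.
 */
void af_alg_async_cb(struct crypto_async_request *_req, int err)
{
	struct af_alg_async_req *areq = _req->data;
	struct sock *sk = areq->sk;
	struct kiocb *iocb = areq->iocb;
	unsigned int resultlen;

	lock_sock(sk);

	/* Buffer size written by crypto operation. */
	resultlen = areq->outlen;

	af_alg_free_areq_sgls(areq);
	sock_kfree_s(sk, areq, areq->areqlen);
	__sock_put(sk);

	iocb->ki_complete(iocb, err ? err : resultlen, 0);

	release_sock(sk);
}
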
@@ -727,30 +182,11 @@ static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 	}
 
 out:
-	skcipher_wmem_wakeup(sk);
+	af_alg_wmem_wakeup(sk);
 	release_sock(sk);
 	return ret;
 }
 
-static unsigned int skcipher_poll(struct file *file, struct socket *sock,
-				  poll_table *wait)
-{
-	struct sock *sk = sock->sk;
-	struct alg_sock *ask = alg_sk(sk);
-	struct skcipher_ctx *ctx = ask->private;
-	unsigned int mask;
-
-	sock_poll_wait(file, sk_sleep(sk), wait);
-	mask = 0;
-
-	if (ctx->used)
-		mask |= POLLIN | POLLRDNORM;
-
-	if (skcipher_writable(sk))
-		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
-
-	return mask;
-}
 
 static struct proto_ops algif_skcipher_ops = {
 	.family		=	PF_ALG,
@@ -769,9 +205,9 @@ static struct proto_ops algif_skcipher_ops = {
 
 	.release	=	af_alg_release,
 	.sendmsg	=	skcipher_sendmsg,
-	.sendpage	=	skcipher_sendpage,
+	.sendpage	=	af_alg_sendpage,
 	.recvmsg	=	skcipher_recvmsg,
-	.poll		=	skcipher_poll,
+	.poll		=	af_alg_poll,
 };
 
 static int skcipher_check_key(struct socket *sock)
@@ -833,7 +269,7 @@ static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
 	if (err)
 		return err;
 
-	return skcipher_sendpage(sock, page, offset, size, flags);
+	return af_alg_sendpage(sock, page, offset, size, flags);
 }
 
 static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
@@ -867,7 +303,7 @@ static struct proto_ops algif_skcipher_ops_nokey = {
 	.sendmsg	=	skcipher_sendmsg_nokey,
 	.sendpage	=	skcipher_sendpage_nokey,
 	.recvmsg	=	skcipher_recvmsg_nokey,
-	.poll		=	skcipher_poll,
+	.poll		=	af_alg_poll,
 };
 
 static void *skcipher_bind(const char *name, u32 type, u32 mask)
@@ -912,13 +348,13 @@ static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
 static void skcipher_sock_destruct(struct sock *sk)
 {
 	struct alg_sock *ask = alg_sk(sk);
-	struct skcipher_ctx *ctx = ask->private;
+	struct af_alg_ctx *ctx = ask->private;
 	struct sock *psk = ask->parent;
 	struct alg_sock *pask = alg_sk(psk);
 	struct skcipher_tfm *skc = pask->private;
 	struct crypto_skcipher *tfm = skc->skcipher;
 
-	skcipher_pull_tsgl(sk, ctx->used, NULL);
+	af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
 	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
 	sock_kfree_s(sk, ctx, ctx->len);
 	af_alg_release_parent(sk);
@@ -926,7 +362,7 @@ static void skcipher_sock_destruct(struct sock *sk)
 
 static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
 {
-	struct skcipher_ctx *ctx;
+	struct af_alg_ctx *ctx;
 	struct alg_sock *ask = alg_sk(sk);
 	struct skcipher_tfm *tfm = private;
 	struct crypto_skcipher *skcipher = tfm->skcipher;
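
skcipher_poll() is gone as well; per the commit message, the shared af_alg_poll() reports POLLIN / POLLRDNORM when either no more TX data is expected or data has been supplied, mirroring the wakeup condition in af_alg_wait_for_data(). A sketch of that logic follows; the implementation resides in crypto/af_alg.c and is not part of this hunk:

/*
 * Sketch of the consolidated poll handler; compare the removed
 * skcipher_poll(), which tested ctx->used alone.
 */
unsigned int af_alg_poll(struct file *file, struct socket *sock,
			 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	unsigned int mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	/* Readable when no more TX data is expected or data is queued */
	if (!ctx->more || ctx->used)
		mask |= POLLIN | POLLRDNORM;

	if (af_alg_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}
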
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index e2b9c6fe2714..75ec9c662268 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -20,6 +20,9 @@
 #include <linux/types.h>
 #include <net/sock.h>
 
+#include <crypto/aead.h>
+#include <crypto/skcipher.h>
+
 #define ALG_MAX_PAGES	16
 
 struct crypto_async_request;
@@ -68,6 +71,99 @@ struct af_alg_sgl {
 	unsigned int npages;
 };
 
+/* TX SGL entry */
+struct af_alg_tsgl {
+	struct list_head list;
+	unsigned int cur;		/* Last processed SG entry */
+	struct scatterlist sg[0];	/* Array of SGs forming the SGL */
+};
+
+#define MAX_SGL_ENTS ((4096 - sizeof(struct af_alg_tsgl)) / \
+		      sizeof(struct scatterlist) - 1)
+
+/* RX SGL entry */
+struct af_alg_rsgl {
+	struct af_alg_sgl sgl;
+	struct list_head list;
+	size_t sg_num_bytes;		/* Bytes of data in that SGL */
+};
+
+/**
+ * struct af_alg_async_req - definition of crypto request
+ * @iocb: IOCB for AIO operations
+ * @sk: Socket the request is associated with
+ * @first_rsgl: First RX SG
+ * @last_rsgl: Pointer to last RX SG
+ * @rsgl_list: Track RX SGs
+ * @tsgl: Private, per request TX SGL of buffers to process
+ * @tsgl_entries: Number of entries in priv. TX SGL
+ * @outlen: Number of output bytes generated by crypto op
+ * @areqlen: Length of this data structure
+ * @cra_u: Cipher request
+ */
+struct af_alg_async_req {
+	struct kiocb *iocb;
+	struct sock *sk;
+
+	struct af_alg_rsgl first_rsgl;
+	struct af_alg_rsgl *last_rsgl;
+	struct list_head rsgl_list;
+
+	struct scatterlist *tsgl;
+	unsigned int tsgl_entries;
+
+	unsigned int outlen;
+	unsigned int areqlen;
+
+	union {
+		struct aead_request aead_req;
+		struct skcipher_request skcipher_req;
+	} cra_u;
+
+	/* req ctx trails this struct */
+};
+
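The cra_u union plus the trailing transform context is what lets one allocation serve both cipher types: callers pass the transform's reqsize on top of sizeof(struct af_alg_async_req), exactly as the skcipher recvmsg path above does. A hypothetical helper spelling out that layout:

/* Hypothetical helper illustrating the trailing request context. */
static struct af_alg_async_req *
example_alloc_skcipher_areq(struct sock *sk, struct crypto_skcipher *tfm)
{
	struct af_alg_async_req *areq;

	/* cra_u.skcipher_req is followed by crypto_skcipher_reqsize(tfm)
	 * bytes of transform-private request context */
	areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
				     crypto_skcipher_reqsize(tfm));
	if (IS_ERR(areq))
		return areq;

	skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
	return areq;
}
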
+/**
+ * struct af_alg_ctx - definition of the crypto context
+ *
+ * The crypto context tracks the input data during the lifetime of an AF_ALG
+ * socket.
+ *
+ * @tsgl_list: Link to TX SGL
+ * @iv: IV for cipher operation
+ * @aead_assoclen: Length of AAD for AEAD cipher operations
+ * @completion: Work queue for synchronous operation
+ * @used: TX bytes sent to kernel. This variable is used to
+ *	  ensure that user space cannot cause the kernel
+ *	  to allocate too much memory in sendmsg operation.
+ * @rcvused: Total RX bytes to be filled by kernel. This variable
+ *	     is used to ensure user space cannot cause the kernel
+ *	     to allocate too much memory in a recvmsg operation.
+ * @more: More data to be expected from user space?
+ * @merge: Shall new data from user space be merged into existing
+ *	   SG?
+ * @enc: Cryptographic operation to be performed when
+ *	 recvmsg is invoked.
+ * @len: Length of memory allocated for this data structure.
+ */
+struct af_alg_ctx {
+	struct list_head tsgl_list;
+
+	void *iv;
+	size_t aead_assoclen;
+
+	struct af_alg_completion completion;
+
+	size_t used;
+	size_t rcvused;
+
+	bool more;
+	bool merge;
+	bool enc;
+
+	unsigned int len;
+};
+
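A context of this shape is attached to each operation socket by the type-specific accept_parent handlers. A hedged sketch of that setup, modelled on skcipher_accept_parent_nokey() above, with the IV buffer allocation elided; example_accept_parent_nokey() is illustrative, not the actual body:

/*
 * Sketch of per-socket context setup (IV allocation elided).
 */
static int example_accept_parent_nokey(void *private, struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx;
	unsigned int len = sizeof(*ctx);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->tsgl_list);
	ctx->len = len;		/* later: sock_kfree_s(sk, ctx, ctx->len) */
	ctx->used = 0;
	ctx->rcvused = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	af_alg_init_completion(&ctx->completion);

	ask->private = ctx;
	return 0;
}
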
 int af_alg_register_type(const struct af_alg_type *type);
 int af_alg_unregister_type(const struct af_alg_type *type);
 
@@ -94,4 +190,78 @@ static inline void af_alg_init_completion(struct af_alg_completion *completion)
 	init_completion(&completion->completion);
 }
 
+/**
+ * Size of available buffer for sending data from user space to kernel.
+ *
+ * @sk socket of connection to user space
+ * @return number of bytes still available
+ */
+static inline int af_alg_sndbuf(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct af_alg_ctx *ctx = ask->private;
+
+	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
+			  ctx->used, 0);
+}
+
+/**
+ * Can the send buffer still be written to?
+ *
+ * @sk socket of connection to user space
+ * @return true => writable, false => not writable
+ */
+static inline bool af_alg_writable(struct sock *sk)
+{
+	return PAGE_SIZE <= af_alg_sndbuf(sk);
+}
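
As a worked example of this accounting (illustrative numbers, PAGE_SIZE assumed to be 4096): with sk->sk_sndbuf at 65536 and 61440 bytes already queued in ctx->used, af_alg_sndbuf() returns max(65536 - 61440, 0) = 4096, so af_alg_writable() still holds. Once one more page is queued, the headroom drops below PAGE_SIZE, af_alg_writable() turns false, and a subsequent sendmsg() blocks in af_alg_wait_for_wmem() until af_alg_pull_tsgl() releases TX bytes again.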
+
+/**
+ * Size of available buffer used by kernel for the RX user space operation.
+ *
+ * @sk socket of connection to user space
+ * @return number of bytes still available
+ */
+static inline int af_alg_rcvbuf(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct af_alg_ctx *ctx = ask->private;
+
+	return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
+			  ctx->rcvused, 0);
+}
+
+/**
+ * Can the RX buffer still be written to?
+ *
+ * @sk socket of connection to user space
+ * @return true => writable, false => not writable
+ */
+static inline bool af_alg_readable(struct sock *sk)
+{
+	return PAGE_SIZE <= af_alg_rcvbuf(sk);
+}
+
+int af_alg_alloc_tsgl(struct sock *sk);
+unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
+void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
+		      size_t dst_offset);
+void af_alg_free_areq_sgls(struct af_alg_async_req *areq);
+int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags);
+void af_alg_wmem_wakeup(struct sock *sk);
+int af_alg_wait_for_data(struct sock *sk, unsigned flags);
+void af_alg_data_wakeup(struct sock *sk);
+int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+		   unsigned int ivsize);
+ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
+			int offset, size_t size, int flags);
+void af_alg_async_cb(struct crypto_async_request *_req, int err);
+unsigned int af_alg_poll(struct file *file, struct socket *sock,
+			 poll_table *wait);
+struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
+					   unsigned int areqlen);
+int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
+		    struct af_alg_async_req *areq, size_t maxsize,
+		    size_t *outlen);
+
 #endif /* _CRYPTO_IF_ALG_H */
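
Finally, the new last_rsgl pointer exists so that a cipher type can append further SG lists behind the RX SGL that af_alg_get_rsgl() built; per the commit message, algif_aead uses this to chain the authentication-tag SGL into the RX SGL during decryption. A hedged sketch of that use, where example_chain_tag() and its tag_sgl argument are illustrative:

/* Sketch: chain an extra SGL (e.g. the AEAD tag) behind the RX SGL. */
static void example_chain_tag(struct af_alg_async_req *areq,
			      struct af_alg_sgl *tag_sgl)
{
	/* last_rsgl points at the final entry on areq->rsgl_list */
	if (areq->last_rsgl)
		af_alg_link_sg(&areq->last_rsgl->sgl, tag_sgl);
}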