author		Linus Torvalds <torvalds@linux-foundation.org>	2015-04-15 12:00:47 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-15 12:00:47 -0400
commit		6c373ca89399c5a3f7ef210ad8f63dc3437da345 (patch)
tree		74d1ec65087df1da1021b43ac51acc1ee8601809 /crypto
parent		bb0fd7ab0986105765d11baa82e619c618a235aa (diff)
parent		9f9151412dd7aae0e3f51a89ae4a1f8755fdb4d0 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Add BQL support to via-rhine, from Tino Reichardt.

 2) Integrate SWITCHDEV layer support into the DSA layer, so DSA
    drivers can support hw switch offloading. From Florian Fainelli.

 3) Allow 'ip address' commands to initiate multicast group join/leave,
    from Madhu Challa.

 4) Many ipv4 FIB lookup optimizations from Alexander Duyck.

 5) Support EBPF in cls_bpf classifier and act_bpf action, from Daniel
    Borkmann.

 6) Remove the ugly compat support in ARP for ugly layers like ax25,
    rose, etc.  And use this to clean up the neigh layer, then use it to
    implement MPLS support.  All from Eric Biederman.

 7) Support L3 forwarding offloading in switches, from Scott Feldman.

 8) Collapse the LOCAL and MAIN ipv4 FIB tables when possible, to speed
    up route lookups even further.  From Alexander Duyck.

 9) Many improvements and bug fixes to the rhashtable implementation,
    from Herbert Xu and Thomas Graf.  In particular, in the case where
    an rhashtable user bulk adds a large number of items into an empty
    table, we expand the table much more sanely.

10) Don't make the tcp_metrics hash table per-namespace, from Eric
    Biederman.

11) Extend EBPF to access SKB fields, from Alexei Starovoitov.

12) Split out new connection request sockets so that they can be
    established in the main hash table.  Much less false sharing since
    hash lookups go direct to the request sockets instead of having to
    go first to the listener then to the request socks hashed
    underneath.  From Eric Dumazet.

13) Add async I/O support for crypto AF_ALG sockets, from Tadeusz
    Struk.

14) Support stable privacy address generation for RFC7217 in IPV6.
    From Hannes Frederic Sowa.

15) Hash network namespace into IP frag IDs, also from Hannes Frederic
    Sowa.

16) Convert PTP get/set methods to use 64-bit time, from Richard
    Cochran.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1816 commits)
  fm10k: Bump driver version to 0.15.2
  fm10k: corrected VF multicast update
  fm10k: mbx_update_max_size does not drop all oversized messages
  fm10k: reset head instead of calling update_max_size
  fm10k: renamed mbx_tx_dropped to mbx_tx_oversized
  fm10k: update xcast mode before synchronizing multicast addresses
  fm10k: start service timer on probe
  fm10k: fix function header comment
  fm10k: comment next_vf_mbx flow
  fm10k: don't handle mailbox events in iov_event path and always process mailbox
  fm10k: use separate workqueue for fm10k driver
  fm10k: Set PF queues to unlimited bandwidth during virtualization
  fm10k: expose tx_timeout_count as an ethtool stat
  fm10k: only increment tx_timeout_count in Tx hang path
  fm10k: remove extraneous "Reset interface" message
  fm10k: separate PF only stats so that VF does not display them
  fm10k: use hw->mac.max_queues for stats
  fm10k: only show actual queues, not the maximum in hardware
  fm10k: allow creation of VLAN on default vid
  fm10k: fix unused warnings
  ...
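The crypto/ portion of this merge is item 13: async I/O support for AF_ALG sockets. For orientation, here is a minimal *synchronous* AF_ALG skcipher user, a hedged sketch rather than anything from this merge: the cbc(aes) choice, the all-zero demo key/IV, and the stripped error handling are all assumptions. The diff below adds the machinery that lets the final read be submitted asynchronously as well.

/*
 * Sketch only: minimal synchronous AF_ALG skcipher user.
 * Demo key/IV are all zero; error handling omitted for brevity.
 */
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "cbc(aes)",
	};
	char key[16] = { 0 }, iv[16] = { 0 };	/* demo values only */
	char pt[16] = "0123456789abcdef", ct[16];
	char cbuf[CMSG_SPACE(4) +
		  CMSG_SPACE(offsetof(struct af_alg_iv, iv) + 16)] = { 0 };

	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	int opfd = accept(tfmfd, NULL, 0);

	/* Operation type and IV travel as ancillary data on the op socket. */
	struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
	struct msghdr msg = {
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
		.msg_iov = &iov, .msg_iovlen = 1,
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(offsetof(struct af_alg_iv, iv) + sizeof(iv));
	struct af_alg_iv *alg_iv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	alg_iv->ivlen = sizeof(iv);
	memcpy(alg_iv->iv, iv, sizeof(iv));

	sendmsg(opfd, &msg, 0);
	read(opfd, ct, sizeof(ct));	/* the synchronous recvmsg path */

	close(opfd);
	close(tfmfd);
	return 0;
}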
Diffstat (limited to 'crypto')
-rw-r--r--	crypto/af_alg.c		 18
-rw-r--r--	crypto/algif_hash.c	 12
-rw-r--r--	crypto/algif_rng.c	  4
-rw-r--r--	crypto/algif_skcipher.c	245
4 files changed, 255 insertions, 24 deletions
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 7f8b7edcadca..f22cc56fd1b3 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -358,8 +358,8 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
 	npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (WARN_ON(npages == 0))
 		return -EINVAL;
-
-	sg_init_table(sgl->sg, npages);
+	/* Add one extra for linking */
+	sg_init_table(sgl->sg, npages + 1);
 
 	for (i = 0, len = n; i < npages; i++) {
 		int plen = min_t(int, len, PAGE_SIZE - off);
@@ -369,18 +369,26 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
 		off = 0;
 		len -= plen;
 	}
+	sg_mark_end(sgl->sg + npages - 1);
+	sgl->npages = npages;
+
 	return n;
 }
 EXPORT_SYMBOL_GPL(af_alg_make_sg);
 
+void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new)
+{
+	sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
+	sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
+}
+EXPORT_SYMBOL_GPL(af_alg_link_sg);
+
 void af_alg_free_sg(struct af_alg_sgl *sgl)
 {
 	int i;
 
-	i = 0;
-	do {
-		put_page(sgl->pages[i]);
-	} while (!sg_is_last(sgl->sg + (i++)));
+	for (i = 0; i < sgl->npages; i++)
+		put_page(sgl->pages[i]);
 }
 EXPORT_SYMBOL_GPL(af_alg_free_sg);
 
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 01da360bdb55..1396ad0787fc 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -34,8 +34,8 @@ struct hash_ctx {
 	struct ahash_request req;
 };
 
-static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
-			struct msghdr *msg, size_t ignored)
+static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
+			size_t ignored)
 {
 	int limit = ALG_MAX_PAGES * PAGE_SIZE;
 	struct sock *sk = sock->sk;
@@ -56,8 +56,8 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
 
 	ctx->more = 0;
 
-	while (iov_iter_count(&msg->msg_iter)) {
-		int len = iov_iter_count(&msg->msg_iter);
+	while (msg_data_left(msg)) {
+		int len = msg_data_left(msg);
 
 		if (len > limit)
 			len = limit;
@@ -139,8 +139,8 @@ unlock:
 	return err ?: size;
 }
 
-static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
-			struct msghdr *msg, size_t len, int flags)
+static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+			int flags)
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c
index 67f612cfed97..3acba0a7cd55 100644
--- a/crypto/algif_rng.c
+++ b/crypto/algif_rng.c
@@ -55,8 +55,8 @@ struct rng_ctx {
 	struct crypto_rng *drng;
 };
 
-static int rng_recvmsg(struct kiocb *unused, struct socket *sock,
-		       struct msghdr *msg, size_t len, int flags)
+static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+		       int flags)
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 0c8a1e5ccadf..945075292bc9 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -39,6 +39,7 @@ struct skcipher_ctx {
 
 	struct af_alg_completion completion;
 
+	atomic_t inflight;
 	unsigned used;
 
 	unsigned int len;
@@ -49,9 +50,65 @@ struct skcipher_ctx {
 	struct ablkcipher_request req;
 };
 
+struct skcipher_async_rsgl {
+	struct af_alg_sgl sgl;
+	struct list_head list;
+};
+
+struct skcipher_async_req {
+	struct kiocb *iocb;
+	struct skcipher_async_rsgl first_sgl;
+	struct list_head list;
+	struct scatterlist *tsg;
+	char iv[];
+};
+
+#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
+	crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req)))
+
+#define GET_REQ_SIZE(ctx) \
+	crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req))
+
+#define GET_IV_SIZE(ctx) \
+	crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(&ctx->req))
+
 #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
 		      sizeof(struct scatterlist) - 1)
 
+static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
+{
+	struct skcipher_async_rsgl *rsgl, *tmp;
+	struct scatterlist *sgl;
+	struct scatterlist *sg;
+	int i, n;
+
+	list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
+		af_alg_free_sg(&rsgl->sgl);
+		if (rsgl != &sreq->first_sgl)
+			kfree(rsgl);
+	}
+	sgl = sreq->tsg;
+	n = sg_nents(sgl);
+	for_each_sg(sgl, sg, n, i)
+		put_page(sg_page(sg));
+
+	kfree(sreq->tsg);
+}
+
+static void skcipher_async_cb(struct crypto_async_request *req, int err)
+{
+	struct sock *sk = req->data;
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
+	struct kiocb *iocb = sreq->iocb;
+
+	atomic_dec(&ctx->inflight);
+	skcipher_free_async_sgls(sreq);
+	kfree(req);
+	iocb->ki_complete(iocb, err, err);
+}
+
 static inline int skcipher_sndbuf(struct sock *sk)
 {
 	struct alg_sock *ask = alg_sk(sk);
@@ -96,7 +153,7 @@ static int skcipher_alloc_sgl(struct sock *sk)
 	return 0;
 }
 
-static void skcipher_pull_sgl(struct sock *sk, int used)
+static void skcipher_pull_sgl(struct sock *sk, int used, int put)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	struct skcipher_ctx *ctx = ask->private;
@@ -123,8 +180,8 @@ static void skcipher_pull_sgl(struct sock *sk, int used)
 
 			if (sg[i].length)
 				return;
-
-			put_page(sg_page(sg + i));
+			if (put)
+				put_page(sg_page(sg + i));
 			sg_assign_page(sg + i, NULL);
 		}
 
@@ -143,7 +200,7 @@ static void skcipher_free_sgl(struct sock *sk)
 	struct alg_sock *ask = alg_sk(sk);
 	struct skcipher_ctx *ctx = ask->private;
 
-	skcipher_pull_sgl(sk, ctx->used);
+	skcipher_pull_sgl(sk, ctx->used, 1);
 }
 
 static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
@@ -239,8 +296,8 @@ static void skcipher_data_wakeup(struct sock *sk)
 	rcu_read_unlock();
 }
 
-static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
-			    struct msghdr *msg, size_t size)
+static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
+			    size_t size)
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
@@ -424,8 +481,153 @@ unlock:
 	return err ?: size;
 }
 
-static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
-			    struct msghdr *msg, size_t ignored, int flags)
+static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
+{
+	struct skcipher_sg_list *sgl;
+	struct scatterlist *sg;
+	int nents = 0;
+
+	list_for_each_entry(sgl, &ctx->tsgl, list) {
+		sg = sgl->sg;
+
+		while (!sg->length)
+			sg++;
+
+		nents += sg_nents(sg);
+	}
+	return nents;
+}
+
+static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
+				  int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct skcipher_sg_list *sgl;
+	struct scatterlist *sg;
+	struct skcipher_async_req *sreq;
+	struct ablkcipher_request *req;
+	struct skcipher_async_rsgl *last_rsgl = NULL;
+	unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
+	unsigned int reqlen = sizeof(struct skcipher_async_req) +
+				GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
+	int err = -ENOMEM;
+	bool mark = false;
+
+	lock_sock(sk);
+	req = kmalloc(reqlen, GFP_KERNEL);
+	if (unlikely(!req))
+		goto unlock;
+
+	sreq = GET_SREQ(req, ctx);
+	sreq->iocb = msg->msg_iocb;
+	memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
+	INIT_LIST_HEAD(&sreq->list);
+	sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
+	if (unlikely(!sreq->tsg)) {
+		kfree(req);
+		goto unlock;
+	}
+	sg_init_table(sreq->tsg, tx_nents);
+	memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
+	ablkcipher_request_set_tfm(req, crypto_ablkcipher_reqtfm(&ctx->req));
+	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+					skcipher_async_cb, sk);
+
+	while (iov_iter_count(&msg->msg_iter)) {
+		struct skcipher_async_rsgl *rsgl;
+		int used;
+
+		if (!ctx->used) {
+			err = skcipher_wait_for_data(sk, flags);
+			if (err)
+				goto free;
+		}
+		sgl = list_first_entry(&ctx->tsgl,
+				       struct skcipher_sg_list, list);
+		sg = sgl->sg;
+
+		while (!sg->length)
+			sg++;
+
+		used = min_t(unsigned long, ctx->used,
+			     iov_iter_count(&msg->msg_iter));
+		used = min_t(unsigned long, used, sg->length);
+
+		if (txbufs == tx_nents) {
+			struct scatterlist *tmp;
+			int x;
+			/* Ran out of tx slots in async request
+			 * need to expand */
+			tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
+				      GFP_KERNEL);
+			if (!tmp)
+				goto free;
+
+			sg_init_table(tmp, tx_nents * 2);
+			for (x = 0; x < tx_nents; x++)
+				sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
+					    sreq->tsg[x].length,
+					    sreq->tsg[x].offset);
+			kfree(sreq->tsg);
+			sreq->tsg = tmp;
+			tx_nents *= 2;
+			mark = true;
+		}
+		/* Need to take over the tx sgl from ctx
+		 * to the asynch req - these sgls will be freed later */
+		sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
+			    sg->offset);
+
+		if (list_empty(&sreq->list)) {
+			rsgl = &sreq->first_sgl;
+			list_add_tail(&rsgl->list, &sreq->list);
+		} else {
+			rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
+			if (!rsgl) {
+				err = -ENOMEM;
+				goto free;
+			}
+			list_add_tail(&rsgl->list, &sreq->list);
+		}
+
+		used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
+		err = used;
+		if (used < 0)
+			goto free;
+		if (last_rsgl)
+			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
+
+		last_rsgl = rsgl;
+		len += used;
+		skcipher_pull_sgl(sk, used, 0);
+		iov_iter_advance(&msg->msg_iter, used);
+	}
+
+	if (mark)
+		sg_mark_end(sreq->tsg + txbufs - 1);
+
+	ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
+				     len, sreq->iv);
+	err = ctx->enc ? crypto_ablkcipher_encrypt(req) :
+			 crypto_ablkcipher_decrypt(req);
+	if (err == -EINPROGRESS) {
+		atomic_inc(&ctx->inflight);
+		err = -EIOCBQUEUED;
+		goto unlock;
+	}
+free:
+	skcipher_free_async_sgls(sreq);
+	kfree(req);
+unlock:
+	skcipher_wmem_wakeup(sk);
+	release_sock(sk);
+	return err;
+}
+
+static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
+				 int flags)
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
@@ -439,7 +641,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
 	long copied = 0;
 
 	lock_sock(sk);
-	while (iov_iter_count(&msg->msg_iter)) {
+	while (msg_data_left(msg)) {
 		sgl = list_first_entry(&ctx->tsgl,
 				       struct skcipher_sg_list, list);
 		sg = sgl->sg;
@@ -453,7 +655,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
 			goto unlock;
 		}
 
-		used = min_t(unsigned long, ctx->used, iov_iter_count(&msg->msg_iter));
+		used = min_t(unsigned long, ctx->used, msg_data_left(msg));
 
 		used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
 		err = used;
@@ -484,7 +686,7 @@ free:
 		goto unlock;
 
 		copied += used;
-		skcipher_pull_sgl(sk, used);
+		skcipher_pull_sgl(sk, used, 1);
 		iov_iter_advance(&msg->msg_iter, used);
 	}
 
@@ -497,6 +699,13 @@ unlock:
 	return copied ?: err;
 }
 
+static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
+			    size_t ignored, int flags)
+{
+	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
+		skcipher_recvmsg_async(sock, msg, flags) :
+		skcipher_recvmsg_sync(sock, msg, flags);
+}
 
 static unsigned int skcipher_poll(struct file *file, struct socket *sock,
 				  poll_table *wait)
@@ -555,12 +764,25 @@ static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
 	return crypto_ablkcipher_setkey(private, key, keylen);
 }
 
+static void skcipher_wait(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	int ctr = 0;
+
+	while (atomic_read(&ctx->inflight) && ctr++ < 100)
+		msleep(100);
+}
+
 static void skcipher_sock_destruct(struct sock *sk)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	struct skcipher_ctx *ctx = ask->private;
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
 
+	if (atomic_read(&ctx->inflight))
+		skcipher_wait(sk);
+
 	skcipher_free_sgl(sk);
 	sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
 	sock_kfree_s(sk, ctx, ctx->len);
@@ -592,6 +814,7 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
 	ctx->more = 0;
 	ctx->merge = 0;
 	ctx->enc = 0;
+	atomic_set(&ctx->inflight, 0);
 	af_alg_init_completion(&ctx->completion);
 
 	ask->private = ctx;
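The skcipher_recvmsg() dispatch above keys off msg->msg_iocb, which the socket layer fills in from the submitting kiocb when the read arrives via AIO rather than read(2)/recvmsg(2). A hedged userspace sketch of driving the new async path with libaio (the read_async() helper is mine, not from this merge):

#include <libaio.h>
#include <stddef.h>

/* Submit one read on the AF_ALG op socket through the kernel AIO
 * interface; a non-sync kiocb reaches skcipher_recvmsg_async(). */
static int read_async(int opfd, void *buf, size_t len)
{
	io_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;

	if (io_setup(1, &ctx))
		return -1;
	io_prep_pread(&cb, opfd, buf, len, 0);
	if (io_submit(ctx, 1, cbs) != 1)
		return -1;
	/* Completion fires from skcipher_async_cb() via ki_complete(). */
	if (io_getevents(ctx, 1, 1, &ev, NULL) != 1)
		return -1;
	io_destroy(ctx);
	return (int)ev.res;
}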