author	Stephan Mueller <smueller@chronox.de>	2017-06-25 11:12:59 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2017-07-28 05:55:47 -0400
commit	d887c52d6ae43aeebd249b5f2f1333e60236aa60 (patch)
tree	900c6c96a8e7fba573a3d9d6a74a9e35096364d7 /crypto/algif_aead.c
parent	e870456d8e7c8d57c059ea479b5aadbb55ff4c3a (diff)
crypto: algif_aead - overhaul memory management
The updated memory management is described in the top part of the code.

As one benefit of the changed memory management, the AIO and synchronous
operation is now implemented in one common function. The AF_ALG operation
uses the async kernel crypto API interface for each cipher operation. Thus,
the only difference between the AIO and sync operation types visible from
user space is:

1. the callback function to be invoked when the asynchronous operation is
   completed

2. whether to wait for the completion of the kernel crypto API operation or
   not

The change includes the overhaul of the TX and RX SGL handling. The TX SGL
holding the data sent from user space to the kernel is now dynamic similar
to algif_skcipher. This dynamic nature allows a continuous operation of a
thread sending data and a second thread receiving the data. These threads
do not need to synchronize as the kernel processes as much data from the TX
SGL to fill the RX SGL.

The caller reading the data from the kernel defines the amount of data to
be processed. Considering that the interface covers AEAD authenticating
ciphers, the reader must provide the buffer in the correct size. Thus the
reader defines the encryption size.

Signed-off-by: Stephan Mueller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
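For orientation, the user-space view of the interface this patch reworks can
be sketched as follows. The sketch below is not part of the patch; it is a
minimal example assuming the gcm(aes) AEAD, an all-zero demo key and IV, the
default tag size, no associated data, and no error handling. sendmsg() only
queues data into the kernel's TX SGL; the subsequent read() supplies the RX
buffer and thereby defines how much data the kernel processes, as the commit
message describes.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "gcm(aes)",	/* example cipher choice */
	};
	unsigned char key[16] = { 0 };		/* demo key, all zero */
	unsigned char iv[12]  = { 0 };		/* demo IV, all zero */
	unsigned char pt[16]  = "0123456789abcde";
	unsigned char ct[sizeof(pt) + 16];	/* ciphertext + 16-byte tag */
	char cbuf[CMSG_SPACE(sizeof(uint32_t)) +
		  CMSG_SPACE(sizeof(struct af_alg_iv) + sizeof(iv))] = { 0 };
	struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
	struct msghdr msg = {
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
	};
	struct cmsghdr *cmsg;
	struct af_alg_iv *alg_iv;
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	/* Select the operation type and hand over the IV via cmsg. */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type	 = ALG_SET_OP;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(uint32_t));
	*(uint32_t *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type	 = ALG_SET_IV;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(*alg_iv) + sizeof(iv));
	alg_iv = (void *)CMSG_DATA(cmsg);
	alg_iv->ivlen = sizeof(iv);
	memcpy(alg_iv->iv, iv, sizeof(iv));

	/* sendmsg() only fills the kernel's TX SGL; no cipher op runs yet. */
	sendmsg(opfd, &msg, 0);

	/*
	 * read() triggers the crypto operation. The requested length --
	 * plaintext plus tag for encryption -- is how the reader defines
	 * the processed size.
	 */
	read(opfd, ct, sizeof(ct));

	close(opfd);
	close(tfmfd);
	return 0;
}

When the same recvmsg path is driven by AIO instead of a plain read(), the
kernel queues the request and completes it through aead_async_cb() rather
than waiting in af_alg_wait_for_completion().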
Diffstat (limited to 'crypto/algif_aead.c')
-rw-r--r--	crypto/algif_aead.c	766
1 file changed, 442 insertions(+), 324 deletions(-)
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index be117495eb43..9755aac0fe26 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -5,12 +5,26 @@
  *
  * This file provides the user-space API for AEAD ciphers.
  *
- * This file is derived from algif_skcipher.c.
- *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
  * any later version.
+ *
+ * The following concept of the memory management is used:
+ *
+ * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
+ * filled by user space with the data submitted via sendpage/sendmsg. Filling
+ * up the TX SGL does not cause a crypto operation -- the data will only be
+ * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
+ * provide a buffer which is tracked with the RX SGL.
+ *
+ * During the processing of the recvmsg operation, the cipher request is
+ * allocated and prepared. As part of the recvmsg operation, the processed
+ * TX buffers are extracted from the TX SGL into a separate SGL.
+ *
+ * After the completion of the crypto operation, the RX SGL and the cipher
+ * request is released. The extracted TX SGL parts are released together with
+ * the RX SGL release.
  */
 
 #include <crypto/internal/aead.h>
@@ -25,24 +39,32 @@
 #include <linux/net.h>
 #include <net/sock.h>
 
-struct aead_sg_list {
-	unsigned int cur;
-	struct scatterlist sg[ALG_MAX_PAGES];
+struct aead_tsgl {
+	struct list_head list;
+	unsigned int cur;		/* Last processed SG entry */
+	struct scatterlist sg[0];	/* Array of SGs forming the SGL */
};
 
-struct aead_async_rsgl {
+struct aead_rsgl {
 	struct af_alg_sgl sgl;
 	struct list_head list;
+	size_t sg_num_bytes;		/* Bytes of data in that SGL */
 };
 
 struct aead_async_req {
-	struct scatterlist *tsgl;
-	struct aead_async_rsgl first_rsgl;
-	struct list_head list;
 	struct kiocb *iocb;
 	struct sock *sk;
-	unsigned int tsgls;
-	char iv[];
+
+	struct aead_rsgl first_rsgl;	/* First RX SG */
+	struct list_head rsgl_list;	/* Track RX SGs */
+
+	struct scatterlist *tsgl;	/* priv. TX SGL of buffers to process */
+	unsigned int tsgl_entries;	/* number of entries in priv. TX SGL */
+
+	unsigned int outlen;		/* Filled output buf length */
+
+	unsigned int areqlen;		/* Length of this data struct */
+	struct aead_request aead_req;	/* req ctx trails this struct */
 };
 
 struct aead_tfm {
@@ -51,25 +73,26 @@ struct aead_tfm {
 };
 
 struct aead_ctx {
-	struct aead_sg_list tsgl;
-	struct aead_async_rsgl first_rsgl;
-	struct list_head list;
+	struct list_head tsgl_list;	/* Link to TX SGL */
 
 	void *iv;
+	size_t aead_assoclen;
 
-	struct af_alg_completion completion;
+	struct af_alg_completion completion;	/* sync work queue */
 
-	unsigned long used;
+	size_t used;		/* TX bytes sent to kernel */
+	size_t rcvused;		/* total RX bytes to be processed by kernel */
 
-	unsigned int len;
-	bool more;
-	bool merge;
-	bool enc;
+	bool more;		/* More data to be expected? */
+	bool merge;		/* Merge new data into existing SG */
+	bool enc;		/* Crypto operation: enc, dec */
 
-	size_t aead_assoclen;
-	struct aead_request aead_req;
+	unsigned int len;	/* Length of allocated memory for this struct */
 };
 
+#define MAX_SGL_ENTS ((4096 - sizeof(struct aead_tsgl)) / \
+		      sizeof(struct scatterlist) - 1)
+
 static inline int aead_sndbuf(struct sock *sk)
 {
 	struct alg_sock *ask = alg_sk(sk);
@@ -84,9 +107,29 @@ static inline bool aead_writable(struct sock *sk)
 	return PAGE_SIZE <= aead_sndbuf(sk);
 }
 
-static inline bool aead_sufficient_data(struct aead_ctx *ctx)
+static inline int aead_rcvbuf(struct sock *sk)
 {
-	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
+	struct alg_sock *ask = alg_sk(sk);
+	struct aead_ctx *ctx = ask->private;
+
+	return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
+			  ctx->rcvused, 0);
+}
+
+static inline bool aead_readable(struct sock *sk)
+{
+	return PAGE_SIZE <= aead_rcvbuf(sk);
+}
+
+static inline bool aead_sufficient_data(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct sock *psk = ask->parent;
+	struct alg_sock *pask = alg_sk(psk);
+	struct aead_ctx *ctx = ask->private;
+	struct aead_tfm *aeadc = pask->private;
+	struct crypto_aead *tfm = aeadc->aead;
+	unsigned int as = crypto_aead_authsize(tfm);
 
 	/*
 	 * The minimum amount of memory needed for an AEAD cipher is
@@ -95,33 +138,166 @@ static inline bool aead_sufficient_data(struct aead_ctx *ctx)
 	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
 }
 
-static void aead_reset_ctx(struct aead_ctx *ctx)
+static int aead_alloc_tsgl(struct sock *sk)
 {
-	struct aead_sg_list *sgl = &ctx->tsgl;
+	struct alg_sock *ask = alg_sk(sk);
+	struct aead_ctx *ctx = ask->private;
+	struct aead_tsgl *sgl;
+	struct scatterlist *sg = NULL;
 
-	sg_init_table(sgl->sg, ALG_MAX_PAGES);
-	sgl->cur = 0;
-	ctx->used = 0;
-	ctx->more = 0;
-	ctx->merge = 0;
+	sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl, list);
+	if (!list_empty(&ctx->tsgl_list))
+		sg = sgl->sg;
+
+	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
+		sgl = sock_kmalloc(sk, sizeof(*sgl) +
+				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
+				   GFP_KERNEL);
+		if (!sgl)
+			return -ENOMEM;
+
+		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
+		sgl->cur = 0;
+
+		if (sg)
+			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
+
+		list_add_tail(&sgl->list, &ctx->tsgl_list);
+	}
+
+	return 0;
+}
+
+static unsigned int aead_count_tsgl(struct sock *sk, size_t bytes)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct aead_ctx *ctx = ask->private;
+	struct aead_tsgl *sgl, *tmp;
+	unsigned int i;
+	unsigned int sgl_count = 0;
+
+	if (!bytes)
+		return 0;
+
+	list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) {
+		struct scatterlist *sg = sgl->sg;
+
+		for (i = 0; i < sgl->cur; i++) {
+			sgl_count++;
+			if (sg[i].length >= bytes)
+				return sgl_count;
+
+			bytes -= sg[i].length;
+		}
+	}
+
+	return sgl_count;
 }
 
-static void aead_put_sgl(struct sock *sk)
+static void aead_pull_tsgl(struct sock *sk, size_t used,
+			   struct scatterlist *dst)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	struct aead_ctx *ctx = ask->private;
-	struct aead_sg_list *sgl = &ctx->tsgl;
-	struct scatterlist *sg = sgl->sg;
+	struct aead_tsgl *sgl;
+	struct scatterlist *sg;
 	unsigned int i;
 
-	for (i = 0; i < sgl->cur; i++) {
-		if (!sg_page(sg + i))
+	while (!list_empty(&ctx->tsgl_list)) {
+		sgl = list_first_entry(&ctx->tsgl_list, struct aead_tsgl,
+				       list);
+		sg = sgl->sg;
+
+		for (i = 0; i < sgl->cur; i++) {
+			size_t plen = min_t(size_t, used, sg[i].length);
+			struct page *page = sg_page(sg + i);
+
+			if (!page)
+				continue;
+
+			/*
+			 * Assumption: caller created aead_count_tsgl(len)
+			 * SG entries in dst.
+			 */
+			if (dst)
+				sg_set_page(dst + i, page, plen, sg[i].offset);
+
+			sg[i].length -= plen;
+			sg[i].offset += plen;
+
+			used -= plen;
+			ctx->used -= plen;
+
+			if (sg[i].length)
+				return;
+
+			if (!dst)
+				put_page(page);
+			sg_assign_page(sg + i, NULL);
+		}
+
+		list_del(&sgl->list);
+		sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) *
+				      (MAX_SGL_ENTS + 1));
+	}
+
+	if (!ctx->used)
+		ctx->merge = 0;
+}
+
+static void aead_free_areq_sgls(struct aead_async_req *areq)
+{
+	struct sock *sk = areq->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct aead_ctx *ctx = ask->private;
+	struct aead_rsgl *rsgl, *tmp;
+	struct scatterlist *tsgl;
+	struct scatterlist *sg;
+	unsigned int i;
+
+	list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
+		ctx->rcvused -= rsgl->sg_num_bytes;
+		af_alg_free_sg(&rsgl->sgl);
+		list_del(&rsgl->list);
+		if (rsgl != &areq->first_rsgl)
+			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
+	}
+
+	tsgl = areq->tsgl;
+	for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
+		if (!sg_page(sg))
 			continue;
+		put_page(sg_page(sg));
+	}
+
+	if (areq->tsgl && areq->tsgl_entries)
+		sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
+}
+
+static int aead_wait_for_wmem(struct sock *sk, unsigned int flags)
+{
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+	int err = -ERESTARTSYS;
+	long timeout;
+
+	if (flags & MSG_DONTWAIT)
+		return -EAGAIN;
 
-		put_page(sg_page(sg + i));
-		sg_assign_page(sg + i, NULL);
+	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+
+	add_wait_queue(sk_sleep(sk), &wait);
+	for (;;) {
+		if (signal_pending(current))
+			break;
+		timeout = MAX_SCHEDULE_TIMEOUT;
+		if (sk_wait_event(sk, &timeout, aead_writable(sk), &wait)) {
+			err = 0;
+			break;
+		}
 	}
-	aead_reset_ctx(ctx);
+	remove_wait_queue(sk_sleep(sk), &wait);
+
+	return err;
 }
 
 static void aead_wmem_wakeup(struct sock *sk)
@@ -153,6 +329,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags)
 		return -EAGAIN;
 
 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+
 	add_wait_queue(sk_sleep(sk), &wait);
 	for (;;) {
 		if (signal_pending(current))
@@ -176,8 +353,6 @@ static void aead_data_wakeup(struct sock *sk)
 	struct aead_ctx *ctx = ask->private;
 	struct socket_wq *wq;
 
-	if (ctx->more)
-		return;
 	if (!ctx->used)
 		return;
 
@@ -195,15 +370,18 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
+	struct sock *psk = ask->parent;
+	struct alg_sock *pask = alg_sk(psk);
 	struct aead_ctx *ctx = ask->private;
-	unsigned ivsize =
-		crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
-	struct aead_sg_list *sgl = &ctx->tsgl;
+	struct aead_tfm *aeadc = pask->private;
+	struct crypto_aead *tfm = aeadc->aead;
+	unsigned int ivsize = crypto_aead_ivsize(tfm);
+	struct aead_tsgl *sgl;
 	struct af_alg_control con = {};
 	long copied = 0;
 	bool enc = 0;
 	bool init = 0;
-	int err = -EINVAL;
+	int err = 0;
 
 	if (msg->msg_controllen) {
 		err = af_alg_cmsg_send(msg, &con);
@@ -227,8 +405,10 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 	}
 
 	lock_sock(sk);
-	if (!ctx->more && ctx->used)
+	if (!ctx->more && ctx->used) {
+		err = -EINVAL;
 		goto unlock;
+	}
 
 	if (init) {
 		ctx->enc = enc;
@@ -239,11 +419,14 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 	}
 
 	while (size) {
+		struct scatterlist *sg;
 		size_t len = size;
-		struct scatterlist *sg = NULL;
+		size_t plen;
 
 		/* use the existing memory in an allocated page */
 		if (ctx->merge) {
+			sgl = list_entry(ctx->tsgl_list.prev,
+					 struct aead_tsgl, list);
 			sg = sgl->sg + sgl->cur - 1;
 			len = min_t(unsigned long, len,
 				    PAGE_SIZE - sg->offset - sg->length);
@@ -264,57 +447,60 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 		}
 
 		if (!aead_writable(sk)) {
-			/* user space sent too much data */
-			aead_put_sgl(sk);
-			err = -EMSGSIZE;
-			goto unlock;
+			err = aead_wait_for_wmem(sk, msg->msg_flags);
+			if (err)
+				goto unlock;
 		}
 
 		/* allocate a new page */
 		len = min_t(unsigned long, size, aead_sndbuf(sk));
-		while (len) {
-			size_t plen = 0;
 
-			if (sgl->cur >= ALG_MAX_PAGES) {
-				aead_put_sgl(sk);
-				err = -E2BIG;
-				goto unlock;
-			}
+		err = aead_alloc_tsgl(sk);
+		if (err)
+			goto unlock;
+
+		sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl,
+				 list);
+		sg = sgl->sg;
+		if (sgl->cur)
+			sg_unmark_end(sg + sgl->cur - 1);
+
+		do {
+			unsigned int i = sgl->cur;
 
-			sg = sgl->sg + sgl->cur;
 			plen = min_t(size_t, len, PAGE_SIZE);
 
-			sg_assign_page(sg, alloc_page(GFP_KERNEL));
-			err = -ENOMEM;
-			if (!sg_page(sg))
-				goto unlock;
+			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
+			if (!sg_page(sg + i)) {
+				err = -ENOMEM;
+				goto unlock;
+			}
 
-			err = memcpy_from_msg(page_address(sg_page(sg)),
+			err = memcpy_from_msg(page_address(sg_page(sg + i)),
 					      msg, plen);
 			if (err) {
-				__free_page(sg_page(sg));
-				sg_assign_page(sg, NULL);
+				__free_page(sg_page(sg + i));
+				sg_assign_page(sg + i, NULL);
 				goto unlock;
 			}
 
-			sg->offset = 0;
-			sg->length = plen;
+			sg[i].length = plen;
 			len -= plen;
 			ctx->used += plen;
 			copied += plen;
-			sgl->cur++;
 			size -= plen;
-			ctx->merge = plen & (PAGE_SIZE - 1);
-		}
+			sgl->cur++;
+		} while (len && sgl->cur < MAX_SGL_ENTS);
+
+		if (!size)
+			sg_mark_end(sg + sgl->cur - 1);
+
+		ctx->merge = plen & (PAGE_SIZE - 1);
 	}
 
 	err = 0;
 
 	ctx->more = msg->msg_flags & MSG_MORE;
-	if (!ctx->more && !aead_sufficient_data(ctx)) {
-		aead_put_sgl(sk);
-		err = -EMSGSIZE;
-	}
 
 unlock:
 	aead_data_wakeup(sk);
@@ -329,15 +515,12 @@ static ssize_t aead_sendpage(struct socket *sock, struct page *page,
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
 	struct aead_ctx *ctx = ask->private;
-	struct aead_sg_list *sgl = &ctx->tsgl;
+	struct aead_tsgl *sgl;
 	int err = -EINVAL;
 
 	if (flags & MSG_SENDPAGE_NOTLAST)
 		flags |= MSG_MORE;
 
-	if (sgl->cur >= ALG_MAX_PAGES)
-		return -E2BIG;
-
 	lock_sock(sk);
 	if (!ctx->more && ctx->used)
 		goto unlock;
@@ -346,13 +529,22 @@ static ssize_t aead_sendpage(struct socket *sock, struct page *page,
 		goto done;
 
 	if (!aead_writable(sk)) {
-		/* user space sent too much data */
-		aead_put_sgl(sk);
-		err = -EMSGSIZE;
-		goto unlock;
+		err = aead_wait_for_wmem(sk, flags);
+		if (err)
+			goto unlock;
 	}
 
+	err = aead_alloc_tsgl(sk);
+	if (err)
+		goto unlock;
+
 	ctx->merge = 0;
+	sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl, list);
+
+	if (sgl->cur)
+		sg_unmark_end(sgl->sg + sgl->cur - 1);
+
+	sg_mark_end(sgl->sg + sgl->cur);
 
 	get_page(page);
 	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
@@ -363,11 +555,6 @@ static ssize_t aead_sendpage(struct socket *sock, struct page *page,
 
 done:
 	ctx->more = flags & MSG_MORE;
-	if (!ctx->more && !aead_sufficient_data(ctx)) {
-		aead_put_sgl(sk);
-		err = -EMSGSIZE;
-	}
-
 unlock:
 	aead_data_wakeup(sk);
 	release_sock(sk);
@@ -375,204 +562,52 @@ unlock:
 	return err ?: size;
 }
 
-#define GET_ASYM_REQ(req, tfm) (struct aead_async_req *) \
-	((char *)req + sizeof(struct aead_request) + \
-	 crypto_aead_reqsize(tfm))
-
-#define GET_REQ_SIZE(tfm) sizeof(struct aead_async_req) + \
-	crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \
-	sizeof(struct aead_request)
-
 static void aead_async_cb(struct crypto_async_request *_req, int err)
 {
-	struct aead_request *req = _req->data;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
+	struct aead_async_req *areq = _req->data;
 	struct sock *sk = areq->sk;
-	struct scatterlist *sg = areq->tsgl;
-	struct aead_async_rsgl *rsgl;
 	struct kiocb *iocb = areq->iocb;
-	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
-
-	list_for_each_entry(rsgl, &areq->list, list) {
-		af_alg_free_sg(&rsgl->sgl);
-		if (rsgl != &areq->first_rsgl)
-			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
-	}
-
-	for (i = 0; i < areq->tsgls; i++)
-		put_page(sg_page(sg + i));
-
-	sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
-	sock_kfree_s(sk, req, reqlen);
-	__sock_put(sk);
-	iocb->ki_complete(iocb, err, err);
-}
-
-static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
-			      int flags)
-{
-	struct sock *sk = sock->sk;
-	struct alg_sock *ask = alg_sk(sk);
-	struct aead_ctx *ctx = ask->private;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
-	struct aead_async_req *areq;
-	struct aead_request *req = NULL;
-	struct aead_sg_list *sgl = &ctx->tsgl;
-	struct aead_async_rsgl *last_rsgl = NULL, *rsgl;
-	unsigned int as = crypto_aead_authsize(tfm);
-	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
-	int err = -ENOMEM;
-	unsigned long used;
-	size_t outlen = 0;
-	size_t usedpages = 0;
+	unsigned int resultlen;
 
 	lock_sock(sk);
-	if (ctx->more) {
-		err = aead_wait_for_data(sk, flags);
-		if (err)
-			goto unlock;
-	}
-
-	if (!aead_sufficient_data(ctx))
-		goto unlock;
-
-	used = ctx->used;
-	if (ctx->enc)
-		outlen = used + as;
-	else
-		outlen = used - as;
-
-	req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
-	if (unlikely(!req))
-		goto unlock;
-
-	areq = GET_ASYM_REQ(req, tfm);
-	memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
-	INIT_LIST_HEAD(&areq->list);
-	areq->iocb = msg->msg_iocb;
-	areq->sk = sk;
-	memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
-	aead_request_set_tfm(req, tfm);
-	aead_request_set_ad(req, ctx->aead_assoclen);
-	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				  aead_async_cb, req);
-	used -= ctx->aead_assoclen;
-
-	/* take over all tx sgls from ctx */
-	areq->tsgl = sock_kmalloc(sk,
-				  sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1),
-				  GFP_KERNEL);
-	if (unlikely(!areq->tsgl))
-		goto free;
-
-	sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1));
-	for (i = 0; i < sgl->cur; i++)
-		sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
-			    sgl->sg[i].length, sgl->sg[i].offset);
-
-	areq->tsgls = sgl->cur;
-
-	/* create rx sgls */
-	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
-		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
-				      (outlen - usedpages));
-
-		if (list_empty(&areq->list)) {
-			rsgl = &areq->first_rsgl;
-
-		} else {
-			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
-			if (unlikely(!rsgl)) {
-				err = -ENOMEM;
-				goto free;
-			}
-		}
-		rsgl->sgl.npages = 0;
-		list_add_tail(&rsgl->list, &areq->list);
-
-		/* make one iovec available as scatterlist */
-		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
-		if (err < 0)
-			goto free;
-
-		usedpages += err;
-
-		/* chain the new scatterlist with previous one */
-		if (last_rsgl)
-			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
-
-		last_rsgl = rsgl;
 
-		iov_iter_advance(&msg->msg_iter, err);
-	}
+	/* Buffer size written by crypto operation. */
+	resultlen = areq->outlen;
 
-	/* ensure output buffer is sufficiently large */
-	if (usedpages < outlen) {
-		err = -EINVAL;
-		goto unlock;
-	}
+	aead_free_areq_sgls(areq);
+	sock_kfree_s(sk, areq, areq->areqlen);
+	__sock_put(sk);
 
-	aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
-			       areq->iv);
-	err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
-	if (err) {
-		if (err == -EINPROGRESS) {
-			sock_hold(sk);
-			err = -EIOCBQUEUED;
-			aead_reset_ctx(ctx);
-			goto unlock;
-		} else if (err == -EBADMSG) {
-			aead_put_sgl(sk);
-		}
-		goto free;
-	}
-	aead_put_sgl(sk);
+	iocb->ki_complete(iocb, err ? err : resultlen, 0);
 
-free:
-	list_for_each_entry(rsgl, &areq->list, list) {
-		af_alg_free_sg(&rsgl->sgl);
-		if (rsgl != &areq->first_rsgl)
-			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
-	}
-	if (areq->tsgl)
-		sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
-	if (req)
-		sock_kfree_s(sk, req, reqlen);
-unlock:
-	aead_wmem_wakeup(sk);
 	release_sock(sk);
-	return err ? err : outlen;
 }
 
-static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
+static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
+			 size_t ignored, int flags)
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
+	struct sock *psk = ask->parent;
+	struct alg_sock *pask = alg_sk(psk);
 	struct aead_ctx *ctx = ask->private;
-	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
-	struct aead_sg_list *sgl = &ctx->tsgl;
-	struct aead_async_rsgl *last_rsgl = NULL;
-	struct aead_async_rsgl *rsgl, *tmp;
-	int err = -EINVAL;
-	unsigned long used = 0;
-	size_t outlen = 0;
-	size_t usedpages = 0;
-
-	lock_sock(sk);
+	struct aead_tfm *aeadc = pask->private;
+	struct crypto_aead *tfm = aeadc->aead;
+	unsigned int as = crypto_aead_authsize(tfm);
+	unsigned int areqlen =
+		sizeof(struct aead_async_req) + crypto_aead_reqsize(tfm);
+	struct aead_async_req *areq;
+	struct aead_rsgl *last_rsgl = NULL;
+	int err = 0;
+	size_t used = 0;		/* [in] TX bufs to be en/decrypted */
+	size_t outlen = 0;		/* [out] RX bufs produced by kernel */
+	size_t usedpages = 0;		/* [in] RX bufs to be used from user */
+	size_t processed = 0;		/* [in] TX bufs to be consumed */
 
 	/*
-	 * Please see documentation of aead_request_set_crypt for the
-	 * description of the AEAD memory structure expected from the caller.
+	 * Data length provided by caller via sendmsg/sendpage that has not
+	 * yet been processed.
 	 */
-
-	if (ctx->more) {
-		err = aead_wait_for_data(sk, flags);
-		if (err)
-			goto unlock;
-	}
-
-	/* data length provided by caller via sendmsg/sendpage */
 	used = ctx->used;
 
 	/*
@@ -584,8 +619,8 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 	 * the error message in sendmsg/sendpage and still call recvmsg. This
 	 * check here protects the kernel integrity.
 	 */
-	if (!aead_sufficient_data(ctx))
-		goto unlock;
+	if (!aead_sufficient_data(sk))
+		return -EINVAL;
 
 	/*
 	 * Calculate the minimum output buffer size holding the result of the
@@ -606,84 +641,170 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 	 */
 	used -= ctx->aead_assoclen;
 
-	/* convert iovecs of output buffers into scatterlists */
-	while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
-		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
-				      (outlen - usedpages));
+	/* Allocate cipher request for current operation. */
+	areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
+	if (unlikely(!areq))
+		return -ENOMEM;
+	areq->areqlen = areqlen;
+	areq->sk = sk;
+	INIT_LIST_HEAD(&areq->rsgl_list);
+	areq->tsgl = NULL;
+	areq->tsgl_entries = 0;
+
+	/* convert iovecs of output buffers into RX SGL */
+	while (outlen > usedpages && msg_data_left(msg)) {
+		struct aead_rsgl *rsgl;
+		size_t seglen;
+
+		/* limit the amount of readable buffers */
+		if (!aead_readable(sk))
+			break;
 
-		if (list_empty(&ctx->list)) {
-			rsgl = &ctx->first_rsgl;
+		if (!ctx->used) {
+			err = aead_wait_for_data(sk, flags);
+			if (err)
+				goto free;
+		}
+
+		seglen = min_t(size_t, (outlen - usedpages),
+			       msg_data_left(msg));
+
+		if (list_empty(&areq->rsgl_list)) {
+			rsgl = &areq->first_rsgl;
 		} else {
 			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
 			if (unlikely(!rsgl)) {
 				err = -ENOMEM;
-				goto unlock;
+				goto free;
 			}
 		}
+
 		rsgl->sgl.npages = 0;
-		list_add_tail(&rsgl->list, &ctx->list);
+		list_add_tail(&rsgl->list, &areq->rsgl_list);
 
 		/* make one iovec available as scatterlist */
 		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
 		if (err < 0)
-			goto unlock;
-		usedpages += err;
+			goto free;
+
 		/* chain the new scatterlist with previous one */
 		if (last_rsgl)
 			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
 
 		last_rsgl = rsgl;
-
+		usedpages += err;
+		ctx->rcvused += err;
+		rsgl->sg_num_bytes = err;
 		iov_iter_advance(&msg->msg_iter, err);
 	}
 
-	/* ensure output buffer is sufficiently large */
+	/*
+	 * Ensure output buffer is sufficiently large. If the caller provides
+	 * less buffer space, only use the relative required input size. This
+	 * allows AIO operation where the caller sent all data to be processed
+	 * and the AIO operation performs the operation on the different chunks
+	 * of the input data.
+	 */
 	if (usedpages < outlen) {
-		err = -EINVAL;
-		goto unlock;
-	}
+		size_t less = outlen - usedpages;
 
-	sg_mark_end(sgl->sg + sgl->cur - 1);
-	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
-			       used, ctx->iv);
-	aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);
+		if (used < less) {
+			err = -EINVAL;
+			goto free;
+		}
+		used -= less;
+		outlen -= less;
+	}
 
-	err = af_alg_wait_for_completion(ctx->enc ?
-					 crypto_aead_encrypt(&ctx->aead_req) :
-					 crypto_aead_decrypt(&ctx->aead_req),
+	/*
+	 * Create a per request TX SGL for this request which tracks the
+	 * SG entries from the global TX SGL.
+	 */
+	processed = used + ctx->aead_assoclen;
+	areq->tsgl_entries = aead_count_tsgl(sk, processed);
+	if (!areq->tsgl_entries)
+		areq->tsgl_entries = 1;
+	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
+				  GFP_KERNEL);
+	if (!areq->tsgl) {
+		err = -ENOMEM;
+		goto free;
+	}
+	sg_init_table(areq->tsgl, areq->tsgl_entries);
+	aead_pull_tsgl(sk, processed, areq->tsgl);
+
+	/* Initialize the crypto operation */
+	aead_request_set_crypt(&areq->aead_req, areq->tsgl,
+			       areq->first_rsgl.sgl.sg, used, ctx->iv);
+	aead_request_set_ad(&areq->aead_req, ctx->aead_assoclen);
+	aead_request_set_tfm(&areq->aead_req, tfm);
+
+	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
+		/* AIO operation */
+		areq->iocb = msg->msg_iocb;
+		aead_request_set_callback(&areq->aead_req,
+					  CRYPTO_TFM_REQ_MAY_BACKLOG,
+					  aead_async_cb, areq);
+		err = ctx->enc ? crypto_aead_encrypt(&areq->aead_req) :
+				 crypto_aead_decrypt(&areq->aead_req);
+	} else {
+		/* Synchronous operation */
+		aead_request_set_callback(&areq->aead_req,
+					  CRYPTO_TFM_REQ_MAY_BACKLOG,
+					  af_alg_complete, &ctx->completion);
+		err = af_alg_wait_for_completion(ctx->enc ?
+				crypto_aead_encrypt(&areq->aead_req) :
+				crypto_aead_decrypt(&areq->aead_req),
 					 &ctx->completion);
-
-	if (err) {
-		/* EBADMSG implies a valid cipher operation took place */
-		if (err == -EBADMSG)
-			aead_put_sgl(sk);
-
-		goto unlock;
 	}
 
-	aead_put_sgl(sk);
-	err = 0;
+	/* AIO operation in progress */
+	if (err == -EINPROGRESS) {
+		sock_hold(sk);
 
-unlock:
-	list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
-		af_alg_free_sg(&rsgl->sgl);
-		list_del(&rsgl->list);
-		if (rsgl != &ctx->first_rsgl)
-			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
+		/* Remember output size that will be generated. */
+		areq->outlen = outlen;
+
+		return -EIOCBQUEUED;
 	}
-	INIT_LIST_HEAD(&ctx->list);
-	aead_wmem_wakeup(sk);
-	release_sock(sk);
+
+free:
+	aead_free_areq_sgls(areq);
+	if (areq)
+		sock_kfree_s(sk, areq, areqlen);
 
 	return err ? err : outlen;
 }
 
-static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
-			int flags)
+static int aead_recvmsg(struct socket *sock, struct msghdr *msg,
+			size_t ignored, int flags)
 {
-	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
-		aead_recvmsg_async(sock, msg, flags) :
-		aead_recvmsg_sync(sock, msg, flags);
+	struct sock *sk = sock->sk;
+	int ret = 0;
+
+	lock_sock(sk);
+	while (msg_data_left(msg)) {
+		int err = _aead_recvmsg(sock, msg, ignored, flags);
+
+		/*
+		 * This error covers -EIOCBQUEUED which implies that we can
+		 * only handle one AIO request. If the caller wants to have
+		 * multiple AIO requests in parallel, he must make multiple
+		 * separate AIO calls.
+		 */
+		if (err <= 0) {
+			if (err == -EIOCBQUEUED || err == -EBADMSG)
+				ret = err;
+			goto out;
+		}
+
+		ret += err;
+	}
+
+out:
+	aead_wmem_wakeup(sk);
+	release_sock(sk);
+	return ret;
 }
 
 static unsigned int aead_poll(struct file *file, struct socket *sock,
@@ -874,11 +995,13 @@ static void aead_sock_destruct(struct sock *sk)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	struct aead_ctx *ctx = ask->private;
-	unsigned int ivlen = crypto_aead_ivsize(
-				crypto_aead_reqtfm(&ctx->aead_req));
+	struct sock *psk = ask->parent;
+	struct alg_sock *pask = alg_sk(psk);
+	struct aead_tfm *aeadc = pask->private;
+	struct crypto_aead *tfm = aeadc->aead;
+	unsigned int ivlen = crypto_aead_ivsize(tfm);
 
-	WARN_ON(refcount_read(&sk->sk_refcnt) != 0);
-	aead_put_sgl(sk);
+	aead_pull_tsgl(sk, ctx->used, NULL);
 	sock_kzfree_s(sk, ctx->iv, ivlen);
 	sock_kfree_s(sk, ctx, ctx->len);
 	af_alg_release_parent(sk);
@@ -890,7 +1013,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
 	struct alg_sock *ask = alg_sk(sk);
 	struct aead_tfm *tfm = private;
 	struct crypto_aead *aead = tfm->aead;
-	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(aead);
+	unsigned int len = sizeof(*ctx);
 	unsigned int ivlen = crypto_aead_ivsize(aead);
 
 	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
@@ -905,23 +1028,18 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
 	}
 	memset(ctx->iv, 0, ivlen);
 
+	INIT_LIST_HEAD(&ctx->tsgl_list);
 	ctx->len = len;
 	ctx->used = 0;
+	ctx->rcvused = 0;
 	ctx->more = 0;
 	ctx->merge = 0;
 	ctx->enc = 0;
-	ctx->tsgl.cur = 0;
 	ctx->aead_assoclen = 0;
 	af_alg_init_completion(&ctx->completion);
-	sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
-	INIT_LIST_HEAD(&ctx->list);
 
 	ask->private = ctx;
 
-	aead_request_set_tfm(&ctx->aead_req, aead);
-	aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				  af_alg_complete, &ctx->completion);
-
 	sk->sk_destruct = aead_sock_destruct;
 
 	return 0;
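As a worked example of the MAX_SGL_ENTS bound introduced by this patch
(figures are assumptions for a typical x86_64 build, where sizeof(struct
scatterlist) is 32 bytes and sizeof(struct aead_tsgl) rounds to 24 bytes):
MAX_SGL_ENTS = (4096 - 24) / 32 - 1 = 126. Each aead_tsgl allocation then
holds 126 usable SG entries plus one entry reserved for sg_chain() to link
to the next block, so the whole allocation (24 + 32 * 127 = 4088 bytes)
stays within a single 4 KiB page as requested from sock_kmalloc().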