author    Stephan Mueller <smueller@chronox.de>    2015-02-28 14:50:00 -0500
committer Herbert Xu <herbert@gondor.apana.org.au> 2015-03-04 04:12:39 -0500
commit    400c40cf78da00c16e561a3a253ca272455c42ef (patch)
tree      f60d7792027d5cadfe91f0ef334191977a8b2ddd /crypto
parent    7b24d97f16f561cc90eab1658100598d54a414fd (diff)
crypto: algif - add AEAD support
This patch adds AEAD support to AF_ALG. The implementation is based on
algif_skcipher, but contains heavy modifications to streamline the
interface for AEAD use. To use AEAD, the user-space consumer has to use
the salg_type named "aead".

The AEAD implementation includes some overhead to calculate the size of
the ciphertext, because the AEAD implementation of the kernel crypto API
makes an implicit assumption about the location of the authentication
tag. When performing an encryption, the tag is added to the created
ciphertext (the tag is placed adjacent to the ciphertext). For
decryption, the caller must hand in the ciphertext with the tag appended
to it. Therefore, the selection of the used memory needs to add/subtract
the tag size from the source/destination buffers depending on the
direction of the operation. The code is provided with comments explaining
when and how that operation is performed.

A fully working example using all aspects of AEAD is provided at
http://www.chronox.de/libkcapi.html

Signed-off-by: Stephan Mueller <smueller@chronox.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
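For reference, below is a minimal user-space sketch (not part of the patch)
of the interface this commit adds: it binds an "aead" socket to gcm(aes),
sets a placeholder all-zero key and IV, and encrypts 16 bytes of plaintext
with 8 bytes of associated data, reading back the ciphertext plus the
16-byte tag. ALG_SET_OP, ALG_SET_IV and ALG_SET_AEAD_ASSOCLEN come from
linux/if_alg.h; all error handling is omitted and the key/IV/data values
are dummies. See the libkcapi link above for a complete example.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef AF_ALG
#define AF_ALG 38
#endif
#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type   = "aead",          /* salg_type added by this patch */
                .salg_name   = "gcm(aes)",
        };
        unsigned char key[16] = { 0 };          /* dummy AES-128 key */
        unsigned char ad[8] = { 0 };            /* associated data */
        unsigned char pt[16] = { 0 };           /* plaintext */
        unsigned char out[16 + 16];             /* ciphertext || 16-byte tag */
        char cbuf[CMSG_SPACE(4) + CMSG_SPACE(4 + 12) + CMSG_SPACE(4)] = { 0 };
        struct iovec iov[2] = {
                { .iov_base = ad, .iov_len = sizeof(ad) },  /* input is AD || PT */
                { .iov_base = pt, .iov_len = sizeof(pt) },
        };
        struct msghdr msg = {
                .msg_control = cbuf,
                .msg_controllen = sizeof(cbuf),
                .msg_iov = iov,
                .msg_iovlen = 2,
        };
        struct af_alg_iv *ivp;
        struct cmsghdr *cmsg;
        int tfmfd, opfd;

        tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
        setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
        opfd = accept(tfmfd, NULL, 0);

        /* first cmsg: select the encryption operation */
        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_ALG;
        cmsg->cmsg_type = ALG_SET_OP;
        cmsg->cmsg_len = CMSG_LEN(4);
        *(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

        /* second cmsg: the 12-byte GCM nonce */
        cmsg = CMSG_NXTHDR(&msg, cmsg);
        cmsg->cmsg_level = SOL_ALG;
        cmsg->cmsg_type = ALG_SET_IV;
        cmsg->cmsg_len = CMSG_LEN(4 + 12);
        ivp = (struct af_alg_iv *)CMSG_DATA(cmsg);
        ivp->ivlen = 12;
        memset(ivp->iv, 0, 12);                 /* dummy IV */

        /* third cmsg: length of the AD prefix in the data stream */
        cmsg = CMSG_NXTHDR(&msg, cmsg);
        cmsg->cmsg_level = SOL_ALG;
        cmsg->cmsg_type = ALG_SET_AEAD_ASSOCLEN;
        cmsg->cmsg_len = CMSG_LEN(4);
        *(__u32 *)CMSG_DATA(cmsg) = sizeof(ad);

        sendmsg(opfd, &msg, 0);
        /* output is the ciphertext with the tag appended, per the memory
         * layout comments in algif_aead.c below */
        read(opfd, out, sizeof(out));

        close(opfd);
        close(tfmfd);
        return 0;
}

For decryption, the same sequence applies with ALG_OP_DECRYPT and the tag
appended to the ciphertext in the input stream; a tag mismatch is reported
as EBADMSG.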
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/algif_aead.c | 666
1 file changed, 666 insertions(+), 0 deletions(-)
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
new file mode 100644
index 000000000000..527d27b023ab
--- /dev/null
+++ b/crypto/algif_aead.c
@@ -0,0 +1,666 @@
/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This file is derived from algif_skcipher.c.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct aead_sg_list {
        unsigned int cur;
        struct scatterlist sg[ALG_MAX_PAGES];
};

struct aead_ctx {
        struct aead_sg_list tsgl;
        /*
         * RSGL_MAX_ENTRIES is an artificial limit where user space at maximum
         * can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES
         * pages
         */
#define RSGL_MAX_ENTRIES ALG_MAX_PAGES
        struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES];

        void *iv;

        struct af_alg_completion completion;

        unsigned long used;     /* bytes of data queued in tsgl */

        unsigned int len;       /* size of this context incl. request */
        bool more;              /* more data expected (MSG_MORE) */
        bool merge;             /* append into last partially filled page */
        bool enc;               /* encrypt (true) or decrypt (false) */

        size_t aead_assoclen;   /* length of the associated data prefix */
        struct aead_request aead_req;
};
55
56static inline int aead_sndbuf(struct sock *sk)
57{
58 struct alg_sock *ask = alg_sk(sk);
59 struct aead_ctx *ctx = ask->private;
60
61 return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
62 ctx->used, 0);
63}
64
65static inline bool aead_writable(struct sock *sk)
66{
67 return PAGE_SIZE <= aead_sndbuf(sk);
68}
69
static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
        unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));

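        /* for decryption, the input must also hold the authentication tag */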
        return (ctx->used >= (ctx->aead_assoclen + (ctx->enc ? 0 : as)));
}

static void aead_put_sgl(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        struct aead_sg_list *sgl = &ctx->tsgl;
        struct scatterlist *sg = sgl->sg;
        unsigned int i;

        for (i = 0; i < sgl->cur; i++) {
                if (!sg_page(sg + i))
                        continue;

                put_page(sg_page(sg + i));
                sg_assign_page(sg + i, NULL);
        }
        sgl->cur = 0;
        ctx->used = 0;
        ctx->more = 0;
        ctx->merge = 0;
}

static void aead_wmem_wakeup(struct sock *sk)
{
        struct socket_wq *wq;

        if (!aead_writable(sk))
                return;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
                                                           POLLRDNORM |
                                                           POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        rcu_read_unlock();
}

static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        long timeout;
        DEFINE_WAIT(wait);
        int err = -ERESTARTSYS;

        if (flags & MSG_DONTWAIT)
                return -EAGAIN;

        set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

        for (;;) {
                if (signal_pending(current))
                        break;
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                timeout = MAX_SCHEDULE_TIMEOUT;
                if (sk_wait_event(sk, &timeout, !ctx->more)) {
                        err = 0;
                        break;
                }
        }
        finish_wait(sk_sleep(sk), &wait);

        clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

        return err;
}

static void aead_data_wakeup(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        struct socket_wq *wq;

        if (ctx->more)
                return;
        if (!ctx->used)
                return;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
                                                           POLLRDNORM |
                                                           POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        rcu_read_unlock();
}

static int aead_sendmsg(struct kiocb *unused, struct socket *sock,
                        struct msghdr *msg, size_t size)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        unsigned ivsize =
                crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
        struct aead_sg_list *sgl = &ctx->tsgl;
        struct af_alg_control con = {};
        long copied = 0;
        bool enc = 0;
        bool init = 0;
        int err = -EINVAL;

        if (msg->msg_controllen) {
                err = af_alg_cmsg_send(msg, &con);
                if (err)
                        return err;

                init = 1;
                switch (con.op) {
                case ALG_OP_ENCRYPT:
                        enc = 1;
                        break;
                case ALG_OP_DECRYPT:
                        enc = 0;
                        break;
                default:
                        return -EINVAL;
                }

                if (con.iv && con.iv->ivlen != ivsize)
                        return -EINVAL;
        }

        lock_sock(sk);
        if (!ctx->more && ctx->used)
                goto unlock;

        if (init) {
                ctx->enc = enc;
                if (con.iv)
                        memcpy(ctx->iv, con.iv->iv, ivsize);

                ctx->aead_assoclen = con.aead_assoclen;
        }

        while (size) {
                unsigned long len = size;
                struct scatterlist *sg = NULL;

                /* use the existing memory in an allocated page */
                if (ctx->merge) {
                        sg = sgl->sg + sgl->cur - 1;
                        len = min_t(unsigned long, len,
                                    PAGE_SIZE - sg->offset - sg->length);
                        err = memcpy_from_msg(page_address(sg_page(sg)) +
                                              sg->offset + sg->length,
                                              msg, len);
                        if (err)
                                goto unlock;

                        sg->length += len;
                        ctx->merge = (sg->offset + sg->length) &
                                     (PAGE_SIZE - 1);

                        ctx->used += len;
                        copied += len;
                        size -= len;
                        continue;
                }

                if (!aead_writable(sk)) {
                        /* user space sent too much data */
                        aead_put_sgl(sk);
                        err = -EMSGSIZE;
                        goto unlock;
                }

                /* allocate a new page */
                len = min_t(unsigned long, size, aead_sndbuf(sk));
                while (len) {
                        int plen = 0;

                        if (sgl->cur >= ALG_MAX_PAGES) {
                                aead_put_sgl(sk);
                                err = -E2BIG;
                                goto unlock;
                        }

                        sg = sgl->sg + sgl->cur;
                        plen = min_t(int, len, PAGE_SIZE);

                        sg_assign_page(sg, alloc_page(GFP_KERNEL));
                        err = -ENOMEM;
                        if (!sg_page(sg))
                                goto unlock;

                        err = memcpy_from_msg(page_address(sg_page(sg)),
                                              msg, plen);
                        if (err) {
                                __free_page(sg_page(sg));
                                sg_assign_page(sg, NULL);
                                goto unlock;
                        }

                        sg->offset = 0;
                        sg->length = plen;
                        len -= plen;
                        ctx->used += plen;
                        copied += plen;
                        sgl->cur++;
                        size -= plen;
                        ctx->merge = plen & (PAGE_SIZE - 1);
                }
        }

        err = 0;

        ctx->more = msg->msg_flags & MSG_MORE;
        if (!ctx->more && !aead_sufficient_data(ctx)) {
                aead_put_sgl(sk);
                err = -EMSGSIZE;
        }

unlock:
        aead_data_wakeup(sk);
        release_sock(sk);

        return err ?: copied;
}

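/*
 * Sendpage is reached via the splice(2)/sendfile(2) path and queues page
 * references directly instead of copying the data as aead_sendmsg() does;
 * user space can, e.g., vmsplice() a buffer into a pipe and splice() the
 * pipe onto the socket.
 */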
static ssize_t aead_sendpage(struct socket *sock, struct page *page,
                             int offset, size_t size, int flags)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        struct aead_sg_list *sgl = &ctx->tsgl;
        int err = -EINVAL;

        if (flags & MSG_SENDPAGE_NOTLAST)
                flags |= MSG_MORE;

        if (sgl->cur >= ALG_MAX_PAGES)
                return -E2BIG;

        lock_sock(sk);
        if (!ctx->more && ctx->used)
                goto unlock;

        if (!size)
                goto done;

        if (!aead_writable(sk)) {
                /* user space sent too much data */
                aead_put_sgl(sk);
                err = -EMSGSIZE;
                goto unlock;
        }

        ctx->merge = 0;

        get_page(page);
        sg_set_page(sgl->sg + sgl->cur, page, size, offset);
        sgl->cur++;
        ctx->used += size;

        err = 0;

done:
        ctx->more = flags & MSG_MORE;
        if (!ctx->more && !aead_sufficient_data(ctx)) {
                aead_put_sgl(sk);
                err = -EMSGSIZE;
        }

unlock:
        aead_data_wakeup(sk);
        release_sock(sk);

        return err ?: size;
}

static int aead_recvmsg(struct kiocb *unused, struct socket *sock,
                        struct msghdr *msg, size_t ignored, int flags)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        unsigned bs = crypto_aead_blocksize(crypto_aead_reqtfm(&ctx->aead_req));
        unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
        struct aead_sg_list *sgl = &ctx->tsgl;
        struct scatterlist *sg = NULL;
        struct scatterlist assoc[ALG_MAX_PAGES];
        size_t assoclen = 0;
        unsigned int i = 0;
        int err = -EINVAL;
        unsigned long used = 0;
        size_t outlen = 0;
        size_t usedpages = 0;
        unsigned int cnt = 0;

        /* Limit number of IOV blocks to be accessed below */
        if (msg->msg_iter.nr_segs > RSGL_MAX_ENTRIES)
                return -ENOMSG;

        lock_sock(sk);

        /*
         * AEAD memory structure: For encryption, the tag is appended to the
         * ciphertext, which implies that the memory allocated for the
         * ciphertext must be increased by the tag length. For decryption, the
         * tag is expected to be concatenated to the ciphertext. The plaintext
         * therefore has a memory size of the ciphertext minus the tag length.
         *
         * The memory layout for a cipher operation is as follows:
         * AEAD encryption input:  assoc data || plaintext
         * AEAD encryption output: ciphertext || auth tag
         * AEAD decryption input:  assoc data || ciphertext || auth tag
         * AEAD decryption output: plaintext
         */
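        /*
         * Concrete example with gcm(aes) and its 16-byte tag: encrypting
         * 8 bytes of AD plus 16 bytes of plaintext consumes 24 input bytes
         * and produces 32 output bytes (ciphertext || tag); decrypting
         * 8 + 16 + 16 input bytes yields 16 bytes of plaintext.
         */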

        if (ctx->more) {
                err = aead_wait_for_data(sk, flags);
                if (err)
                        goto unlock;
        }

        used = ctx->used;

        /*
         * Make sure sufficient data is present -- note, the same check is
         * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
         * inform the data sender that something is wrong, but they are
         * irrelevant for maintaining kernel integrity. We need the check
         * here too in case user space decides to ignore the error returned
         * by sendmsg/sendpage and still calls recvmsg. This check here
         * protects the kernel integrity.
         */
        if (!aead_sufficient_data(ctx))
                goto unlock;

        /*
         * The cipher operation input data is reduced by the associated data
         * length as this data is processed separately later on.
         */
        used -= ctx->aead_assoclen;

        if (ctx->enc) {
                /* round up output buffer to multiple of block size */
                outlen = ((used + bs - 1) / bs * bs);
                /* add the size needed for the auth tag to be created */
                outlen += as;
        } else {
                /* output data size is input without the authentication tag */
                outlen = used - as;
                /* round up output buffer to multiple of block size */
                outlen = ((outlen + bs - 1) / bs * bs);
        }
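        /*
         * E.g. authenc(hmac(sha1),cbc(aes)) has bs = 16 and as = 20:
         * encrypting 30 payload bytes rounds up to 32 and yields
         * outlen = 52; decrypting those 52 bytes yields outlen = 32.
         */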

        /* convert iovecs of output buffers into scatterlists */
        while (iov_iter_count(&msg->msg_iter)) {
                size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
                                      (outlen - usedpages));

                /* make one iovec available as scatterlist */
                err = af_alg_make_sg(&ctx->rsgl[cnt], &msg->msg_iter,
                                     seglen);
                if (err < 0)
                        goto unlock;
                usedpages += err;
                /* chain the new scatterlist with initial list */
                if (cnt)
                        scatterwalk_crypto_chain(ctx->rsgl[0].sg,
                                        ctx->rsgl[cnt].sg, 1,
                                        sg_nents(ctx->rsgl[cnt-1].sg));
                /* we do not need more iovecs as we have sufficient memory */
                if (outlen <= usedpages)
                        break;
                iov_iter_advance(&msg->msg_iter, err);
                cnt++;
        }

        err = -EINVAL;
        /* ensure output buffer is sufficiently large */
        if (usedpages < outlen)
                goto unlock;

        sg_init_table(assoc, ALG_MAX_PAGES);
        assoclen = ctx->aead_assoclen;
        /*
         * Split scatterlist into two: first part becomes AD, second part
         * is plaintext / ciphertext. The first part is assigned to the assoc
         * scatterlist. When this loop finishes, sg points to the start of the
         * plaintext / ciphertext.
         */
        for (i = 0; i < ctx->tsgl.cur; i++) {
                sg = sgl->sg + i;
                if (sg->length <= assoclen) {
                        /* AD is larger than one page */
                        sg_set_page(assoc + i, sg_page(sg),
                                    sg->length, sg->offset);
                        assoclen -= sg->length;
                        if (i >= ctx->tsgl.cur)
                                goto unlock;
                } else if (!assoclen) {
                        /* current page marks the start of plaintext / ciphertext */
                        if (i)
                                /* AD terminates at page boundary */
                                sg_mark_end(assoc + i - 1);
                        else
                                /* AD size is zero */
                                sg_mark_end(assoc);
                        break;
                } else {
                        /* AD does not terminate at page boundary */
                        sg_set_page(assoc + i, sg_page(sg),
                                    assoclen, sg->offset);
                        sg_mark_end(assoc + i);
                        /* plaintext / ciphertext starts after AD */
                        sg->length -= assoclen;
                        sg->offset += assoclen;
                        break;
                }
        }

        aead_request_set_assoc(&ctx->aead_req, assoc, ctx->aead_assoclen);
        aead_request_set_crypt(&ctx->aead_req, sg, ctx->rsgl[0].sg, used,
                               ctx->iv);

        err = af_alg_wait_for_completion(ctx->enc ?
                                         crypto_aead_encrypt(&ctx->aead_req) :
                                         crypto_aead_decrypt(&ctx->aead_req),
                                         &ctx->completion);

        if (err) {
                /* EBADMSG implies a valid cipher operation took place */
                if (err == -EBADMSG)
                        aead_put_sgl(sk);
                goto unlock;
        }

        aead_put_sgl(sk);

        err = 0;

unlock:
        for (i = 0; i < cnt; i++)
                af_alg_free_sg(&ctx->rsgl[i]);

        aead_wmem_wakeup(sk);
        release_sock(sk);

        return err ? err : outlen;
}

static unsigned int aead_poll(struct file *file, struct socket *sock,
                              poll_table *wait)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        unsigned int mask;

        sock_poll_wait(file, sk_sleep(sk), wait);
        mask = 0;

        if (!ctx->more)
                mask |= POLLIN | POLLRDNORM;

        if (aead_writable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

        return mask;
}

static struct proto_ops algif_aead_ops = {
        .family         =       PF_ALG,

        .connect        =       sock_no_connect,
        .socketpair     =       sock_no_socketpair,
        .getname        =       sock_no_getname,
        .ioctl          =       sock_no_ioctl,
        .listen         =       sock_no_listen,
        .shutdown       =       sock_no_shutdown,
        .getsockopt     =       sock_no_getsockopt,
        .mmap           =       sock_no_mmap,
        .bind           =       sock_no_bind,
        .accept         =       sock_no_accept,
        .setsockopt     =       sock_no_setsockopt,

        .release        =       af_alg_release,
        .sendmsg        =       aead_sendmsg,
        .sendpage       =       aead_sendpage,
        .recvmsg        =       aead_recvmsg,
        .poll           =       aead_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
        return crypto_alloc_aead(name, type, mask);
}

static void aead_release(void *private)
{
        crypto_free_aead(private);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
        return crypto_aead_setauthsize(private, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
        return crypto_aead_setkey(private, key, keylen);
}

static void aead_sock_destruct(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        unsigned int ivlen = crypto_aead_ivsize(
                                crypto_aead_reqtfm(&ctx->aead_req));

        aead_put_sgl(sk);
        sock_kzfree_s(sk, ctx->iv, ivlen);
        sock_kfree_s(sk, ctx, ctx->len);
        af_alg_release_parent(sk);
}

static int aead_accept_parent(void *private, struct sock *sk)
{
        struct aead_ctx *ctx;
        struct alg_sock *ask = alg_sk(sk);
        unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
        unsigned int ivlen = crypto_aead_ivsize(private);

        ctx = sock_kmalloc(sk, len, GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        memset(ctx, 0, len);

        ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
        if (!ctx->iv) {
                sock_kfree_s(sk, ctx, len);
                return -ENOMEM;
        }
        memset(ctx->iv, 0, ivlen);

        ctx->len = len;
        ctx->used = 0;
        ctx->more = 0;
        ctx->merge = 0;
        ctx->enc = 0;
        ctx->tsgl.cur = 0;
        ctx->aead_assoclen = 0;
        af_alg_init_completion(&ctx->completion);
        sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);

        ask->private = ctx;

        aead_request_set_tfm(&ctx->aead_req, private);
        aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  af_alg_complete, &ctx->completion);

        sk->sk_destruct = aead_sock_destruct;

        return 0;
}

static const struct af_alg_type algif_type_aead = {
        .bind           =       aead_bind,
        .release        =       aead_release,
        .setkey         =       aead_setkey,
        .setauthsize    =       aead_setauthsize,
        .accept         =       aead_accept_parent,
        .ops            =       &algif_aead_ops,
        .name           =       "aead",
        .owner          =       THIS_MODULE
};

static int __init algif_aead_init(void)
{
        return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
        int err = af_alg_unregister_type(&algif_type_aead);

        BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");