author	Herbert Xu <herbert@gondor.apana.org.au>	2010-10-19 09:31:55 -0400
committer	Herbert Xu <herbert@gondor.apana.org.au>	2010-11-26 07:53:59 -0500
commit	8ff590903d5fc7f5a0a988c38267a3d08e6393a2 (patch)
tree	2e73472f71848945136661c82f50176d6a3db388 /crypto
parent	fe869cdb89c95d060c77eea20204d6c91f233b53 (diff)
crypto: algif_skcipher - User-space interface for skcipher operations
This patch adds the af_alg plugin for symmetric key ciphers, corresponding to the ablkcipher kernel operation type.

Keys can optionally be set through the setsockopt interface.

Once a sendmsg call occurs without MSG_MORE, no further writes may be made to the socket until all previous data has been read.

IVs, and whether encryption or decryption is performed, can be set through the setsockopt interface or as a control message to sendmsg.

The interface is completely synchronous: all operations are carried out in recvmsg(2) and complete before the system call returns.

The splice(2) interface supports reading the user-space data directly without copying (except that the Crypto API itself may copy the data if alignment is off).

The recvmsg(2) interface supports writing directly to user space without additional copying, i.e., the kernel crypto interface will receive the user-space address as its output SG list.

Thanks to Miloslav Trmac for reviewing this and contributing fixes and improvements.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: David S. Miller <davem@davemloft.net>
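For concreteness, a minimal user-space sketch of the flow described above: bind an AF_ALG socket of type "skcipher", optionally install a key with setsockopt(2), accept(2) an operation socket, hand the operation and IV to sendmsg(2) as control messages alongside the plaintext, and read(2) the result back synchronously. The cipher name "cbc(aes)", the all-zero 16-byte key and IV, and the one-block payload are illustrative assumptions, not part of the patch; error handling is omitted.

/*
 * Sketch only: assumes CONFIG_CRYPTO_USER_API_SKCIPHER is enabled and that
 * "cbc(aes)" is available.  Key, IV and payload values are placeholders.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef AF_ALG
#define AF_ALG 38
#endif
#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",	/* matches algif_type_skcipher.name */
		.salg_name   = "cbc(aes)",	/* assumed cipher, for illustration */
	};
	unsigned char key[16] = { 0 }, iv[16] = { 0 };
	unsigned char buf[16] = "single block...";
	char cbuf[CMSG_SPACE(4) + CMSG_SPACE(4 + sizeof(iv))] = { 0 };
	struct iovec io = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;
	struct af_alg_iv *alg_iv;
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));

	/* The key is optional and is set on the bound (parent) socket. */
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));

	/* accept() yields the request socket set up by skcipher_accept_parent(). */
	opfd = accept(tfmfd, NULL, 0);

	/* Operation and IV travel as control messages with the plaintext. */
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	msg.msg_iov = &io;
	msg.msg_iovlen = 1;

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(4 + sizeof(iv));
	alg_iv = (void *)CMSG_DATA(cmsg);
	alg_iv->ivlen = sizeof(iv);
	memcpy(alg_iv->iv, iv, sizeof(iv));

	/* No MSG_MORE: this request is complete once sendmsg() returns. */
	sendmsg(opfd, &msg, 0);

	/* The cipher runs synchronously inside this read()/recvmsg(). */
	read(opfd, buf, sizeof(buf));

	close(opfd);
	close(tfmfd);
	return 0;
}

Because the sendmsg() above is issued without MSG_MORE, no further writes are accepted on opfd until the queued data has been read back, matching the rule stated in the commit message.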
Diffstat (limited to 'crypto')
-rw-r--r--	crypto/Kconfig	8
-rw-r--r--	crypto/Makefile	1
-rw-r--r--	crypto/algif_skcipher.c	640
3 files changed, 649 insertions(+), 0 deletions(-)
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 6db27d7ff8b3..69437e21217f 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -852,6 +852,14 @@ config CRYPTO_USER_API_HASH
 	  This option enables the user-spaces interface for hash
 	  algorithms.
 
+config CRYPTO_USER_API_SKCIPHER
+	tristate "User-space interface for symmetric key cipher algorithms"
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_USER_API
+	help
+	  This option enables the user-spaces interface for symmetric
+	  key cipher algorithms.
+
 source "drivers/crypto/Kconfig"
 
 endif	# if CRYPTO
diff --git a/crypto/Makefile b/crypto/Makefile
index 14ab4052a9c8..efc0f18dbb37 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -87,6 +87,7 @@ obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
 obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
 obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
 obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
+obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
 
 #
 # generic algorithms and the async_tx api
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
new file mode 100644
index 000000000000..211c956952ca
--- /dev/null
+++ b/crypto/algif_skcipher.c
@@ -0,0 +1,640 @@
1/*
2 * algif_skcipher: User-space interface for skcipher algorithms
3 *
4 * This file provides the user-space API for symmetric key ciphers.
5 *
6 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 */
14
15#include <crypto/scatterwalk.h>
16#include <crypto/skcipher.h>
17#include <crypto/if_alg.h>
18#include <linux/init.h>
19#include <linux/list.h>
20#include <linux/kernel.h>
21#include <linux/mm.h>
22#include <linux/module.h>
23#include <linux/net.h>
24#include <net/sock.h>
25
26struct skcipher_sg_list {
27 struct list_head list;
28
29 int cur;
30
31 struct scatterlist sg[0];
32};
33
34struct skcipher_ctx {
35 struct list_head tsgl;
36 struct af_alg_sgl rsgl;
37
38 void *iv;
39
40 struct af_alg_completion completion;
41
42 unsigned used;
43
44 unsigned int len;
45 bool more;
46 bool merge;
47 bool enc;
48
49 struct ablkcipher_request req;
50};
51
52#define MAX_SGL_ENTS ((PAGE_SIZE - sizeof(struct skcipher_sg_list)) / \
53 sizeof(struct scatterlist) - 1)
54
55static inline bool skcipher_writable(struct sock *sk)
56{
57 struct alg_sock *ask = alg_sk(sk);
58 struct skcipher_ctx *ctx = ask->private;
59
60 return ctx->used + PAGE_SIZE <= max_t(int, sk->sk_sndbuf, PAGE_SIZE);
61}
62
63static int skcipher_alloc_sgl(struct sock *sk)
64{
65 struct alg_sock *ask = alg_sk(sk);
66 struct skcipher_ctx *ctx = ask->private;
67 struct skcipher_sg_list *sgl;
68 struct scatterlist *sg = NULL;
69
70 sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
71 if (!list_empty(&ctx->tsgl))
72 sg = sgl->sg;
73
74 if (!sg || sgl->cur >= MAX_SGL_ENTS) {
75 sgl = sock_kmalloc(sk, sizeof(*sgl) +
76 sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
77 GFP_KERNEL);
78 if (!sgl)
79 return -ENOMEM;
80
81 sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
82 sgl->cur = 0;
83
84 if (sg)
85 scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
86
87 list_add_tail(&sgl->list, &ctx->tsgl);
88 }
89
90 return 0;
91}
92
93static void skcipher_pull_sgl(struct sock *sk, int used)
94{
95 struct alg_sock *ask = alg_sk(sk);
96 struct skcipher_ctx *ctx = ask->private;
97 struct skcipher_sg_list *sgl;
98 struct scatterlist *sg;
99 int i;
100
101 while (!list_empty(&ctx->tsgl)) {
102 sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
103 list);
104 sg = sgl->sg;
105
106 for (i = 0; i < sgl->cur; i++) {
107 int plen = min_t(int, used, sg[i].length);
108
109 if (!sg_page(sg + i))
110 continue;
111
112 sg[i].length -= plen;
113 sg[i].offset += plen;
114
115 used -= plen;
116 ctx->used -= plen;
117
118 if (sg[i].length)
119 return;
120
121 put_page(sg_page(sg + i));
122 sg_assign_page(sg + i, NULL);
123 }
124
125 list_del(&sgl->list);
126 sock_kfree_s(sk, sgl,
127 sizeof(*sgl) + sizeof(sgl->sg[0]) *
128 (MAX_SGL_ENTS + 1));
129 }
130
131 if (!ctx->used)
132 ctx->merge = 0;
133}
134
135static void skcipher_free_sgl(struct sock *sk)
136{
137 struct alg_sock *ask = alg_sk(sk);
138 struct skcipher_ctx *ctx = ask->private;
139
140 skcipher_pull_sgl(sk, ctx->used);
141}
142
143static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
144{
145 long timeout;
146 DEFINE_WAIT(wait);
147 int err = -ERESTARTSYS;
148
149 if (flags & MSG_DONTWAIT)
150 return -EAGAIN;
151
152 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
153
154 for (;;) {
155 if (signal_pending(current))
156 break;
157 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
158 timeout = MAX_SCHEDULE_TIMEOUT;
159 if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
160 err = 0;
161 break;
162 }
163 }
164 finish_wait(sk_sleep(sk), &wait);
165
166 return err;
167}
168
169static void skcipher_wmem_wakeup(struct sock *sk)
170{
171 struct socket_wq *wq;
172
173 if (!skcipher_writable(sk))
174 return;
175
176 rcu_read_lock();
177 wq = rcu_dereference(sk->sk_wq);
178 if (wq_has_sleeper(wq))
179 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
180 POLLRDNORM |
181 POLLRDBAND);
182 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
183 rcu_read_unlock();
184}
185
186static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
187{
188 struct alg_sock *ask = alg_sk(sk);
189 struct skcipher_ctx *ctx = ask->private;
190 long timeout;
191 DEFINE_WAIT(wait);
192 int err = -ERESTARTSYS;
193
194 if (flags & MSG_DONTWAIT) {
195 return -EAGAIN;
196 }
197
198 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
199
200 for (;;) {
201 if (signal_pending(current))
202 break;
203 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
204 timeout = MAX_SCHEDULE_TIMEOUT;
205 if (sk_wait_event(sk, &timeout, ctx->used)) {
206 err = 0;
207 break;
208 }
209 }
210 finish_wait(sk_sleep(sk), &wait);
211
212 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
213
214 return err;
215}
216
217static void skcipher_data_wakeup(struct sock *sk)
218{
219 struct alg_sock *ask = alg_sk(sk);
220 struct skcipher_ctx *ctx = ask->private;
221 struct socket_wq *wq;
222
223 if (!ctx->used)
224 return;
225
226 rcu_read_lock();
227 wq = rcu_dereference(sk->sk_wq);
228 if (wq_has_sleeper(wq))
229 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
230 POLLRDNORM |
231 POLLRDBAND);
232 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
233 rcu_read_unlock();
234}
235
236static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
237 struct msghdr *msg, size_t size)
238{
239 struct sock *sk = sock->sk;
240 struct alg_sock *ask = alg_sk(sk);
241 struct skcipher_ctx *ctx = ask->private;
242 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
243 unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
244 struct skcipher_sg_list *sgl;
245 struct af_alg_control con = {};
246 long copied = 0;
247 bool enc = 0;
248 int limit;
249 int err;
250 int i;
251
252 if (msg->msg_controllen) {
253 err = af_alg_cmsg_send(msg, &con);
254 if (err)
255 return err;
256
257 switch (con.op) {
258 case ALG_OP_ENCRYPT:
259 enc = 1;
260 break;
261 case ALG_OP_DECRYPT:
262 enc = 0;
263 break;
264 default:
265 return -EINVAL;
266 }
267
268 if (con.iv && con.iv->ivlen != ivsize)
269 return -EINVAL;
270 }
271
272 err = -EINVAL;
273
274 lock_sock(sk);
275 if (!ctx->more && ctx->used)
276 goto unlock;
277
278 if (!ctx->used) {
279 ctx->enc = enc;
280 if (con.iv)
281 memcpy(ctx->iv, con.iv->iv, ivsize);
282 }
283
284 limit = max_t(int, sk->sk_sndbuf, PAGE_SIZE);
285 limit -= ctx->used;
286
287 while (size) {
288 struct scatterlist *sg;
289 unsigned long len = size;
290 int plen;
291
292 if (ctx->merge) {
293 sgl = list_entry(ctx->tsgl.prev,
294 struct skcipher_sg_list, list);
295 sg = sgl->sg + sgl->cur - 1;
296 len = min_t(unsigned long, len,
297 PAGE_SIZE - sg->offset - sg->length);
298
299 err = memcpy_fromiovec(page_address(sg_page(sg)) +
300 sg->offset + sg->length,
301 msg->msg_iov, len);
302 if (err)
303 goto unlock;
304
305 sg->length += len;
306 ctx->merge = (sg->offset + sg->length) &
307 (PAGE_SIZE - 1);
308
309 ctx->used += len;
310 copied += len;
311 size -= len;
312 limit -= len;
313 continue;
314 }
315
316 if (limit < PAGE_SIZE) {
317 err = skcipher_wait_for_wmem(sk, msg->msg_flags);
318 if (err)
319 goto unlock;
320
321 limit = max_t(int, sk->sk_sndbuf, PAGE_SIZE);
322 limit -= ctx->used;
323 }
324
325 len = min_t(unsigned long, len, limit);
326
327 err = skcipher_alloc_sgl(sk);
328 if (err)
329 goto unlock;
330
331 sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
332 sg = sgl->sg;
333 do {
334 i = sgl->cur;
335 plen = min_t(int, len, PAGE_SIZE);
336
337 sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
338 err = -ENOMEM;
339 if (!sg_page(sg + i))
340 goto unlock;
341
342 err = memcpy_fromiovec(page_address(sg_page(sg + i)),
343 msg->msg_iov, plen);
344 if (err) {
345 __free_page(sg_page(sg + i));
346 sg_assign_page(sg + i, NULL);
347 goto unlock;
348 }
349
350 sg[i].length = plen;
351 len -= plen;
352 ctx->used += plen;
353 copied += plen;
354 size -= plen;
355 limit -= plen;
356 sgl->cur++;
357 } while (len && sgl->cur < MAX_SGL_ENTS);
358
359 ctx->merge = plen & (PAGE_SIZE - 1);
360 }
361
362 err = 0;
363
364 ctx->more = msg->msg_flags & MSG_MORE;
365 if (!ctx->more && !list_empty(&ctx->tsgl))
366 sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
367
368unlock:
369 skcipher_data_wakeup(sk);
370 release_sock(sk);
371
372 return copied ?: err;
373}
374
375static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
376 int offset, size_t size, int flags)
377{
378 struct sock *sk = sock->sk;
379 struct alg_sock *ask = alg_sk(sk);
380 struct skcipher_ctx *ctx = ask->private;
381 struct skcipher_sg_list *sgl;
382 int err = -EINVAL;
383 int limit;
384
385 lock_sock(sk);
386 if (!ctx->more && ctx->used)
387 goto unlock;
388
389 if (!size)
390 goto done;
391
392 limit = max_t(int, sk->sk_sndbuf, PAGE_SIZE);
393 limit -= ctx->used;
394
395 if (limit < PAGE_SIZE) {
396 err = skcipher_wait_for_wmem(sk, flags);
397 if (err)
398 goto unlock;
399
400 limit = max_t(int, sk->sk_sndbuf, PAGE_SIZE);
401 limit -= ctx->used;
402 }
403
404 err = skcipher_alloc_sgl(sk);
405 if (err)
406 goto unlock;
407
408 ctx->merge = 0;
409 sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
410
411 get_page(page);
412 sg_set_page(sgl->sg + sgl->cur, page, size, offset);
413 sgl->cur++;
414 ctx->used += size;
415
416done:
417 ctx->more = flags & MSG_MORE;
418 if (!ctx->more && !list_empty(&ctx->tsgl))
419 sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
420
421unlock:
422 skcipher_data_wakeup(sk);
423 release_sock(sk);
424
425 return err ?: size;
426}
427
428static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
429 struct msghdr *msg, size_t ignored, int flags)
430{
431 struct sock *sk = sock->sk;
432 struct alg_sock *ask = alg_sk(sk);
433 struct skcipher_ctx *ctx = ask->private;
434 unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
435 &ctx->req));
436 struct skcipher_sg_list *sgl;
437 struct scatterlist *sg;
438 unsigned long iovlen;
439 struct iovec *iov;
440 int err = -EAGAIN;
441 int used;
442 long copied = 0;
443
444 lock_sock(sk);
445 for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
446 iovlen--, iov++) {
447 unsigned long seglen = iov->iov_len;
448 char __user *from = iov->iov_base;
449
450 while (seglen) {
451 sgl = list_first_entry(&ctx->tsgl,
452 struct skcipher_sg_list, list);
453 sg = sgl->sg;
454
455 while (!sg->length)
456 sg++;
457
458 used = ctx->used;
459 if (!used) {
460 err = skcipher_wait_for_data(sk, flags);
461 if (err)
462 goto unlock;
463 }
464
465 used = min_t(unsigned long, used, seglen);
466
467 if (ctx->more || used < ctx->used)
468 used -= used % bs;
469
470 err = -EINVAL;
471 if (!used)
472 goto unlock;
473
474 used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
475 if (used < 0)
476 goto unlock;
477
478 ablkcipher_request_set_crypt(&ctx->req, sg,
479 ctx->rsgl.sg, used,
480 ctx->iv);
481
482 err = af_alg_wait_for_completion(
483 ctx->enc ?
484 crypto_ablkcipher_encrypt(&ctx->req) :
485 crypto_ablkcipher_decrypt(&ctx->req),
486 &ctx->completion);
487
488 af_alg_free_sg(&ctx->rsgl);
489
490 if (err)
491 goto unlock;
492
493 copied += used;
494 from += used;
495 seglen -= used;
496 skcipher_pull_sgl(sk, used);
497 }
498 }
499
500 err = 0;
501
502unlock:
503 skcipher_wmem_wakeup(sk);
504 release_sock(sk);
505
506 return copied ?: err;
507}
508
509
510static unsigned int skcipher_poll(struct file *file, struct socket *sock,
511 poll_table *wait)
512{
513 struct sock *sk = sock->sk;
514 struct alg_sock *ask = alg_sk(sk);
515 struct skcipher_ctx *ctx = ask->private;
516 unsigned int mask;
517
518 sock_poll_wait(file, sk_sleep(sk), wait);
519 mask = 0;
520
521 if (ctx->used)
522 mask |= POLLIN | POLLRDNORM;
523
524 if (skcipher_writable(sk))
525 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
526
527 return mask;
528}
529
530static struct proto_ops algif_skcipher_ops = {
531 .family = PF_ALG,
532
533 .connect = sock_no_connect,
534 .socketpair = sock_no_socketpair,
535 .getname = sock_no_getname,
536 .ioctl = sock_no_ioctl,
537 .listen = sock_no_listen,
538 .shutdown = sock_no_shutdown,
539 .getsockopt = sock_no_getsockopt,
540 .mmap = sock_no_mmap,
541 .bind = sock_no_bind,
542 .accept = sock_no_accept,
543 .setsockopt = sock_no_setsockopt,
544
545 .release = af_alg_release,
546 .sendmsg = skcipher_sendmsg,
547 .sendpage = skcipher_sendpage,
548 .recvmsg = skcipher_recvmsg,
549 .poll = skcipher_poll,
550};
551
552static void *skcipher_bind(const char *name, u32 type, u32 mask)
553{
554 return crypto_alloc_ablkcipher(name, type, mask);
555}
556
557static void skcipher_release(void *private)
558{
559 crypto_free_ablkcipher(private);
560}
561
562static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
563{
564 return crypto_ablkcipher_setkey(private, key, keylen);
565}
566
567static void skcipher_sock_destruct(struct sock *sk)
568{
569 struct alg_sock *ask = alg_sk(sk);
570 struct skcipher_ctx *ctx = ask->private;
571 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
572
573 skcipher_free_sgl(sk);
574 sock_kfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
575 sock_kfree_s(sk, ctx, ctx->len);
576 af_alg_release_parent(sk);
577}
578
579static int skcipher_accept_parent(void *private, struct sock *sk)
580{
581 struct skcipher_ctx *ctx;
582 struct alg_sock *ask = alg_sk(sk);
583 unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);
584
585 ctx = sock_kmalloc(sk, len, GFP_KERNEL);
586 if (!ctx)
587 return -ENOMEM;
588
589 ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
590 GFP_KERNEL);
591 if (!ctx->iv) {
592 sock_kfree_s(sk, ctx, len);
593 return -ENOMEM;
594 }
595
596 memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));
597
598 INIT_LIST_HEAD(&ctx->tsgl);
599 ctx->len = len;
600 ctx->used = 0;
601 ctx->more = 0;
602 ctx->merge = 0;
603 ctx->enc = 0;
604 af_alg_init_completion(&ctx->completion);
605
606 ask->private = ctx;
607
608 ablkcipher_request_set_tfm(&ctx->req, private);
609 ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
610 af_alg_complete, &ctx->completion);
611
612 sk->sk_destruct = skcipher_sock_destruct;
613
614 return 0;
615}
616
617static const struct af_alg_type algif_type_skcipher = {
618 .bind = skcipher_bind,
619 .release = skcipher_release,
620 .setkey = skcipher_setkey,
621 .accept = skcipher_accept_parent,
622 .ops = &algif_skcipher_ops,
623 .name = "skcipher",
624 .owner = THIS_MODULE
625};
626
627static int __init algif_skcipher_init(void)
628{
629 return af_alg_register_type(&algif_type_skcipher);
630}
631
632static void __exit algif_skcipher_exit(void)
633{
634 int err = af_alg_unregister_type(&algif_type_skcipher);
635 BUG_ON(err);
636}
637
638module_init(algif_skcipher_init);
639module_exit(algif_skcipher_exit);
640MODULE_LICENSE("GPL");
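As a footnote to the splice(2) support mentioned in the commit message and implemented by skcipher_sendpage() above, the sketch below feeds plaintext through a pipe instead of sendmsg(2), so the socket takes page references rather than copying. It assumes opfd is an accepted operation socket on which key, operation and IV have already been configured (for example by a preceding sendmsg() carrying only control data and MSG_MORE); the helper name splice_encrypt() is purely illustrative.

#define _GNU_SOURCE		/* for splice(2)/vmsplice(2) */
#include <fcntl.h>
#include <unistd.h>
#include <sys/uio.h>

/* Illustrative helper: push "len" bytes into an already-configured AF_ALG
 * operation socket via a pipe and read the result back in place. */
void splice_encrypt(int opfd, unsigned char *buf, size_t len)
{
	struct iovec io = { .iov_base = buf, .iov_len = len };
	int pfd[2];

	pipe(pfd);

	/* Map the user pages into the pipe, then move them to the socket;
	 * skcipher_sendpage() takes a page reference instead of copying. */
	vmsplice(pfd[1], &io, 1, 0);
	splice(pfd[0], NULL, opfd, NULL, len, 0);

	/* As with sendmsg(), the cipher itself runs synchronously here. */
	read(opfd, buf, len);

	close(pfd[0]);
	close(pfd[1]);
}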