aboutsummaryrefslogtreecommitdiffstats
path: root/net/xfrm
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /net/xfrm
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'net/xfrm')
-rw-r--r--net/xfrm/Kconfig12
-rw-r--r--net/xfrm/Makefile7
-rw-r--r--net/xfrm/xfrm_algo.c729
-rw-r--r--net/xfrm/xfrm_input.c89
-rw-r--r--net/xfrm/xfrm_policy.c1367
-rw-r--r--net/xfrm/xfrm_state.c1037
-rw-r--r--net/xfrm/xfrm_user.c1253
7 files changed, 4494 insertions, 0 deletions
diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
new file mode 100644
index 000000000000..58ca6a972c48
--- /dev/null
+++ b/net/xfrm/Kconfig
@@ -0,0 +1,12 @@
1#
2# XFRM configuration
3#
4config XFRM_USER
5 tristate "IPsec user configuration interface"
6 depends on INET && XFRM
7 ---help---
8 Support for IPsec user configuration interface used
9 by native Linux tools.
10
11 If unsure, say Y.
12
diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile
new file mode 100644
index 000000000000..693aac1aa833
--- /dev/null
+++ b/net/xfrm/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for the XFRM subsystem.
3#
4
5obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_input.o xfrm_algo.o
6obj-$(CONFIG_XFRM_USER) += xfrm_user.o
7
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
new file mode 100644
index 000000000000..080aae243ce0
--- /dev/null
+++ b/net/xfrm/xfrm_algo.c
@@ -0,0 +1,729 @@
1/*
2 * xfrm algorithm interface
3 *
4 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/pfkeyv2.h>
16#include <linux/crypto.h>
17#include <net/xfrm.h>
18#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
19#include <net/ah.h>
20#endif
21#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
22#include <net/esp.h>
23#endif
24#include <asm/scatterlist.h>
25
26/*
27 * Algorithms supported by IPsec. These entries contain properties which
28 * are used in key negotiation and xfrm processing, and are used to verify
29 * that instantiated crypto transforms have correct parameters for IPsec
30 * purposes.
31 */
/*
 * Authentication (integrity) algorithms known to IPsec.  Each entry pairs
 * the kernel crypto driver name with the PF_KEYv2 SADB identifier and the
 * ICV truncation/full lengths used during key negotiation.
 * ".available" is filled in at runtime by xfrm_probe_algs().
 */
static struct xfrm_algo_desc aalg_list[] = {
{
	.name = "digest_null",

	.uinfo = {
		.auth = {
			.icv_truncbits = 0,
			.icv_fullbits = 0,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_AALG_NULL,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	.name = "md5",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_AALG_MD5HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 128
	}
},
{
	.name = "sha1",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 160,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_AALG_SHA1HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
{
	.name = "sha256",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 256,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 256,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "ripemd160",

	.uinfo = {
		.auth = {
			.icv_truncbits = 96,
			.icv_fullbits = 160,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
};
119
/*
 * Encryption algorithms known to IPsec ESP.  blockbits is the cipher block
 * size; min/max bits bound the negotiable key length and defkeybits is the
 * default proposed.  ".available" is set at runtime by xfrm_probe_algs().
 */
static struct xfrm_algo_desc ealg_list[] = {
{
	.name = "cipher_null",

	.uinfo = {
		.encr = {
			.blockbits = 8,
			.defkeybits = 0,
		}
	},

	.desc = {
		.sadb_alg_id =	SADB_EALG_NULL,
		.sadb_alg_ivlen = 0,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	.name = "des",

	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 64,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_EALG_DESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 64,
		.sadb_alg_maxbits = 64
	}
},
{
	.name = "des3_ede",

	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 192,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_EALG_3DESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 192,
		.sadb_alg_maxbits = 192
	}
},
{
	.name = "cast128",

	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_CASTCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 128
	}
},
{
	.name = "blowfish",

	.uinfo = {
		.encr = {
			.blockbits = 64,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 448
	}
},
{
	.name = "aes",

	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_AESCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "serpent",

	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_SERPENTCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256,
	}
},
{
	.name = "twofish",

	.uinfo = {
		.encr = {
			.blockbits = 128,
			.defkeybits = 128,
		}
	},

	.desc = {
		.sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
		.sadb_alg_ivlen = 8,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
};
258
/*
 * IPComp compression algorithms.  ".threshold" is a percentage: compression
 * is considered worthwhile only when the output shrinks below this fraction
 * of the input (exact use is in the IPComp code, not visible here).
 */
static struct xfrm_algo_desc calg_list[] = {
{
	.name = "deflate",
	.uinfo = {
		.comp = {
			.threshold = 90,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
},
{
	.name = "lzs",
	.uinfo = {
		.comp = {
			.threshold = 90,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_LZS }
},
{
	.name = "lzjh",
	.uinfo = {
		.comp = {
			.threshold = 50,
		}
	},
	.desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
},
};
288
/* Number of entries in the authentication algorithm table. */
static inline int aalg_entries(void)
{
	return ARRAY_SIZE(aalg_list);
}
293
/* Number of entries in the encryption algorithm table. */
static inline int ealg_entries(void)
{
	return ARRAY_SIZE(ealg_list);
}
298
/* Number of entries in the compression algorithm table. */
static inline int calg_entries(void)
{
	return ARRAY_SIZE(calg_list);
}
303
304/* Todo: generic iterators */
305struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
306{
307 int i;
308
309 for (i = 0; i < aalg_entries(); i++) {
310 if (aalg_list[i].desc.sadb_alg_id == alg_id) {
311 if (aalg_list[i].available)
312 return &aalg_list[i];
313 else
314 break;
315 }
316 }
317 return NULL;
318}
319EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);
320
321struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
322{
323 int i;
324
325 for (i = 0; i < ealg_entries(); i++) {
326 if (ealg_list[i].desc.sadb_alg_id == alg_id) {
327 if (ealg_list[i].available)
328 return &ealg_list[i];
329 else
330 break;
331 }
332 }
333 return NULL;
334}
335EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);
336
337struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
338{
339 int i;
340
341 for (i = 0; i < calg_entries(); i++) {
342 if (calg_list[i].desc.sadb_alg_id == alg_id) {
343 if (calg_list[i].available)
344 return &calg_list[i];
345 else
346 break;
347 }
348 }
349 return NULL;
350}
351EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
352
/*
 * Common by-name lookup over one of the algorithm tables.
 * @list/@entries: table to search and its length.
 * @name: crypto driver name; NULL returns NULL.
 * @probe: when non-zero and the entry is not yet marked available, ask the
 *         crypto layer whether the algorithm exists and cache the answer.
 * Returns the matching, available descriptor or NULL.
 */
static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
					      int entries, char *name,
					      int probe)
{
	int i, status;

	if (!name)
		return NULL;

	for (i = 0; i < entries; i++) {
		if (strcmp(name, list[i].name))
			continue;

		/* Already known to be present — fast path. */
		if (list[i].available)
			return &list[i];

		if (!probe)
			break;

		status = crypto_alg_available(name, 0);
		if (!status)
			break;

		/* Cache the probe result for subsequent lookups. */
		list[i].available = status;
		return &list[i];
	}
	return NULL;
}
381
/* By-name lookup in the authentication table; see xfrm_get_byname(). */
struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(aalg_list, aalg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);
387
/* By-name lookup in the encryption table; see xfrm_get_byname(). */
struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(ealg_list, ealg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);
393
/* By-name lookup in the compression table; see xfrm_get_byname(). */
struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(calg_list, calg_entries(), name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
399
/*
 * Index-based access to the authentication table, NULL when out of range.
 * Note the entry is returned regardless of its ".available" state.
 */
struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
{
	if (idx >= aalg_entries())
		return NULL;

	return &aalg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);
408
/*
 * Index-based access to the encryption table, NULL when out of range.
 * Note the entry is returned regardless of its ".available" state.
 */
struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
{
	if (idx >= ealg_entries())
		return NULL;

	return &ealg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);
417
418/*
419 * Probe for the availability of crypto algorithms, and set the available
420 * flag for any algorithms found on the system. This is typically called by
421 * pfkey during userspace SA add, update or register.
422 */
/*
 * Probe the crypto layer for every algorithm in the three tables and record
 * availability.  Called from process context (pfkey SA add/update/register);
 * crypto_alg_available() may load modules, hence the in_softirq() check.
 */
void xfrm_probe_algs(void)
{
#ifdef CONFIG_CRYPTO
	int i, status;

	BUG_ON(in_softirq());

	for (i = 0; i < aalg_entries(); i++) {
		status = crypto_alg_available(aalg_list[i].name, 0);
		if (aalg_list[i].available != status)
			aalg_list[i].available = status;
	}

	for (i = 0; i < ealg_entries(); i++) {
		status = crypto_alg_available(ealg_list[i].name, 0);
		if (ealg_list[i].available != status)
			ealg_list[i].available = status;
	}

	for (i = 0; i < calg_entries(); i++) {
		status = crypto_alg_available(calg_list[i].name, 0);
		if (calg_list[i].available != status)
			calg_list[i].available = status;
	}
#endif
}
EXPORT_SYMBOL_GPL(xfrm_probe_algs);
450
451int xfrm_count_auth_supported(void)
452{
453 int i, n;
454
455 for (i = 0, n = 0; i < aalg_entries(); i++)
456 if (aalg_list[i].available)
457 n++;
458 return n;
459}
460EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);
461
462int xfrm_count_enc_supported(void)
463{
464 int i, n;
465
466 for (i = 0, n = 0; i < ealg_entries(); i++)
467 if (ealg_list[i].available)
468 n++;
469 return n;
470}
471EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
472
473/* Move to common area: it is shared with AH. */
474
/*
 * Feed [offset, offset+len) of an skb through an ICV update callback,
 * walking the linear head, page fragments and the frag_list in order.
 * Each contiguous region is wrapped in a one-entry scatterlist and passed
 * to @icv_update.  BUG()s if @len extends past the data actually present.
 */
void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
		  int offset, int len, icv_update_fn_t icv_update)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct scatterlist sg;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;

		sg.page = virt_to_page(skb->data + offset);
		sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg.length = copy;

		icv_update(tfm, &sg, 1);

		if ((len -= copy) == 0)
			return;
		offset += copy;
	}

	/* Page fragments: 'start' tracks the running offset of each frag
	 * within the skb's logical data stream. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;

			sg.page = frag->page;
			sg.offset = frag->page_offset + offset-start;
			sg.length = copy;

			icv_update(tfm, &sg, 1);

			if (!(len -= copy))
				return;
			offset += copy;
		}
		start = end;
	}

	/* Chained skbs: recurse with the offset rebased to each sub-skb. */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				skb_icv_walk(list, tfm, offset-start, copy, icv_update);
				if ((len -= copy) == 0)
					return;
				offset += copy;
			}
			start = end;
		}
	}
	/* Any remaining length means the caller asked for more data than
	 * the skb holds — that is a caller bug. */
	if (len)
		BUG();
}
EXPORT_SYMBOL_GPL(skb_icv_walk);
547
548#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
549
550/* Looking generic it is not used in another places. */
551
/* Looking generic it is not used in another places. */

/*
 * Map [offset, offset+len) of an skb into the caller-provided scatterlist
 * @sg: one entry per contiguous region (head, each page frag, and the
 * frag_list sub-skbs, recursively).  Returns the number of entries used.
 * The caller must size @sg for the worst case.  BUG()s if @len runs past
 * the skb's data.
 */
int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int elt = 0;

	/* Linear header part. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg[elt].page = virt_to_page(skb->data + offset);
		sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg[elt].length = copy;
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	/* Page fragments. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg[elt].page = frag->page;
			sg[elt].offset = frag->page_offset+offset-start;
			sg[elt].length = copy;
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	/* Chained skbs: recurse, appending entries after the ones used. */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
				if ((len -= copy) == 0)
					return elt;
				offset += copy;
			}
			start = end;
		}
	}
	if (len)
		BUG();
	return elt;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
618
619/* Check that skb data bits are writable. If they are not, copy data
620 * to newly created private area. If "tailbits" is given, make sure that
621 * tailbits bytes beyond current end of skb are writable.
622 *
623 * Returns amount of elements of scatterlist to load for subsequent
624 * transformations and pointer to writable trailer skb.
625 */
626
627int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
628{
629 int copyflag;
630 int elt;
631 struct sk_buff *skb1, **skb_p;
632
633 /* If skb is cloned or its head is paged, reallocate
634 * head pulling out all the pages (pages are considered not writable
635 * at the moment even if they are anonymous).
636 */
637 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
638 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
639 return -ENOMEM;
640
641 /* Easy case. Most of packets will go this way. */
642 if (!skb_shinfo(skb)->frag_list) {
643 /* A little of trouble, not enough of space for trailer.
644 * This should not happen, when stack is tuned to generate
645 * good frames. OK, on miss we reallocate and reserve even more
646 * space, 128 bytes is fair. */
647
648 if (skb_tailroom(skb) < tailbits &&
649 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
650 return -ENOMEM;
651
652 /* Voila! */
653 *trailer = skb;
654 return 1;
655 }
656
657 /* Misery. We are in troubles, going to mincer fragments... */
658
659 elt = 1;
660 skb_p = &skb_shinfo(skb)->frag_list;
661 copyflag = 0;
662
663 while ((skb1 = *skb_p) != NULL) {
664 int ntail = 0;
665
666 /* The fragment is partially pulled by someone,
667 * this can happen on input. Copy it and everything
668 * after it. */
669
670 if (skb_shared(skb1))
671 copyflag = 1;
672
673 /* If the skb is the last, worry about trailer. */
674
675 if (skb1->next == NULL && tailbits) {
676 if (skb_shinfo(skb1)->nr_frags ||
677 skb_shinfo(skb1)->frag_list ||
678 skb_tailroom(skb1) < tailbits)
679 ntail = tailbits + 128;
680 }
681
682 if (copyflag ||
683 skb_cloned(skb1) ||
684 ntail ||
685 skb_shinfo(skb1)->nr_frags ||
686 skb_shinfo(skb1)->frag_list) {
687 struct sk_buff *skb2;
688
689 /* Fuck, we are miserable poor guys... */
690 if (ntail == 0)
691 skb2 = skb_copy(skb1, GFP_ATOMIC);
692 else
693 skb2 = skb_copy_expand(skb1,
694 skb_headroom(skb1),
695 ntail,
696 GFP_ATOMIC);
697 if (unlikely(skb2 == NULL))
698 return -ENOMEM;
699
700 if (skb1->sk)
701 skb_set_owner_w(skb, skb1->sk);
702
703 /* Looking around. Are we still alive?
704 * OK, link new skb, drop old one */
705
706 skb2->next = skb1->next;
707 *skb_p = skb2;
708 kfree_skb(skb1);
709 skb1 = skb2;
710 }
711 elt++;
712 *trailer = skb1;
713 skb_p = &skb1->next;
714 }
715
716 return elt;
717}
718EXPORT_SYMBOL_GPL(skb_cow_data);
719
/*
 * Append @len bytes to @tail (typically the trailer skb found by
 * skb_cow_data()) and, when @tail is a fragment rather than @skb itself,
 * account the growth on the head skb's len/data_len as well.
 * Returns a pointer to the start of the appended area (from skb_put()).
 */
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);
729#endif
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
new file mode 100644
index 000000000000..c58a6f05a0b6
--- /dev/null
+++ b/net/xfrm/xfrm_input.c
@@ -0,0 +1,89 @@
1/*
2 * xfrm_input.c
3 *
4 * Changes:
5 * YOSHIFUJI Hideaki @USAGI
6 * Split up af-specific portion
7 *
8 */
9
10#include <linux/slab.h>
11#include <linux/module.h>
12#include <net/ip.h>
13#include <net/xfrm.h>
14
15static kmem_cache_t *secpath_cachep;
16
/*
 * Final teardown of a sec_path once its refcount hits zero: drop the
 * reference held on each xfrm_state in the vector, then free the object
 * back to its slab cache.
 */
void __secpath_destroy(struct sec_path *sp)
{
	int i;
	for (i = 0; i < sp->len; i++)
		xfrm_state_put(sp->x[i].xvec);
	kmem_cache_free(secpath_cachep, sp);
}
EXPORT_SYMBOL(__secpath_destroy);
25
/*
 * Allocate a new sec_path, optionally deep-copying @src (taking a reference
 * on each contained xfrm_state).  @src may be NULL for an empty path.
 * Returns the new path with refcount 1, or NULL on allocation failure.
 * GFP_ATOMIC (SLAB_ATOMIC) because this runs in packet-receive context.
 */
struct sec_path *secpath_dup(struct sec_path *src)
{
	struct sec_path *sp;

	sp = kmem_cache_alloc(secpath_cachep, SLAB_ATOMIC);
	if (!sp)
		return NULL;

	sp->len = 0;
	if (src) {
		int i;

		/* Copy the whole struct, then re-take the state refs. */
		memcpy(sp, src, sizeof(*sp));
		for (i = 0; i < sp->len; i++)
			xfrm_state_hold(sp->x[i].xvec);
	}
	/* Fresh object: refcount starts at 1 regardless of the memcpy. */
	atomic_set(&sp->refcnt, 1);
	return sp;
}
EXPORT_SYMBOL(secpath_dup);
46
47/* Fetch spi and seq from ipsec header */
48
/*
 * Extract SPI and sequence number from the IPsec header at skb->h.raw.
 * For IPCOMP the 16-bit CPI is widened into *spi and *seq is 0.
 * Returns 0 on success, -EINVAL if the header cannot be pulled,
 * 1 for protocols this function does not handle.
 * NOTE: *spi/*seq are stored in network byte order for AH/ESP (no ntohl
 * on the raw loads below).
 */
int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, u32 *spi, u32 *seq)
{
	int offset, offset_seq;

	switch (nexthdr) {
	case IPPROTO_AH:
		offset = offsetof(struct ip_auth_hdr, spi);
		offset_seq = offsetof(struct ip_auth_hdr, seq_no);
		break;
	case IPPROTO_ESP:
		offset = offsetof(struct ip_esp_hdr, spi);
		offset_seq = offsetof(struct ip_esp_hdr, seq_no);
		break;
	case IPPROTO_COMP:
		if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
			return -EINVAL;
		/* CPI is the 16-bit field at byte offset 2 of the IPComp
		 * header; widen it to 32 bits in host order. */
		*spi = ntohl(ntohs(*(u16*)(skb->h.raw + 2)));
		*seq = 0;
		return 0;
	default:
		return 1;
	}

	/* 16 bytes covers spi+seq for both AH and ESP header layouts —
	 * presumably chosen as a safe upper bound; TODO confirm. */
	if (!pskb_may_pull(skb, 16))
		return -EINVAL;

	*spi = *(u32*)(skb->h.raw + offset);
	*seq = *(u32*)(skb->h.raw + offset_seq);
	return 0;
}
EXPORT_SYMBOL(xfrm_parse_spi);
80
/*
 * Boot-time setup: create the slab cache used for sec_path objects.
 * Panics on failure — without it the xfrm input path cannot function.
 */
void __init xfrm_input_init(void)
{
	secpath_cachep = kmem_cache_create("secpath_cache",
					   sizeof(struct sec_path),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL, NULL);
	if (!secpath_cachep)
		panic("XFRM: failed to allocate secpath_cache\n");
}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
new file mode 100644
index 000000000000..80828078733d
--- /dev/null
+++ b/net/xfrm/xfrm_policy.c
@@ -0,0 +1,1367 @@
1/*
2 * xfrm_policy.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * Kazunori MIYAZAWA @USAGI
10 * YOSHIFUJI Hideaki
11 * Split up af-specific portion
12 * Derek Atkins <derek@ihtfp.com> Add the post_input processor
13 *
14 */
15
16#include <asm/bug.h>
17#include <linux/config.h>
18#include <linux/slab.h>
19#include <linux/kmod.h>
20#include <linux/list.h>
21#include <linux/spinlock.h>
22#include <linux/workqueue.h>
23#include <linux/notifier.h>
24#include <linux/netdevice.h>
25#include <linux/module.h>
26#include <net/xfrm.h>
27#include <net/ip.h>
28
29DECLARE_MUTEX(xfrm_cfg_sem);
30EXPORT_SYMBOL(xfrm_cfg_sem);
31
32static DEFINE_RWLOCK(xfrm_policy_lock);
33
34struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];
35EXPORT_SYMBOL(xfrm_policy_list);
36
37static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
38static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
39
40static kmem_cache_t *xfrm_dst_cache;
41
42static struct work_struct xfrm_policy_gc_work;
43static struct list_head xfrm_policy_gc_list =
44 LIST_HEAD_INIT(xfrm_policy_gc_list);
45static DEFINE_SPINLOCK(xfrm_policy_gc_lock);
46
47static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
48static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
49
/*
 * Register an xfrm transform type (e.g. AH, ESP) for an address family.
 * Returns 0 on success, -EAFNOSUPPORT if the family has no afinfo,
 * -EEXIST if a type is already registered for that protocol slot.
 */
int xfrm_register_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct xfrm_type_map *typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	write_lock(&typemap->lock);
	if (likely(typemap->map[type->proto] == NULL))
		typemap->map[type->proto] = type;
	else
		err = -EEXIST;
	write_unlock(&typemap->lock);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_type);
70
/*
 * Remove a previously registered transform type.
 * Returns 0 on success, -EAFNOSUPPORT for an unknown family, -ENOENT if
 * the slot does not currently hold @type.
 */
int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct xfrm_type_map *typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	write_lock(&typemap->lock);
	if (unlikely(typemap->map[type->proto] != type))
		err = -ENOENT;
	else
		typemap->map[type->proto] = NULL;
	write_unlock(&typemap->lock);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);
91
/*
 * Look up the transform type for (family, proto), taking a module
 * reference on it.  On a miss, attempt one modprobe of
 * "xfrm-type-<family>-<proto>" and retry.  Returns NULL if still absent.
 * Callers release via xfrm_put_type().
 */
struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_type_map *typemap;
	struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	read_lock(&typemap->lock);
	type = typemap->map[proto];
	/* A type whose module is unloading counts as absent. */
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	read_unlock(&typemap->lock);
	if (!type && !modload_attempted) {
		xfrm_policy_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d",
			       (int) family, (int) proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_policy_put_afinfo(afinfo);
	return type;
}
EXPORT_SYMBOL(xfrm_get_type);
122
/*
 * Family-dispatched route lookup for a flow.  Delegates to the per-family
 * afinfo->dst_lookup hook.  Returns -EAFNOSUPPORT for an unknown family,
 * -EINVAL if the family registered no hook, else the hook's result.
 */
int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl,
		    unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	if (likely(afinfo->dst_lookup != NULL))
		err = afinfo->dst_lookup(dst, fl);
	else
		err = -EINVAL;
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_dst_lookup);
140
/* Drop the module reference taken by xfrm_get_type(). */
void xfrm_put_type(struct xfrm_type *type)
{
	module_put(type->owner);
}
145
146static inline unsigned long make_jiffies(long secs)
147{
148 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
149 return MAX_SCHEDULE_TIMEOUT-1;
150 else
151 return secs*HZ;
152}
153
/*
 * Per-policy lifetime timer.  Checks the four lifetime limits:
 * hard add/use expiry kills the policy; soft expiry only warns key
 * managers (km_policy_expired with hard=0) and re-checks after
 * XFRM_KM_TIMEOUT.  Re-arms itself for the earliest upcoming event.
 * The timer's reference on @xp is dropped on every exit path.
 */
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy*)data;
	unsigned long now = (unsigned long)xtime.tv_sec;
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (xp->dead)
		goto out;

	/* Direction is encoded in the low 3 bits of the index. */
	dir = xp->index & 7;

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0);
	/* mod_timer() == 0 means the timer was inactive, so re-arming it
	 * creates a new reference that must be accounted for. */
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	km_policy_expired(xp, dir, 1);
	xfrm_policy_delete(xp, dir);
	xfrm_pol_put(xp);
}
223
224
225/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
226 * SPD calls.
227 */
228
/*
 * Allocate and zero-initialize an xfrm_policy with refcount 1, its lock
 * initialized and its lifetime timer wired to xfrm_policy_timer (not yet
 * armed).  Returns NULL on allocation failure.
 */
struct xfrm_policy *xfrm_policy_alloc(int gfp)
{
	struct xfrm_policy *policy;

	policy = kmalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		memset(policy, 0, sizeof(struct xfrm_policy));
		atomic_set(&policy->refcnt, 1);
		rwlock_init(&policy->lock);
		init_timer(&policy->timer);
		policy->timer.data = (unsigned long)policy;
		policy->timer.function = xfrm_policy_timer;
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);
246
247/* Destroy xfrm_policy: descendant resources must be released to this moment. */
248
/*
 * Final free of a policy whose refcount reached zero.  Sanity-checks that
 * the policy was marked dead, owns no cached bundles, and has no pending
 * timer — any of these indicates a refcounting bug elsewhere.
 */
void __xfrm_policy_destroy(struct xfrm_policy *policy)
{
	if (!policy->dead)
		BUG();

	if (policy->bundles)
		BUG();

	if (del_timer(&policy->timer))
		BUG();

	kfree(policy);
}
EXPORT_SYMBOL(__xfrm_policy_destroy);
263
/*
 * GC worker helper: release a dead policy's cached dst bundles, cancel
 * its timer (dropping the timer's reference if it was pending), flush the
 * flow cache if anyone still holds a lookup reference, then drop the GC
 * list's own reference.
 */
static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	/* Remaining refs must be flow-cache entries; invalidate them. */
	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush();

	xfrm_pol_put(policy);
}
281
/*
 * Workqueue handler: atomically steal the pending GC list, then reap each
 * dead policy outside the spinlock via xfrm_policy_gc_kill().
 */
static void xfrm_policy_gc_task(void *data)
{
	struct xfrm_policy *policy;
	struct list_head *entry, *tmp;
	struct list_head gc_list = LIST_HEAD_INIT(gc_list);

	spin_lock_bh(&xfrm_policy_gc_lock);
	list_splice_init(&xfrm_policy_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	list_for_each_safe(entry, tmp, &gc_list) {
		policy = list_entry(entry, struct xfrm_policy, list);
		xfrm_policy_gc_kill(policy);
	}
}
297
298/* Rule must be locked. Release descentant resources, announce
299 * entry dead. The rule must be unlinked from lists to the moment.
300 */
301
/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must be unlinked from lists to the moment.
 *
 * Marks the policy dead under its lock, queues it on the GC list and
 * kicks the GC work.  Double-kill is a bug: warn and bail out rather
 * than queueing twice.
 */
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	int dead;

	write_lock_bh(&policy->lock);
	dead = policy->dead;
	policy->dead = 1;
	write_unlock_bh(&policy->lock);

	if (unlikely(dead)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&xfrm_policy_gc_lock);
	list_add(&policy->list, &xfrm_policy_gc_list);
	spin_unlock(&xfrm_policy_gc_lock);

	schedule_work(&xfrm_policy_gc_work);
}
322
323/* Generate new index... KAME seems to generate them ordered by cost
324 * of an absolute inpredictability of ordering of rules. This will not pass. */
/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute inpredictability of ordering of rules. This will not pass. */
/*
 * Produce a fresh, unused policy index for @dir.  Indices step by 8 so the
 * low 3 bits always encode the direction (see xfrm_policy_timer's
 * "xp->index & 7"); 0 is never returned.  Caller holds xfrm_policy_lock.
 */
static u32 xfrm_gen_index(int dir)
{
	u32 idx;
	struct xfrm_policy *p;
	static u32 idx_generator;

	for (;;) {
		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		/* Linear scan to guarantee uniqueness within this direction. */
		for (p = xfrm_policy_list[dir]; p; p = p->next) {
			if (p->index == idx)
				break;
		}
		if (!p)
			return idx;
	}
}
344
/*
 * Insert @policy into the @dir list, ordered by priority.  A policy with
 * an identical selector is replaced (its index is inherited) unless @excl
 * is set, in which case -EEXIST is returned.  Bumps flow_cache_genid so
 * cached lookups are invalidated, and arms the lifetime timer.
 */
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct xfrm_policy *pol, **p;
	struct xfrm_policy *delpol = NULL;
	struct xfrm_policy **newpos = NULL;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL;) {
		/* Same selector: this is a replacement, unlink the old one. */
		if (!delpol && memcmp(&policy->selector, &pol->selector, sizeof(pol->selector)) == 0) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			*p = pol->next;
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			p = &pol->next;
			continue;
		}
		/* First position where our priority wins: remember it. */
		if (!newpos)
			newpos = p;
		if (delpol)
			break;
		p = &pol->next;
	}
	if (newpos)
		p = newpos;
	xfrm_pol_hold(policy);
	policy->next = *p;
	*p = policy;
	atomic_inc(&flow_cache_genid);
	policy->index = delpol ? delpol->index : xfrm_gen_index(dir);
	policy->curlft.add_time = (unsigned long)xtime.tv_sec;
	policy->curlft.use_time = 0;
	/* Arm the lifetime timer; if it was idle, the timer now holds a ref. */
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol) {
		xfrm_policy_kill(delpol);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);
390EXPORT_SYMBOL(xfrm_policy_insert);
391
/*
 * Find (and optionally unlink) the policy in @dir whose selector matches
 * @sel exactly.  Returns the policy with a reference held (caller must
 * xfrm_pol_put), or NULL.  When @delete, the policy is also unlinked and
 * killed, and the flow cache generation is bumped.
 */
struct xfrm_policy *xfrm_policy_bysel(int dir, struct xfrm_selector *sel,
				      int delete)
{
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
		if (memcmp(sel, &pol->selector, sizeof(*sel)) == 0) {
			xfrm_pol_hold(pol);
			if (delete)
				*p = pol->next;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (pol && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
	return pol;
}
EXPORT_SYMBOL(xfrm_policy_bysel);
415
/*
 * Find (and optionally unlink) a policy by index.  The list to search is
 * derived from the index itself ("id & 7" — the direction is encoded in
 * the low bits, see xfrm_gen_index); the @dir argument is not used here.
 * Returns the policy with a reference held, or NULL.
 */
struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete)
{
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[id & 7]; (pol=*p)!=NULL; p = &pol->next) {
		if (pol->index == id) {
			xfrm_pol_hold(pol);
			if (delete)
				*p = pol->next;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (pol && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
	return pol;
}
EXPORT_SYMBOL(xfrm_policy_byid);
438
/*
 * Remove and kill every policy in all main-direction lists.  The policy
 * lock is dropped around each xfrm_policy_kill() call because killing
 * takes other locks (GC list, policy->lock); the list head is re-read
 * after re-acquiring.  Socket (per-sk) policies are not touched.
 */
void xfrm_policy_flush(void)
{
	struct xfrm_policy *xp;
	int dir;

	write_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		while ((xp = xfrm_policy_list[dir]) != NULL) {
			xfrm_policy_list[dir] = xp->next;
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_policy_kill(xp);

			write_lock_bh(&xfrm_policy_lock);
		}
	}
	atomic_inc(&flow_cache_genid);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_flush);
459
/*
 * Iterate over every policy (all 2*XFRM_POLICY_MAX lists, i.e. main and
 * sub lists) invoking @func(policy, dir, remaining_count, data).  The
 * count is pre-computed so @func receives how many entries follow it.
 * Returns -ENOENT when no policies exist, the callback's error if it
 * aborts, else 0.  Runs entirely under the read policy lock — @func must
 * not sleep.
 */
int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *xp;
	int dir;
	int count = 0;
	int error = 0;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next)
			count++;
	}

	if (count == 0) {
		error = -ENOENT;
		goto out;
	}

	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next) {
			error = func(xp, dir%XFRM_POLICY_MAX, --count, data);
			if (error)
				goto out;
		}
	}

out:
	read_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);
492
493/* Find policy to apply to this flow. */
494
/* Find policy to apply to this flow. */

/* Resolver callback used by the flow cache: scan the chain for
 * direction 'dir' and return the first policy of the right family
 * whose selector matches the flow.  Chain order determines priority.
 * On match, *objp holds the policy with a reference taken and
 * *obj_refp points at its refcount so the flow cache can manage the
 * reference; on no match *objp is NULL and *obj_refp is untouched.
 */
static void xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
			       void **objp, atomic_t **obj_refp)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	for (pol = xfrm_policy_list[dir]; pol; pol = pol->next) {
		struct xfrm_selector *sel = &pol->selector;
		int match;

		if (pol->family != family)
			continue;

		match = xfrm_selector_match(sel, fl, family);
		if (match) {
			xfrm_pol_hold(pol);
			break;
		}
	}
	read_unlock_bh(&xfrm_policy_lock);
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
}
518
519static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
520{
521 struct xfrm_policy *pol;
522
523 read_lock_bh(&xfrm_policy_lock);
524 if ((pol = sk->sk_policy[dir]) != NULL) {
525 int match = xfrm_selector_match(&pol->selector, fl,
526 sk->sk_family);
527 if (match)
528 xfrm_pol_hold(pol);
529 else
530 pol = NULL;
531 }
532 read_unlock_bh(&xfrm_policy_lock);
533 return pol;
534}
535
/* Push 'pol' onto the head of the chain for 'dir' and take the list's
 * reference.  Caller must hold xfrm_policy_lock for writing.
 */
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	pol->next = xfrm_policy_list[dir];
	xfrm_policy_list[dir] = pol;
	xfrm_pol_hold(pol);
}
542
543static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
544 int dir)
545{
546 struct xfrm_policy **polp;
547
548 for (polp = &xfrm_policy_list[dir];
549 *polp != NULL; polp = &(*polp)->next) {
550 if (*polp == pol) {
551 *polp = pol->next;
552 return pol;
553 }
554 }
555 return NULL;
556}
557
/* Unlink and kill a policy.  Only main-table directions
 * (dir < XFRM_POLICY_MAX) bump the flow cache generation; per-socket
 * policies (dir >= XFRM_POLICY_MAX) are never cached there.
 */
void xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
}
569
/* Install 'pol' as the socket's policy for 'dir', replacing (and
 * killing) any previous one.  Socket policies live in the upper half
 * of the policy table (XFRM_POLICY_MAX+dir).  Always returns 0.
 */
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct xfrm_policy *old_pol;

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = (unsigned long)xtime.tv_sec;
		pol->index = xfrm_gen_index(XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	/* Kill outside the lock; drops the list reference taken when
	 * the old policy was linked. */
	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}
591
/* Duplicate a socket policy for a child socket and link the clone
 * into the per-socket half of the table.  Returns the clone or NULL
 * on allocation failure.  The local reference from xfrm_policy_alloc
 * is dropped once linked — the list reference keeps the clone alive.
 *
 * NOTE(review): the 'family' field (checked by xfrm_policy_lookup)
 * is not copied here — verify whether socket-policy clones can ever
 * reach a path that reads it.
 */
static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}
613
614int __xfrm_sk_clone_policy(struct sock *sk)
615{
616 struct xfrm_policy *p0 = sk->sk_policy[0],
617 *p1 = sk->sk_policy[1];
618
619 sk->sk_policy[0] = sk->sk_policy[1] = NULL;
620 if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
621 return -ENOMEM;
622 if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
623 return -ENOMEM;
624 return 0;
625}
626
627/* Resolve list of templates for the flow, given policy. */
628
/* Resolve list of templates for the flow, given policy. */

/* For each template in the policy, find (or trigger acquisition of)
 * a matching xfrm_state.  Fills 'xfrm' with the resolved states and
 * returns their count; optional templates that cannot be resolved
 * are skipped.  On failure all references taken so far are dropped
 * and a negative errno is returned (-EAGAIN while an acquire is in
 * flight, -EINVAL for an errored state).
 */
static int
xfrm_tmpl_resolve(struct xfrm_policy *policy, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
{
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);

	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		/* Tunnel mode: addresses come from the template, and
		 * become the endpoints seen by the next template. */
		if (tmpl->mode) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	/* Undo the references taken on already-resolved states. */
	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}
674
675/* Check that the bundle accepts the flow and its components are
676 * still valid.
677 */
678
679static struct dst_entry *
680xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
681{
682 struct dst_entry *x;
683 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
684 if (unlikely(afinfo == NULL))
685 return ERR_PTR(-EINVAL);
686 x = afinfo->find_bundle(fl, policy);
687 xfrm_policy_put_afinfo(afinfo);
688 return x;
689}
690
691/* Allocate chain of dst_entry's, attach known xfrm's, calculate
692 * all the metrics... Shortly, bundle a bundle.
693 */
694
695static int
696xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
697 struct flowi *fl, struct dst_entry **dst_p,
698 unsigned short family)
699{
700 int err;
701 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
702 if (unlikely(afinfo == NULL))
703 return -EINVAL;
704 err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
705 xfrm_policy_put_afinfo(afinfo);
706 return err;
707}
708
709static inline int policy_to_flow_dir(int dir)
710{
711 if (XFRM_POLICY_IN == FLOW_DIR_IN &&
712 XFRM_POLICY_OUT == FLOW_DIR_OUT &&
713 XFRM_POLICY_FWD == FLOW_DIR_FWD)
714 return dir;
715 switch (dir) {
716 default:
717 case XFRM_POLICY_IN:
718 return FLOW_DIR_IN;
719 case XFRM_POLICY_OUT:
720 return FLOW_DIR_OUT;
721 case XFRM_POLICY_FWD:
722 return FLOW_DIR_FWD;
723 };
724}
725
726static int stale_bundle(struct dst_entry *dst);
727
728/* Main function: finds/creates a bundle for given flow.
729 *
730 * At the moment we eat a raw IP route. Mostly to speed up lookups
731 * on interfaces with disabled IPsec.
732 */
/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 *
 * On entry *dst_p is the plain route; on success it is replaced by
 * the xfrm bundle (or left untouched when no policy applies) and 0
 * is returned.  On failure *dst_p is set to NULL, the original
 * route's reference is dropped, and a negative errno is returned.
 * 'flags' non-zero allows sleeping to wait for key-manager
 * resolution of missing states.
 */
int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;
	int nx = 0;
	int err;
	u32 genid;
	u16 family = dst_orig->ops->family;
restart:
	/* Snapshot the flow cache generation so we can detect policy
	 * changes that happened while we slept. */
	genid = atomic_read(&flow_cache_genid);
	policy = NULL;
	if (sk && sk->sk_policy[1])
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);

	if (!policy) {
		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) || !xfrm_policy_list[XFRM_POLICY_OUT])
			return 0;

		policy = flow_cache_lookup(fl, family,
					   policy_to_flow_dir(XFRM_POLICY_OUT),
					   xfrm_policy_lookup);
	}

	if (!policy)
		return 0;

	policy->curlft.use_time = (unsigned long)xtime.tv_sec;

	switch (policy->action) {
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		xfrm_pol_put(policy);
		return -EPERM;

	case XFRM_POLICY_ALLOW:
		if (policy->xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}

		/* Try to find matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);
		if (IS_ERR(dst)) {
			xfrm_pol_put(policy);
			return PTR_ERR(dst);
		}

		if (dst)
			break;

		/* No cached bundle: resolve the states and build one. */
		nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

		if (unlikely(nx<0)) {
			err = nx;
			if (err == -EAGAIN && flags) {
				/* States are being acquired; sleep
				 * until the key manager wakes us,
				 * then retry the resolution once. */
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&km_waitq, &wait);

				nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					/* Still unresolved, or the
					 * policy set changed under
					 * us: start over. */
					xfrm_pol_put(policy);
					goto restart;
				}
				err = nx;
			}
			if (err < 0)
				goto error;
		}
		if (nx == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}

		dst = dst_orig;
		err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);

		if (unlikely(err)) {
			/* Bundle creation consumed nothing; drop the
			 * state references ourselves. */
			int i;
			for (i=0; i<nx; i++)
				xfrm_state_put(xfrm[i]);
			goto error;
		}

		write_lock_bh(&policy->lock);
		if (unlikely(policy->dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist new bundle to dead object.
			 * We can't enlist stable bundles either.
			 */
			write_unlock_bh(&policy->lock);

			xfrm_pol_put(policy);
			if (dst)
				dst_free(dst);
			goto restart;
		}
		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pol_put(policy);
	return 0;

error:
	dst_release(dst_orig);
	xfrm_pol_put(policy);
	*dst_p = NULL;
	return err;
}
EXPORT_SYMBOL(xfrm_lookup);
867
868/* When skb is transformed back to its "native" form, we have to
869 * check policy restrictions. At the moment we make this in maximally
870 * stupid way. Shame on me. :-) Of course, connected sockets must
871 * have policy cached at them.
872 */
873
/* Does the state 'x' satisfy template 'tmpl'?  Kernel-internal
 * states only satisfy optional templates whose addresses match;
 * otherwise proto, SPI (if the template pins one), reqid (if set),
 * mode, the auth-algorithm mask, and — for tunnel mode — the
 * addresses must all agree.
 */
static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, family);
	return x->id.proto == tmpl->id.proto &&
	       (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
	       (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
	       x->props.mode == tmpl->mode &&
	       (tmpl->aalgos & (1<<x->props.aalgo)) &&
	       /* Address check only matters for tunnel-mode states. */
	       !(x->props.mode && xfrm_state_addr_cmp(tmpl, x, family));
}
887
/* Scan the security path from position 'start' for a state matching
 * 'tmpl'.  Returns the index just past the match, 'start' unchanged
 * when an optional transport-mode template may simply be skipped, or
 * -1 (via start) when a required template is unmatched.  The scan
 * stops at the first tunnel-mode entry: templates cannot match
 * across a tunnel boundary.
 */
static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		/* Optional transport templates need not be present. */
		if (!tmpl->mode)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->x[idx].xvec, family))
			return ++idx;
		if (sp->x[idx].xvec->props.mode)
			break;
	}
	return start;
}
907
908static int
909_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
910{
911 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
912
913 if (unlikely(afinfo == NULL))
914 return -EAFNOSUPPORT;
915
916 afinfo->decode_session(skb, fl);
917 xfrm_policy_put_afinfo(afinfo);
918 return 0;
919}
920
921static inline int secpath_has_tunnel(struct sec_path *sp, int k)
922{
923 for (; k < sp->len; k++) {
924 if (sp->x[k].xvec->props.mode)
925 return 1;
926 }
927
928 return 0;
929}
930
/* Inbound policy check: decide whether a received (and already
 * de-transformed) packet is acceptable.  Returns 1 to accept,
 * 0 to drop.  First every used SA's selector (and optional
 * post_input hook) is validated, then the applicable policy's
 * template list is matched against the packet's security path.
 */
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct xfrm_policy *pol;
	struct flowi fl;

	if (_decode_session(skb, &fl, family) < 0)
		return 0;

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;

		for (i=skb->sp->len-1; i>=0; i--) {
			struct sec_decap_state *xvec = &(skb->sp->x[i]);
			if (!xfrm_selector_match(&xvec->xvec->sel, &fl, family))
				return 0;

			/* If there is a post_input processor, try running it */
			if (xvec->xvec->type->post_input &&
			    (xvec->xvec->type->post_input)(xvec->xvec,
							   &(xvec->decap),
							   skb) != 0)
				return 0;
		}
	}

	pol = NULL;
	/* A socket policy takes precedence over the main table. */
	if (sk && sk->sk_policy[dir])
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);

	if (!pol)
		pol = flow_cache_lookup(&fl, family,
					policy_to_flow_dir(dir),
					xfrm_policy_lookup);

	/* No policy: accept only packets that were not tunneled. */
	if (!pol)
		return !skb->sp || !secpath_has_tunnel(skb->sp, 0);

	pol->curlft.use_time = (unsigned long)xtime.tv_sec;

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = pol->xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(pol->xfrm_vec+i, sp, k, family);
			if (k < 0)
				goto reject;
		}

		/* Leftover tunnel states beyond the templates are
		 * not allowed. */
		if (secpath_has_tunnel(sp, k))
			goto reject;

		xfrm_pol_put(pol);
		return 1;
	}

reject:
	xfrm_pol_put(pol);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);
1004
/* Forward-path hook: re-route the packet through xfrm_lookup so a
 * forwarding policy can transform it.  Returns 1 if the packet may
 * be forwarded, 0 to drop it.
 */
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct flowi fl;

	if (_decode_session(skb, &fl, family) < 0)
		return 0;

	return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);
1015
1016/* Optimize later using cookies and generation ids. */
1017
1018static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
1019{
1020 if (!stale_bundle(dst))
1021 return dst;
1022
1023 return NULL;
1024}
1025
/* A bundle is stale when xfrm_bundle_ok() rejects it (dead route,
 * downed device, or invalid state somewhere in the chain).
 */
static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC);
}
1030
1031static void xfrm_dst_destroy(struct dst_entry *dst)
1032{
1033 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1034
1035 dst_release(xdst->route);
1036
1037 if (!dst->xfrm)
1038 return;
1039 xfrm_state_put(dst->xfrm);
1040 dst->xfrm = NULL;
1041}
1042
/* dst_ops->ifdown hook: when a device is unregistered, repoint every
 * child dst in the bundle that still references it at the loopback
 * device, transferring the device reference accordingly.
 */
static void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			    int unregister)
{
	if (!unregister)
		return;

	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = &loopback_dev;
		dev_hold(&loopback_dev);
		dev_put(dev);
	}
}
1055
/* dst_ops->link_failure hook: intentionally a no-op. */
static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before reaches point of failure. */
	return;
}
1061
1062static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
1063{
1064 if (dst) {
1065 if (dst->obsolete) {
1066 dst_release(dst);
1067 dst = NULL;
1068 }
1069 }
1070 return dst;
1071}
1072
/* Remove from every policy's bundle list each dst for which 'func'
 * returns true, then free the removed ones.  Per-policy locks nest
 * inside the read-held xfrm_policy_lock; freeing happens after all
 * locks are dropped.
 */
static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
	int i;
	struct xfrm_policy *pol;
	struct dst_entry *dst, **dstp, *gc_list = NULL;

	read_lock_bh(&xfrm_policy_lock);
	for (i=0; i<2*XFRM_POLICY_MAX; i++) {
		for (pol = xfrm_policy_list[i]; pol; pol = pol->next) {
			write_lock(&pol->lock);
			dstp = &pol->bundles;
			while ((dst=*dstp) != NULL) {
				if (func(dst)) {
					/* Unlink and park on the
					 * local gc list. */
					*dstp = dst->next;
					dst->next = gc_list;
					gc_list = dst;
				} else {
					dstp = &dst->next;
				}
			}
			write_unlock(&pol->lock);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		dst = gc_list;
		gc_list = dst->next;
		dst_free(dst);
	}
}
1104
/* Prune predicate: true when nothing holds a reference on the dst. */
static int unused_bundle(struct dst_entry *dst)
{
	return !atomic_read(&dst->__refcnt);
}
1109
/* dst garbage-collector callback: drop all unreferenced bundles. */
static void __xfrm_garbage_collect(void)
{
	xfrm_prune_bundles(unused_bundle);
}
1114
/* Drop every stale bundle (dead routes, downed devices, invalid
 * states).  Always returns 0.
 */
int xfrm_flush_bundles(void)
{
	xfrm_prune_bundles(stale_bundle);
	return 0;
}
1120
/* Walk a freshly built bundle from the outermost dst inward, caching
 * each level's child and route MTUs and setting the level's MTU to
 * the smaller of the transformed child MTU and the route MTU.
 */
void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		/* Account for this state's header/trailer overhead. */
		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
}

EXPORT_SYMBOL(xfrm_init_pmtu);
1143
1144/* Check that the bundle accepts the flow and its components are
1145 * still valid.
1146 */
1147
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

/* Returns 1 when the bundle starting at 'first' is still usable for
 * flow 'fl' (pass fl == NULL to skip selector checks): the path
 * route and every per-level route must pass dst_check, the device
 * must be up, and every state must be VALID.  As a side effect,
 * levels whose cached MTUs changed are refreshed and the new MTU is
 * propagated back up the chain.
 */
int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, 0) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	/* 'last' tracks the innermost level whose MTU cache changed. */
	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
			return 0;
		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, 0))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	/* Re-propagate MTUs from the deepest changed level outward
	 * to 'first', clamping by each level's route MTU. */
	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst->metrics[RTAX_MTU-1] = mtu;

		if (last == first)
			break;

		last = last->u.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}

EXPORT_SYMBOL(xfrm_bundle_ok);
1208
1209/* Well... that's _TASK_. We need to scan through transformation
1210 * list and figure out what mss tcp should generate in order to
1211 * final datagram fit to mtu. Mama mia... :-)
1212 *
1213 * Apparently, some easy way exists, but we used to choose the most
1214 * bizarre ones. :-) So, raising Kalashnikov... tra-ta-ta.
1215 *
1216 * Consider this function as something like dark humour. :-)
1217 */
/* Well... that's _TASK_. We need to scan through transformation
 * list and figure out what mss tcp should generate in order to
 * final datagram fit to mtu. Mama mia... :-)
 *
 * Apparently, some easy way exists, but we used to choose the most
 * bizarre ones. :-) So, raising Kalashnikov... tra-ta-ta.
 *
 * Consider this function as something like dark humour. :-)
 */

/* Iteratively shrink a payload-size guess until, after adding every
 * transformation's worst-case expansion, the packet fits in 'mtu'.
 * Gives up (returning mtu) once the guess drops below 88 bytes.
 */
static int xfrm_get_mss(struct dst_entry *dst, u32 mtu)
{
	int res = mtu - dst->header_len;

	for (;;) {
		struct dst_entry *d = dst;
		int m = res;

		/* Simulate the expansion of a 'res'-byte payload
		 * through every state in the bundle. */
		do {
			struct xfrm_state *x = d->xfrm;
			if (x) {
				spin_lock_bh(&x->lock);
				if (x->km.state == XFRM_STATE_VALID &&
				    x->type && x->type->get_max_size)
					m = x->type->get_max_size(d->xfrm, m);
				else
					m += x->props.header_len;
				spin_unlock_bh(&x->lock);
			}
		} while ((d = d->child) != NULL);

		if (m <= mtu)
			break;
		res -= (m - mtu);
		if (res < 88)
			return mtu;
	}

	return res + dst->header_len;
}
1248
/* Register an address family's policy operations.  Any dst_ops or
 * garbage-collect hooks the family left NULL are filled with the
 * generic xfrm implementations.  Returns 0, -EINVAL for a NULL
 * argument, -EAFNOSUPPORT for an out-of-range family, or -ENOBUFS
 * when the slot is already taken.
 */
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->destroy == NULL))
			dst_ops->destroy = xfrm_dst_destroy;
		if (likely(dst_ops->ifdown == NULL))
			dst_ops->ifdown = xfrm_dst_ifdown;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->get_mss == NULL))
			dst_ops->get_mss = xfrm_get_mss;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);
1283
/* Unregister an address family's policy operations, clearing every
 * hook that registration filled in.  Returns 0 on success, -EINVAL
 * for a NULL or mismatched argument, -EAFNOSUPPORT for an
 * out-of-range family.
 */
int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		/* Only the currently registered afinfo may unregister. */
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->destroy = NULL;
			dst_ops->ifdown = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			dst_ops->get_mss = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
1312
/* Look up the afinfo for 'family' and return it with its read lock
 * held (released by xfrm_policy_put_afinfo); NULL if the family is
 * out of range or unregistered.  Taking afinfo->lock before dropping
 * the table lock pins the entry against unregistration.
 */
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (likely(afinfo != NULL))
		read_lock(&afinfo->lock);
	read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}
1325
/* Release the per-afinfo read lock taken by xfrm_policy_get_afinfo. */
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	if (unlikely(afinfo == NULL))
		return;
	read_unlock(&afinfo->lock);
}
1332
/* Netdevice notifier: when a device goes down, flush stale bundles
 * so no dst keeps referencing it.
 */
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles();
	}
	return NOTIFY_DONE;
}
1341
1342static struct notifier_block xfrm_dev_notifier = {
1343 xfrm_dev_event,
1344 NULL,
1345 0
1346};
1347
/* Boot-time setup for the policy layer: create the xfrm_dst slab
 * (fatal if it fails — nothing works without it), prepare the policy
 * GC work item, and hook netdevice events.
 */
static void __init xfrm_policy_init(void)
{
	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL, NULL);
	if (!xfrm_dst_cache)
		panic("XFRM: failed to allocate xfrm_dst_cache\n");

	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
	register_netdevice_notifier(&xfrm_dev_notifier);
}
1360
/* Top-level xfrm initialization: states, then policies, then the
 * input path.
 */
void __init xfrm_init(void)
{
	xfrm_state_init();
	xfrm_policy_init();
	xfrm_input_init();
}
1367
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
new file mode 100644
index 000000000000..1db59f11f37d
--- /dev/null
+++ b/net/xfrm/xfrm_state.c
@@ -0,0 +1,1037 @@
1/*
2 * xfrm_state.c
3 *
4 * Changes:
5 * Mitsuru KANDA @USAGI
6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8 * IPv6 support
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
13 *
14 */
15
16#include <linux/workqueue.h>
17#include <net/xfrm.h>
18#include <linux/pfkeyv2.h>
19#include <linux/ipsec.h>
20#include <linux/module.h>
21#include <asm/uaccess.h>
22
23/* Each xfrm_state may be linked to two tables:
24
25 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
26 2. Hash table by daddr to find what SAs exist for given
27 destination/tunnel endpoint. (output)
28 */
29
30static DEFINE_SPINLOCK(xfrm_state_lock);
31
32/* Hash table to find appropriate SA towards given target (endpoint
33 * of tunnel or destination of transport mode) allowed by selector.
34 *
35 * Main use is finding SA after policy selected tunnel or transport mode.
36 * Also, it can be used by ah/esp icmp error handler to find offending SA.
37 */
38static struct list_head xfrm_state_bydst[XFRM_DST_HSIZE];
39static struct list_head xfrm_state_byspi[XFRM_DST_HSIZE];
40
41DECLARE_WAIT_QUEUE_HEAD(km_waitq);
42EXPORT_SYMBOL(km_waitq);
43
44static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
45static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
46
47static struct work_struct xfrm_state_gc_work;
48static struct list_head xfrm_state_gc_list = LIST_HEAD_INIT(xfrm_state_gc_list);
49static DEFINE_SPINLOCK(xfrm_state_gc_lock);
50
51static int xfrm_state_gc_flush_bundles;
52
53static void __xfrm_state_delete(struct xfrm_state *x);
54
55static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
56static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
57
58static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
59static void km_state_expired(struct xfrm_state *x, int hard);
60
61static void xfrm_state_gc_destroy(struct xfrm_state *x)
62{
63 if (del_timer(&x->timer))
64 BUG();
65 if (x->aalg)
66 kfree(x->aalg);
67 if (x->ealg)
68 kfree(x->ealg);
69 if (x->calg)
70 kfree(x->calg);
71 if (x->encap)
72 kfree(x->encap);
73 if (x->type) {
74 x->type->destructor(x);
75 xfrm_put_type(x->type);
76 }
77 kfree(x);
78}
79
/* Deferred-destruction worker: optionally flush stale bundles first
 * (so dsts drop their state references), then splice the pending GC
 * list under the lock and destroy each state outside it.  Waiters on
 * km_waitq (e.g. xfrm_state_flush callers) are woken when done.
 */
static void xfrm_state_gc_task(void *data)
{
	struct xfrm_state *x;
	struct list_head *entry, *tmp;
	struct list_head gc_list = LIST_HEAD_INIT(gc_list);

	if (xfrm_state_gc_flush_bundles) {
		xfrm_state_gc_flush_bundles = 0;
		xfrm_flush_bundles();
	}

	spin_lock_bh(&xfrm_state_gc_lock);
	list_splice_init(&xfrm_state_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	list_for_each_safe(entry, tmp, &gc_list) {
		/* States are queued on their (now unused) bydst link. */
		x = list_entry(entry, struct xfrm_state, bydst);
		xfrm_state_gc_destroy(x);
	}
	wake_up(&km_waitq);
}
101
102static inline unsigned long make_jiffies(long secs)
103{
104 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
105 return MAX_SCHEDULE_TIMEOUT-1;
106 else
107 return secs*HZ;
108}
109
/* Per-state lifetime timer.  Computes the nearest hard/soft expiry,
 * fires key-manager notifications (soft => warn, hard => expire),
 * deletes the state on hard expiry, and otherwise re-arms itself for
 * the next deadline.  The timer holds its own state reference, which
 * is dropped at the end unless mod_timer re-took one.
 */
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = (unsigned long)xtime.tv_sec;
	long next = LONG_MAX;
	int warn = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		/* use_time of 0 means "never used": count from now. */
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	/* Already dying: soft notifications were sent; only hard
	 * deadlines still matter. */
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_state_expired(x, 0);
resched:
	/* Re-arm; if the timer was not already pending, take a fresh
	 * reference for it. */
	if (next != LONG_MAX &&
	    !mod_timer(&x->timer, jiffies + make_jiffies(next)))
		xfrm_state_hold(x);
	goto out;

expired:
	/* An ACQ state without an SPI just flips to EXPIRED and gets
	 * one short grace period before deletion. */
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}
	if (x->id.spi != 0)
		km_state_expired(x, 1);
	__xfrm_state_delete(x);

out:
	spin_unlock(&x->lock);
	xfrm_state_put(x);
}
180
/* Allocate and zero-initialize a state object (GFP_ATOMIC, so safe
 * in softirq context).  The caller receives the single initial
 * reference; byte/packet lifetime limits default to "infinite".
 * Returns NULL on allocation failure.
 */
struct xfrm_state *xfrm_state_alloc(void)
{
	struct xfrm_state *x;

	x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		memset(x, 0, sizeof(struct xfrm_state));
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_LIST_HEAD(&x->bydst);
		INIT_LIST_HEAD(&x->byspi);
		init_timer(&x->timer);
		x->timer.function = xfrm_timer_handler;
		x->timer.data	  = (unsigned long)x;
		x->curlft.add_time = (unsigned long)xtime.tv_sec;
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
206
/* Called when the last reference drops: queue the (already DEAD)
 * state for deferred destruction on the GC work queue rather than
 * freeing inline — destruction may need process context.
 */
void __xfrm_state_destroy(struct xfrm_state *x)
{
	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	/* Reuse the bydst link as the GC list hook. */
	list_add(&x->bydst, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
217
/* Mark a state DEAD and drop every reference the tables hold on it:
 * the bydst hash link, the byspi hash link (if an SPI was assigned),
 * the pending timer, and finally the allocation's own reference.
 * Caller holds x->lock and its own reference.
 */
static void __xfrm_state_delete(struct xfrm_state *x)
{
	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		list_del(&x->bydst);
		atomic_dec(&x->refcnt);
		if (x->id.spi) {
			list_del(&x->byspi);
			atomic_dec(&x->refcnt);
		}
		spin_unlock(&xfrm_state_lock);
		if (del_timer(&x->timer))
			atomic_dec(&x->refcnt);

		/* The number two in this test is the reference
		 * mentioned in the comment below plus the reference
		 * our caller holds. A larger value means that
		 * there are DSTs attached to this xfrm_state.
		 */
		if (atomic_read(&x->refcnt) > 2) {
			/* Ask the GC worker to flush bundles so the
			 * dst-held references get released too. */
			xfrm_state_gc_flush_bundles = 1;
			schedule_work(&xfrm_state_gc_work);
		}

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		atomic_dec(&x->refcnt);
	}
}
250
/* Public wrapper: delete a state under its own lock. */
void xfrm_state_delete(struct xfrm_state *x)
{
	spin_lock_bh(&x->lock);
	__xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);
}
EXPORT_SYMBOL(xfrm_state_delete);
258
/* Delete every non-kernel state matching 'proto' (IPSEC_PROTO_ANY
 * flushes all).  The table lock is dropped around each delete, so
 * the bucket scan restarts from the head after every removal.
 */
void xfrm_state_flush(u8 proto)
{
	int i;
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i < XFRM_DST_HSIZE; i++) {
restart:
		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    (proto == IPSEC_PROTO_ANY || x->id.proto == proto)) {
				/* Hold across the unlocked delete so
				 * 'x' cannot vanish under us. */
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				xfrm_state_delete(x);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
}
EXPORT_SYMBOL(xfrm_state_flush);
285
286static int
287xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
288 struct xfrm_tmpl *tmpl,
289 xfrm_address_t *daddr, xfrm_address_t *saddr,
290 unsigned short family)
291{
292 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
293 if (!afinfo)
294 return -1;
295 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
296 xfrm_state_put_afinfo(afinfo);
297 return 0;
298}
299
/* Find (or begin acquiring) a state satisfying template 'tmpl' for
 * flow 'fl'.  Among VALID candidates the "best" is the one least
 * dying, ties broken by newest add_time.  If nothing matches and no
 * acquisition is already in flight, an ACQ placeholder state is
 * created and the key manager queried.  Returns the state with a
 * reference held, or NULL with *err set (-EAGAIN while acquiring,
 * -EEXIST, -ENOMEM, -ESRCH, -EAFNOSUPPORT).
 */
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned h = xfrm_dst_hash(daddr, family);
	struct xfrm_state *x, *x0;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;
	struct xfrm_state_afinfo *afinfo;

	afinfo = xfrm_state_get_afinfo(family);
	if (afinfo == NULL) {
		*err = -EAFNOSUPPORT;
		return NULL;
	}

	spin_lock_bh(&xfrm_state_lock);
	list_for_each_entry(x, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if (!xfrm_selector_match(&x->sel, fl, family))
					continue;
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, family))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		/* If the template pins an exact SA that exists but
		 * did not match above, do not acquire a duplicate. */
		x0 = afinfo->state_lookup(&tmpl->id.daddr, tmpl->id.spi, tmpl->id.proto);
		if (x0 != NULL) {
			xfrm_state_put(x0);
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc();
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		if (km_query(x, tmpl, pol) == 0) {
			/* Key manager accepted: park an ACQ state in
			 * the tables with a bounded lifetime. */
			x->km.state = XFRM_STATE_ACQ;
			list_add_tail(&x->bydst, xfrm_state_bydst+h);
			xfrm_state_hold(x);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				list_add(&x->byspi, xfrm_state_byspi+h);
				xfrm_state_hold(x);
			}
			x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
			xfrm_state_hold(x);
			x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
			add_timer(&x->timer);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return x;
}
404
/* Link x into the bydst and byspi hash chains and arm its timer.
 * Caller must hold xfrm_state_lock.  Each chain insertion takes its
 * own reference on x, as does the newly armed timer. */
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned h = xfrm_dst_hash(&x->id.daddr, x->props.family);

	list_add(&x->bydst, xfrm_state_bydst+h);
	xfrm_state_hold(x);	/* reference owned by the bydst chain */

	h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);

	list_add(&x->byspi, xfrm_state_byspi+h);
	xfrm_state_hold(x);	/* reference owned by the byspi chain */

	/* mod_timer() returns 0 when the timer was not already pending,
	 * i.e. the timer now holds a fresh reference of its own. */
	if (!mod_timer(&x->timer, jiffies + HZ))
		xfrm_state_hold(x);

	/* Wake km_waitq sleepers waiting for state changes. */
	wake_up(&km_waitq);
}
422
/* Public wrapper: insert state x into the hash tables under the
 * state lock. */
void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
430
431static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
432
/* Add a brand-new SA.  Fails with -EEXIST when an identical
 * (daddr, spi, proto) state is already installed.  If a matching
 * ACQUIRE placeholder is found (by km sequence or by find_acq), the
 * new state is inserted and the placeholder is deleted afterwards,
 * outside the state lock. */
int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_state *x1;
	int family;
	int err;

	family = x->props.family;
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);

	x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
	if (x1) {
		/* Exact duplicate already present. */
		xfrm_state_put(x1);
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	/* Locate the placeholder this SA resolves: first by the key
	 * manager sequence number (it must be for the same daddr)... */
	if (x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
			xfrm_state_put(x1);
			x1 = NULL;
		}
	}

	/* ...then by (mode, reqid, proto, addresses); create=0 so this
	 * only finds an existing ACQUIRE state, never makes one. */
	if (!x1)
		x1 = afinfo->find_acq(
			x->props.mode, x->props.reqid, x->id.proto,
			&x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);

	if (x1) {
		/* Retire the placeholder now that the real SA is in. */
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
483
/* Update an existing SA from the image x.  If the installed state is
 * still an ACQUIRE placeholder, x simply replaces it.  Otherwise the
 * encap template and lifetime configuration of the installed state are
 * refreshed in place under its own lock.  Returns 0 or a negative
 * errno; -EEXIST means the state is kernel-owned and untouchable. */
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	struct xfrm_state *x1;
	int err;

	afinfo = xfrm_state_get_afinfo(x->props.family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		/* Kernel-internal (tunnel) states may not be updated. */
		xfrm_state_put(x1);
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		/* Placeholder: insert the real SA; x == NULL below then
		 * signals "delete the placeholder and we are done". */
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);

	if (err)
		return err;

	if (!x) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	/* Refresh the live state under its own lock; it must still be
	 * VALID or the update is rejected. */
	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		/* Re-arm the timer; 0 return means it gains a reference. */
		if (!mod_timer(&x1->timer, jiffies + HZ))
			xfrm_state_hold(x1);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
548
/* Check byte/packet lifetime limits on x.  Stamps first-use time on
 * the first call.  On hard expiry the key managers are notified, the
 * timer is re-armed for final cleanup and -EINVAL is returned; on soft
 * expiry the managers are notified once (km.dying guards repeats) but
 * the state remains usable. */
int xfrm_state_check_expire(struct xfrm_state *x)
{
	if (!x->curlft.use_time)
		x->curlft.use_time = (unsigned long)xtime.tv_sec;

	if (x->km.state != XFRM_STATE_VALID)
		return -EINVAL;

	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
	    x->curlft.packets >= x->lft.hard_packet_limit) {
		km_state_expired(x, 1);
		/* mod_timer() returning 0 means the timer takes a ref. */
		if (!mod_timer(&x->timer, jiffies + XFRM_ACQ_EXPIRES*HZ))
			xfrm_state_hold(x);
		return -EINVAL;
	}

	if (!x->km.dying &&
	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
	     x->curlft.packets >= x->lft.soft_packet_limit))
		km_state_expired(x, 0);
	return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);
572
573static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
574{
575 int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
576 - skb_headroom(skb);
577
578 if (nhead > 0)
579 return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
580
581 /* Check tail too... */
582 return 0;
583}
584
/* Validate that state x may transform skb: not expired, and with
 * enough headroom.  Returns 0 or a negative errno. */
int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = xfrm_state_check_expire(x);

	if (err >= 0)
		err = xfrm_state_check_space(x, skb);
	return err;
}
EXPORT_SYMBOL(xfrm_state_check);
595
596struct xfrm_state *
597xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
598 unsigned short family)
599{
600 struct xfrm_state *x;
601 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
602 if (!afinfo)
603 return NULL;
604
605 spin_lock_bh(&xfrm_state_lock);
606 x = afinfo->state_lookup(daddr, spi, proto);
607 spin_unlock_bh(&xfrm_state_lock);
608 xfrm_state_put_afinfo(afinfo);
609 return x;
610}
611EXPORT_SYMBOL(xfrm_state_lookup);
612
613struct xfrm_state *
614xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
615 xfrm_address_t *daddr, xfrm_address_t *saddr,
616 int create, unsigned short family)
617{
618 struct xfrm_state *x;
619 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
620 if (!afinfo)
621 return NULL;
622
623 spin_lock_bh(&xfrm_state_lock);
624 x = afinfo->find_acq(mode, reqid, proto, daddr, saddr, create);
625 spin_unlock_bh(&xfrm_state_lock);
626 xfrm_state_put_afinfo(afinfo);
627 return x;
628}
629EXPORT_SYMBOL(xfrm_find_acq);
630
631/* Silly enough, but I'm lazy to build resolution list */
632
633static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
634{
635 int i;
636 struct xfrm_state *x;
637
638 for (i = 0; i < XFRM_DST_HSIZE; i++) {
639 list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
640 if (x->km.seq == seq && x->km.state == XFRM_STATE_ACQ) {
641 xfrm_state_hold(x);
642 return x;
643 }
644 }
645 }
646 return NULL;
647}
648
649struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
650{
651 struct xfrm_state *x;
652
653 spin_lock_bh(&xfrm_state_lock);
654 x = __xfrm_find_acq_byseq(seq);
655 spin_unlock_bh(&xfrm_state_lock);
656 return x;
657}
658EXPORT_SYMBOL(xfrm_find_acq_byseq);
659
660u32 xfrm_get_acqseq(void)
661{
662 u32 res;
663 static u32 acqseq;
664 static DEFINE_SPINLOCK(acqseq_lock);
665
666 spin_lock_bh(&acqseq_lock);
667 res = (++acqseq ? : ++acqseq);
668 spin_unlock_bh(&acqseq_lock);
669 return res;
670}
671EXPORT_SYMBOL(xfrm_get_acqseq);
672
/* Allocate an SPI for x in [minspi, maxspi] (both network order).
 * With minspi == maxspi the single value is taken if free; otherwise
 * random values in the range are probed, at most range-size times.
 * On success the state is hashed into byspi (the chain takes a ref)
 * and km_waitq sleepers are woken.  No-op if x already has an SPI;
 * failure leaves x->id.spi == 0. */
void
xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
{
	u32 h;
	struct xfrm_state *x0;

	if (x->id.spi)
		return;

	if (minspi == maxspi) {
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			/* Requested SPI is taken. */
			xfrm_state_put(x0);
			return;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		/* Work in host order for arithmetic on the range. */
		minspi = ntohl(minspi);
		maxspi = ntohl(maxspi);
		for (h=0; h<maxspi-minspi+1; h++) {
			spi = minspi + net_random()%(maxspi-minspi+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		list_add(&x->byspi, xfrm_state_byspi+h);
		xfrm_state_hold(x);	/* reference owned by byspi chain */
		spin_unlock_bh(&xfrm_state_lock);
		wake_up(&km_waitq);
	}
}
EXPORT_SYMBOL(xfrm_alloc_spi);
713
/* Invoke func on every state matching proto (IPSEC_PROTO_ANY matches
 * everything), with xfrm_state_lock held throughout.  A first pass
 * counts the matches so that func receives a countdown as its second
 * argument (the final call gets 0).  Returns -ENOENT when nothing
 * matches, else the first nonzero return from func (0 on success). */
int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	int i;
	struct xfrm_state *x;
	int count = 0;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	/* Pass 1: count matching states. */
	for (i = 0; i < XFRM_DST_HSIZE; i++) {
		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
			if (proto == IPSEC_PROTO_ANY || x->id.proto == proto)
				count++;
		}
	}
	if (count == 0) {
		err = -ENOENT;
		goto out;
	}

	/* Pass 2: visit them; the lock is held, so the count from
	 * pass 1 is still accurate. */
	for (i = 0; i < XFRM_DST_HSIZE; i++) {
		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
			if (proto != IPSEC_PROTO_ANY && x->id.proto != proto)
				continue;
			err = func(x, --count, data);
			if (err)
				goto out;
		}
	}
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
748
749int xfrm_replay_check(struct xfrm_state *x, u32 seq)
750{
751 u32 diff;
752
753 seq = ntohl(seq);
754
755 if (unlikely(seq == 0))
756 return -EINVAL;
757
758 if (likely(seq > x->replay.seq))
759 return 0;
760
761 diff = x->replay.seq - seq;
762 if (diff >= x->props.replay_window) {
763 x->stats.replay_window++;
764 return -EINVAL;
765 }
766
767 if (x->replay.bitmap & (1U << diff)) {
768 x->stats.replay++;
769 return -EINVAL;
770 }
771 return 0;
772}
773EXPORT_SYMBOL(xfrm_replay_check);
774
775void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
776{
777 u32 diff;
778
779 seq = ntohl(seq);
780
781 if (seq > x->replay.seq) {
782 diff = seq - x->replay.seq;
783 if (diff < x->props.replay_window)
784 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
785 else
786 x->replay.bitmap = 1;
787 x->replay.seq = seq;
788 } else {
789 diff = x->replay.seq - seq;
790 x->replay.bitmap |= (1U << diff);
791 }
792}
793EXPORT_SYMBOL(xfrm_replay_advance);
794
795static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
796static DEFINE_RWLOCK(xfrm_km_lock);
797
798static void km_state_expired(struct xfrm_state *x, int hard)
799{
800 struct xfrm_mgr *km;
801
802 if (hard)
803 x->km.state = XFRM_STATE_EXPIRED;
804 else
805 x->km.dying = 1;
806
807 read_lock(&xfrm_km_lock);
808 list_for_each_entry(km, &xfrm_km_list, list)
809 km->notify(x, hard);
810 read_unlock(&xfrm_km_lock);
811
812 if (hard)
813 wake_up(&km_waitq);
814}
815
816static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
817{
818 int err = -EINVAL;
819 struct xfrm_mgr *km;
820
821 read_lock(&xfrm_km_lock);
822 list_for_each_entry(km, &xfrm_km_list, list) {
823 err = km->acquire(x, t, pol, XFRM_POLICY_OUT);
824 if (!err)
825 break;
826 }
827 read_unlock(&xfrm_km_lock);
828 return err;
829}
830
831int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
832{
833 int err = -EINVAL;
834 struct xfrm_mgr *km;
835
836 read_lock(&xfrm_km_lock);
837 list_for_each_entry(km, &xfrm_km_list, list) {
838 if (km->new_mapping)
839 err = km->new_mapping(x, ipaddr, sport);
840 if (!err)
841 break;
842 }
843 read_unlock(&xfrm_km_lock);
844 return err;
845}
846EXPORT_SYMBOL(km_new_mapping);
847
848void km_policy_expired(struct xfrm_policy *pol, int dir, int hard)
849{
850 struct xfrm_mgr *km;
851
852 read_lock(&xfrm_km_lock);
853 list_for_each_entry(km, &xfrm_km_list, list)
854 if (km->notify_policy)
855 km->notify_policy(pol, dir, hard);
856 read_unlock(&xfrm_km_lock);
857
858 if (hard)
859 wake_up(&km_waitq);
860}
861
/* Socket-policy setsockopt helper: copy the raw policy blob from
 * userspace and let each registered key manager try to compile it.
 * compile_policy() reports through *err; a non-negative value is the
 * policy DIRECTION (used below as the insert argument), negative is
 * an error.  Returns 0 or a negative errno. */
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk->sk_family, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		/* err is the direction compiled by the key manager. */
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
901
/* Register a key manager so it receives xfrm notifications and
 * queries.  Always succeeds. */
int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);
910
/* Remove a previously registered key manager.  Always succeeds. */
int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);
919
920int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
921{
922 int err = 0;
923 if (unlikely(afinfo == NULL))
924 return -EINVAL;
925 if (unlikely(afinfo->family >= NPROTO))
926 return -EAFNOSUPPORT;
927 write_lock(&xfrm_state_afinfo_lock);
928 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
929 err = -ENOBUFS;
930 else {
931 afinfo->state_bydst = xfrm_state_bydst;
932 afinfo->state_byspi = xfrm_state_byspi;
933 xfrm_state_afinfo[afinfo->family] = afinfo;
934 }
935 write_unlock(&xfrm_state_afinfo_lock);
936 return err;
937}
938EXPORT_SYMBOL(xfrm_state_register_afinfo);
939
940int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
941{
942 int err = 0;
943 if (unlikely(afinfo == NULL))
944 return -EINVAL;
945 if (unlikely(afinfo->family >= NPROTO))
946 return -EAFNOSUPPORT;
947 write_lock(&xfrm_state_afinfo_lock);
948 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
949 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
950 err = -EINVAL;
951 else {
952 xfrm_state_afinfo[afinfo->family] = NULL;
953 afinfo->state_byspi = NULL;
954 afinfo->state_bydst = NULL;
955 }
956 }
957 write_unlock(&xfrm_state_afinfo_lock);
958 return err;
959}
960EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
961
/* Look up the per-family ops table entry and return it with
 * afinfo->lock read-held.  Lock coupling: the table lock is dropped
 * only after the entry lock is taken, so the entry cannot be
 * unregistered while in use.  Release via xfrm_state_put_afinfo(). */
static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (likely(afinfo != NULL))
		read_lock(&afinfo->lock);
	read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}
974
975static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
976{
977 if (unlikely(afinfo == NULL))
978 return;
979 read_unlock(&afinfo->lock);
980}
981
/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/* Detach x from its inner tunnel state: drop x's tunnel_users count
 * and reference.  NOTE(review): when tunnel_users == 2 the tunnel
 * state is deleted as well — presumably "creator + this user" is the
 * last meaningful pair; confirm against the tunnel_users producers. */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
996
/* Compute the largest payload size such that the packet, once
 * transformed by x, still fits in mtu.  Iterates to a fixed point
 * because the type's get_max_size() need not be a simple offset
 * (padding, block alignment).  Never returns less than 68, the
 * minimum IPv4 MTU. */
int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
	int res = mtu;

	res -= x->props.header_len;

	for (;;) {
		int m = res;

		if (m < 68)
			return 68;

		/* x->lock guards km.state and type against changes. */
		spin_lock_bh(&x->lock);
		if (x->km.state == XFRM_STATE_VALID &&
		    x->type && x->type->get_max_size)
			m = x->type->get_max_size(x, m);
		else
			m += x->props.header_len;
		spin_unlock_bh(&x->lock);

		if (m <= mtu)
			break;
		/* Overshot: shrink the candidate by the excess, retry. */
		res -= (m - mtu);
	}

	return res;
}

EXPORT_SYMBOL(xfrm_state_mtu);
1026
1027void __init xfrm_state_init(void)
1028{
1029 int i;
1030
1031 for (i=0; i<XFRM_DST_HSIZE; i++) {
1032 INIT_LIST_HEAD(&xfrm_state_bydst[i]);
1033 INIT_LIST_HEAD(&xfrm_state_byspi[i]);
1034 }
1035 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
1036}
1037
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
new file mode 100644
index 000000000000..63661b0fd736
--- /dev/null
+++ b/net/xfrm/xfrm_user.c
@@ -0,0 +1,1253 @@
1/* xfrm_user.c: User interface to configure xfrm engine.
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 *
5 * Changes:
6 * Mitsuru KANDA @USAGI
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * IPv6 support
10 *
11 */
12
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/types.h>
16#include <linux/slab.h>
17#include <linux/socket.h>
18#include <linux/string.h>
19#include <linux/net.h>
20#include <linux/skbuff.h>
21#include <linux/netlink.h>
22#include <linux/rtnetlink.h>
23#include <linux/pfkeyv2.h>
24#include <linux/ipsec.h>
25#include <linux/init.h>
26#include <linux/security.h>
27#include <net/sock.h>
28#include <net/xfrm.h>
29#include <asm/uaccess.h>
30
31static struct sock *xfrm_nl;
32
33static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type)
34{
35 struct rtattr *rt = xfrma[type - 1];
36 struct xfrm_algo *algp;
37
38 if (!rt)
39 return 0;
40
41 if ((rt->rta_len - sizeof(*rt)) < sizeof(*algp))
42 return -EINVAL;
43
44 algp = RTA_DATA(rt);
45 switch (type) {
46 case XFRMA_ALG_AUTH:
47 if (!algp->alg_key_len &&
48 strcmp(algp->alg_name, "digest_null") != 0)
49 return -EINVAL;
50 break;
51
52 case XFRMA_ALG_CRYPT:
53 if (!algp->alg_key_len &&
54 strcmp(algp->alg_name, "cipher_null") != 0)
55 return -EINVAL;
56 break;
57
58 case XFRMA_ALG_COMP:
59 /* Zero length keys are legal. */
60 break;
61
62 default:
63 return -EINVAL;
64 };
65
66 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
67 return 0;
68}
69
70static int verify_encap_tmpl(struct rtattr **xfrma)
71{
72 struct rtattr *rt = xfrma[XFRMA_ENCAP - 1];
73 struct xfrm_encap_tmpl *encap;
74
75 if (!rt)
76 return 0;
77
78 if ((rt->rta_len - sizeof(*rt)) < sizeof(*encap))
79 return -EINVAL;
80
81 return 0;
82}
83
84static int verify_newsa_info(struct xfrm_usersa_info *p,
85 struct rtattr **xfrma)
86{
87 int err;
88
89 err = -EINVAL;
90 switch (p->family) {
91 case AF_INET:
92 break;
93
94 case AF_INET6:
95#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
96 break;
97#else
98 err = -EAFNOSUPPORT;
99 goto out;
100#endif
101
102 default:
103 goto out;
104 };
105
106 err = -EINVAL;
107 switch (p->id.proto) {
108 case IPPROTO_AH:
109 if (!xfrma[XFRMA_ALG_AUTH-1] ||
110 xfrma[XFRMA_ALG_CRYPT-1] ||
111 xfrma[XFRMA_ALG_COMP-1])
112 goto out;
113 break;
114
115 case IPPROTO_ESP:
116 if ((!xfrma[XFRMA_ALG_AUTH-1] &&
117 !xfrma[XFRMA_ALG_CRYPT-1]) ||
118 xfrma[XFRMA_ALG_COMP-1])
119 goto out;
120 break;
121
122 case IPPROTO_COMP:
123 if (!xfrma[XFRMA_ALG_COMP-1] ||
124 xfrma[XFRMA_ALG_AUTH-1] ||
125 xfrma[XFRMA_ALG_CRYPT-1])
126 goto out;
127 break;
128
129 default:
130 goto out;
131 };
132
133 if ((err = verify_one_alg(xfrma, XFRMA_ALG_AUTH)))
134 goto out;
135 if ((err = verify_one_alg(xfrma, XFRMA_ALG_CRYPT)))
136 goto out;
137 if ((err = verify_one_alg(xfrma, XFRMA_ALG_COMP)))
138 goto out;
139 if ((err = verify_encap_tmpl(xfrma)))
140 goto out;
141
142 err = -EINVAL;
143 switch (p->mode) {
144 case 0:
145 case 1:
146 break;
147
148 default:
149 goto out;
150 };
151
152 err = 0;
153
154out:
155 return err;
156}
157
158static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
159 struct xfrm_algo_desc *(*get_byname)(char *, int),
160 struct rtattr *u_arg)
161{
162 struct rtattr *rta = u_arg;
163 struct xfrm_algo *p, *ualg;
164 struct xfrm_algo_desc *algo;
165
166 if (!rta)
167 return 0;
168
169 ualg = RTA_DATA(rta);
170
171 algo = get_byname(ualg->alg_name, 1);
172 if (!algo)
173 return -ENOSYS;
174 *props = algo->desc.sadb_alg_id;
175
176 p = kmalloc(sizeof(*ualg) + ualg->alg_key_len, GFP_KERNEL);
177 if (!p)
178 return -ENOMEM;
179
180 memcpy(p, ualg, sizeof(*ualg) + ualg->alg_key_len);
181 *algpp = p;
182 return 0;
183}
184
185static int attach_encap_tmpl(struct xfrm_encap_tmpl **encapp, struct rtattr *u_arg)
186{
187 struct rtattr *rta = u_arg;
188 struct xfrm_encap_tmpl *p, *uencap;
189
190 if (!rta)
191 return 0;
192
193 uencap = RTA_DATA(rta);
194 p = kmalloc(sizeof(*p), GFP_KERNEL);
195 if (!p)
196 return -ENOMEM;
197
198 memcpy(p, uencap, sizeof(*p));
199 *encapp = p;
200 return 0;
201}
202
203static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
204{
205 memcpy(&x->id, &p->id, sizeof(x->id));
206 memcpy(&x->sel, &p->sel, sizeof(x->sel));
207 memcpy(&x->lft, &p->lft, sizeof(x->lft));
208 x->props.mode = p->mode;
209 x->props.replay_window = p->replay_window;
210 x->props.reqid = p->reqid;
211 x->props.family = p->family;
212 x->props.saddr = p->saddr;
213 x->props.flags = p->flags;
214}
215
/* Build a fully initialised xfrm_state from a userspace request plus
 * its algorithm/encap attributes.  Returns a state holding one
 * reference, or NULL with *errp set.  On any failure after allocation
 * the state is marked DEAD and released, so the state GC frees the
 * attached algorithm copies. */
static struct xfrm_state *xfrm_state_construct(struct xfrm_usersa_info *p,
					       struct rtattr **xfrma,
					       int *errp)
{
	struct xfrm_state *x = xfrm_state_alloc();
	int err = -ENOMEM;

	if (!x)
		goto error_no_put;

	copy_from_user_state(x, p);

	if ((err = attach_one_algo(&x->aalg, &x->props.aalgo,
				   xfrm_aalg_get_byname,
				   xfrma[XFRMA_ALG_AUTH-1])))
		goto error;
	if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
				   xfrm_ealg_get_byname,
				   xfrma[XFRMA_ALG_CRYPT-1])))
		goto error;
	if ((err = attach_one_algo(&x->calg, &x->props.calgo,
				   xfrm_calg_get_byname,
				   xfrma[XFRMA_ALG_COMP-1])))
		goto error;
	if ((err = attach_encap_tmpl(&x->encap, xfrma[XFRMA_ENCAP-1])))
		goto error;

	/* Resolve and initialise the transform type (AH/ESP/IPcomp). */
	err = -ENOENT;
	x->type = xfrm_get_type(x->id.proto, x->props.family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x, NULL);
	if (err)
		goto error;

	x->curlft.add_time = (unsigned long) xtime.tv_sec;
	x->km.state = XFRM_STATE_VALID;
	x->km.seq = p->seq;

	return x;

error:
	x->km.state = XFRM_STATE_DEAD;
	xfrm_state_put(x);
error_no_put:
	*errp = err;
	return NULL;
}
265
266static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
267{
268 struct xfrm_usersa_info *p = NLMSG_DATA(nlh);
269 struct xfrm_state *x;
270 int err;
271
272 err = verify_newsa_info(p, (struct rtattr **) xfrma);
273 if (err)
274 return err;
275
276 x = xfrm_state_construct(p, (struct rtattr **) xfrma, &err);
277 if (!x)
278 return err;
279
280 if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
281 err = xfrm_state_add(x);
282 else
283 err = xfrm_state_update(x);
284
285 if (err < 0) {
286 x->km.state = XFRM_STATE_DEAD;
287 xfrm_state_put(x);
288 }
289
290 return err;
291}
292
293static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
294{
295 struct xfrm_state *x;
296 struct xfrm_usersa_id *p = NLMSG_DATA(nlh);
297
298 x = xfrm_state_lookup(&p->daddr, p->spi, p->proto, p->family);
299 if (x == NULL)
300 return -ESRCH;
301
302 if (xfrm_state_kern(x)) {
303 xfrm_state_put(x);
304 return -EPERM;
305 }
306
307 xfrm_state_delete(x);
308 xfrm_state_put(x);
309
310 return 0;
311}
312
313static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
314{
315 memcpy(&p->id, &x->id, sizeof(p->id));
316 memcpy(&p->sel, &x->sel, sizeof(p->sel));
317 memcpy(&p->lft, &x->lft, sizeof(p->lft));
318 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
319 memcpy(&p->stats, &x->stats, sizeof(p->stats));
320 p->saddr = x->props.saddr;
321 p->mode = x->props.mode;
322 p->replay_window = x->props.replay_window;
323 p->reqid = x->props.reqid;
324 p->family = x->props.family;
325 p->flags = x->props.flags;
326 p->seq = x->km.seq;
327}
328
/* Shared cursor/context for the SA and policy dump callbacks. */
struct xfrm_dump_info {
	struct sk_buff *in_skb;		/* request skb (source of the pid) */
	struct sk_buff *out_skb;	/* skb being filled with replies */
	u32 nlmsg_seq;			/* sequence echoed in each header */
	u16 nlmsg_flags;		/* e.g. NLM_F_MULTI during dumps */
	int start_idx;			/* skip entries below this index */
	int this_idx;			/* index of the current entry */
};
337
/* Emit one SA as an XFRM_MSG_NEWSA message into sp->out_skb.  Entries
 * below start_idx are skipped (dump resume point).  NLMSG_PUT and
 * RTA_PUT jump to the labels at the bottom when out_skb runs out of
 * room; the partial message is then trimmed and -1 returned, which
 * stops the walk. */
static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct xfrm_usersa_info *p;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;	/* rollback point on failure */

	if (sp->this_idx < sp->start_idx)
		goto out;

	nlh = NLMSG_PUT(skb, NETLINK_CB(in_skb).pid,
			sp->nlmsg_seq,
			XFRM_MSG_NEWSA, sizeof(*p));
	nlh->nlmsg_flags = sp->nlmsg_flags;

	p = NLMSG_DATA(nlh);
	copy_to_user_state(x, p);

	/* Optional attributes; algorithm payloads include the key,
	 * alg_key_len being in bits (hence the /8 rounding). */
	if (x->aalg)
		RTA_PUT(skb, XFRMA_ALG_AUTH,
			sizeof(*(x->aalg))+(x->aalg->alg_key_len+7)/8, x->aalg);
	if (x->ealg)
		RTA_PUT(skb, XFRMA_ALG_CRYPT,
			sizeof(*(x->ealg))+(x->ealg->alg_key_len+7)/8, x->ealg);
	if (x->calg)
		RTA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);

	if (x->encap)
		RTA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);

	nlh->nlmsg_len = skb->tail - b;
out:
	sp->this_idx++;
	return 0;

nlmsg_failure:
rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
380
381static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
382{
383 struct xfrm_dump_info info;
384
385 info.in_skb = cb->skb;
386 info.out_skb = skb;
387 info.nlmsg_seq = cb->nlh->nlmsg_seq;
388 info.nlmsg_flags = NLM_F_MULTI;
389 info.this_idx = 0;
390 info.start_idx = cb->args[0];
391 (void) xfrm_state_walk(IPSEC_PROTO_ANY, dump_one_state, &info);
392 cb->args[0] = info.this_idx;
393
394 return skb->len;
395}
396
397static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
398 struct xfrm_state *x, u32 seq)
399{
400 struct xfrm_dump_info info;
401 struct sk_buff *skb;
402
403 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
404 if (!skb)
405 return ERR_PTR(-ENOMEM);
406
407 NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
408 info.in_skb = in_skb;
409 info.out_skb = skb;
410 info.nlmsg_seq = seq;
411 info.nlmsg_flags = 0;
412 info.this_idx = info.start_idx = 0;
413
414 if (dump_one_state(x, 0, &info)) {
415 kfree_skb(skb);
416 return NULL;
417 }
418
419 return skb;
420}
421
422static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
423{
424 struct xfrm_usersa_id *p = NLMSG_DATA(nlh);
425 struct xfrm_state *x;
426 struct sk_buff *resp_skb;
427 int err;
428
429 x = xfrm_state_lookup(&p->daddr, p->spi, p->proto, p->family);
430 err = -ESRCH;
431 if (x == NULL)
432 goto out_noput;
433
434 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
435 if (IS_ERR(resp_skb)) {
436 err = PTR_ERR(resp_skb);
437 } else {
438 err = netlink_unicast(xfrm_nl, resp_skb,
439 NETLINK_CB(skb).pid, MSG_DONTWAIT);
440 }
441 xfrm_state_put(x);
442out_noput:
443 return err;
444}
445
446static int verify_userspi_info(struct xfrm_userspi_info *p)
447{
448 switch (p->info.id.proto) {
449 case IPPROTO_AH:
450 case IPPROTO_ESP:
451 break;
452
453 case IPPROTO_COMP:
454 /* IPCOMP spi is 16-bits. */
455 if (p->max >= 0x10000)
456 return -EINVAL;
457 break;
458
459 default:
460 return -EINVAL;
461 };
462
463 if (p->min > p->max)
464 return -EINVAL;
465
466 return 0;
467}
468
/* XFRM_MSG_ALLOCSPI handler: find (or create) the ACQUIRE state for
 * the request, allocate an SPI in [min, max] and unicast the
 * resulting SA description back to the caller. */
static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
{
	struct xfrm_state *x;
	struct xfrm_userspi_info *p;
	struct sk_buff *resp_skb;
	xfrm_address_t *daddr;
	int family;
	int err;

	p = NLMSG_DATA(nlh);
	err = verify_userspi_info(p);
	if (err)
		goto out_noput;

	family = p->info.family;
	daddr = &p->info.id.daddr;

	x = NULL;
	if (p->info.seq) {
		/* Prefer the acquire state created for this km sequence,
		 * provided it targets the same destination. */
		x = xfrm_find_acq_byseq(p->info.seq);
		if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
			xfrm_state_put(x);
			x = NULL;
		}
	}

	/* create=1: make a fresh ACQUIRE state if none exists. */
	if (!x)
		x = xfrm_find_acq(p->info.mode, p->info.reqid,
				  p->info.id.proto, daddr,
				  &p->info.saddr, 1,
				  family);
	err = -ENOENT;
	if (x == NULL)
		goto out_noput;

	resp_skb = ERR_PTR(-ENOENT);

	spin_lock_bh(&x->lock);
	if (x->km.state != XFRM_STATE_DEAD) {
		xfrm_alloc_spi(x, htonl(p->min), htonl(p->max));
		/* xfrm_alloc_spi() may fail, leaving id.spi == 0. */
		if (x->id.spi)
			resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
	}
	spin_unlock_bh(&x->lock);

	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);
		goto out;
	}

	err = netlink_unicast(xfrm_nl, resp_skb,
			      NETLINK_CB(skb).pid, MSG_DONTWAIT);

out:
	xfrm_state_put(x);
out_noput:
	return err;
}
527
528static int verify_policy_dir(__u8 dir)
529{
530 switch (dir) {
531 case XFRM_POLICY_IN:
532 case XFRM_POLICY_OUT:
533 case XFRM_POLICY_FWD:
534 break;
535
536 default:
537 return -EINVAL;
538 };
539
540 return 0;
541}
542
543static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
544{
545 switch (p->share) {
546 case XFRM_SHARE_ANY:
547 case XFRM_SHARE_SESSION:
548 case XFRM_SHARE_USER:
549 case XFRM_SHARE_UNIQUE:
550 break;
551
552 default:
553 return -EINVAL;
554 };
555
556 switch (p->action) {
557 case XFRM_POLICY_ALLOW:
558 case XFRM_POLICY_BLOCK:
559 break;
560
561 default:
562 return -EINVAL;
563 };
564
565 switch (p->sel.family) {
566 case AF_INET:
567 break;
568
569 case AF_INET6:
570#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
571 break;
572#else
573 return -EAFNOSUPPORT;
574#endif
575
576 default:
577 return -EINVAL;
578 };
579
580 return verify_policy_dir(p->dir);
581}
582
583static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
584 int nr)
585{
586 int i;
587
588 xp->xfrm_nr = nr;
589 for (i = 0; i < nr; i++, ut++) {
590 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
591
592 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
593 memcpy(&t->saddr, &ut->saddr,
594 sizeof(xfrm_address_t));
595 t->reqid = ut->reqid;
596 t->mode = ut->mode;
597 t->share = ut->share;
598 t->optional = ut->optional;
599 t->aalgos = ut->aalgos;
600 t->ealgos = ut->ealgos;
601 t->calgos = ut->calgos;
602 }
603}
604
605static int copy_from_user_tmpl(struct xfrm_policy *pol, struct rtattr **xfrma)
606{
607 struct rtattr *rt = xfrma[XFRMA_TMPL-1];
608 struct xfrm_user_tmpl *utmpl;
609 int nr;
610
611 if (!rt) {
612 pol->xfrm_nr = 0;
613 } else {
614 nr = (rt->rta_len - sizeof(*rt)) / sizeof(*utmpl);
615
616 if (nr > XFRM_MAX_DEPTH)
617 return -EINVAL;
618
619 copy_templates(pol, RTA_DATA(rt), nr);
620 }
621 return 0;
622}
623
624static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
625{
626 xp->priority = p->priority;
627 xp->index = p->index;
628 memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
629 memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
630 xp->action = p->action;
631 xp->flags = p->flags;
632 xp->family = p->sel.family;
633 /* XXX xp->share = p->share; */
634}
635
636static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
637{
638 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
639 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
640 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
641 p->priority = xp->priority;
642 p->index = xp->index;
643 p->sel.family = xp->family;
644 p->dir = dir;
645 p->action = xp->action;
646 p->flags = xp->flags;
647 p->share = XFRM_SHARE_ANY; /* XXX xp->share */
648}
649
/* Allocate and fill a policy from the userspace image plus its
 * template attribute.  Returns NULL with *errp set on failure.
 * NOTE(review): the failure path uses kfree() on the fresh policy
 * rather than xfrm_pol_put(); this is safe only while nothing else
 * can yet hold a reference — confirm if construction ever grows
 * side effects. */
static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p, struct rtattr **xfrma, int *errp)
{
	struct xfrm_policy *xp = xfrm_policy_alloc(GFP_KERNEL);
	int err;

	if (!xp) {
		*errp = -ENOMEM;
		return NULL;
	}

	copy_from_user_policy(xp, p);
	err = copy_from_user_tmpl(xp, xfrma);
	if (err) {
		*errp = err;
		kfree(xp);
		xp = NULL;
	}

	return xp;
}
670
671static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
672{
673 struct xfrm_userpolicy_info *p = NLMSG_DATA(nlh);
674 struct xfrm_policy *xp;
675 int err;
676 int excl;
677
678 err = verify_newpolicy_info(p);
679 if (err)
680 return err;
681
682 xp = xfrm_policy_construct(p, (struct rtattr **) xfrma, &err);
683 if (!xp)
684 return err;
685
686 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
687 err = xfrm_policy_insert(p->dir, xp, excl);
688 if (err) {
689 kfree(xp);
690 return err;
691 }
692
693 xfrm_pol_put(xp);
694
695 return 0;
696}
697
/* Append the policy's templates to @skb as one XFRMA_TMPL attribute.
 * Returns 0 on success (nothing is emitted when the policy has no
 * templates) and -1 when the attribute does not fit in @skb.
 */
static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
{
	/* Stack staging buffer; xfrm_nr is kept <= XFRM_MAX_DEPTH by the
	 * template-copying paths elsewhere in this file.
	 */
	struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
	int i;

	if (xp->xfrm_nr == 0)
		return 0;

	for (i = 0; i < xp->xfrm_nr; i++) {
		struct xfrm_user_tmpl *up = &vec[i];
		struct xfrm_tmpl *kp = &xp->xfrm_vec[i];

		memcpy(&up->id, &kp->id, sizeof(up->id));
		/* The family is a policy-wide property, not per-template. */
		up->family = xp->family;
		memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
		up->reqid = kp->reqid;
		up->mode = kp->mode;
		up->share = kp->share;
		up->optional = kp->optional;
		up->aalgos = kp->aalgos;
		up->ealgos = kp->ealgos;
		up->calgos = kp->calgos;
	}
	/* RTA_PUT branches to rtattr_failure when @skb lacks tailroom. */
	RTA_PUT(skb, XFRMA_TMPL,
		(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr),
		vec);

	return 0;

rtattr_failure:
	return -1;
}
730
/* Emit one policy as an XFRM_MSG_NEWPOLICY message on sp->out_skb.
 * Serves both as the xfrm_policy_walk() dump callback and as a direct
 * helper for xfrm_policy_netlink().  Entries below sp->start_idx are
 * skipped so interrupted dumps can resume.  Returns 0 on success or
 * skip, -1 when the skb is full (any partial message is trimmed off).
 */
static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct xfrm_userpolicy_info *p;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;	/* rollback point on failure */

	if (sp->this_idx < sp->start_idx)
		goto out;

	/* NLMSG_PUT branches to nlmsg_failure when @skb lacks room. */
	nlh = NLMSG_PUT(skb, NETLINK_CB(in_skb).pid,
			sp->nlmsg_seq,
			XFRM_MSG_NEWPOLICY, sizeof(*p));
	p = NLMSG_DATA(nlh);
	nlh->nlmsg_flags = sp->nlmsg_flags;

	copy_to_user_policy(xp, p, dir);
	if (copy_to_user_tmpl(xp, skb) < 0)
		goto nlmsg_failure;

	/* Header and attributes in place; patch in the final length. */
	nlh->nlmsg_len = skb->tail - b;
out:
	sp->this_idx++;
	return 0;

nlmsg_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
762
763static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
764{
765 struct xfrm_dump_info info;
766
767 info.in_skb = cb->skb;
768 info.out_skb = skb;
769 info.nlmsg_seq = cb->nlh->nlmsg_seq;
770 info.nlmsg_flags = NLM_F_MULTI;
771 info.this_idx = 0;
772 info.start_idx = cb->args[0];
773 (void) xfrm_policy_walk(dump_one_policy, &info);
774 cb->args[0] = info.this_idx;
775
776 return skb->len;
777}
778
779static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
780 struct xfrm_policy *xp,
781 int dir, u32 seq)
782{
783 struct xfrm_dump_info info;
784 struct sk_buff *skb;
785
786 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
787 if (!skb)
788 return ERR_PTR(-ENOMEM);
789
790 NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
791 info.in_skb = in_skb;
792 info.out_skb = skb;
793 info.nlmsg_seq = seq;
794 info.nlmsg_flags = 0;
795 info.this_idx = info.start_idx = 0;
796
797 if (dump_one_policy(xp, dir, 0, &info) < 0) {
798 kfree_skb(skb);
799 return NULL;
800 }
801
802 return skb;
803}
804
/* XFRM_MSG_GETPOLICY / XFRM_MSG_DELPOLICY handler.  Both requests
 * share this function; deletion is derived from the message type.
 * The policy is located by index when one is given, otherwise by
 * selector.  For GET, a single-policy reply is unicast back to the
 * requester; for DEL, removal happens inside the lookup itself.
 */
static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
{
	struct xfrm_policy *xp;
	struct xfrm_userpolicy_id *p;
	int err;
	int delete;

	p = NLMSG_DATA(nlh);
	delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;

	err = verify_policy_dir(p->dir);
	if (err)
		return err;

	/* The third argument asks the lookup to unlink on a DEL request. */
	if (p->index)
		xp = xfrm_policy_byid(p->dir, p->index, delete);
	else
		xp = xfrm_policy_bysel(p->dir, &p->sel, delete);
	if (xp == NULL)
		return -ENOENT;

	if (!delete) {
		struct sk_buff *resp_skb;

		resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
		if (IS_ERR(resp_skb)) {
			err = PTR_ERR(resp_skb);
		} else {
			err = netlink_unicast(xfrm_nl, resp_skb,
					      NETLINK_CB(skb).pid,
					      MSG_DONTWAIT);
		}
	}

	/* On the DEL path err is still 0 from verify_policy_dir(). */
	xfrm_pol_put(xp);

	return err;
}
843
844static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
845{
846 struct xfrm_usersa_flush *p = NLMSG_DATA(nlh);
847
848 xfrm_state_flush(p->proto);
849 return 0;
850}
851
/* XFRM_MSG_FLUSHPOLICY handler: drop all policies. */
static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
{
	xfrm_policy_flush();
	return 0;
}
857
/* Minimum message length for each xfrm message, indexed by
 * (nlmsg_type - XFRM_MSG_BASE).  Entry order must stay in sync with
 * xfrm_dispatch[] below.
 */
static const int xfrm_msg_min[(XFRM_MSG_MAX + 1 - XFRM_MSG_BASE)] = {
	NLMSG_LENGTH(sizeof(struct xfrm_usersa_info)),	/* NEW SA */
	NLMSG_LENGTH(sizeof(struct xfrm_usersa_id)),	/* DEL SA */
	NLMSG_LENGTH(sizeof(struct xfrm_usersa_id)),	/* GET SA */
	NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info)),/* NEW POLICY */
	NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id)),  /* DEL POLICY */
	NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id)),  /* GET POLICY */
	NLMSG_LENGTH(sizeof(struct xfrm_userspi_info)),	/* ALLOC SPI */
	NLMSG_LENGTH(sizeof(struct xfrm_user_acquire)),	/* ACQUIRE */
	NLMSG_LENGTH(sizeof(struct xfrm_user_expire)),	/* EXPIRE */
	NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info)),/* UPD POLICY */
	NLMSG_LENGTH(sizeof(struct xfrm_usersa_info)),	/* UPD SA */
	NLMSG_LENGTH(sizeof(struct xfrm_user_polexpire)), /* POLEXPIRE */
	NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush)),	/* FLUSH SA */
	NLMSG_LENGTH(0),				/* FLUSH POLICY */
};
874
/* Request handlers indexed by (nlmsg_type - XFRM_MSG_BASE); entry
 * order mirrors xfrm_msg_min[] above.  DEL POLICY reuses
 * xfrm_get_policy() (the handler derives deletion from the message
 * type) and the UPD entries reuse the add handlers.  The empty slots
 * (ACQUIRE, EXPIRE, POLEXPIRE) are kernel-originated notifications
 * built elsewhere in this file, not valid requests.
 */
static struct xfrm_link {
	int (*doit)(struct sk_buff *, struct nlmsghdr *, void **);
	int (*dump)(struct sk_buff *, struct netlink_callback *);
} xfrm_dispatch[] = {
	{ .doit = xfrm_add_sa, },	/* NEW SA */
	{ .doit = xfrm_del_sa, },	/* DEL SA */
	{				/* GET SA */
		.doit = xfrm_get_sa,
		.dump = xfrm_dump_sa,
	},
	{ .doit = xfrm_add_policy },	/* NEW POLICY */
	{ .doit = xfrm_get_policy },	/* DEL POLICY */
	{				/* GET POLICY */
		.doit = xfrm_get_policy,
		.dump = xfrm_dump_policy,
	},
	{ .doit = xfrm_alloc_userspi },	/* ALLOC SPI */
	{},				/* ACQUIRE */
	{},				/* EXPIRE */
	{ .doit = xfrm_add_policy },	/* UPD POLICY */
	{ .doit = xfrm_add_sa, },	/* UPD SA */
	{},				/* POLEXPIRE */
	{ .doit = xfrm_flush_sa },	/* FLUSH SA */
	{ .doit = xfrm_flush_policy },	/* FLUSH POLICY */
};
900
/* netlink_dump_start() completion callback: no per-dump state to free. */
static int xfrm_done(struct netlink_callback *cb)
{
	return 0;
}
905
906static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
907{
908 struct rtattr *xfrma[XFRMA_MAX];
909 struct xfrm_link *link;
910 int type, min_len;
911
912 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
913 return 0;
914
915 type = nlh->nlmsg_type;
916
917 /* A control message: ignore them */
918 if (type < XFRM_MSG_BASE)
919 return 0;
920
921 /* Unknown message: reply with EINVAL */
922 if (type > XFRM_MSG_MAX)
923 goto err_einval;
924
925 type -= XFRM_MSG_BASE;
926 link = &xfrm_dispatch[type];
927
928 /* All operations require privileges, even GET */
929 if (security_netlink_recv(skb)) {
930 *errp = -EPERM;
931 return -1;
932 }
933
934 if ((type == 2 || type == 5) && (nlh->nlmsg_flags & NLM_F_DUMP)) {
935 u32 rlen;
936
937 if (link->dump == NULL)
938 goto err_einval;
939
940 if ((*errp = netlink_dump_start(xfrm_nl, skb, nlh,
941 link->dump,
942 xfrm_done)) != 0) {
943 return -1;
944 }
945 rlen = NLMSG_ALIGN(nlh->nlmsg_len);
946 if (rlen > skb->len)
947 rlen = skb->len;
948 skb_pull(skb, rlen);
949 return -1;
950 }
951
952 memset(xfrma, 0, sizeof(xfrma));
953
954 if (nlh->nlmsg_len < (min_len = xfrm_msg_min[type]))
955 goto err_einval;
956
957 if (nlh->nlmsg_len > min_len) {
958 int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
959 struct rtattr *attr = (void *) nlh + NLMSG_ALIGN(min_len);
960
961 while (RTA_OK(attr, attrlen)) {
962 unsigned short flavor = attr->rta_type;
963 if (flavor) {
964 if (flavor > XFRMA_MAX)
965 goto err_einval;
966 xfrma[flavor - 1] = attr;
967 }
968 attr = RTA_NEXT(attr, attrlen);
969 }
970 }
971
972 if (link->doit == NULL)
973 goto err_einval;
974 *errp = link->doit(skb, nlh, (void **) &xfrma);
975
976 return *errp;
977
978err_einval:
979 *errp = -EINVAL;
980 return -1;
981}
982
/* Walk every netlink message framed in @skb, dispatching each through
 * xfrm_user_rcv_msg() and ACKing/NACKing as required.  A truncated
 * message ends processing quietly.  Returns 0 when the skb is fully
 * consumed and -1 when processing must stop early; the caller decides
 * whether to requeue the remainder.
 */
static int xfrm_user_rcv_skb(struct sk_buff *skb)
{
	int err;
	struct nlmsghdr *nlh;

	while (skb->len >= NLMSG_SPACE(0)) {
		u32 rlen;

		nlh = (struct nlmsghdr *) skb->data;
		if (nlh->nlmsg_len < sizeof(*nlh) ||
		    skb->len < nlh->nlmsg_len)
			return 0;
		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (rlen > skb->len)
			rlen = skb->len;
		if (xfrm_user_rcv_msg(skb, nlh, &err) < 0) {
			/* err == 0 means "stop, but do not NACK". */
			if (err == 0)
				return -1;
			netlink_ack(skb, nlh, err);
		} else if (nlh->nlmsg_flags & NLM_F_ACK)
			netlink_ack(skb, nlh, 0);
		skb_pull(skb, rlen);
	}

	return 0;
}
1009
/* Data-ready callback for the xfrm netlink socket.  All configuration
 * work is serialized under xfrm_cfg_sem.  An skb whose processing was
 * cut short with data still pending is put back at the head of the
 * queue so it is retried on the next pass.
 */
static void xfrm_netlink_rcv(struct sock *sk, int len)
{
	do {
		struct sk_buff *skb;

		down(&xfrm_cfg_sem);

		while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
			if (xfrm_user_rcv_skb(skb)) {
				if (skb->len)
					skb_queue_head(&sk->sk_receive_queue,
						       skb);
				else
					kfree_skb(skb);
				break;
			}
			kfree_skb(skb);
		}

		up(&xfrm_cfg_sem);

		/* Loop again if more data arrived while we held the sem. */
	} while (xfrm_nl && xfrm_nl->sk_receive_queue.qlen);
}
1033
/* Compose an XFRM_MSG_EXPIRE message for state @x on @skb.  @hard is
 * normalized to 0/1.  Returns the skb length on success and -1 when
 * the message does not fit (any partial data is trimmed off).
 */
static int build_expire(struct sk_buff *skb, struct xfrm_state *x, int hard)
{
	struct xfrm_user_expire *ue;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;	/* rollback point on failure */

	/* NLMSG_PUT branches to nlmsg_failure when @skb lacks room. */
	nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_EXPIRE,
			sizeof(*ue));
	ue = NLMSG_DATA(nlh);
	nlh->nlmsg_flags = 0;

	copy_to_user_state(x, &ue->state);
	ue->hard = (hard != 0) ? 1 : 0;

	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
1055
1056static int xfrm_send_state_notify(struct xfrm_state *x, int hard)
1057{
1058 struct sk_buff *skb;
1059
1060 skb = alloc_skb(sizeof(struct xfrm_user_expire) + 16, GFP_ATOMIC);
1061 if (skb == NULL)
1062 return -ENOMEM;
1063
1064 if (build_expire(skb, x, hard) < 0)
1065 BUG();
1066
1067 NETLINK_CB(skb).dst_groups = XFRMGRP_EXPIRE;
1068
1069 return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_EXPIRE, GFP_ATOMIC);
1070}
1071
/* Compose an XFRM_MSG_ACQUIRE message on @skb asking userland key
 * managers to negotiate an SA for state @x under policy @xp/@dir.
 * A fresh acquire sequence number is stored both in the message and
 * in x->km.seq so the eventual reply can be matched.  Returns the
 * skb length on success and -1 on overflow (partial data trimmed).
 */
static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
			 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
			 int dir)
{
	struct xfrm_user_acquire *ua;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;	/* rollback point on failure */
	__u32 seq = xfrm_get_acqseq();

	/* NLMSG_PUT branches to nlmsg_failure when @skb lacks room. */
	nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_ACQUIRE,
			sizeof(*ua));
	ua = NLMSG_DATA(nlh);
	nlh->nlmsg_flags = 0;

	memcpy(&ua->id, &x->id, sizeof(ua->id));
	memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
	memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
	copy_to_user_policy(xp, &ua->policy, dir);
	/* Algorithm constraints come from the matching template. */
	ua->aalgos = xt->aalgos;
	ua->ealgos = xt->ealgos;
	ua->calgos = xt->calgos;
	ua->seq = x->km.seq = seq;

	if (copy_to_user_tmpl(xp, skb) < 0)
		goto nlmsg_failure;

	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
1105
1106static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
1107 struct xfrm_policy *xp, int dir)
1108{
1109 struct sk_buff *skb;
1110 size_t len;
1111
1112 len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
1113 len += NLMSG_SPACE(sizeof(struct xfrm_user_acquire));
1114 skb = alloc_skb(len, GFP_ATOMIC);
1115 if (skb == NULL)
1116 return -ENOMEM;
1117
1118 if (build_acquire(skb, x, xt, xp, dir) < 0)
1119 BUG();
1120
1121 NETLINK_CB(skb).dst_groups = XFRMGRP_ACQUIRE;
1122
1123 return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_ACQUIRE, GFP_ATOMIC);
1124}
1125
/* User gives us xfrm_user_policy_info followed by an array of 0
 * or more templates.
 *
 * Key-manager callback (see netlink_mgr below) for building a
 * per-socket policy: @data/@len come from the per-family policy
 * socket option (IP_XFRM_POLICY / IPV6_XFRM_POLICY).  On success the
 * constructed policy is returned with *dir set to the requested
 * direction; on failure NULL is returned with *dir holding a
 * negative errno.
 */
static struct xfrm_policy *xfrm_compile_policy(u16 family, int opt,
					       u8 *data, int len, int *dir)
{
	struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
	struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
	struct xfrm_policy *xp;
	int nr;

	/* The socket option must match the socket's address family. */
	switch (family) {
	case AF_INET:
		if (opt != IP_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case AF_INET6:
		if (opt != IPV6_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#endif
	default:
		*dir = -EINVAL;
		return NULL;
	}

	*dir = -EINVAL;

	/* The fixed header must be complete and self-consistent. */
	if (len < sizeof(*p) ||
	    verify_newpolicy_info(p))
		return NULL;

	/* Whatever follows the header is taken as the template array. */
	nr = ((len - sizeof(*p)) / sizeof(*ut));
	if (nr > XFRM_MAX_DEPTH)
		return NULL;

	xp = xfrm_policy_alloc(GFP_KERNEL);
	if (xp == NULL) {
		*dir = -ENOBUFS;
		return NULL;
	}

	copy_from_user_policy(xp, p);
	copy_templates(xp, ut, nr);

	*dir = p->dir;

	return xp;
}
1180
/* Compose an XFRM_MSG_POLEXPIRE message for policy @xp/@dir on @skb.
 * @hard is normalized to 0/1.  Returns the skb length on success and
 * -1 on overflow (any partial data is trimmed off).
 */
static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
			   int dir, int hard)
{
	struct xfrm_user_polexpire *upe;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;	/* rollback point on failure */

	/* NLMSG_PUT branches to nlmsg_failure when @skb lacks room. */
	nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe));
	upe = NLMSG_DATA(nlh);
	nlh->nlmsg_flags = 0;

	copy_to_user_policy(xp, &upe->pol, dir);
	if (copy_to_user_tmpl(xp, skb) < 0)
		goto nlmsg_failure;
	upe->hard = !!hard;

	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
1204
1205static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, int hard)
1206{
1207 struct sk_buff *skb;
1208 size_t len;
1209
1210 len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
1211 len += NLMSG_SPACE(sizeof(struct xfrm_user_polexpire));
1212 skb = alloc_skb(len, GFP_ATOMIC);
1213 if (skb == NULL)
1214 return -ENOMEM;
1215
1216 if (build_polexpire(skb, xp, dir, hard) < 0)
1217 BUG();
1218
1219 NETLINK_CB(skb).dst_groups = XFRMGRP_EXPIRE;
1220
1221 return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_EXPIRE, GFP_ATOMIC);
1222}
1223
/* Key-manager registration: wires the netlink interface into the xfrm
 * core for state/policy notifications, acquire requests, and
 * per-socket policy compilation.
 */
static struct xfrm_mgr netlink_mgr = {
	.id = "netlink",
	.notify = xfrm_send_state_notify,
	.acquire = xfrm_send_acquire,
	.compile_policy = xfrm_compile_policy,
	.notify_policy = xfrm_send_policy_notify,
};
1231
/* Module init: create the NETLINK_XFRM kernel socket and register the
 * netlink key manager with the xfrm core.
 */
static int __init xfrm_user_init(void)
{
	printk(KERN_INFO "Initializing IPsec netlink socket\n");

	xfrm_nl = netlink_kernel_create(NETLINK_XFRM, xfrm_netlink_rcv);
	if (xfrm_nl == NULL)
		return -ENOMEM;

	xfrm_register_km(&netlink_mgr);

	return 0;
}
1244
/* Module exit: unregister the key manager and release the netlink
 * socket created in xfrm_user_init().
 */
static void __exit xfrm_user_exit(void)
{
	xfrm_unregister_km(&netlink_mgr);
	sock_release(xfrm_nl->sk_socket);
}
1250
/* Module plumbing for the netlink-based IPsec configuration interface. */
module_init(xfrm_user_init);
module_exit(xfrm_user_exit);
MODULE_LICENSE("GPL");