author      Alexey Dobriyan <adobriyan@gmail.com>    2008-11-25 20:16:58 -0500
committer   David S. Miller <davem@davemloft.net>    2008-11-25 20:16:58 -0500
commit      73d189dce486cd6693fa29169b1aac0872efbcea
tree        1c82e87feef2d99d3644f98b66bc90006259d86e  /net/xfrm/xfrm_state.c
parent      9d4139c76905833afcb77fe8ccc17f302a0eb9ab
netns xfrm: per-netns xfrm_state_bydst hash
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/xfrm/xfrm_state.c')
-rw-r--r--  net/xfrm/xfrm_state.c  50
1 file changed, 26 insertions, 24 deletions
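
For readers skimming the change, here is a minimal userspace sketch of the pattern this patch applies: the bydst hash stops being a file-scope global and becomes a member of the per-namespace xfrm state, so each struct net allocates its own table in xfrm_state_init() and frees it in xfrm_state_fini(). The struct layout and the calloc()/free() helpers below are illustrative stand-ins for the kernel's netns_xfrm, xfrm_hash_alloc() and xfrm_hash_free(), not the real definitions.

/*
 * Sketch only: simplified stand-ins for kernel types and helpers.
 * The point is ownership: the hash lives in the per-namespace struct,
 * not in a static global shared by all namespaces.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct hlist_head { void *first; };          /* stand-in for the kernel type */

struct netns_xfrm {                          /* per-namespace xfrm state */
	struct hlist_head *state_bydst;      /* was: static xfrm_state_bydst */
	unsigned int state_hmask;
};

struct net {                                 /* stand-in for struct net */
	struct netns_xfrm xfrm;
};

/* Mirrors xfrm_state_init(): allocate this namespace's hash, -ENOMEM on failure. */
static int xfrm_state_init_sketch(struct net *net)
{
	unsigned int sz = sizeof(struct hlist_head) * 8;

	net->xfrm.state_bydst = calloc(1, sz);   /* xfrm_hash_alloc() stand-in */
	if (!net->xfrm.state_bydst)
		return -ENOMEM;
	net->xfrm.state_hmask = sz / sizeof(struct hlist_head) - 1;
	return 0;
}

/* Mirrors xfrm_state_fini(): each namespace frees the table it owns. */
static void xfrm_state_fini_sketch(struct net *net)
{
	free(net->xfrm.state_bydst);             /* xfrm_hash_free() stand-in */
	net->xfrm.state_bydst = NULL;
}

int main(void)
{
	struct net net = { 0 };

	if (xfrm_state_init_sketch(&net))
		return 1;
	printf("bydst hash: %u buckets, owned by this net\n",
	       net.xfrm.state_hmask + 1);
	xfrm_state_fini_sketch(&net);
	return 0;
}

Note that the lookups in this commit still reference init_net.xfrm.state_bydst; passing the caller's struct net down to the lookup helpers is presumably handled as the netns conversion series progresses.
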
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 85bb85484b70..08b78895ffbc 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -44,13 +44,6 @@ u32 sysctl_xfrm_acq_expires __read_mostly = 30;
 
 static DEFINE_SPINLOCK(xfrm_state_lock);
 
-/* Hash table to find appropriate SA towards given target (endpoint
- * of tunnel or destination of transport mode) allowed by selector.
- *
- * Main use is finding SA after policy selected tunnel or transport mode.
- * Also, it can be used by ah/esp icmp error handler to find offending SA.
- */
-static struct hlist_head *xfrm_state_bydst __read_mostly;
 static struct hlist_head *xfrm_state_bysrc __read_mostly;
 static struct hlist_head *xfrm_state_byspi __read_mostly;
 static unsigned int xfrm_state_hmask __read_mostly;
@@ -157,15 +150,15 @@ static void xfrm_hash_resize(struct work_struct *__unused)
 
 	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
 	for (i = xfrm_state_hmask; i >= 0; i--)
-		xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
+		xfrm_hash_transfer(init_net.xfrm.state_bydst+i, ndst, nsrc, nspi,
 				   nhashmask);
 
-	odst = xfrm_state_bydst;
+	odst = init_net.xfrm.state_bydst;
 	osrc = xfrm_state_bysrc;
 	ospi = xfrm_state_byspi;
 	ohashmask = xfrm_state_hmask;
 
-	xfrm_state_bydst = ndst;
+	init_net.xfrm.state_bydst = ndst;
 	xfrm_state_bysrc = nsrc;
 	xfrm_state_byspi = nspi;
 	xfrm_state_hmask = nhashmask;
@@ -595,7 +588,7 @@ xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
 		struct hlist_node *entry;
 		struct xfrm_state *x;
 
-		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
+		hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+i, bydst) {
 			if (xfrm_id_proto_match(x->id.proto, proto) &&
 			    (err = security_xfrm_state_delete(x)) != 0) {
 				xfrm_audit_state_delete(x, 0,
@@ -630,7 +623,7 @@ int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
 		struct hlist_node *entry;
 		struct xfrm_state *x;
 restart:
-		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
+		hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+i, bydst) {
 			if (!xfrm_state_kern(x) &&
 			    xfrm_id_proto_match(x->id.proto, proto)) {
 				xfrm_state_hold(x);
@@ -785,7 +778,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
 
 	spin_lock_bh(&xfrm_state_lock);
 	h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
-	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
+	hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
 		if (x->props.family == family &&
 		    x->props.reqid == tmpl->reqid &&
 		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
@@ -855,7 +848,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
 			if (km_query(x, tmpl, pol) == 0) {
 				x->km.state = XFRM_STATE_ACQ;
 				list_add(&x->km.all, &init_net.xfrm.state_all);
-				hlist_add_head(&x->bydst, xfrm_state_bydst+h);
+				hlist_add_head(&x->bydst, init_net.xfrm.state_bydst+h);
 				h = xfrm_src_hash(daddr, saddr, family);
 				hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
 				if (x->id.spi) {
@@ -895,7 +888,7 @@ xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
 
 	spin_lock(&xfrm_state_lock);
 	h = xfrm_dst_hash(daddr, saddr, reqid, family);
-	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
+	hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
 		if (x->props.family == family &&
 		    x->props.reqid == reqid &&
 		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
@@ -927,7 +920,7 @@ static void __xfrm_state_insert(struct xfrm_state *x)
 
 	h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
 			  x->props.reqid, x->props.family);
-	hlist_add_head(&x->bydst, xfrm_state_bydst+h);
+	hlist_add_head(&x->bydst, init_net.xfrm.state_bydst+h);
 
 	h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
 	hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
@@ -960,7 +953,7 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
 	unsigned int h;
 
 	h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
-	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
+	hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
 		if (x->props.family == family &&
 		    x->props.reqid == reqid &&
 		    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
@@ -985,7 +978,7 @@ static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 re
 	struct hlist_node *entry;
 	struct xfrm_state *x;
 
-	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
+	hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
 		if (x->props.reqid != reqid ||
 		    x->props.mode != mode ||
 		    x->props.family != family ||
@@ -1053,7 +1046,7 @@ static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 re
 		x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
 		add_timer(&x->timer);
 		list_add(&x->km.all, &init_net.xfrm.state_all);
-		hlist_add_head(&x->bydst, xfrm_state_bydst+h);
+		hlist_add_head(&x->bydst, init_net.xfrm.state_bydst+h);
 		h = xfrm_src_hash(daddr, saddr, family);
 		hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
 
@@ -1208,7 +1201,7 @@ struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
 	if (m->reqid) {
 		h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
 				  m->reqid, m->old_family);
-		hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
+		hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) {
 			if (x->props.mode != m->mode ||
 			    x->id.proto != m->proto)
 				continue;
@@ -1457,7 +1450,7 @@ static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
 		struct hlist_node *entry;
 		struct xfrm_state *x;
 
-		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
+		hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+i, bydst) {
 			if (x->km.seq == seq &&
 			    x->km.state == XFRM_STATE_ACQ) {
 				xfrm_state_hold(x);
@@ -2088,20 +2081,29 @@ int __net_init xfrm_state_init(struct net *net)
 
 	sz = sizeof(struct hlist_head) * 8;
 
-	xfrm_state_bydst = xfrm_hash_alloc(sz);
+	net->xfrm.state_bydst = xfrm_hash_alloc(sz);
+	if (!net->xfrm.state_bydst)
+		goto out_bydst;
 	xfrm_state_bysrc = xfrm_hash_alloc(sz);
 	xfrm_state_byspi = xfrm_hash_alloc(sz);
-	if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
-		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
 	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
 
 	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
 	return 0;
+
+out_bydst:
+	return -ENOMEM;
 }
 
 void xfrm_state_fini(struct net *net)
 {
+	unsigned int sz;
+
 	WARN_ON(!list_empty(&net->xfrm.state_all));
+
+	sz = (xfrm_state_hmask + 1) * sizeof(struct hlist_head);
+	WARN_ON(!hlist_empty(net->xfrm.state_bydst));
+	xfrm_hash_free(net->xfrm.state_bydst, sz);
 }
 
 #ifdef CONFIG_AUDITSYSCALL