author	Alexey Dobriyan <adobriyan@gmail.com>	2008-10-08 05:35:11 -0400
committer	Patrick McHardy <kaber@trash.net>	2008-10-08 05:35:11 -0400
commit	0c4c9288ada0e6642d511ef872f10a4781a896ff (patch)
tree	ee0e22ad78f9a7454e4d8eca09f38414afa643ee /net/ipv4
parent	e099a173573ce1ba171092aee7bb3c72ea686e59 (diff)
netfilter: netns nat: per-netns bysource hash
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
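
The bysource hash used to reuse existing SNAT mappings moves from a single
global table into struct net, giving each network namespace its own table.
The companion header change lives outside net/ipv4 and is therefore not in
the diffstat below; a minimal sketch of the fields the code here assumes on
struct netns_ipv4 (the exact guard and placement are assumptions, not shown
in this diff):

/* Assumed companion change in include/net/netns/ipv4.h (not shown here). */
struct netns_ipv4 {
	/* ... existing fields ... */
#ifdef CONFIG_NF_NAT_NEEDED
	struct hlist_head	*nat_bysource;	/* per-netns SNAT bysource hash */
	int			nat_vmalloced;	/* nonzero if table was vmalloc'ed */
#endif
};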
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/netfilter/nf_nat_core.c	72
1 file changed, 45 insertions(+), 27 deletions(-)
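
The conversion rests on the pernet operations mechanism:
register_pernet_subsys() runs .init for every existing namespace (including
init_net) and for every namespace created afterwards, while
unregister_pernet_subsys() runs .exit for each live namespace, so
per-namespace allocation and teardown no longer belong in module
init/cleanup. A hypothetical toy subsystem showing just that lifecycle
(toy_* names are illustrative, not from the patch):

static int __net_init toy_net_init(struct net *net)
{
	/* Allocate this namespace's private state here. */
	return 0;
}

static void __net_exit toy_net_exit(struct net *net)
{
	/* Free this namespace's private state here. */
}

static struct pernet_operations toy_net_ops = {
	.init = toy_net_init,
	.exit = toy_net_exit,
};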
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 5d4a5b70da2b..2ac9eaf1a8c9 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -37,9 +37,6 @@ static struct nf_conntrack_l3proto *l3proto __read_mostly;
 
 /* Calculated at init based on memory size */
 static unsigned int nf_nat_htable_size __read_mostly;
-static int nf_nat_vmalloced;
-
-static struct hlist_head *bysource __read_mostly;
 
 #define MAX_IP_NAT_PROTO 256
 static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
@@ -145,7 +142,8 @@ same_src(const struct nf_conn *ct,
 
 /* Only called for SRC manip */
 static int
-find_appropriate_src(const struct nf_conntrack_tuple *tuple,
+find_appropriate_src(struct net *net,
+		     const struct nf_conntrack_tuple *tuple,
 		     struct nf_conntrack_tuple *result,
 		     const struct nf_nat_range *range)
 {
@@ -155,7 +153,7 @@ find_appropriate_src(const struct nf_conntrack_tuple *tuple,
 	const struct hlist_node *n;
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(nat, n, &bysource[h], bysource) {
+	hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) {
 		ct = nat->ct;
 		if (same_src(ct, tuple)) {
 			/* Copy source part from reply tuple. */
@@ -231,6 +229,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
 		 struct nf_conn *ct,
 		 enum nf_nat_manip_type maniptype)
 {
+	struct net *net = nf_ct_net(ct);
 	const struct nf_nat_protocol *proto;
 
 	/* 1) If this srcip/proto/src-proto-part is currently mapped,
@@ -242,7 +241,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
 	   manips not an issue. */
 	if (maniptype == IP_NAT_MANIP_SRC &&
 	    !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
-		if (find_appropriate_src(orig_tuple, tuple, range)) {
+		if (find_appropriate_src(net, orig_tuple, tuple, range)) {
 			pr_debug("get_unique_tuple: Found current src map\n");
 			if (!nf_nat_used_tuple(tuple, ct))
 				return;
@@ -283,6 +282,7 @@ nf_nat_setup_info(struct nf_conn *ct,
 		  const struct nf_nat_range *range,
 		  enum nf_nat_manip_type maniptype)
 {
+	struct net *net = nf_ct_net(ct);
 	struct nf_conntrack_tuple curr_tuple, new_tuple;
 	struct nf_conn_nat *nat;
 	int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK);
@@ -334,7 +334,8 @@ nf_nat_setup_info(struct nf_conn *ct,
 		/* nf_conntrack_alter_reply might re-allocate extension area */
 		nat = nfct_nat(ct);
 		nat->ct = ct;
-		hlist_add_head_rcu(&nat->bysource, &bysource[srchash]);
+		hlist_add_head_rcu(&nat->bysource,
+				   &net->ipv4.nat_bysource[srchash]);
 		spin_unlock_bh(&nf_nat_lock);
 	}
 
@@ -583,6 +584,40 @@ static struct nf_ct_ext_type nat_extend __read_mostly = {
 	.flags		= NF_CT_EXT_F_PREALLOC,
 };
 
+static int __net_init nf_nat_net_init(struct net *net)
+{
+	net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size,
+						       &net->ipv4.nat_vmalloced);
+	if (!net->ipv4.nat_bysource)
+		return -ENOMEM;
+	return 0;
+}
+
+/* Clear NAT section of all conntracks, in case we're loaded again. */
+static int clean_nat(struct nf_conn *i, void *data)
+{
+	struct nf_conn_nat *nat = nfct_nat(i);
+
+	if (!nat)
+		return 0;
+	memset(nat, 0, sizeof(*nat));
+	i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
+	return 0;
+}
+
+static void __net_exit nf_nat_net_exit(struct net *net)
+{
+	nf_ct_iterate_cleanup(net, &clean_nat, NULL);
+	synchronize_rcu();
+	nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
+			     nf_nat_htable_size);
+}
+
+static struct pernet_operations nf_nat_net_ops = {
+	.init = nf_nat_net_init,
+	.exit = nf_nat_net_exit,
+};
+
 static int __init nf_nat_init(void)
 {
 	size_t i;
@@ -599,12 +634,9 @@ static int __init nf_nat_init(void)
 	/* Leave them the same for the moment. */
 	nf_nat_htable_size = nf_conntrack_htable_size;
 
-	bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size,
-					 &nf_nat_vmalloced);
-	if (!bysource) {
-		ret = -ENOMEM;
+	ret = register_pernet_subsys(&nf_nat_net_ops);
+	if (ret < 0)
 		goto cleanup_extend;
-	}
 
 	/* Sew in builtin protocols. */
 	spin_lock_bh(&nf_nat_lock);
@@ -629,23 +661,9 @@ static int __init nf_nat_init(void)
 	return ret;
 }
 
-/* Clear NAT section of all conntracks, in case we're loaded again. */
-static int clean_nat(struct nf_conn *i, void *data)
-{
-	struct nf_conn_nat *nat = nfct_nat(i);
-
-	if (!nat)
-		return 0;
-	memset(nat, 0, sizeof(*nat));
-	i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
-	return 0;
-}
-
 static void __exit nf_nat_cleanup(void)
 {
-	nf_ct_iterate_cleanup(&init_net, &clean_nat, NULL);
-	synchronize_rcu();
-	nf_ct_free_hashtable(bysource, nf_nat_vmalloced, nf_nat_htable_size);
+	unregister_pernet_subsys(&nf_nat_net_ops);
 	nf_ct_l3proto_put(l3proto);
 	nf_ct_extend_unregister(&nat_extend);
 	rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL);
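
One subtlety worth noting: the bysource chains follow the standard RCU hlist
discipline. Writers serialize on nf_nat_lock and publish with
hlist_add_head_rcu(); readers traverse under rcu_read_lock() only; and
nf_nat_net_exit() must call synchronize_rcu() before nf_ct_free_hashtable()
so no reader can still be walking a chain in the table being freed. A
condensed, hypothetical sketch of the two sides (example_* names are
illustrative, not from the patch; the four-argument
hlist_for_each_entry_rcu() matches this kernel era):

static void example_insert(struct net *net, struct nf_conn_nat *nat,
			   unsigned int srchash)
{
	/* Writer side: serialized by nf_nat_lock, published via RCU. */
	spin_lock_bh(&nf_nat_lock);
	hlist_add_head_rcu(&nat->bysource,
			   &net->ipv4.nat_bysource[srchash]);
	spin_unlock_bh(&nf_nat_lock);
}

static bool example_lookup(struct net *net, unsigned int h,
			   const struct nf_conntrack_tuple *tuple,
			   struct nf_conntrack_tuple *result)
{
	const struct nf_conn_nat *nat;
	const struct hlist_node *n;
	bool found = false;

	rcu_read_lock();
	hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) {
		if (same_src(nat->ct, tuple)) {
			/* Copy what is needed before leaving the RCU
			 * read-side critical section, as
			 * find_appropriate_src() does. */
			*result = nat->ct->tuplehash[IP_CT_DIR_REPLY].tuple;
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}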