about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorAlexey Dobriyan <adobriyan@gmail.com>2008-10-08 05:35:03 -0400
committerPatrick McHardy <kaber@trash.net>2008-10-08 05:35:03 -0400
commit400dad39d1c33fe797e47326d87a3f54d0ac5181 (patch)
treef3bb7c9f75bd24161c2dd745f1b475f5a6165cae
parent49ac8713b6d064adf7474080fdccebd7cce76be0 (diff)
netfilter: netns nf_conntrack: per-netns conntrack hash
* Make per-netns conntrack hash.

  The other solution is to add a ->ct_net pointer to tuplehashes and still have
  one hash. I tried that; it's ugly and requires more code deep down in
  protocol modules et al.

* Propagate the netns pointer to where needed, e.g. to conntrack iterators.

Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
-rw-r--r--include/net/netfilter/nf_conntrack.h6
-rw-r--r--include/net/netfilter/nf_conntrack_core.h3
-rw-r--r--include/net/netns/conntrack.h2
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c3
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c2
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c4
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c2
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c2
-rw-r--r--net/netfilter/nf_conntrack_core.c74
-rw-r--r--net/netfilter/nf_conntrack_helper.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c16
-rw-r--r--net/netfilter/nf_conntrack_pptp.c2
-rw-r--r--net/netfilter/nf_conntrack_proto.c4
-rw-r--r--net/netfilter/nf_conntrack_standalone.c4
-rw-r--r--net/netfilter/xt_connlimit.c2
16 files changed, 67 insertions, 63 deletions
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 5999c5313d0b..f5447f143047 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -195,11 +195,11 @@ extern void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced,
195 unsigned int size); 195 unsigned int size);
196 196
197extern struct nf_conntrack_tuple_hash * 197extern struct nf_conntrack_tuple_hash *
198__nf_conntrack_find(const struct nf_conntrack_tuple *tuple); 198__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple);
199 199
200extern void nf_conntrack_hash_insert(struct nf_conn *ct); 200extern void nf_conntrack_hash_insert(struct nf_conn *ct);
201 201
202extern void nf_conntrack_flush(void); 202extern void nf_conntrack_flush(struct net *net);
203 203
204extern bool nf_ct_get_tuplepr(const struct sk_buff *skb, 204extern bool nf_ct_get_tuplepr(const struct sk_buff *skb,
205 unsigned int nhoff, u_int16_t l3num, 205 unsigned int nhoff, u_int16_t l3num,
@@ -261,7 +261,7 @@ extern struct nf_conn nf_conntrack_untracked;
261 261
262/* Iterate over all conntracks: if iter returns true, it's deleted. */ 262/* Iterate over all conntracks: if iter returns true, it's deleted. */
263extern void 263extern void
264nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data); 264nf_ct_iterate_cleanup(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data);
265extern void nf_conntrack_free(struct nf_conn *ct); 265extern void nf_conntrack_free(struct nf_conn *ct);
266extern struct nf_conn * 266extern struct nf_conn *
267nf_conntrack_alloc(struct net *net, 267nf_conntrack_alloc(struct net *net,
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 532aa200cbc9..1c373564396a 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -48,7 +48,7 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
48 48
49/* Find a connection corresponding to a tuple. */ 49/* Find a connection corresponding to a tuple. */
50extern struct nf_conntrack_tuple_hash * 50extern struct nf_conntrack_tuple_hash *
51nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple); 51nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple);
52 52
53extern int __nf_conntrack_confirm(struct sk_buff *skb); 53extern int __nf_conntrack_confirm(struct sk_buff *skb);
54 54
@@ -71,7 +71,6 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
71 const struct nf_conntrack_l3proto *l3proto, 71 const struct nf_conntrack_l3proto *l3proto,
72 const struct nf_conntrack_l4proto *proto); 72 const struct nf_conntrack_l4proto *proto);
73 73
74extern struct hlist_head *nf_conntrack_hash;
75extern spinlock_t nf_conntrack_lock ; 74extern spinlock_t nf_conntrack_lock ;
76extern struct hlist_head unconfirmed; 75extern struct hlist_head unconfirmed;
77 76
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index edf84714d7c7..b767683f112b 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -5,5 +5,7 @@
5 5
6struct netns_ct { 6struct netns_ct {
7 atomic_t count; 7 atomic_t count;
8 struct hlist_head *hash;
9 int hash_vmalloc;
8}; 10};
9#endif 11#endif
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index 9a4822f8243f..5e1c81791e5a 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -129,7 +129,8 @@ static int masq_device_event(struct notifier_block *this,
129 and forget them. */ 129 and forget them. */
130 NF_CT_ASSERT(dev->ifindex != 0); 130 NF_CT_ASSERT(dev->ifindex != 0);
131 131
132 nf_ct_iterate_cleanup(device_cmp, (void *)(long)dev->ifindex); 132 nf_ct_iterate_cleanup(&init_net, device_cmp,
133 (void *)(long)dev->ifindex);
133 } 134 }
134 135
135 return NOTIFY_DONE; 136 return NOTIFY_DONE;
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 31abee3e29f9..03dd108015c2 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -323,7 +323,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
323 return -EINVAL; 323 return -EINVAL;
324 } 324 }
325 325
326 h = nf_conntrack_find_get(&tuple); 326 h = nf_conntrack_find_get(sock_net(sk), &tuple);
327 if (h) { 327 if (h) {
328 struct sockaddr_in sin; 328 struct sockaddr_in sin;
329 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); 329 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 4556805027f7..8e0afdc2b134 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -32,7 +32,7 @@ static struct hlist_node *ct_get_first(struct seq_file *seq)
32 for (st->bucket = 0; 32 for (st->bucket = 0;
33 st->bucket < nf_conntrack_htable_size; 33 st->bucket < nf_conntrack_htable_size;
34 st->bucket++) { 34 st->bucket++) {
35 n = rcu_dereference(nf_conntrack_hash[st->bucket].first); 35 n = rcu_dereference(init_net.ct.hash[st->bucket].first);
36 if (n) 36 if (n)
37 return n; 37 return n;
38 } 38 }
@@ -48,7 +48,7 @@ static struct hlist_node *ct_get_next(struct seq_file *seq,
48 while (head == NULL) { 48 while (head == NULL) {
49 if (++st->bucket >= nf_conntrack_htable_size) 49 if (++st->bucket >= nf_conntrack_htable_size)
50 return NULL; 50 return NULL;
51 head = rcu_dereference(nf_conntrack_hash[st->bucket].first); 51 head = rcu_dereference(init_net.ct.hash[st->bucket].first);
52 } 52 }
53 return head; 53 return head;
54} 54}
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index da8edcdaef32..daf346377b66 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -155,7 +155,7 @@ icmp_error_message(struct sk_buff *skb,
155 155
156 *ctinfo = IP_CT_RELATED; 156 *ctinfo = IP_CT_RELATED;
157 157
158 h = nf_conntrack_find_get(&innertuple); 158 h = nf_conntrack_find_get(&init_net, &innertuple);
159 if (!h) { 159 if (!h) {
160 pr_debug("icmp_error_message: no match\n"); 160 pr_debug("icmp_error_message: no match\n");
161 return -NF_ACCEPT; 161 return -NF_ACCEPT;
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 6c6a3cba8d50..5d4a5b70da2b 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -643,7 +643,7 @@ static int clean_nat(struct nf_conn *i, void *data)
643 643
644static void __exit nf_nat_cleanup(void) 644static void __exit nf_nat_cleanup(void)
645{ 645{
646 nf_ct_iterate_cleanup(&clean_nat, NULL); 646 nf_ct_iterate_cleanup(&init_net, &clean_nat, NULL);
647 synchronize_rcu(); 647 synchronize_rcu();
648 nf_ct_free_hashtable(bysource, nf_nat_vmalloced, nf_nat_htable_size); 648 nf_ct_free_hashtable(bysource, nf_nat_vmalloced, nf_nat_htable_size);
649 nf_ct_l3proto_put(l3proto); 649 nf_ct_l3proto_put(l3proto);
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 5756f30ebc68..548cf4f15c08 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -156,7 +156,7 @@ icmpv6_error_message(struct sk_buff *skb,
156 156
157 *ctinfo = IP_CT_RELATED; 157 *ctinfo = IP_CT_RELATED;
158 158
159 h = nf_conntrack_find_get(&intuple); 159 h = nf_conntrack_find_get(&init_net, &intuple);
160 if (!h) { 160 if (!h) {
161 pr_debug("icmpv6_error: no match\n"); 161 pr_debug("icmpv6_error: no match\n");
162 return -NF_ACCEPT; 162 return -NF_ACCEPT;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 8299b3490e77..da56b2605529 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -50,15 +50,11 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
50int nf_conntrack_max __read_mostly; 50int nf_conntrack_max __read_mostly;
51EXPORT_SYMBOL_GPL(nf_conntrack_max); 51EXPORT_SYMBOL_GPL(nf_conntrack_max);
52 52
53struct hlist_head *nf_conntrack_hash __read_mostly;
54EXPORT_SYMBOL_GPL(nf_conntrack_hash);
55
56struct nf_conn nf_conntrack_untracked __read_mostly; 53struct nf_conn nf_conntrack_untracked __read_mostly;
57EXPORT_SYMBOL_GPL(nf_conntrack_untracked); 54EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
58 55
59unsigned int nf_ct_log_invalid __read_mostly; 56unsigned int nf_ct_log_invalid __read_mostly;
60HLIST_HEAD(unconfirmed); 57HLIST_HEAD(unconfirmed);
61static int nf_conntrack_vmalloc __read_mostly;
62static struct kmem_cache *nf_conntrack_cachep __read_mostly; 58static struct kmem_cache *nf_conntrack_cachep __read_mostly;
63 59
64DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat); 60DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
@@ -242,7 +238,7 @@ static void death_by_timeout(unsigned long ul_conntrack)
242} 238}
243 239
244struct nf_conntrack_tuple_hash * 240struct nf_conntrack_tuple_hash *
245__nf_conntrack_find(const struct nf_conntrack_tuple *tuple) 241__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
246{ 242{
247 struct nf_conntrack_tuple_hash *h; 243 struct nf_conntrack_tuple_hash *h;
248 struct hlist_node *n; 244 struct hlist_node *n;
@@ -252,7 +248,7 @@ __nf_conntrack_find(const struct nf_conntrack_tuple *tuple)
252 * at least once for the stats anyway. 248 * at least once for the stats anyway.
253 */ 249 */
254 local_bh_disable(); 250 local_bh_disable();
255 hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) { 251 hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
256 if (nf_ct_tuple_equal(tuple, &h->tuple)) { 252 if (nf_ct_tuple_equal(tuple, &h->tuple)) {
257 NF_CT_STAT_INC(found); 253 NF_CT_STAT_INC(found);
258 local_bh_enable(); 254 local_bh_enable();
@@ -268,13 +264,13 @@ EXPORT_SYMBOL_GPL(__nf_conntrack_find);
268 264
269/* Find a connection corresponding to a tuple. */ 265/* Find a connection corresponding to a tuple. */
270struct nf_conntrack_tuple_hash * 266struct nf_conntrack_tuple_hash *
271nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple) 267nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
272{ 268{
273 struct nf_conntrack_tuple_hash *h; 269 struct nf_conntrack_tuple_hash *h;
274 struct nf_conn *ct; 270 struct nf_conn *ct;
275 271
276 rcu_read_lock(); 272 rcu_read_lock();
277 h = __nf_conntrack_find(tuple); 273 h = __nf_conntrack_find(net, tuple);
278 if (h) { 274 if (h) {
279 ct = nf_ct_tuplehash_to_ctrack(h); 275 ct = nf_ct_tuplehash_to_ctrack(h);
280 if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use))) 276 if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
@@ -290,10 +286,12 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
290 unsigned int hash, 286 unsigned int hash,
291 unsigned int repl_hash) 287 unsigned int repl_hash)
292{ 288{
289 struct net *net = nf_ct_net(ct);
290
293 hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode, 291 hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
294 &nf_conntrack_hash[hash]); 292 &net->ct.hash[hash]);
295 hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode, 293 hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
296 &nf_conntrack_hash[repl_hash]); 294 &net->ct.hash[repl_hash]);
297} 295}
298 296
299void nf_conntrack_hash_insert(struct nf_conn *ct) 297void nf_conntrack_hash_insert(struct nf_conn *ct)
@@ -319,8 +317,10 @@ __nf_conntrack_confirm(struct sk_buff *skb)
319 struct nf_conn_help *help; 317 struct nf_conn_help *help;
320 struct hlist_node *n; 318 struct hlist_node *n;
321 enum ip_conntrack_info ctinfo; 319 enum ip_conntrack_info ctinfo;
320 struct net *net;
322 321
323 ct = nf_ct_get(skb, &ctinfo); 322 ct = nf_ct_get(skb, &ctinfo);
323 net = nf_ct_net(ct);
324 324
325 /* ipt_REJECT uses nf_conntrack_attach to attach related 325 /* ipt_REJECT uses nf_conntrack_attach to attach related
326 ICMP/TCP RST packets in other direction. Actual packet 326 ICMP/TCP RST packets in other direction. Actual packet
@@ -347,11 +347,11 @@ __nf_conntrack_confirm(struct sk_buff *skb)
347 /* See if there's one in the list already, including reverse: 347 /* See if there's one in the list already, including reverse:
348 NAT could have grabbed it without realizing, since we're 348 NAT could have grabbed it without realizing, since we're
349 not in the hash. If there is, we lost race. */ 349 not in the hash. If there is, we lost race. */
350 hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) 350 hlist_for_each_entry(h, n, &net->ct.hash[hash], hnode)
351 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, 351 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
352 &h->tuple)) 352 &h->tuple))
353 goto out; 353 goto out;
354 hlist_for_each_entry(h, n, &nf_conntrack_hash[repl_hash], hnode) 354 hlist_for_each_entry(h, n, &net->ct.hash[repl_hash], hnode)
355 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple, 355 if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
356 &h->tuple)) 356 &h->tuple))
357 goto out; 357 goto out;
@@ -394,6 +394,7 @@ int
394nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, 394nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
395 const struct nf_conn *ignored_conntrack) 395 const struct nf_conn *ignored_conntrack)
396{ 396{
397 struct net *net = nf_ct_net(ignored_conntrack);
397 struct nf_conntrack_tuple_hash *h; 398 struct nf_conntrack_tuple_hash *h;
398 struct hlist_node *n; 399 struct hlist_node *n;
399 unsigned int hash = hash_conntrack(tuple); 400 unsigned int hash = hash_conntrack(tuple);
@@ -402,7 +403,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
402 * least once for the stats anyway. 403 * least once for the stats anyway.
403 */ 404 */
404 rcu_read_lock_bh(); 405 rcu_read_lock_bh();
405 hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) { 406 hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
406 if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack && 407 if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
407 nf_ct_tuple_equal(tuple, &h->tuple)) { 408 nf_ct_tuple_equal(tuple, &h->tuple)) {
408 NF_CT_STAT_INC(found); 409 NF_CT_STAT_INC(found);
@@ -421,7 +422,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
421 422
422/* There's a small race here where we may free a just-assured 423/* There's a small race here where we may free a just-assured
423 connection. Too bad: we're in trouble anyway. */ 424 connection. Too bad: we're in trouble anyway. */
424static noinline int early_drop(unsigned int hash) 425static noinline int early_drop(struct net *net, unsigned int hash)
425{ 426{
426 /* Use oldest entry, which is roughly LRU */ 427 /* Use oldest entry, which is roughly LRU */
427 struct nf_conntrack_tuple_hash *h; 428 struct nf_conntrack_tuple_hash *h;
@@ -432,7 +433,7 @@ static noinline int early_drop(unsigned int hash)
432 433
433 rcu_read_lock(); 434 rcu_read_lock();
434 for (i = 0; i < nf_conntrack_htable_size; i++) { 435 for (i = 0; i < nf_conntrack_htable_size; i++) {
435 hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], 436 hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash],
436 hnode) { 437 hnode) {
437 tmp = nf_ct_tuplehash_to_ctrack(h); 438 tmp = nf_ct_tuplehash_to_ctrack(h);
438 if (!test_bit(IPS_ASSURED_BIT, &tmp->status)) 439 if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
@@ -478,7 +479,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
478 if (nf_conntrack_max && 479 if (nf_conntrack_max &&
479 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { 480 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
480 unsigned int hash = hash_conntrack(orig); 481 unsigned int hash = hash_conntrack(orig);
481 if (!early_drop(hash)) { 482 if (!early_drop(net, hash)) {
482 atomic_dec(&net->ct.count); 483 atomic_dec(&net->ct.count);
483 if (net_ratelimit()) 484 if (net_ratelimit())
484 printk(KERN_WARNING 485 printk(KERN_WARNING
@@ -631,7 +632,7 @@ resolve_normal_ct(struct sk_buff *skb,
631 } 632 }
632 633
633 /* look for tuple match */ 634 /* look for tuple match */
634 h = nf_conntrack_find_get(&tuple); 635 h = nf_conntrack_find_get(&init_net, &tuple);
635 if (!h) { 636 if (!h) {
636 h = init_conntrack(&init_net, &tuple, l3proto, l4proto, skb, 637 h = init_conntrack(&init_net, &tuple, l3proto, l4proto, skb,
637 dataoff); 638 dataoff);
@@ -941,7 +942,7 @@ static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
941 942
942/* Bring out ya dead! */ 943/* Bring out ya dead! */
943static struct nf_conn * 944static struct nf_conn *
944get_next_corpse(int (*iter)(struct nf_conn *i, void *data), 945get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
945 void *data, unsigned int *bucket) 946 void *data, unsigned int *bucket)
946{ 947{
947 struct nf_conntrack_tuple_hash *h; 948 struct nf_conntrack_tuple_hash *h;
@@ -950,7 +951,7 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
950 951
951 spin_lock_bh(&nf_conntrack_lock); 952 spin_lock_bh(&nf_conntrack_lock);
952 for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { 953 for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
953 hlist_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnode) { 954 hlist_for_each_entry(h, n, &net->ct.hash[*bucket], hnode) {
954 ct = nf_ct_tuplehash_to_ctrack(h); 955 ct = nf_ct_tuplehash_to_ctrack(h);
955 if (iter(ct, data)) 956 if (iter(ct, data))
956 goto found; 957 goto found;
@@ -969,13 +970,14 @@ found:
969 return ct; 970 return ct;
970} 971}
971 972
972void 973void nf_ct_iterate_cleanup(struct net *net,
973nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data) 974 int (*iter)(struct nf_conn *i, void *data),
975 void *data)
974{ 976{
975 struct nf_conn *ct; 977 struct nf_conn *ct;
976 unsigned int bucket = 0; 978 unsigned int bucket = 0;
977 979
978 while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) { 980 while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
979 /* Time to push up daises... */ 981 /* Time to push up daises... */
980 if (del_timer(&ct->timeout)) 982 if (del_timer(&ct->timeout))
981 death_by_timeout((unsigned long)ct); 983 death_by_timeout((unsigned long)ct);
@@ -1001,9 +1003,9 @@ void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, unsigned int s
1001} 1003}
1002EXPORT_SYMBOL_GPL(nf_ct_free_hashtable); 1004EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
1003 1005
1004void nf_conntrack_flush(void) 1006void nf_conntrack_flush(struct net *net)
1005{ 1007{
1006 nf_ct_iterate_cleanup(kill_all, NULL); 1008 nf_ct_iterate_cleanup(net, kill_all, NULL);
1007} 1009}
1008EXPORT_SYMBOL_GPL(nf_conntrack_flush); 1010EXPORT_SYMBOL_GPL(nf_conntrack_flush);
1009 1011
@@ -1020,7 +1022,7 @@ void nf_conntrack_cleanup(struct net *net)
1020 1022
1021 nf_ct_event_cache_flush(); 1023 nf_ct_event_cache_flush();
1022 i_see_dead_people: 1024 i_see_dead_people:
1023 nf_conntrack_flush(); 1025 nf_conntrack_flush(net);
1024 if (atomic_read(&net->ct.count) != 0) { 1026 if (atomic_read(&net->ct.count) != 0) {
1025 schedule(); 1027 schedule();
1026 goto i_see_dead_people; 1028 goto i_see_dead_people;
@@ -1032,7 +1034,7 @@ void nf_conntrack_cleanup(struct net *net)
1032 rcu_assign_pointer(nf_ct_destroy, NULL); 1034 rcu_assign_pointer(nf_ct_destroy, NULL);
1033 1035
1034 kmem_cache_destroy(nf_conntrack_cachep); 1036 kmem_cache_destroy(nf_conntrack_cachep);
1035 nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc, 1037 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
1036 nf_conntrack_htable_size); 1038 nf_conntrack_htable_size);
1037 1039
1038 nf_conntrack_acct_fini(); 1040 nf_conntrack_acct_fini();
@@ -1097,8 +1099,8 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1097 */ 1099 */
1098 spin_lock_bh(&nf_conntrack_lock); 1100 spin_lock_bh(&nf_conntrack_lock);
1099 for (i = 0; i < nf_conntrack_htable_size; i++) { 1101 for (i = 0; i < nf_conntrack_htable_size; i++) {
1100 while (!hlist_empty(&nf_conntrack_hash[i])) { 1102 while (!hlist_empty(&init_net.ct.hash[i])) {
1101 h = hlist_entry(nf_conntrack_hash[i].first, 1103 h = hlist_entry(init_net.ct.hash[i].first,
1102 struct nf_conntrack_tuple_hash, hnode); 1104 struct nf_conntrack_tuple_hash, hnode);
1103 hlist_del_rcu(&h->hnode); 1105 hlist_del_rcu(&h->hnode);
1104 bucket = __hash_conntrack(&h->tuple, hashsize, rnd); 1106 bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
@@ -1106,12 +1108,12 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1106 } 1108 }
1107 } 1109 }
1108 old_size = nf_conntrack_htable_size; 1110 old_size = nf_conntrack_htable_size;
1109 old_vmalloced = nf_conntrack_vmalloc; 1111 old_vmalloced = init_net.ct.hash_vmalloc;
1110 old_hash = nf_conntrack_hash; 1112 old_hash = init_net.ct.hash;
1111 1113
1112 nf_conntrack_htable_size = hashsize; 1114 nf_conntrack_htable_size = hashsize;
1113 nf_conntrack_vmalloc = vmalloced; 1115 init_net.ct.hash_vmalloc = vmalloced;
1114 nf_conntrack_hash = hash; 1116 init_net.ct.hash = hash;
1115 nf_conntrack_hash_rnd = rnd; 1117 nf_conntrack_hash_rnd = rnd;
1116 spin_unlock_bh(&nf_conntrack_lock); 1118 spin_unlock_bh(&nf_conntrack_lock);
1117 1119
@@ -1146,9 +1148,9 @@ int nf_conntrack_init(struct net *net)
1146 max_factor = 4; 1148 max_factor = 4;
1147 } 1149 }
1148 atomic_set(&net->ct.count, 0); 1150 atomic_set(&net->ct.count, 0);
1149 nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1151 net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
1150 &nf_conntrack_vmalloc); 1152 &net->ct.hash_vmalloc);
1151 if (!nf_conntrack_hash) { 1153 if (!net->ct.hash) {
1152 printk(KERN_ERR "Unable to create nf_conntrack_hash\n"); 1154 printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
1153 goto err_out; 1155 goto err_out;
1154 } 1156 }
@@ -1207,7 +1209,7 @@ out_fini_proto:
1207err_free_conntrack_slab: 1209err_free_conntrack_slab:
1208 kmem_cache_destroy(nf_conntrack_cachep); 1210 kmem_cache_destroy(nf_conntrack_cachep);
1209err_free_hash: 1211err_free_hash:
1210 nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc, 1212 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
1211 nf_conntrack_htable_size); 1213 nf_conntrack_htable_size);
1212err_out: 1214err_out:
1213 return -ENOMEM; 1215 return -ENOMEM;
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 8e0b4c8f62a8..d91278dfdafd 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -159,7 +159,7 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
159 hlist_for_each_entry(h, n, &unconfirmed, hnode) 159 hlist_for_each_entry(h, n, &unconfirmed, hnode)
160 unhelp(h, me); 160 unhelp(h, me);
161 for (i = 0; i < nf_conntrack_htable_size; i++) { 161 for (i = 0; i < nf_conntrack_htable_size; i++) {
162 hlist_for_each_entry(h, n, &nf_conntrack_hash[i], hnode) 162 hlist_for_each_entry(h, n, &init_net.ct.hash[i], hnode)
163 unhelp(h, me); 163 unhelp(h, me);
164 } 164 }
165 spin_unlock_bh(&nf_conntrack_lock); 165 spin_unlock_bh(&nf_conntrack_lock);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index da3cdc8db700..918a3358a126 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -549,7 +549,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
549 last = (struct nf_conn *)cb->args[1]; 549 last = (struct nf_conn *)cb->args[1];
550 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { 550 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
551restart: 551restart:
552 hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[cb->args[0]], 552 hlist_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
553 hnode) { 553 hnode) {
554 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) 554 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
555 continue; 555 continue;
@@ -794,14 +794,14 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
794 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3); 794 err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
795 else { 795 else {
796 /* Flush the whole table */ 796 /* Flush the whole table */
797 nf_conntrack_flush(); 797 nf_conntrack_flush(&init_net);
798 return 0; 798 return 0;
799 } 799 }
800 800
801 if (err < 0) 801 if (err < 0)
802 return err; 802 return err;
803 803
804 h = nf_conntrack_find_get(&tuple); 804 h = nf_conntrack_find_get(&init_net, &tuple);
805 if (!h) 805 if (!h)
806 return -ENOENT; 806 return -ENOENT;
807 807
@@ -847,7 +847,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
847 if (err < 0) 847 if (err < 0)
848 return err; 848 return err;
849 849
850 h = nf_conntrack_find_get(&tuple); 850 h = nf_conntrack_find_get(&init_net, &tuple);
851 if (!h) 851 if (!h)
852 return -ENOENT; 852 return -ENOENT;
853 853
@@ -1213,9 +1213,9 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1213 1213
1214 spin_lock_bh(&nf_conntrack_lock); 1214 spin_lock_bh(&nf_conntrack_lock);
1215 if (cda[CTA_TUPLE_ORIG]) 1215 if (cda[CTA_TUPLE_ORIG])
1216 h = __nf_conntrack_find(&otuple); 1216 h = __nf_conntrack_find(&init_net, &otuple);
1217 else if (cda[CTA_TUPLE_REPLY]) 1217 else if (cda[CTA_TUPLE_REPLY])
1218 h = __nf_conntrack_find(&rtuple); 1218 h = __nf_conntrack_find(&init_net, &rtuple);
1219 1219
1220 if (h == NULL) { 1220 if (h == NULL) {
1221 struct nf_conntrack_tuple master; 1221 struct nf_conntrack_tuple master;
@@ -1230,7 +1230,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
1230 if (err < 0) 1230 if (err < 0)
1231 goto out_unlock; 1231 goto out_unlock;
1232 1232
1233 master_h = __nf_conntrack_find(&master); 1233 master_h = __nf_conntrack_find(&init_net, &master);
1234 if (master_h == NULL) { 1234 if (master_h == NULL) {
1235 err = -ENOENT; 1235 err = -ENOENT;
1236 goto out_unlock; 1236 goto out_unlock;
@@ -1670,7 +1670,7 @@ ctnetlink_create_expect(struct nlattr *cda[], u_int8_t u3)
1670 return err; 1670 return err;
1671 1671
1672 /* Look for master conntrack of this expectation */ 1672 /* Look for master conntrack of this expectation */
1673 h = nf_conntrack_find_get(&master_tuple); 1673 h = nf_conntrack_find_get(&init_net, &master_tuple);
1674 if (!h) 1674 if (!h)
1675 return -ENOENT; 1675 return -ENOENT;
1676 ct = nf_ct_tuplehash_to_ctrack(h); 1676 ct = nf_ct_tuplehash_to_ctrack(h);
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 97e54b0e43a3..7caf45b59d2c 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -143,7 +143,7 @@ static int destroy_sibling_or_exp(const struct nf_conntrack_tuple *t)
143 pr_debug("trying to timeout ct or exp for tuple "); 143 pr_debug("trying to timeout ct or exp for tuple ");
144 nf_ct_dump_tuple(t); 144 nf_ct_dump_tuple(t);
145 145
146 h = nf_conntrack_find_get(t); 146 h = nf_conntrack_find_get(&init_net, t);
147 if (h) { 147 if (h) {
148 sibling = nf_ct_tuplehash_to_ctrack(h); 148 sibling = nf_ct_tuplehash_to_ctrack(h);
149 pr_debug("setting timeout of conntrack %p to 0\n", sibling); 149 pr_debug("setting timeout of conntrack %p to 0\n", sibling);
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index a49fc932629b..3a2f7ef997f4 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -219,7 +219,7 @@ void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
219 synchronize_rcu(); 219 synchronize_rcu();
220 220
221 /* Remove all contrack entries for this protocol */ 221 /* Remove all contrack entries for this protocol */
222 nf_ct_iterate_cleanup(kill_l3proto, proto); 222 nf_ct_iterate_cleanup(&init_net, kill_l3proto, proto);
223} 223}
224EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister); 224EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister);
225 225
@@ -328,7 +328,7 @@ void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
328 synchronize_rcu(); 328 synchronize_rcu();
329 329
330 /* Remove all contrack entries for this protocol */ 330 /* Remove all contrack entries for this protocol */
331 nf_ct_iterate_cleanup(kill_l4proto, l4proto); 331 nf_ct_iterate_cleanup(&init_net, kill_l4proto, l4proto);
332} 332}
333EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister); 333EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister);
334 334
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 021b505907d2..5456e4b94244 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -51,7 +51,7 @@ static struct hlist_node *ct_get_first(struct seq_file *seq)
51 for (st->bucket = 0; 51 for (st->bucket = 0;
52 st->bucket < nf_conntrack_htable_size; 52 st->bucket < nf_conntrack_htable_size;
53 st->bucket++) { 53 st->bucket++) {
54 n = rcu_dereference(nf_conntrack_hash[st->bucket].first); 54 n = rcu_dereference(init_net.ct.hash[st->bucket].first);
55 if (n) 55 if (n)
56 return n; 56 return n;
57 } 57 }
@@ -67,7 +67,7 @@ static struct hlist_node *ct_get_next(struct seq_file *seq,
67 while (head == NULL) { 67 while (head == NULL) {
68 if (++st->bucket >= nf_conntrack_htable_size) 68 if (++st->bucket >= nf_conntrack_htable_size)
69 return NULL; 69 return NULL;
70 head = rcu_dereference(nf_conntrack_hash[st->bucket].first); 70 head = rcu_dereference(init_net.ct.hash[st->bucket].first);
71 } 71 }
72 return head; 72 return head;
73} 73}
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index d2453d182d68..bd00830ff697 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -123,7 +123,7 @@ static int count_them(struct xt_connlimit_data *data,
123 123
124 /* check the saved connections */ 124 /* check the saved connections */
125 list_for_each_entry_safe(conn, tmp, hash, list) { 125 list_for_each_entry_safe(conn, tmp, hash, list) {
126 found = __nf_conntrack_find(&conn->tuple); 126 found = __nf_conntrack_find(&init_net, &conn->tuple);
127 found_ct = NULL; 127 found_ct = NULL;
128 128
129 if (found != NULL) 129 if (found != NULL)