author		Patrick McHardy <kaber@trash.net>	2010-02-08 14:18:07 -0500
committer	David S. Miller <davem@davemloft.net>	2010-02-08 14:18:07 -0500
commit		d696c7bdaa55e2208e56c6f98e6bc1599f34286d (patch)
tree		628782197c21b1e8611a41914865cdba586a1c65 /net
parent		14c7dbe043d01a83a30633ab6b109ba2ac61d9f7 (diff)
netfilter: nf_conntrack: fix hash resizing with namespaces
As noticed by Jon Masters <jonathan@jonmasters.org>, the conntrack hash
size is global and not per namespace, but modifiable at runtime through
/sys/module/nf_conntrack/hashsize. Changing the hash size will only
resize the hash in the current namespace however, so other namespaces
will use an invalid hash size. This can cause crashes when enlarging
the hashsize, or false negative lookups when shrinking it.

Move the hash size into the per-namespace data and only use the global
hash size to initialize the per-namespace value when instantiating a
new namespace. Additionally restrict hash resizing to init_net for now
as other namespaces are not handled currently.

Cc: stable@kernel.org
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
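The failure mode is easiest to see in isolation. Below is a minimal userspace sketch, not kernel code and with all names invented: two simulated namespaces size their tables from one shared global, the global is then raised while only one table is reallocated, and a bucket computed from the stale global overruns the smaller table.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal userspace sketch of the bug (invented names, not kernel code):
 * two "namespaces" size their hash tables from one shared global, which
 * is later changed while only one table is reallocated. */
static unsigned int global_htable_size = 1024;

struct ns {
	void **hash;
	unsigned int allocated;	/* buckets actually allocated */
};

static void ns_init(struct ns *n)
{
	n->allocated = global_htable_size;
	n->hash = calloc(n->allocated, sizeof(void *));
}

int main(void)
{
	struct ns a, b;

	ns_init(&a);
	ns_init(&b);

	/* A hashsize write in namespace "a": the global and a's table are
	 * updated, b's table is not. */
	global_htable_size = 4096;
	free(a.hash);
	ns_init(&a);

	/* b still derives buckets from the global: with 4096 buckets assumed
	 * but only 1024 allocated, indexes >= 1024 are out of bounds (the
	 * crash case); a shrunk global instead sends lookups to the wrong
	 * bucket (the false-negative case). */
	unsigned int bucket = ((uint64_t)0xdeadbeefu * global_htable_size) >> 32;
	printf("bucket %u vs %u allocated buckets\n", bucket, b.allocated);
	return 0;
}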
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c		2
-rw-r--r--	net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c	4
-rw-r--r--	net/ipv4/netfilter/nf_nat_core.c			22
-rw-r--r--	net/netfilter/nf_conntrack_core.c			53
-rw-r--r--	net/netfilter/nf_conntrack_expect.c			2
-rw-r--r--	net/netfilter/nf_conntrack_helper.c			2
-rw-r--r--	net/netfilter/nf_conntrack_netlink.c			2
-rw-r--r--	net/netfilter/nf_conntrack_standalone.c			7
8 files changed, 47 insertions(+), 47 deletions(-)
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index d171b123a65..d1ea38a7c49 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -210,7 +210,7 @@ static ctl_table ip_ct_sysctl_table[] = {
 	},
 	{
 		.procname	= "ip_conntrack_buckets",
-		.data		= &nf_conntrack_htable_size,
+		.data		= &init_net.ct.htable_size,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0444,
 		.proc_handler	= proc_dointvec,
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 8668a3defda..2fb7b76da94 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -32,7 +32,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
 	struct hlist_nulls_node *n;
 
 	for (st->bucket = 0;
-	     st->bucket < nf_conntrack_htable_size;
+	     st->bucket < net->ct.htable_size;
 	     st->bucket++) {
 		n = rcu_dereference(net->ct.hash[st->bucket].first);
 		if (!is_a_nulls(n))
@@ -50,7 +50,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
 	head = rcu_dereference(head->next);
 	while (is_a_nulls(head)) {
 		if (likely(get_nulls_value(head) == st->bucket)) {
-			if (++st->bucket >= nf_conntrack_htable_size)
+			if (++st->bucket >= net->ct.htable_size)
 				return NULL;
 		}
 		head = rcu_dereference(net->ct.hash[st->bucket].first);
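For readers unfamiliar with nulls lists: these chains are not NULL-terminated. The terminator is an odd value that encodes the bucket number, which is what the get_nulls_value() comparison against st->bucket above relies on. A standalone sketch of the encoding follows; the helper names mirror the kernel's, but this is an illustration, not the kernel implementation.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Standalone sketch of the hlist_nulls terminator encoding: odd values mark
 * the end of a chain and carry the bucket id, so a lockless reader can tell
 * whether it finished the bucket it started in or was moved onto another
 * chain by a concurrent rehash/delete. */
static uintptr_t make_nulls(unsigned int bucket)
{
	return ((uintptr_t)bucket << 1) | 1;	/* odd => terminator */
}

static int is_a_nulls(uintptr_t p)
{
	return p & 1;
}

static unsigned int get_nulls_value(uintptr_t p)
{
	return p >> 1;
}

int main(void)
{
	uintptr_t end = make_nulls(42);

	assert(is_a_nulls(end));
	/* A walker that started in bucket 42 and reaches this terminator is
	 * done; any other bucket id means the entry moved and the walk must
	 * restart, which is exactly the ct_get_next() logic above. */
	printf("chain ends in bucket %u\n", get_nulls_value(end));
	return 0;
}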
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index fe1a64479dd..26066a2327a 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -35,9 +35,6 @@ static DEFINE_SPINLOCK(nf_nat_lock);
 
 static struct nf_conntrack_l3proto *l3proto __read_mostly;
 
-/* Calculated at init based on memory size */
-static unsigned int nf_nat_htable_size __read_mostly;
-
 #define MAX_IP_NAT_PROTO 256
 static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
 						__read_mostly;
@@ -72,7 +69,7 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put);
 
 /* We keep an extra hash for each conntrack, for fast searching. */
 static inline unsigned int
-hash_by_src(const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
 {
 	unsigned int hash;
 
@@ -80,7 +77,7 @@ hash_by_src(const struct nf_conntrack_tuple *tuple)
 	hash = jhash_3words((__force u32)tuple->src.u3.ip,
 			    (__force u32)tuple->src.u.all,
 			    tuple->dst.protonum, 0);
-	return ((u64)hash * nf_nat_htable_size) >> 32;
+	return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
 }
 
 /* Is this tuple already taken? (not by us) */
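The return statement above maps a 32-bit hash into [0, size) with a multiply-and-shift instead of a modulo. Since the table size is now a per-namespace runtime value, this keeps the mapping cheap (no division) and correct for non-power-of-two sizes. A self-contained demonstration of the idiom:

#include <stdint.h>
#include <stdio.h>

/* The ((u64)hash * size) >> 32 idiom: treat the 32-bit hash as a fraction
 * of 2^32 and scale it by the table size, yielding a bucket in [0, size). */
static unsigned int bucket_of(uint32_t hash, unsigned int size)
{
	return ((uint64_t)hash * size) >> 32;
}

int main(void)
{
	unsigned int size = 16384;

	/* Smallest and largest hashes land on the first and last bucket:
	 * prints "0 16383". */
	printf("%u %u\n", bucket_of(0, size), bucket_of(UINT32_MAX, size));
	return 0;
}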
@@ -147,7 +144,7 @@ find_appropriate_src(struct net *net,
 		     struct nf_conntrack_tuple *result,
 		     const struct nf_nat_range *range)
 {
-	unsigned int h = hash_by_src(tuple);
+	unsigned int h = hash_by_src(net, tuple);
 	const struct nf_conn_nat *nat;
 	const struct nf_conn *ct;
 	const struct hlist_node *n;
@@ -330,7 +327,7 @@ nf_nat_setup_info(struct nf_conn *ct,
 	if (have_to_hash) {
 		unsigned int srchash;
 
-		srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+		srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 		spin_lock_bh(&nf_nat_lock);
 		/* nf_conntrack_alter_reply might re-allocate exntension aera */
 		nat = nfct_nat(ct);
@@ -679,8 +676,10 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
 
 static int __net_init nf_nat_net_init(struct net *net)
 {
-	net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size,
-						       &net->ipv4.nat_vmalloced, 0);
+	/* Leave them the same for the moment. */
+	net->ipv4.nat_htable_size = net->ct.htable_size;
+	net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
+						       &net->ipv4.nat_vmalloced, 0);
 	if (!net->ipv4.nat_bysource)
 		return -ENOMEM;
 	return 0;
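Note that the allocator is handed the address of the freshly copied per-namespace size (&net->ipv4.nat_htable_size) rather than its value. A userspace sketch of this inherit-then-allocate shape follows; the names are invented, and the idea that the allocator may write an adjusted size back through the pointer is an assumption on my part (the diff only shows the signature).

#include <stdlib.h>

/* Userspace sketch (invented names) of nf_nat_net_init() above: each new
 * namespace copies the conntrack table size into its own NAT field, then
 * allocates from that field by address. */
struct netns {
	unsigned int ct_htable_size;	/* per-netns conntrack size */
	unsigned int nat_htable_size;	/* per-netns NAT size */
	void **nat_bysource;
};

static void **alloc_hashtable(unsigned int *sizep)
{
	/* Assumption: a real allocator might round *sizep up and write the
	 * final value back, which is why it takes a pointer. */
	return calloc(*sizep, sizeof(void *));
}

static int netns_nat_init(struct netns *net)
{
	net->nat_htable_size = net->ct_htable_size;	/* "leave them the same" */
	net->nat_bysource = alloc_hashtable(&net->nat_htable_size);
	return net->nat_bysource ? 0 : -1;
}

int main(void)
{
	struct netns net = { .ct_htable_size = 16384 };

	return netns_nat_init(&net);
}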
@@ -703,7 +702,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
 	nf_ct_iterate_cleanup(net, &clean_nat, NULL);
 	synchronize_rcu();
 	nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
-			     nf_nat_htable_size);
+			     net->ipv4.nat_htable_size);
 }
 
 static struct pernet_operations nf_nat_net_ops = {
@@ -724,9 +723,6 @@ static int __init nf_nat_init(void)
 		return ret;
 	}
 
-	/* Leave them the same for the moment. */
-	nf_nat_htable_size = nf_conntrack_htable_size;
-
 	ret = register_pernet_subsys(&nf_nat_net_ops);
 	if (ret < 0)
 		goto cleanup_extend;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 9de4bd4c0dd..4d79e3c1616 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -30,6 +30,7 @@
 #include <linux/netdevice.h>
 #include <linux/socket.h>
 #include <linux/mm.h>
+#include <linux/nsproxy.h>
 #include <linux/rculist_nulls.h>
 
 #include <net/netfilter/nf_conntrack.h>
@@ -84,9 +85,10 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
 	return ((u64)h * size) >> 32;
 }
 
-static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
+static inline u_int32_t hash_conntrack(const struct net *net,
+				       const struct nf_conntrack_tuple *tuple)
 {
-	return __hash_conntrack(tuple, nf_conntrack_htable_size,
+	return __hash_conntrack(tuple, net->ct.htable_size,
 				nf_conntrack_hash_rnd);
 }
 
@@ -294,7 +296,7 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
 {
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_node *n;
-	unsigned int hash = hash_conntrack(tuple);
+	unsigned int hash = hash_conntrack(net, tuple);
 
 	/* Disable BHs the entire time since we normally need to disable them
 	 * at least once for the stats anyway.
@@ -364,10 +366,11 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
 
 void nf_conntrack_hash_insert(struct nf_conn *ct)
 {
+	struct net *net = nf_ct_net(ct);
 	unsigned int hash, repl_hash;
 
-	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+	hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+	repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
 	__nf_conntrack_hash_insert(ct, hash, repl_hash);
 }
@@ -395,8 +398,8 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
 		return NF_ACCEPT;
 
-	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+	hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+	repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
 	/* We're not in hash table, and we refuse to set up related
 	   connections for unconfirmed conns.  But packet copies and
@@ -466,7 +469,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
 	struct net *net = nf_ct_net(ignored_conntrack);
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_node *n;
-	unsigned int hash = hash_conntrack(tuple);
+	unsigned int hash = hash_conntrack(net, tuple);
 
 	/* Disable BHs the entire time since we need to disable them at
 	 * least once for the stats anyway.
@@ -501,7 +504,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
 	int dropped = 0;
 
 	rcu_read_lock();
-	for (i = 0; i < nf_conntrack_htable_size; i++) {
+	for (i = 0; i < net->ct.htable_size; i++) {
 		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
 					       hnnode) {
 			tmp = nf_ct_tuplehash_to_ctrack(h);
@@ -521,7 +524,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
 		if (cnt >= NF_CT_EVICTION_RANGE)
 			break;
 
-		hash = (hash + 1) % nf_conntrack_htable_size;
+		hash = (hash + 1) % net->ct.htable_size;
 	}
 	rcu_read_unlock();
 
@@ -555,7 +558,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
 
 	if (nf_conntrack_max &&
 	    unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
-		unsigned int hash = hash_conntrack(orig);
+		unsigned int hash = hash_conntrack(net, orig);
 		if (!early_drop(net, hash)) {
 			atomic_dec(&net->ct.count);
 			if (net_ratelimit())
@@ -1012,7 +1015,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
 	struct hlist_nulls_node *n;
 
 	spin_lock_bh(&nf_conntrack_lock);
-	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
+	for (; *bucket < net->ct.htable_size; (*bucket)++) {
 		hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
 			ct = nf_ct_tuplehash_to_ctrack(h);
 			if (iter(ct, data))
@@ -1130,7 +1133,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
 	}
 
 	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
-			     nf_conntrack_htable_size);
+			     net->ct.htable_size);
 	nf_conntrack_ecache_fini(net);
 	nf_conntrack_acct_fini(net);
 	nf_conntrack_expect_fini(net);
@@ -1190,10 +1193,12 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 {
 	int i, bucket, vmalloced, old_vmalloced;
 	unsigned int hashsize, old_size;
-	int rnd;
 	struct hlist_nulls_head *hash, *old_hash;
 	struct nf_conntrack_tuple_hash *h;
 
+	if (current->nsproxy->net_ns != &init_net)
+		return -EOPNOTSUPP;
+
 	/* On boot, we can set this without any fancy locking. */
 	if (!nf_conntrack_htable_size)
 		return param_set_uint(val, kp);
@@ -1206,33 +1211,29 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 	if (!hash)
 		return -ENOMEM;
 
-	/* We have to rehash for the new table anyway, so we also can
-	 * use a new random seed */
-	get_random_bytes(&rnd, sizeof(rnd));
-
 	/* Lookups in the old hash might happen in parallel, which means we
 	 * might get false negatives during connection lookup. New connections
 	 * created because of a false negative won't make it into the hash
 	 * though since that required taking the lock.
 	 */
 	spin_lock_bh(&nf_conntrack_lock);
-	for (i = 0; i < nf_conntrack_htable_size; i++) {
+	for (i = 0; i < init_net.ct.htable_size; i++) {
 		while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
 			h = hlist_nulls_entry(init_net.ct.hash[i].first,
 					struct nf_conntrack_tuple_hash, hnnode);
 			hlist_nulls_del_rcu(&h->hnnode);
-			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
+			bucket = __hash_conntrack(&h->tuple, hashsize,
+						  nf_conntrack_hash_rnd);
 			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
 		}
 	}
-	old_size = nf_conntrack_htable_size;
+	old_size = init_net.ct.htable_size;
 	old_vmalloced = init_net.ct.hash_vmalloc;
 	old_hash = init_net.ct.hash;
 
-	nf_conntrack_htable_size = hashsize;
+	init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
 	init_net.ct.hash_vmalloc = vmalloced;
 	init_net.ct.hash = hash;
-	nf_conntrack_hash_rnd = rnd;
 	spin_unlock_bh(&nf_conntrack_lock);
 
 	nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
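The resize path above follows a classic ordering: allocate the new table outside the lock, rehash bucket by bucket under the lock, publish the new size and table in the same critical section, then free the old table after the lock is dropped. Below is a single-lock userspace sketch of that same ordering, with invented names; the kernel version additionally tolerates concurrent RCU readers, which this sketch does not model.

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

/* Userspace sketch (invented names) of the resize ordering used above. */
struct node {
	struct node *next;
	uint32_t hash;
};

struct table {
	pthread_mutex_t lock;
	struct node **buckets;
	unsigned int size;
};

static unsigned int bucket_of(uint32_t hash, unsigned int size)
{
	return ((uint64_t)hash * size) >> 32;
}

static int table_resize(struct table *t, unsigned int new_size)
{
	struct node **new_buckets, **old_buckets;
	struct node *n;
	unsigned int i;

	new_buckets = calloc(new_size, sizeof(*new_buckets));	/* outside lock */
	if (!new_buckets)
		return -1;

	pthread_mutex_lock(&t->lock);
	for (i = 0; i < t->size; i++) {
		while ((n = t->buckets[i]) != NULL) {
			t->buckets[i] = n->next;		/* unlink from old */
			unsigned int b = bucket_of(n->hash, new_size);
			n->next = new_buckets[b];		/* rehash into new */
			new_buckets[b] = n;
		}
	}
	old_buckets = t->buckets;
	t->buckets = new_buckets;
	t->size = new_size;	/* size and table flip together, under the lock */
	pthread_mutex_unlock(&t->lock);

	free(old_buckets);	/* old table is unreachable once unlocked */
	return 0;
}

int main(void)
{
	struct table t = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

	t.size = 4;
	t.buckets = calloc(t.size, sizeof(*t.buckets));
	return table_resize(&t, 8);
}

Dropping the per-resize reseed also makes sense in this light: rehashing init_net with the existing nf_conntrack_hash_rnd keeps its buckets consistent with the seed that other namespaces continue to use for their untouched tables.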
@@ -1328,7 +1329,9 @@ static int nf_conntrack_init_net(struct net *net)
 		ret = -ENOMEM;
 		goto err_cache;
 	}
-	net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
+
+	net->ct.htable_size = nf_conntrack_htable_size;
+	net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
 					     &net->ct.hash_vmalloc, 1);
 	if (!net->ct.hash) {
 		ret = -ENOMEM;
@@ -1353,7 +1356,7 @@ err_acct:
 	nf_conntrack_expect_fini(net);
 err_expect:
 	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
-			     nf_conntrack_htable_size);
+			     net->ct.htable_size);
 err_hash:
 	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
 err_cache:
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 4ad7d1d809a..2f25ff61098 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -577,7 +577,7 @@ int nf_conntrack_expect_init(struct net *net)
 
 	if (net_eq(net, &init_net)) {
 		if (!nf_ct_expect_hsize) {
-			nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
+			nf_ct_expect_hsize = net->ct.htable_size / 256;
 			if (!nf_ct_expect_hsize)
 				nf_ct_expect_hsize = 1;
 		}
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 65c2a7bc3af..4b1a56bd074 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -192,7 +192,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
 	/* Get rid of expecteds, set helpers to NULL. */
 	hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode)
 		unhelp(h, me);
-	for (i = 0; i < nf_conntrack_htable_size; i++) {
+	for (i = 0; i < net->ct.htable_size; i++) {
 		hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
 			unhelp(h, me);
 	}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 42f21c01a93..0ffe689dfe9 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -594,7 +594,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 
 	rcu_read_lock();
 	last = (struct nf_conn *)cb->args[1];
-	for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
+	for (; cb->args[0] < init_net.ct.htable_size; cb->args[0]++) {
 restart:
 		hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
 					       hnnode) {
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 028aba667ef..e310f1561bb 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -51,7 +51,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
 	struct hlist_nulls_node *n;
 
 	for (st->bucket = 0;
-	     st->bucket < nf_conntrack_htable_size;
+	     st->bucket < net->ct.htable_size;
 	     st->bucket++) {
 		n = rcu_dereference(net->ct.hash[st->bucket].first);
 		if (!is_a_nulls(n))
@@ -69,7 +69,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
 	head = rcu_dereference(head->next);
 	while (is_a_nulls(head)) {
 		if (likely(get_nulls_value(head) == st->bucket)) {
-			if (++st->bucket >= nf_conntrack_htable_size)
+			if (++st->bucket >= net->ct.htable_size)
 				return NULL;
 		}
 		head = rcu_dereference(net->ct.hash[st->bucket].first);
@@ -355,7 +355,7 @@ static ctl_table nf_ct_sysctl_table[] = {
 	},
 	{
 		.procname	= "nf_conntrack_buckets",
-		.data		= &nf_conntrack_htable_size,
+		.data		= &init_net.ct.htable_size,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0444,
 		.proc_handler	= proc_dointvec,
@@ -421,6 +421,7 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
 		goto out_kmemdup;
 
 	table[1].data = &net->ct.count;
+	table[2].data = &net->ct.htable_size;
 	table[3].data = &net->ct.sysctl_checksum;
 	table[4].data = &net->ct.sysctl_log_invalid;
 
426 427