author    David S. Miller <davem@davemloft.net>  2009-03-27 01:45:23 -0400
committer David S. Miller <davem@davemloft.net>  2009-03-27 01:45:23 -0400
commit    01e6de64d9c8d0e75dca3bb4cf898db73abe00d4 (patch)
tree      925982e6241e5ac47f268bc2c2942ab0f06775cd /net/netfilter/nf_conntrack_core.c
parent    8f1ead2d1a626ed0c85b3d2c2046a49081d5933f (diff)
parent    d271e8bd8c60ce059ee36d836ba063cfc61c3e21 (diff)

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-next-2.6
Diffstat (limited to 'net/netfilter/nf_conntrack_core.c')
-rw-r--r--   net/netfilter/nf_conntrack_core.c   129
1 file changed, 76 insertions, 53 deletions
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index dfb447b584d..8020db6274b 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -29,6 +29,7 @@
 #include <linux/netdevice.h>
 #include <linux/socket.h>
 #include <linux/mm.h>
+#include <linux/rculist_nulls.h>
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_l3proto.h>
@@ -163,8 +164,8 @@ static void
 clean_from_lists(struct nf_conn *ct)
 {
 	pr_debug("clean_from_lists(%p)\n", ct);
-	hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
-	hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode);
+	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
 
 	/* Destroy all pending expectations */
 	nf_ct_remove_expectations(ct);
@@ -204,8 +205,8 @@ destroy_conntrack(struct nf_conntrack *nfct)
 
 	/* We overload first tuple to link into unconfirmed list. */
 	if (!nf_ct_is_confirmed(ct)) {
-		BUG_ON(hlist_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode));
-		hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
+		BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
+		hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
 	}
 
 	NF_CT_STAT_INC(net, delete);
@@ -242,18 +243,26 @@ static void death_by_timeout(unsigned long ul_conntrack)
 	nf_ct_put(ct);
 }
 
+/*
+ * Warning :
+ * - Caller must take a reference on returned object
+ *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
+ * OR
+ * - Caller must lock nf_conntrack_lock before calling this function
+ */
 struct nf_conntrack_tuple_hash *
 __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
 {
 	struct nf_conntrack_tuple_hash *h;
-	struct hlist_node *n;
+	struct hlist_nulls_node *n;
 	unsigned int hash = hash_conntrack(tuple);
 
 	/* Disable BHs the entire time since we normally need to disable them
 	 * at least once for the stats anyway.
 	 */
 	local_bh_disable();
-	hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
+begin:
+	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
 		if (nf_ct_tuple_equal(tuple, &h->tuple)) {
 			NF_CT_STAT_INC(net, found);
 			local_bh_enable();
@@ -261,6 +270,13 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
 		}
 		NF_CT_STAT_INC(net, searched);
 	}
+	/*
+	 * if the nulls value we got at the end of this lookup is
+	 * not the expected one, we must restart lookup.
+	 * We probably met an item that was moved to another chain.
+	 */
+	if (get_nulls_value(n) != hash)
+		goto begin;
 	local_bh_enable();
 
 	return NULL;
@@ -275,11 +291,18 @@ nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
 	struct nf_conn *ct;
 
 	rcu_read_lock();
+begin:
 	h = __nf_conntrack_find(net, tuple);
 	if (h) {
 		ct = nf_ct_tuplehash_to_ctrack(h);
 		if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
 			h = NULL;
+		else {
+			if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {
+				nf_ct_put(ct);
+				goto begin;
+			}
+		}
 	}
 	rcu_read_unlock();
 
@@ -293,9 +316,9 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
 {
 	struct net *net = nf_ct_net(ct);
 
-	hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
+	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
 			   &net->ct.hash[hash]);
-	hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
+	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
 			   &net->ct.hash[repl_hash]);
 }
 
@@ -318,7 +341,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
 	struct nf_conn_help *help;
-	struct hlist_node *n;
+	struct hlist_nulls_node *n;
 	enum ip_conntrack_info ctinfo;
 	struct net *net;
 
@@ -350,17 +373,17 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	/* See if there's one in the list already, including reverse:
 	   NAT could have grabbed it without realizing, since we're
 	   not in the hash.  If there is, we lost race. */
-	hlist_for_each_entry(h, n, &net->ct.hash[hash], hnode)
+	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
 				      &h->tuple))
 			goto out;
-	hlist_for_each_entry(h, n, &net->ct.hash[repl_hash], hnode)
+	hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
 				      &h->tuple))
 			goto out;
 
 	/* Remove from unconfirmed list */
-	hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
+	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
 
 	__nf_conntrack_hash_insert(ct, hash, repl_hash);
 	/* Timer relative to confirmation time, not original
@@ -399,14 +422,14 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
 {
 	struct net *net = nf_ct_net(ignored_conntrack);
 	struct nf_conntrack_tuple_hash *h;
-	struct hlist_node *n;
+	struct hlist_nulls_node *n;
 	unsigned int hash = hash_conntrack(tuple);
 
 	/* Disable BHs the entire time since we need to disable them at
 	 * least once for the stats anyway.
 	 */
 	rcu_read_lock_bh();
-	hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
+	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
 		if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
 		    nf_ct_tuple_equal(tuple, &h->tuple)) {
 			NF_CT_STAT_INC(net, found);
@@ -430,14 +453,14 @@ static noinline int early_drop(struct net *net, unsigned int hash)
 	/* Use oldest entry, which is roughly LRU */
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct = NULL, *tmp;
-	struct hlist_node *n;
+	struct hlist_nulls_node *n;
 	unsigned int i, cnt = 0;
 	int dropped = 0;
 
 	rcu_read_lock();
 	for (i = 0; i < nf_conntrack_htable_size; i++) {
-		hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash],
-					 hnode) {
+		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
+					 hnnode) {
 			tmp = nf_ct_tuplehash_to_ctrack(h);
 			if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
 				ct = tmp;
@@ -508,27 +531,19 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
 #ifdef CONFIG_NET_NS
 	ct->ct_net = net;
 #endif
-	INIT_RCU_HEAD(&ct->rcu);
 
 	return ct;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
 
-static void nf_conntrack_free_rcu(struct rcu_head *head)
-{
-	struct nf_conn *ct = container_of(head, struct nf_conn, rcu);
-
-	nf_ct_ext_free(ct);
-	kmem_cache_free(nf_conntrack_cachep, ct);
-}
-
 void nf_conntrack_free(struct nf_conn *ct)
 {
 	struct net *net = nf_ct_net(ct);
 
 	nf_ct_ext_destroy(ct);
 	atomic_dec(&net->ct.count);
-	call_rcu(&ct->rcu, nf_conntrack_free_rcu);
+	nf_ct_ext_free(ct);
+	kmem_cache_free(nf_conntrack_cachep, ct);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_free);
 
@@ -594,7 +609,7 @@ init_conntrack(struct net *net,
 	}
 
 	/* Overload tuple linked list to put us in unconfirmed list. */
-	hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
+	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
 		       &net->ct.unconfirmed);
 
 	spin_unlock_bh(&nf_conntrack_lock);
@@ -906,6 +921,12 @@ int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
+
+int nf_ct_port_nlattr_tuple_size(void)
+{
+	return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
+}
+EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
 #endif
 
 /* Used by ipt_REJECT and ip6t_REJECT. */
@@ -934,17 +955,17 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
 {
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
-	struct hlist_node *n;
+	struct hlist_nulls_node *n;
 
 	spin_lock_bh(&nf_conntrack_lock);
 	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
-		hlist_for_each_entry(h, n, &net->ct.hash[*bucket], hnode) {
+		hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
 			ct = nf_ct_tuplehash_to_ctrack(h);
 			if (iter(ct, data))
 				goto found;
 		}
 	}
-	hlist_for_each_entry(h, n, &net->ct.unconfirmed, hnode) {
+	hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) {
 		ct = nf_ct_tuplehash_to_ctrack(h);
 		if (iter(ct, data))
 			set_bit(IPS_DYING_BIT, &ct->status);
@@ -992,7 +1013,7 @@ static int kill_all(struct nf_conn *i, void *data)
 	return 1;
 }
 
-void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, unsigned int size)
+void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
 {
 	if (vmalloced)
 		vfree(hash);
@@ -1060,26 +1081,28 @@ void nf_conntrack_cleanup(struct net *net)
 	}
 }
 
-struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced)
+void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
 {
-	struct hlist_head *hash;
-	unsigned int size, i;
+	struct hlist_nulls_head *hash;
+	unsigned int nr_slots, i;
+	size_t sz;
 
 	*vmalloced = 0;
 
-	size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
-	hash = (void*)__get_free_pages(GFP_KERNEL|__GFP_NOWARN,
-				       get_order(sizeof(struct hlist_head)
-						 * size));
+	BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
+	nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
+	sz = nr_slots * sizeof(struct hlist_nulls_head);
+	hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
+					get_order(sz));
 	if (!hash) {
 		*vmalloced = 1;
 		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
-		hash = vmalloc(sizeof(struct hlist_head) * size);
+		hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
 	}
 
-	if (hash)
-		for (i = 0; i < size; i++)
-			INIT_HLIST_HEAD(&hash[i]);
+	if (hash && nulls)
+		for (i = 0; i < nr_slots; i++)
+			INIT_HLIST_NULLS_HEAD(&hash[i], i);
 
 	return hash;
 }
@@ -1090,7 +1113,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 	int i, bucket, vmalloced, old_vmalloced;
 	unsigned int hashsize, old_size;
 	int rnd;
-	struct hlist_head *hash, *old_hash;
+	struct hlist_nulls_head *hash, *old_hash;
 	struct nf_conntrack_tuple_hash *h;
 
 	/* On boot, we can set this without any fancy locking. */
@@ -1101,7 +1124,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 	if (!hashsize)
 		return -EINVAL;
 
-	hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced);
+	hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1);
 	if (!hash)
 		return -ENOMEM;
 
@@ -1116,12 +1139,12 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 	 */
 	spin_lock_bh(&nf_conntrack_lock);
 	for (i = 0; i < nf_conntrack_htable_size; i++) {
-		while (!hlist_empty(&init_net.ct.hash[i])) {
-			h = hlist_entry(init_net.ct.hash[i].first,
-					struct nf_conntrack_tuple_hash, hnode);
-			hlist_del_rcu(&h->hnode);
+		while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
+			h = hlist_nulls_entry(init_net.ct.hash[i].first,
+					struct nf_conntrack_tuple_hash, hnnode);
+			hlist_nulls_del_rcu(&h->hnnode);
 			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
-			hlist_add_head(&h->hnode, &hash[bucket]);
+			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
 		}
 	}
 	old_size = nf_conntrack_htable_size;
@@ -1172,7 +1195,7 @@ static int nf_conntrack_init_init_net(void)
 
 	nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
 						sizeof(struct nf_conn),
-						0, 0, NULL);
+						0, SLAB_DESTROY_BY_RCU, NULL);
 	if (!nf_conntrack_cachep) {
 		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
 		ret = -ENOMEM;
@@ -1202,7 +1225,7 @@ static int nf_conntrack_init_net(struct net *net)
 	int ret;
 
 	atomic_set(&net->ct.count, 0);
-	INIT_HLIST_HEAD(&net->ct.unconfirmed);
+	INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, 0);
 	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
 	if (!net->ct.stat) {
 		ret = -ENOMEM;
@@ -1212,7 +1235,7 @@ static int nf_conntrack_init_net(struct net *net)
 	if (ret < 0)
 		goto err_ecache;
 	net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
-					     &net->ct.hash_vmalloc);
+					     &net->ct.hash_vmalloc, 1);
 	if (!net->ct.hash) {
 		ret = -ENOMEM;
 		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
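Note on the locking model this merge brings in: nf_conntrack_free() no longer defers freeing through call_rcu(), because the nf_conntrack slab is now created with SLAB_DESTROY_BY_RCU. A conntrack object can therefore be freed and immediately reused for a different connection while an RCU reader is still walking a hash chain. That is why __nf_conntrack_find() now walks hlist_nulls chains (each bucket's nulls marker is initialised to its own index via INIT_HLIST_NULLS_HEAD(&hash[i], i) in nf_ct_alloc_hashtable()) and why nf_conntrack_find_get() re-checks the tuple after taking a reference. The sketch below is not part of the patch: it condenses those two functions into one hypothetical helper, lookup_get_sketch(), purely to show the three lookup rules in one place, using only identifiers that appear in the hunks above.

/* Condensed, illustrative restatement of the lookup discipline required
 * by SLAB_DESTROY_BY_RCU, as implemented by __nf_conntrack_find() and
 * nf_conntrack_find_get() in this merge.
 */
static struct nf_conn *
lookup_get_sketch(struct net *net, const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int hash = hash_conntrack(tuple);
	struct nf_conn *ct;

	rcu_read_lock();
begin:
	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
		if (nf_ct_tuple_equal(tuple, &h->tuple))
			goto found;
	}
	/* Rule 1: if the terminating nulls value is not the bucket we
	 * started from, an entry we stepped through was recycled and we
	 * drifted onto another chain; restart the walk.
	 */
	if (get_nulls_value(n) != hash)
		goto begin;
	rcu_read_unlock();
	return NULL;

found:
	ct = nf_ct_tuplehash_to_ctrack(h);
	/* Rule 2: the entry may already be on its way to the free path;
	 * only a successful atomic_inc_not_zero() pins it.
	 */
	if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use))) {
		rcu_read_unlock();
		return NULL;	/* treated as "not found", as in the patch */
	}
	/* Rule 3: the slot may have been freed and reused for a different
	 * tuple between the match and the refcount; re-check and retry.
	 */
	if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {
		nf_ct_put(ct);
		goto begin;
	}
	rcu_read_unlock();
	return ct;	/* caller owns the reference and must nf_ct_put() it */
}

The trade-off is the usual one for SLAB_DESTROY_BY_RCU: connection teardown avoids a call_rcu() callback per object and the slab can reuse freed memory immediately, at the price of the restart and re-check rules above on the lookup path.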