author    Patrick McHardy <kaber@trash.net>    2007-07-08 01:28:14 -0400
committer David S. Miller <davem@sunset.davemloft.net>    2007-07-11 01:17:40 -0400
commit    f205c5e0c28aa7e0fb6eaaa66e97928f9d9e6994 (patch)
tree      7ad04d827c5c2d24ac804346d95853ebceab0bfd
parent    8e5105a0c36a059dfd0f0bb9e73ee7c97d306247 (diff)
[NETFILTER]: nf_conntrack: use hlists for conntrack hash
Convert conntrack hash to hlists to reduce its size and cache
footprint. Since the default hashsize to max. entries ratio sucks
(1:16), this patch doesn't reduce the amount of memory used for the
hash by default, but instead uses a better ratio of 1:8, which
results in the same max. entries value.

One thing worth noting is early_drop. It really should use LRU, so
it now has to iterate over the entire chain to find the last
unconfirmed entry. Since chains shouldn't be very long and the
entire operation is very rare this shouldn't be a problem.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
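As an aside (not part of the patch): a minimal user-space C sketch of the
two ideas in the message above. An hlist bucket head is a single pointer
where a list_head is two, halving the bucket array, and since hlist nodes
cannot cheaply be walked backwards, an early_drop-style scan must traverse
the whole chain and remember the last non-assured entry, which is the
oldest because new entries are added at the head. The types and names here
(struct entry, find_drop_candidate, the assured flag) are simplified
stand-ins, not the kernel's.

    /* Sketch only: hypothetical simplified types, not kernel headers. */
    #include <stddef.h>
    #include <stdio.h>

    struct hlist_node { struct hlist_node *next, **pprev; };
    struct hlist_head { struct hlist_node *first; };      /* 1 pointer  */
    struct list_head  { struct list_head *next, *prev; }; /* 2 pointers */

    struct entry {                /* stand-in for a conntrack entry */
            struct hlist_node hnode;
            int assured;          /* stand-in for IPS_ASSURED_BIT */
    };

    /* Walk the whole chain; the last non-assured entry seen is the
     * oldest one, since insertions happen at the chain head. */
    static struct entry *find_drop_candidate(struct hlist_head *chain)
    {
            struct hlist_node *n;
            struct entry *e, *victim = NULL;

            for (n = chain->first; n != NULL; n = n->next) {
                    e = (struct entry *)((char *)n -
                                         offsetof(struct entry, hnode));
                    if (!e->assured)
                            victim = e;   /* keep going: last match wins */
            }
            return victim;
    }

    int main(void)
    {
            printf("per bucket: list_head %zu bytes, hlist_head %zu bytes\n",
                   sizeof(struct list_head), sizeof(struct hlist_head));
            return 0;
    }

On a 64-bit machine this prints 16 vs. 8 bytes per bucket, which is why the
patch can double the bucket count (ratio 1:8 instead of 1:16) without
increasing the memory used for the hash.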
-rw-r--r--  include/net/netfilter/nf_conntrack_core.h             |   4
-rw-r--r--  include/net/netfilter/nf_conntrack_tuple.h             |   3
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c  |  17
-rw-r--r--  net/netfilter/nf_conntrack_core.c                      | 100
-rw-r--r--  net/netfilter/nf_conntrack_helper.c                    |   5
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c                   |   6
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c                |  17
7 files changed, 83 insertions(+), 69 deletions(-)
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 3bf7d05ea64d..6351948654b3 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -84,9 +84,9 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
 		    struct nf_conntrack_l3proto *l3proto,
 		    struct nf_conntrack_l4proto *proto);
 
-extern struct list_head *nf_conntrack_hash;
+extern struct hlist_head *nf_conntrack_hash;
 extern struct list_head nf_conntrack_expect_list;
 extern rwlock_t nf_conntrack_lock ;
-extern struct list_head unconfirmed;
+extern struct hlist_head unconfirmed;
 
 #endif /* _NF_CONNTRACK_CORE_H */
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
index 5d72b16e876f..d02ce876b4ca 100644
--- a/include/net/netfilter/nf_conntrack_tuple.h
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -125,8 +125,7 @@ DEBUGP("tuple %p: %u %u " NIP6_FMT " %hu -> " NIP6_FMT " %hu\n", \
 /* Connections have two entries in the hash table: one for each way */
 struct nf_conntrack_tuple_hash
 {
-	struct list_head list;
-
+	struct hlist_node hnode;
 	struct nf_conntrack_tuple tuple;
 };
 
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 89f933e81035..888f27fd884f 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -41,35 +41,36 @@ struct ct_iter_state {
 	unsigned int bucket;
 };
 
-static struct list_head *ct_get_first(struct seq_file *seq)
+static struct hlist_node *ct_get_first(struct seq_file *seq)
 {
 	struct ct_iter_state *st = seq->private;
 
 	for (st->bucket = 0;
 	     st->bucket < nf_conntrack_htable_size;
 	     st->bucket++) {
-		if (!list_empty(&nf_conntrack_hash[st->bucket]))
-			return nf_conntrack_hash[st->bucket].next;
+		if (!hlist_empty(&nf_conntrack_hash[st->bucket]))
+			return nf_conntrack_hash[st->bucket].first;
 	}
 	return NULL;
 }
 
-static struct list_head *ct_get_next(struct seq_file *seq, struct list_head *head)
+static struct hlist_node *ct_get_next(struct seq_file *seq,
+				      struct hlist_node *head)
 {
 	struct ct_iter_state *st = seq->private;
 
 	head = head->next;
-	while (head == &nf_conntrack_hash[st->bucket]) {
+	while (head == NULL) {
 		if (++st->bucket >= nf_conntrack_htable_size)
 			return NULL;
-		head = nf_conntrack_hash[st->bucket].next;
+		head = nf_conntrack_hash[st->bucket].first;
 	}
 	return head;
 }
 
-static struct list_head *ct_get_idx(struct seq_file *seq, loff_t pos)
+static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos)
 {
-	struct list_head *head = ct_get_first(seq);
+	struct hlist_node *head = ct_get_first(seq);
 
 	if (head)
 		while (pos && (head = ct_get_next(seq, head)))
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 54acac5c6ea7..992d0ef31fa3 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -59,14 +59,14 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
 int nf_conntrack_max __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_max);
 
-struct list_head *nf_conntrack_hash __read_mostly;
+struct hlist_head *nf_conntrack_hash __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_hash);
 
 struct nf_conn nf_conntrack_untracked __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
 
 unsigned int nf_ct_log_invalid __read_mostly;
-LIST_HEAD(unconfirmed);
+HLIST_HEAD(unconfirmed);
 static int nf_conntrack_vmalloc __read_mostly;
 static struct kmem_cache *nf_conntrack_cachep __read_mostly;
 static unsigned int nf_conntrack_next_id;
@@ -142,8 +142,8 @@ static void
 clean_from_lists(struct nf_conn *ct)
 {
 	DEBUGP("clean_from_lists(%p)\n", ct);
-	list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
-	list_del(&ct->tuplehash[IP_CT_DIR_REPLY].list);
+	hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
+	hlist_del(&ct->tuplehash[IP_CT_DIR_REPLY].hnode);
 
 	/* Destroy all pending expectations */
 	nf_ct_remove_expectations(ct);
@@ -184,8 +184,8 @@ destroy_conntrack(struct nf_conntrack *nfct)
 
 	/* We overload first tuple to link into unconfirmed list. */
 	if (!nf_ct_is_confirmed(ct)) {
-		BUG_ON(list_empty(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list));
-		list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
+		BUG_ON(hlist_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode));
+		hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
 	}
 
 	NF_CT_STAT_INC(delete);
@@ -226,9 +226,10 @@ __nf_conntrack_find(const struct nf_conntrack_tuple *tuple,
 		    const struct nf_conn *ignored_conntrack)
 {
 	struct nf_conntrack_tuple_hash *h;
+	struct hlist_node *n;
 	unsigned int hash = hash_conntrack(tuple);
 
-	list_for_each_entry(h, &nf_conntrack_hash[hash], list) {
+	hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) {
 		if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
 		    nf_ct_tuple_equal(tuple, &h->tuple)) {
 			NF_CT_STAT_INC(found);
@@ -263,10 +264,10 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
 				       unsigned int repl_hash)
 {
 	ct->id = ++nf_conntrack_next_id;
-	list_add(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list,
-		 &nf_conntrack_hash[hash]);
-	list_add(&ct->tuplehash[IP_CT_DIR_REPLY].list,
-		 &nf_conntrack_hash[repl_hash]);
+	hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
+		       &nf_conntrack_hash[hash]);
+	hlist_add_head(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
+		       &nf_conntrack_hash[repl_hash]);
 }
 
 void nf_conntrack_hash_insert(struct nf_conn *ct)
@@ -290,6 +291,7 @@ __nf_conntrack_confirm(struct sk_buff **pskb)
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
 	struct nf_conn_help *help;
+	struct hlist_node *n;
 	enum ip_conntrack_info ctinfo;
 
 	ct = nf_ct_get(*pskb, &ctinfo);
@@ -319,17 +321,17 @@ __nf_conntrack_confirm(struct sk_buff **pskb)
 	/* See if there's one in the list already, including reverse:
 	   NAT could have grabbed it without realizing, since we're
 	   not in the hash. If there is, we lost race. */
-	list_for_each_entry(h, &nf_conntrack_hash[hash], list)
+	hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode)
 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
 				      &h->tuple))
 			goto out;
-	list_for_each_entry(h, &nf_conntrack_hash[repl_hash], list)
+	hlist_for_each_entry(h, n, &nf_conntrack_hash[repl_hash], hnode)
 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
 				      &h->tuple))
 			goto out;
 
 	/* Remove from unconfirmed list */
-	list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
+	hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
 
 	__nf_conntrack_hash_insert(ct, hash, repl_hash);
 	/* Timer relative to confirmation time, not original
@@ -378,22 +380,22 @@ EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
 
 /* There's a small race here where we may free a just-assured
    connection.  Too bad: we're in trouble anyway. */
-static int early_drop(struct list_head *chain)
+static int early_drop(struct hlist_head *chain)
 {
-	/* Traverse backwards: gives us oldest, which is roughly LRU */
+	/* Use oldest entry, which is roughly LRU */
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct = NULL, *tmp;
+	struct hlist_node *n;
 	int dropped = 0;
 
 	read_lock_bh(&nf_conntrack_lock);
-	list_for_each_entry_reverse(h, chain, list) {
+	hlist_for_each_entry(h, n, chain, hnode) {
 		tmp = nf_ct_tuplehash_to_ctrack(h);
-		if (!test_bit(IPS_ASSURED_BIT, &tmp->status)) {
-			ct = tmp;
-			atomic_inc(&ct->ct_general.use);
-			break;
-		}
+		if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
+			ct = tmp;
 	}
+	if (ct)
+		atomic_inc(&ct->ct_general.use);
 	read_unlock_bh(&nf_conntrack_lock);
 
 	if (!ct)
@@ -535,7 +537,8 @@ init_conntrack(const struct nf_conntrack_tuple *tuple,
 	}
 
 	/* Overload tuple linked list to put us in unconfirmed list. */
-	list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed);
+	hlist_add_head(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
+		       &unconfirmed);
 
 	write_unlock_bh(&nf_conntrack_lock);
 
@@ -873,16 +876,17 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
 {
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
+	struct hlist_node *n;
 
 	write_lock_bh(&nf_conntrack_lock);
 	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
-		list_for_each_entry(h, &nf_conntrack_hash[*bucket], list) {
+		hlist_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnode) {
 			ct = nf_ct_tuplehash_to_ctrack(h);
 			if (iter(ct, data))
 				goto found;
 		}
 	}
-	list_for_each_entry(h, &unconfirmed, list) {
+	hlist_for_each_entry(h, n, &unconfirmed, hnode) {
 		ct = nf_ct_tuplehash_to_ctrack(h);
 		if (iter(ct, data))
 			set_bit(IPS_DYING_BIT, &ct->status);
@@ -917,13 +921,14 @@ static int kill_all(struct nf_conn *i, void *data)
 	return 1;
 }
 
-static void free_conntrack_hash(struct list_head *hash, int vmalloced, int size)
+static void free_conntrack_hash(struct hlist_head *hash, int vmalloced,
+				int size)
 {
 	if (vmalloced)
 		vfree(hash);
 	else
 		free_pages((unsigned long)hash,
-			   get_order(sizeof(struct list_head) * size));
+			   get_order(sizeof(struct hlist_head) * size));
 }
 
 void nf_conntrack_flush(void)
@@ -965,26 +970,26 @@ void nf_conntrack_cleanup(void)
 	nf_conntrack_helper_fini();
 }
 
-static struct list_head *alloc_hashtable(int *sizep, int *vmalloced)
+static struct hlist_head *alloc_hashtable(int *sizep, int *vmalloced)
 {
-	struct list_head *hash;
+	struct hlist_head *hash;
 	unsigned int size, i;
 
 	*vmalloced = 0;
 
-	size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct list_head));
+	size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
 	hash = (void*)__get_free_pages(GFP_KERNEL,
-				       get_order(sizeof(struct list_head)
+				       get_order(sizeof(struct hlist_head)
 						 * size));
 	if (!hash) {
 		*vmalloced = 1;
 		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
-		hash = vmalloc(sizeof(struct list_head) * size);
+		hash = vmalloc(sizeof(struct hlist_head) * size);
 	}
 
 	if (hash)
 		for (i = 0; i < size; i++)
-			INIT_LIST_HEAD(&hash[i]);
+			INIT_HLIST_HEAD(&hash[i]);
 
 	return hash;
 }
@@ -994,7 +999,7 @@ int set_hashsize(const char *val, struct kernel_param *kp)
 	int i, bucket, hashsize, vmalloced;
 	int old_vmalloced, old_size;
 	int rnd;
-	struct list_head *hash, *old_hash;
+	struct hlist_head *hash, *old_hash;
 	struct nf_conntrack_tuple_hash *h;
 
 	/* On boot, we can set this without any fancy locking. */
@@ -1015,12 +1020,12 @@ int set_hashsize(const char *val, struct kernel_param *kp)
 
 	write_lock_bh(&nf_conntrack_lock);
 	for (i = 0; i < nf_conntrack_htable_size; i++) {
-		while (!list_empty(&nf_conntrack_hash[i])) {
-			h = list_entry(nf_conntrack_hash[i].next,
-				       struct nf_conntrack_tuple_hash, list);
-			list_del(&h->list);
+		while (!hlist_empty(&nf_conntrack_hash[i])) {
+			h = hlist_entry(nf_conntrack_hash[i].first,
+					struct nf_conntrack_tuple_hash, hnode);
+			hlist_del(&h->hnode);
 			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
-			list_add_tail(&h->list, &hash[bucket]);
+			hlist_add_head(&h->hnode, &hash[bucket]);
 		}
 	}
 	old_size = nf_conntrack_htable_size;
@@ -1042,18 +1047,25 @@ module_param_call(hashsize, set_hashsize, param_get_uint,
 
 int __init nf_conntrack_init(void)
 {
+	int max_factor = 8;
 	int ret;
 
 	/* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
-	 * machine has 256 buckets. >= 1GB machines have 8192 buckets. */
+	 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
 	if (!nf_conntrack_htable_size) {
 		nf_conntrack_htable_size
 			= (((num_physpages << PAGE_SHIFT) / 16384)
-			   / sizeof(struct list_head));
+			   / sizeof(struct hlist_head));
 		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
-			nf_conntrack_htable_size = 8192;
-		if (nf_conntrack_htable_size < 16)
-			nf_conntrack_htable_size = 16;
+			nf_conntrack_htable_size = 16384;
+		if (nf_conntrack_htable_size < 32)
+			nf_conntrack_htable_size = 32;
+
+		/* Use a max. factor of four by default to get the same max as
+		 * with the old struct list_heads. When a table size is given
+		 * we use the old value of 8 to avoid reducing the max.
+		 * entries. */
+		max_factor = 4;
 	}
 	nf_conntrack_hash = alloc_hashtable(&nf_conntrack_htable_size,
 					    &nf_conntrack_vmalloc);
@@ -1062,7 +1074,7 @@ int __init nf_conntrack_init(void)
 		goto err_out;
 	}
 
-	nf_conntrack_max = 8 * nf_conntrack_htable_size;
+	nf_conntrack_max = max_factor * nf_conntrack_htable_size;
 
 	printk("nf_conntrack version %s (%u buckets, %d max)\n",
 	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index dc352f509477..3fc6e9f0de1a 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -116,6 +116,7 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
 	unsigned int i;
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conntrack_expect *exp, *tmp;
+	struct hlist_node *n;
 
 	/* Need write lock here, to delete helper. */
 	write_lock_bh(&nf_conntrack_lock);
@@ -132,10 +133,10 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
 	}
 
 	/* Get rid of expecteds, set helpers to NULL. */
-	list_for_each_entry(h, &unconfirmed, list)
+	hlist_for_each_entry(h, n, &unconfirmed, hnode)
 		unhelp(h, me);
 	for (i = 0; i < nf_conntrack_htable_size; i++) {
-		list_for_each_entry(h, &nf_conntrack_hash[i], list)
+		hlist_for_each_entry(h, n, &nf_conntrack_hash[i], hnode)
 			unhelp(h, me);
 	}
 	write_unlock_bh(&nf_conntrack_lock);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 3d56f36074f7..0627559ca470 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -428,7 +428,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct nf_conn *ct, *last;
 	struct nf_conntrack_tuple_hash *h;
-	struct list_head *i;
+	struct hlist_node *n;
 	struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh);
 	u_int8_t l3proto = nfmsg->nfgen_family;
 
@@ -436,8 +436,8 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 	last = (struct nf_conn *)cb->args[1];
 	for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
 restart:
-		list_for_each_prev(i, &nf_conntrack_hash[cb->args[0]]) {
-			h = (struct nf_conntrack_tuple_hash *) i;
+		hlist_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
+				     hnode) {
 			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
 				continue;
 			ct = nf_ct_tuplehash_to_ctrack(h);
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 45baeb0e30f9..fe536b20408b 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -60,35 +60,36 @@ struct ct_iter_state {
 	unsigned int bucket;
 };
 
-static struct list_head *ct_get_first(struct seq_file *seq)
+static struct hlist_node *ct_get_first(struct seq_file *seq)
 {
 	struct ct_iter_state *st = seq->private;
 
 	for (st->bucket = 0;
 	     st->bucket < nf_conntrack_htable_size;
 	     st->bucket++) {
-		if (!list_empty(&nf_conntrack_hash[st->bucket]))
-			return nf_conntrack_hash[st->bucket].next;
+		if (!hlist_empty(&nf_conntrack_hash[st->bucket]))
+			return nf_conntrack_hash[st->bucket].first;
 	}
 	return NULL;
 }
 
-static struct list_head *ct_get_next(struct seq_file *seq, struct list_head *head)
+static struct hlist_node *ct_get_next(struct seq_file *seq,
+				      struct hlist_node *head)
 {
 	struct ct_iter_state *st = seq->private;
 
 	head = head->next;
-	while (head == &nf_conntrack_hash[st->bucket]) {
+	while (head == NULL) {
 		if (++st->bucket >= nf_conntrack_htable_size)
 			return NULL;
-		head = nf_conntrack_hash[st->bucket].next;
+		head = nf_conntrack_hash[st->bucket].first;
 	}
 	return head;
 }
 
-static struct list_head *ct_get_idx(struct seq_file *seq, loff_t pos)
+static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos)
 {
-	struct list_head *head = ct_get_first(seq);
+	struct hlist_node *head = ct_get_first(seq);
 
 	if (head)
 		while (pos && (head = ct_get_next(seq, head)))