-rw-r--r--   include/net/netfilter/nf_conntrack.h                   |  2
-rw-r--r--   net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c  | 18
-rw-r--r--   net/netfilter/nf_conntrack_core.c                      | 63
-rw-r--r--   net/netfilter/nf_conntrack_netlink.c                   | 11
-rw-r--r--   net/netfilter/nf_conntrack_standalone.c                | 18
-rw-r--r--   net/netfilter/xt_connlimit.c                           |  4
6 files changed, 73 insertions, 43 deletions
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index dada0411abd1..561ae7658f55 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -129,6 +129,8 @@ struct nf_conn
 
 	/* Extensions */
 	struct nf_ct_ext *ext;
+
+	struct rcu_head rcu;
 };
 
 static inline struct nf_conn *
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 2fdcd9233a03..0ee87edbd286 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -39,12 +39,14 @@ struct ct_iter_state {
 static struct hlist_node *ct_get_first(struct seq_file *seq)
 {
 	struct ct_iter_state *st = seq->private;
+	struct hlist_node *n;
 
 	for (st->bucket = 0;
 	     st->bucket < nf_conntrack_htable_size;
 	     st->bucket++) {
-		if (!hlist_empty(&nf_conntrack_hash[st->bucket]))
-			return nf_conntrack_hash[st->bucket].first;
+		n = rcu_dereference(nf_conntrack_hash[st->bucket].first);
+		if (n)
+			return n;
 	}
 	return NULL;
 }
@@ -54,11 +56,11 @@ static struct hlist_node *ct_get_next(struct seq_file *seq,
 {
 	struct ct_iter_state *st = seq->private;
 
-	head = head->next;
+	head = rcu_dereference(head->next);
 	while (head == NULL) {
 		if (++st->bucket >= nf_conntrack_htable_size)
 			return NULL;
-		head = nf_conntrack_hash[st->bucket].first;
+		head = rcu_dereference(nf_conntrack_hash[st->bucket].first);
 	}
 	return head;
 }
@@ -74,8 +76,9 @@ static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos)
 }
 
 static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
+	__acquires(RCU)
 {
-	read_lock_bh(&nf_conntrack_lock);
+	rcu_read_lock();
 	return ct_get_idx(seq, *pos);
 }
 
@@ -86,8 +89,9 @@ static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void ct_seq_stop(struct seq_file *s, void *v)
+	__releases(RCU)
 {
-	read_unlock_bh(&nf_conntrack_lock);
+	rcu_read_unlock();
 }
 
 static int ct_seq_show(struct seq_file *s, void *v)
@@ -226,6 +230,7 @@ static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
 }
 
 static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
+	__acquires(RCU)
 {
 	rcu_read_lock();
 	return ct_expect_get_idx(seq, *pos);
@@ -238,6 +243,7 @@ static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 }
 
 static void exp_seq_stop(struct seq_file *seq, void *v)
+	__releases(RCU)
 {
 	rcu_read_unlock();
 }
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 10256079e634..a54bfec61e79 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -166,8 +166,8 @@ static void
 clean_from_lists(struct nf_conn *ct)
 {
 	pr_debug("clean_from_lists(%p)\n", ct);
-	hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
-	hlist_del(&ct->tuplehash[IP_CT_DIR_REPLY].hnode);
+	hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
+	hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode);
 
 	/* Destroy all pending expectations */
 	nf_ct_remove_expectations(ct);
@@ -253,7 +253,7 @@ __nf_conntrack_find(const struct nf_conntrack_tuple *tuple,
 	struct hlist_node *n;
 	unsigned int hash = hash_conntrack(tuple);
 
-	hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) {
+	hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) {
 		if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
 		    nf_ct_tuple_equal(tuple, &h->tuple)) {
 			NF_CT_STAT_INC(found);
@@ -271,12 +271,16 @@ struct nf_conntrack_tuple_hash *
 nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple)
 {
 	struct nf_conntrack_tuple_hash *h;
+	struct nf_conn *ct;
 
-	read_lock_bh(&nf_conntrack_lock);
+	rcu_read_lock();
 	h = __nf_conntrack_find(tuple, NULL);
-	if (h)
-		atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use);
-	read_unlock_bh(&nf_conntrack_lock);
+	if (h) {
+		ct = nf_ct_tuplehash_to_ctrack(h);
+		if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+			h = NULL;
+	}
+	rcu_read_unlock();
 
 	return h;
 }
@@ -286,10 +290,10 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
 				       unsigned int hash,
 				       unsigned int repl_hash)
 {
-	hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
-		       &nf_conntrack_hash[hash]);
-	hlist_add_head(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
-		       &nf_conntrack_hash[repl_hash]);
+	hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
+			   &nf_conntrack_hash[hash]);
+	hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
+			   &nf_conntrack_hash[repl_hash]);
 }
 
 void nf_conntrack_hash_insert(struct nf_conn *ct)
@@ -392,9 +396,9 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
 {
 	struct nf_conntrack_tuple_hash *h;
 
-	read_lock_bh(&nf_conntrack_lock);
+	rcu_read_lock();
 	h = __nf_conntrack_find(tuple, ignored_conntrack);
-	read_unlock_bh(&nf_conntrack_lock);
+	rcu_read_unlock();
 
 	return h != NULL;
 }
@@ -413,21 +417,23 @@ static int early_drop(unsigned int hash)
 	unsigned int i, cnt = 0;
 	int dropped = 0;
 
-	read_lock_bh(&nf_conntrack_lock);
+	rcu_read_lock();
 	for (i = 0; i < nf_conntrack_htable_size; i++) {
-		hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) {
+		hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash],
+					 hnode) {
 			tmp = nf_ct_tuplehash_to_ctrack(h);
 			if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
 				ct = tmp;
 			cnt++;
 		}
+
+		if (ct && unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+			ct = NULL;
 		if (ct || cnt >= NF_CT_EVICTION_RANGE)
 			break;
 		hash = (hash + 1) % nf_conntrack_htable_size;
 	}
-	if (ct)
-		atomic_inc(&ct->ct_general.use);
-	read_unlock_bh(&nf_conntrack_lock);
+	rcu_read_unlock();
 
 	if (!ct)
 		return dropped;
@@ -480,17 +486,25 @@ struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
 	/* Don't set timer yet: wait for confirmation */
 	setup_timer(&conntrack->timeout, death_by_timeout,
 		    (unsigned long)conntrack);
+	INIT_RCU_HEAD(&conntrack->rcu);
 
 	return conntrack;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
 
-void nf_conntrack_free(struct nf_conn *conntrack)
+static void nf_conntrack_free_rcu(struct rcu_head *head)
 {
-	nf_ct_ext_free(conntrack);
-	kmem_cache_free(nf_conntrack_cachep, conntrack);
+	struct nf_conn *ct = container_of(head, struct nf_conn, rcu);
+
+	nf_ct_ext_free(ct);
+	kmem_cache_free(nf_conntrack_cachep, ct);
 	atomic_dec(&nf_conntrack_count);
 }
+
+void nf_conntrack_free(struct nf_conn *conntrack)
+{
+	call_rcu(&conntrack->rcu, nf_conntrack_free_rcu);
+}
 EXPORT_SYMBOL_GPL(nf_conntrack_free);
 
 /* Allocate a new conntrack: we return -ENOMEM if classification
@@ -1036,12 +1050,17 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 	 * use a newrandom seed */
 	get_random_bytes(&rnd, 4);
 
+	/* Lookups in the old hash might happen in parallel, which means we
+	 * might get false negatives during connection lookup. New connections
+	 * created because of a false negative won't make it into the hash
+	 * though since that required taking the lock.
+	 */
 	write_lock_bh(&nf_conntrack_lock);
 	for (i = 0; i < nf_conntrack_htable_size; i++) {
 		while (!hlist_empty(&nf_conntrack_hash[i])) {
 			h = hlist_entry(nf_conntrack_hash[i].first,
 					struct nf_conntrack_tuple_hash, hnode);
-			hlist_del(&h->hnode);
+			hlist_del_rcu(&h->hnode);
 			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
 			hlist_add_head(&h->hnode, &hash[bucket]);
 		}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 557f47137da0..b701dcce0e69 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -545,12 +545,12 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 	struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh);
 	u_int8_t l3proto = nfmsg->nfgen_family;
 
-	read_lock_bh(&nf_conntrack_lock);
+	rcu_read_lock();
 	last = (struct nf_conn *)cb->args[1];
 	for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
 restart:
-		hlist_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
-				     hnode) {
+		hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[cb->args[0]],
+					 hnode) {
 			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
 				continue;
 			ct = nf_ct_tuplehash_to_ctrack(h);
@@ -568,7 +568,8 @@ restart:
 					    cb->nlh->nlmsg_seq,
 					    IPCTNL_MSG_CT_NEW,
 					    1, ct) < 0) {
-				nf_conntrack_get(&ct->ct_general);
+				if (!atomic_inc_not_zero(&ct->ct_general.use))
+					continue;
 				cb->args[1] = (unsigned long)ct;
 				goto out;
 			}
@@ -584,7 +585,7 @@ restart:
 		}
 	}
 out:
-	read_unlock_bh(&nf_conntrack_lock);
+	rcu_read_unlock();
 	if (last)
 		nf_ct_put(last);
 
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 28c5ae8f5625..98f0cd31150d 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -58,12 +58,14 @@ struct ct_iter_state {
 static struct hlist_node *ct_get_first(struct seq_file *seq)
 {
 	struct ct_iter_state *st = seq->private;
+	struct hlist_node *n;
 
 	for (st->bucket = 0;
 	     st->bucket < nf_conntrack_htable_size;
 	     st->bucket++) {
-		if (!hlist_empty(&nf_conntrack_hash[st->bucket]))
-			return nf_conntrack_hash[st->bucket].first;
+		n = rcu_dereference(nf_conntrack_hash[st->bucket].first);
+		if (n)
+			return n;
 	}
 	return NULL;
 }
@@ -73,11 +75,11 @@ static struct hlist_node *ct_get_next(struct seq_file *seq,
 {
 	struct ct_iter_state *st = seq->private;
 
-	head = head->next;
+	head = rcu_dereference(head->next);
 	while (head == NULL) {
 		if (++st->bucket >= nf_conntrack_htable_size)
 			return NULL;
-		head = nf_conntrack_hash[st->bucket].first;
+		head = rcu_dereference(nf_conntrack_hash[st->bucket].first);
 	}
 	return head;
 }
@@ -93,9 +95,9 @@ static struct hlist_node *ct_get_idx(struct seq_file *seq, loff_t pos)
 }
 
 static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
-	__acquires(nf_conntrack_lock)
+	__acquires(RCU)
 {
-	read_lock_bh(&nf_conntrack_lock);
+	rcu_read_lock();
 	return ct_get_idx(seq, *pos);
 }
 
@@ -106,9 +108,9 @@ static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void ct_seq_stop(struct seq_file *s, void *v)
-	__releases(nf_conntrack_lock)
+	__releases(RCU)
 {
-	read_unlock_bh(&nf_conntrack_lock);
+	rcu_read_unlock();
 }
 
 /* return 0 on success, 1 in case of error */
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index e00ecd974fa3..f9b59a6753ee 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -120,7 +120,7 @@ static int count_them(struct xt_connlimit_data *data,
 	else
 		hash = &data->iphash[connlimit_iphash(addr->ip & mask->ip)];
 
-	read_lock_bh(&nf_conntrack_lock);
+	rcu_read_lock();
 
 	/* check the saved connections */
 	list_for_each_entry_safe(conn, tmp, hash, list) {
@@ -163,7 +163,7 @@ static int count_them(struct xt_connlimit_data *data,
 		++matches;
 	}
 
-	read_unlock_bh(&nf_conntrack_lock);
+	rcu_read_unlock();
 
 	if (addit) {
 		/* save the new connection in our list */
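
The recurring pattern in this conversion — enter an RCU read-side section, walk the bucket with hlist_for_each_entry_rcu(), take a reference only through atomic_inc_not_zero() because the entry may already be on its way to being freed, and defer the actual free with call_rcu() after hlist_del_rcu() — is the standard way to combine RCU-protected hash chains with refcounted objects. The following is a minimal illustrative sketch of that pattern, not part of the patch: the names (struct obj, obj_table, obj_find_get, obj_put, obj_unlink) are made up, and it assumes the four-argument hlist_for_each_entry_rcu() of this kernel era, as used in the hunks above.

/* Illustrative sketch only: generic names, not the conntrack structures.
 * Assumes ~2.6.24-era APIs, where hlist_for_each_entry_rcu() still takes
 * a struct hlist_node cursor, matching the calls in the patch above.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct obj {
	struct hlist_node hnode;	/* linked into a hash bucket */
	atomic_t	  use;		/* reference count */
	u32		  key;
	struct rcu_head	  rcu;		/* for deferred freeing */
};

static struct hlist_head obj_table[256];

/* Reader side: lockless lookup plus reference grab. */
static struct obj *obj_find_get(u32 key)
{
	struct obj *obj;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(obj, n, &obj_table[key & 255], hnode) {
		/* atomic_inc_not_zero() fails if the object is already
		 * being torn down; in that case keep scanning the chain. */
		if (obj->key == key &&
		    atomic_inc_not_zero(&obj->use)) {
			rcu_read_unlock();
			return obj;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static void obj_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct obj, rcu));
}

/* Drop a reference; the last put frees the object only after a grace
 * period, so concurrent RCU readers never touch freed memory. */
static void obj_put(struct obj *obj)
{
	if (atomic_dec_and_test(&obj->use))
		call_rcu(&obj->rcu, obj_free_rcu);
}

/* Writer side (caller holds the writer lock): unlink from the hash,
 * then drop the hash table's reference. */
static void obj_unlink(struct obj *obj)
{
	hlist_del_rcu(&obj->hnode);
	obj_put(obj);
}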