author	Patrick McHardy <kaber@trash.net>	2008-02-27 15:07:47 -0500
committer	David S. Miller <davem@davemloft.net>	2008-02-27 15:07:47 -0500
commit	4e29e9ec7e0707d3925f5dcc29af0d3f04e49833 (patch)
tree	dd857d9bd6a4321048e63a520436584a00d41234 /net
parent	3bdfe7ec08b4256121a8894cd978e74fcf7031d7 (diff)
[NETFILTER]: nf_conntrack: fix smp_processor_id() in preemptible code warning
Since we're using RCU for the conntrack hash now, we need to avoid
getting preempted or interrupted by BHs while changing the stats.

Fixes warning reported by Tilman Schmidt <tilman@imap.cc> when using
preemptible RCU:

[ 48.180297] BUG: using smp_processor_id() in preemptible [00000000] code: ntpdate/3562
[ 48.180297] caller is __nf_conntrack_find+0x9b/0xeb [nf_conntrack]
[ 48.180297] Pid: 3562, comm: ntpdate Not tainted 2.6.25-rc2-mm1-testing #1
[ 48.180297]  [<c02015b9>] debug_smp_processor_id+0x99/0xb0
[ 48.180297]  [<fac643a7>] __nf_conntrack_find+0x9b/0xeb [nf_conntrack]

Tested-by: Tilman Schmidt <tilman@imap.cc>
Tested-by: Christian Casteyde <casteyde.christian@free.fr> [Bugzilla #10097]
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
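For context, a minimal sketch of the problem being fixed, assuming NF_CT_STAT_INC expands to a plain per-CPU increment of that era (the demo_stat names below are hypothetical stand-ins for nf_conntrack_stat, not code from this patch). The per-CPU slot is selected via smp_processor_id(), so the update is only safe while the task cannot migrate to another CPU or be interrupted by a softirq touching the same counter:

	#include <linux/percpu.h>
	#include <linux/interrupt.h>

	/* Hypothetical stand-in for the conntrack stats (illustration only). */
	struct demo_stat {
		unsigned int found;
		unsigned int searched;
	};
	static DEFINE_PER_CPU(struct demo_stat, demo_stat);

	static void demo_stat_inc_searched(void)
	{
		/* __get_cpu_var() resolves the counter via smp_processor_id().
		 * With CONFIG_DEBUG_PREEMPT this triggers the "BUG: using
		 * smp_processor_id() in preemptible code" warning unless
		 * preemption is off, and the non-atomic ++ could race with a
		 * softirq on the same CPU.  Disabling BHs addresses both: it
		 * disables preemption and keeps softirqs off this CPU for the
		 * duration of the update. */
		local_bh_disable();
		__get_cpu_var(demo_stat).searched++;
		local_bh_enable();
	}

With preemptible RCU, rcu_read_lock() no longer implies disabled preemption, which is why the lookup paths below gain explicit BH disabling around the stats.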
Diffstat (limited to 'net')
 net/netfilter/nf_conntrack_core.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 327e847d2702..b77eb56a87e3 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -256,13 +256,19 @@ __nf_conntrack_find(const struct nf_conntrack_tuple *tuple)
 	struct hlist_node *n;
 	unsigned int hash = hash_conntrack(tuple);
 
+	/* Disable BHs the entire time since we normally need to disable them
+	 * at least once for the stats anyway.
+	 */
+	local_bh_disable();
 	hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) {
 		if (nf_ct_tuple_equal(tuple, &h->tuple)) {
 			NF_CT_STAT_INC(found);
+			local_bh_enable();
 			return h;
 		}
 		NF_CT_STAT_INC(searched);
 	}
+	local_bh_enable();
 
 	return NULL;
 }
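Note that this hunk pairs local_bh_enable() with both exit paths of __nf_conntrack_find(): once before the early return of a matching entry, and once after the search loop falls through. Missing either would leave BHs disabled after the lookup returns.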
@@ -400,17 +406,20 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
 	struct hlist_node *n;
 	unsigned int hash = hash_conntrack(tuple);
 
-	rcu_read_lock();
+	/* Disable BHs the entire time since we need to disable them at
+	 * least once for the stats anyway.
+	 */
+	rcu_read_lock_bh();
 	hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) {
 		if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
 		    nf_ct_tuple_equal(tuple, &h->tuple)) {
 			NF_CT_STAT_INC(found);
-			rcu_read_unlock();
+			rcu_read_unlock_bh();
 			return 1;
 		}
 		NF_CT_STAT_INC(searched);
 	}
-	rcu_read_unlock();
+	rcu_read_unlock_bh();
 
 	return 0;
 }
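In this second hunk, nf_conntrack_tuple_taken() takes the RCU read lock itself, so instead of stacking local_bh_disable() on top of rcu_read_lock(), the patch switches to the rcu_read_lock_bh()/rcu_read_unlock_bh() pair, which by the RCU API contract disables BHs while marking the read-side critical section in a single call. As in the first hunk, every return path uses the matching unlock.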