author    Changli Gao <xiaosuo@gmail.com>    2011-01-04 23:23:23 -0500
committer David S. Miller <davem@davemloft.net>    2011-01-06 14:22:20 -0500
commit    f682cefa5ad204d3bfaa54a58046c66d2d035ac1 (patch)
tree      8c55a0b722df666bb0a75f5749c7457cd5d44f3b /net
parent    6623e3b24a5ebb07e81648c478d286a1329ab891 (diff)
netfilter: fix the race when initializing nf_ct_expect_hash_rnd
Since nf_ct_expect_dst_hash() may be called without nf_conntrack_lock held, nf_ct_expect_hash_rnd must be initialized atomically. This patch drops the separate nf_ct_expect_hash_rnd and uses nf_conntrack_hash_rnd instead.

Signed-off-by: Changli Gao <xiaosuo@gmail.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
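The bug is a lazy-initialization race: nf_ct_expect_dst_hash() can run on several CPUs at once, each seeing the seed as uninitialized, and the old flag-then-store sequence let them install different values. Below is a minimal userspace sketch of the cmpxchg()-style one-time initialization idiom the patch standardizes on; hash_rnd, pick_random() and the C11 atomics are illustrative stand-ins for the kernel's nf_conntrack_hash_rnd, get_random_bytes() and cmpxchg(), not kernel code.

/*
 * Userspace sketch of the one-time compare-and-swap initialization idiom.
 * All names here are illustrative stand-ins, not the kernel API.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic unsigned int hash_rnd;	/* 0 doubles as "not initialized" */

static unsigned int pick_random(void)
{
	unsigned int r;

	do {
		r = (unsigned int)rand();	/* stand-in for get_random_bytes() */
	} while (!r);				/* 0 is reserved as the sentinel */
	return r;
}

static void init_hash_rnd(void)
{
	unsigned int expected = 0;

	/*
	 * Only the first caller installs its seed; a caller that loses
	 * the compare-and-swap simply reuses the winner's value.
	 */
	atomic_compare_exchange_strong(&hash_rnd, &expected, pick_random());
}

int main(void)
{
	if (!atomic_load(&hash_rnd))
		init_hash_rnd();
	printf("hash_rnd = %#x\n", atomic_load(&hash_rnd));
	return 0;
}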
Diffstat (limited to 'net')
-rw-r--r--  net/netfilter/nf_conntrack_core.c    30
-rw-r--r--  net/netfilter/nf_conntrack_expect.c  10
2 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 27a5ea6b6a0f..e61511929c66 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -65,7 +65,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max);
 DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
 
-static unsigned int nf_conntrack_hash_rnd __read_mostly;
+unsigned int nf_conntrack_hash_rnd __read_mostly;
 
 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
 {
@@ -596,6 +596,21 @@ static noinline int early_drop(struct net *net, unsigned int hash)
 	return dropped;
 }
 
+void init_nf_conntrack_hash_rnd(void)
+{
+	unsigned int rand;
+
+	/*
+	 * Why not initialize nf_conntrack_rnd in a "init()" function ?
+	 * Because there isn't enough entropy when system initializing,
+	 * and we initialize it as late as possible.
+	 */
+	do {
+		get_random_bytes(&rand, sizeof(rand));
+	} while (!rand);
+	cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
+}
+
 static struct nf_conn *
 __nf_conntrack_alloc(struct net *net, u16 zone,
 		     const struct nf_conntrack_tuple *orig,
@@ -605,18 +620,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
 	struct nf_conn *ct;
 
 	if (unlikely(!nf_conntrack_hash_rnd)) {
-		unsigned int rand;
-
-		/*
-		 * Why not initialize nf_conntrack_rnd in a "init()" function ?
-		 * Because there isn't enough entropy when system initializing,
-		 * and we initialize it as late as possible.
-		 */
-		do {
-			get_random_bytes(&rand, sizeof(rand));
-		} while (!rand);
-		cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
-
+		init_nf_conntrack_hash_rnd();
 		/* recompute the hash as nf_conntrack_hash_rnd is initialized */
 		hash = hash_conntrack_raw(orig, zone);
 	}
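Two details of the hunk above are worth noting. Zero is reserved as the "uninitialized" sentinel, which is why init_nf_conntrack_hash_rnd() loops until get_random_bytes() produces a non-zero seed, and cmpxchg() guarantees exactly one writer wins, so a caller that loses the race reuses the winner's value. __nf_conntrack_alloc() also recomputes the hash it was handed, since that hash was derived while nf_conntrack_hash_rnd was still zero.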
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 46e8966912b1..a20fb0bd1efe 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -32,9 +32,7 @@
 unsigned int nf_ct_expect_hsize __read_mostly;
 EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);
 
-static unsigned int nf_ct_expect_hash_rnd __read_mostly;
 unsigned int nf_ct_expect_max __read_mostly;
-static int nf_ct_expect_hash_rnd_initted __read_mostly;
 
 static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
 
@@ -77,15 +75,13 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple
 {
 	unsigned int hash;
 
-	if (unlikely(!nf_ct_expect_hash_rnd_initted)) {
-		get_random_bytes(&nf_ct_expect_hash_rnd,
-				 sizeof(nf_ct_expect_hash_rnd));
-		nf_ct_expect_hash_rnd_initted = 1;
-	}
+	if (unlikely(!nf_conntrack_hash_rnd)) {
+		init_nf_conntrack_hash_rnd();
+	}
 
 	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
 		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
 		       (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
 	return ((u64)hash * nf_ct_expect_hsize) >> 32;
 }
 
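As an aside, nf_ct_expect_dst_hash() maps the 32-bit jhash2() value onto the expectation table with a multiply-shift, ((u64)hash * nf_ct_expect_hsize) >> 32, rather than a modulo: treating the hash as a fraction of 2^32 and scaling by the table size always lands in [0, size) while avoiding a division on the fast path. A small standalone sketch of that mapping, with an illustrative table size standing in for nf_ct_expect_hsize:

#include <stdint.h>
#include <stdio.h>

/*
 * hash / 2^32 is a fraction in [0, 1); scaling it by the table size and
 * truncating gives an index in [0, size) without a division.
 */
static uint32_t bucket(uint32_t hash, uint32_t size)
{
	return (uint32_t)(((uint64_t)hash * size) >> 32);
}

int main(void)
{
	uint32_t hsize = 256;	/* illustrative stand-in for nf_ct_expect_hsize */
	uint32_t samples[] = { 0x00000000, 0x12345678, 0xdeadbeef, 0xffffffff };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("hash %#010x -> bucket %u\n", samples[i],
		       bucket(samples[i], hsize));
	return 0;
}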