author	Pablo Neira Ayuso <pablo@netfilter.org>	2006-09-20 15:01:06 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2006-09-22 18:19:54 -0400
commit	5251e2d2125407bbff0c39394a4011be9ed8b5d0 (patch)
tree	3dda0aeb90d80a2ddd0e7a4215bfe9eaa8209033 /net
parent	01f348484dd8509254d045e3ad49029716eca6a1 (diff)
[NETFILTER]: conntrack: fix race condition in early_drop
On SMP systems the maximum number of conntracks can be exceeded under
heavy load due to an existing race condition.
   CPU A                     CPU B
   atomic_read()             ...
   early_drop()              ...
   ...                       atomic_read()
   allocate conntrack        allocate conntrack
   atomic_inc()              atomic_inc()
This patch moves the counter increment before the early drop stage, so
the check observes a value that already includes the caller's own
reservation, and releases that reservation on every failure path.
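
To see why reserving first closes the race, here is a minimal user-space
sketch of both patterns using C11 atomics. It is illustrative only:
ct_count, ct_max, alloc_racy() and alloc_fixed() are hypothetical
stand-ins for ip_conntrack_count, ip_conntrack_max and the allocation
path, not kernel code.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int ct_count = 0;	/* stand-in for ip_conntrack_count */
	static const int ct_max = 4;	/* stand-in for ip_conntrack_max */

	/* Racy pattern (before the patch): check, then increment.  Two
	 * CPUs can both read ct_max - 1, both pass the check, and both
	 * increment, exceeding the limit. */
	static bool alloc_racy(void)
	{
		if (atomic_load(&ct_count) >= ct_max)
			return false;		/* table full */
		atomic_fetch_add(&ct_count, 1);
		return true;
	}

	/* Fixed pattern (after the patch): reserve first, check after.
	 * The value the check observes already includes this caller's own
	 * increment, so the test becomes ">" instead of ">=".  A caller
	 * may fail while the counter is transiently inflated by another
	 * CPU's reservation; in the kernel that is the window in which
	 * early_drop() gets a chance to evict an entry. */
	static bool alloc_fixed(void)
	{
		atomic_fetch_add(&ct_count, 1);
		if (atomic_load(&ct_count) > ct_max) {
			atomic_fetch_sub(&ct_count, 1);	/* undo reservation */
			return false;
		}
		return true;
	}

	int main(void)
	{
		for (int i = 0; i < 6; i++)
			printf("alloc %d: %s (count=%d)\n", i,
			       alloc_fixed() ? "ok" : "full",
			       atomic_load(&ct_count));
		return 0;
	}

The sketch compiles standalone with any C11 compiler (e.g. cc -std=c11);
with alloc_fixed() no interleaving lets successful allocations exceed
ct_max, whereas alloc_racy() can overshoot under concurrency.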
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
 net/ipv4/netfilter/ip_conntrack_core.c |  9 ++++++---
 net/netfilter/nf_conntrack_core.c      | 10 ++++++++--
 2 files changed, 14 insertions(+), 5 deletions(-)
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 2568d480e9a9..422a662194cc 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -622,11 +622,15 @@ struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *orig,
 		ip_conntrack_hash_rnd_initted = 1;
 	}
 
+	/* We don't want any race condition at early drop stage */
+	atomic_inc(&ip_conntrack_count);
+
 	if (ip_conntrack_max
-	    && atomic_read(&ip_conntrack_count) >= ip_conntrack_max) {
+	    && atomic_read(&ip_conntrack_count) > ip_conntrack_max) {
 		unsigned int hash = hash_conntrack(orig);
 		/* Try dropping from this hash chain. */
 		if (!early_drop(&ip_conntrack_hash[hash])) {
+			atomic_dec(&ip_conntrack_count);
 			if (net_ratelimit())
 				printk(KERN_WARNING
 				       "ip_conntrack: table full, dropping"
@@ -638,6 +642,7 @@ struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *orig,
 	conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);
 	if (!conntrack) {
 		DEBUGP("Can't allocate conntrack.\n");
+		atomic_dec(&ip_conntrack_count);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -651,8 +656,6 @@ struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *orig,
 	conntrack->timeout.data = (unsigned long)conntrack;
 	conntrack->timeout.function = death_by_timeout;
 
-	atomic_inc(&ip_conntrack_count);
-
 	return conntrack;
 }
 
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 927137b8b3b5..adeafa2cc339 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -848,11 +848,15 @@ __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
 		nf_conntrack_hash_rnd_initted = 1;
 	}
 
+	/* We don't want any race condition at early drop stage */
+	atomic_inc(&nf_conntrack_count);
+
 	if (nf_conntrack_max
-	    && atomic_read(&nf_conntrack_count) >= nf_conntrack_max) {
+	    && atomic_read(&nf_conntrack_count) > nf_conntrack_max) {
 		unsigned int hash = hash_conntrack(orig);
 		/* Try dropping from this hash chain. */
 		if (!early_drop(&nf_conntrack_hash[hash])) {
+			atomic_dec(&nf_conntrack_count);
 			if (net_ratelimit())
 				printk(KERN_WARNING
 				       "nf_conntrack: table full, dropping"
@@ -903,10 +907,12 @@ __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
 	init_timer(&conntrack->timeout);
 	conntrack->timeout.data = (unsigned long)conntrack;
 	conntrack->timeout.function = death_by_timeout;
+	read_unlock_bh(&nf_ct_cache_lock);
 
-	atomic_inc(&nf_conntrack_count);
+	return conntrack;
 out:
 	read_unlock_bh(&nf_ct_cache_lock);
+	atomic_dec(&nf_conntrack_count);
 	return conntrack;
 }
 