aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4
diff options
context:
space:
mode:
authorPablo Neira Ayuso <pablo@netfilter.org>2006-09-20 15:01:06 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2006-09-22 18:19:54 -0400
commit5251e2d2125407bbff0c39394a4011be9ed8b5d0 (patch)
tree3dda0aeb90d80a2ddd0e7a4215bfe9eaa8209033 /net/ipv4
parent01f348484dd8509254d045e3ad49029716eca6a1 (diff)
[NETFILTER]: conntrack: fix race condition in early_drop
On SMP environments the maximum number of conntracks can be overpassed
under heavy stress situations due to an existing race condition:

        CPU A                   CPU B
        atomic_read()           ...
        early_drop()            ...
        ...                     atomic_read()
        allocate conntrack      allocate conntrack
        atomic_inc()            atomic_inc()

This patch moves the counter incrementation before the early drop stage.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--net/ipv4/netfilter/ip_conntrack_core.c9
1 files changed, 6 insertions, 3 deletions
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 2568d480e9a9..422a662194cc 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -622,11 +622,15 @@ struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *orig,
 		ip_conntrack_hash_rnd_initted = 1;
 	}
 
+	/* We don't want any race condition at early drop stage */
+	atomic_inc(&ip_conntrack_count);
+
 	if (ip_conntrack_max
-	    && atomic_read(&ip_conntrack_count) >= ip_conntrack_max) {
+	    && atomic_read(&ip_conntrack_count) > ip_conntrack_max) {
 		unsigned int hash = hash_conntrack(orig);
 		/* Try dropping from this hash chain. */
 		if (!early_drop(&ip_conntrack_hash[hash])) {
+			atomic_dec(&ip_conntrack_count);
 			if (net_ratelimit())
 				printk(KERN_WARNING
 				       "ip_conntrack: table full, dropping"
@@ -638,6 +642,7 @@ struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *orig,
 	conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);
 	if (!conntrack) {
 		DEBUGP("Can't allocate conntrack.\n");
+		atomic_dec(&ip_conntrack_count);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -651,8 +656,6 @@ struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *orig,
 	conntrack->timeout.data = (unsigned long)conntrack;
 	conntrack->timeout.function = death_by_timeout;
 
-	atomic_inc(&ip_conntrack_count);
-
 	return conntrack;
 }
 