author     Patrick McHardy <kaber@trash.net>             2007-07-08 01:37:38 -0400
committer  David S. Miller <davem@sunset.davemloft.net>  2007-07-11 01:18:15 -0400
commit     7ae7730fd6d98be1afe8ad9ea77813de607ec970
tree       cb0cebe96de34a5d6116e4f76377e6ef4ca43547
parent     ec59a1110aee6846adada8979915cacae64042ce
[NETFILTER]: nf_conntrack: early_drop improvement
When the maximum number of conntrack entries is reached and a new one needs to be allocated, conntrack tries to drop an unassured connection from the same hash bucket the new conntrack would hash to. Since with a properly sized hash the average number of entries per bucket is 1, the chances of actually finding one are not very good. This patch makes it walk the hash until a minimum of 8 entries have been checked.

Based on patch by Vasily Averin <vvs@sw.ru>.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  net/netfilter/nf_conntrack_core.c  22
1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index d1fc019760a1..472396dac05c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -377,21 +377,30 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
 
+#define NF_CT_EVICTION_RANGE	8
+
 /* There's a small race here where we may free a just-assured
    connection. Too bad: we're in trouble anyway. */
-static int early_drop(struct hlist_head *chain)
+static int early_drop(unsigned int hash)
 {
 	/* Use oldest entry, which is roughly LRU */
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct = NULL, *tmp;
 	struct hlist_node *n;
+	unsigned int i, cnt = 0;
 	int dropped = 0;
 
 	read_lock_bh(&nf_conntrack_lock);
-	hlist_for_each_entry(h, n, chain, hnode) {
-		tmp = nf_ct_tuplehash_to_ctrack(h);
-		if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
-			ct = tmp;
+	for (i = 0; i < nf_conntrack_htable_size; i++) {
+		hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) {
+			tmp = nf_ct_tuplehash_to_ctrack(h);
+			if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
+				ct = tmp;
+			cnt++;
+		}
+		if (ct || cnt >= NF_CT_EVICTION_RANGE)
+			break;
+		hash = (hash + 1) % nf_conntrack_htable_size;
 	}
 	if (ct)
 		atomic_inc(&ct->ct_general.use);
@@ -425,8 +434,7 @@ struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
 	if (nf_conntrack_max
 	    && atomic_read(&nf_conntrack_count) > nf_conntrack_max) {
 		unsigned int hash = hash_conntrack(orig);
-		/* Try dropping from this hash chain. */
-		if (!early_drop(&nf_conntrack_hash[hash])) {
+		if (!early_drop(hash)) {
 			atomic_dec(&nf_conntrack_count);
 			if (net_ratelimit())
 				printk(KERN_WARNING
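
As a reading aid, here is a minimal userspace sketch of the eviction walk the patch introduces. The struct table/struct entry types and the pick_victim() helper are simplified stand-ins invented for illustration, not kernel APIs; only the walk logic mirrors the patch: scan successive buckets, counting entries, until either an unassured victim is found or at least NF_CT_EVICTION_RANGE entries have been examined.

#include <stdbool.h>
#include <stddef.h>

#define NF_CT_EVICTION_RANGE 8

struct entry {
	struct entry *next;	/* singly linked chain, newest at head */
	bool assured;
};

struct table {
	struct entry **buckets;
	unsigned int size;
};

/* Walk buckets starting at 'hash', wrapping around, until an
 * unassured entry is found or NF_CT_EVICTION_RANGE entries have
 * been checked. Keeping the *last* unassured match in a chain
 * picks the oldest one, since new entries are inserted at the
 * head -- the same "roughly LRU" choice the kernel code makes. */
static struct entry *pick_victim(struct table *t, unsigned int hash)
{
	struct entry *victim = NULL;
	unsigned int i, cnt = 0;

	for (i = 0; i < t->size; i++) {
		struct entry *e;

		for (e = t->buckets[hash]; e != NULL; e = e->next) {
			if (!e->assured)
				victim = e;
			cnt++;
		}
		if (victim || cnt >= NF_CT_EVICTION_RANGE)
			break;
		hash = (hash + 1) % t->size;	/* try the next bucket */
	}
	return victim;
}

Compared with the old single-chain version, the wrap-around loop bounds the extra work at roughly NF_CT_EVICTION_RANGE entry checks while making it far more likely that at least one eviction candidate is seen when chains average one entry each.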