author		Harald Welte <laforge@netfilter.org>	2005-10-16 08:22:59 -0400
committer	Arnaldo Carvalho de Melo <acme@mandriva.com>	2005-10-25 22:19:27 -0400
commit		eed75f191d8318a2b144da8aae9774e1cfcae492
tree		512e66c1a83911c5944270a68bf950ddc1cbde0d /net/ipv4
parent		d50a6b56f0f239cf061630c85add121dc3555339
[NETFILTER] ip_conntrack: Make "hashsize" conntrack parameter writable
It's fairly simple to resize the hash table, but currently you need to
remove and reinsert the module. That's bad (we lose connection
state). Harald has even offered to write a daemon which sets this
based on load.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Harald Welte <laforge@netfilter.org>
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
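
[Editor's note] Because the parameter mode changes from 0400 to 0600, the hash size becomes writable at runtime through the module parameter interface (on kernels that expose module parameters via sysfs, typically /sys/module/ip_conntrack/parameters/hashsize). A minimal, hypothetical sketch of the core of the load-driven daemon mentioned above; the sysfs path and the target size of 16384 are illustrative assumptions, not part of this patch:

	/* hypothetical userspace sketch: ask set_hashsize() to resize the table */
	#include <stdio.h>

	int main(void)
	{
		/* path assumes sysfs module parameters are available */
		FILE *f = fopen("/sys/module/ip_conntrack/parameters/hashsize", "w");

		if (!f)
			return 1;
		fprintf(f, "%u\n", 16384u);	/* parsed by set_hashsize() via simple_strtol() */
		return fclose(f) != 0;
	}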
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/netfilter/ip_conntrack_core.c | 132
1 file changed, 95 insertions(+), 37 deletions(-)
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 07a80b56e8dc..422ab68ee7fb 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -50,7 +50,7 @@
 #include <linux/netfilter_ipv4/ip_conntrack_core.h>
 #include <linux/netfilter_ipv4/listhelp.h>
 
-#define IP_CONNTRACK_VERSION "2.3"
+#define IP_CONNTRACK_VERSION "2.4"
 
 #if 0
 #define DEBUGP printk
@@ -148,16 +148,20 @@ DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
 static int ip_conntrack_hash_rnd_initted;
 static unsigned int ip_conntrack_hash_rnd;
 
-static u_int32_t
-hash_conntrack(const struct ip_conntrack_tuple *tuple)
+static u_int32_t __hash_conntrack(const struct ip_conntrack_tuple *tuple,
+			    unsigned int size, unsigned int rnd)
 {
-#if 0
-	dump_tuple(tuple);
-#endif
 	return (jhash_3words(tuple->src.ip,
 			     (tuple->dst.ip ^ tuple->dst.protonum),
 			     (tuple->src.u.all | (tuple->dst.u.all << 16)),
-			     ip_conntrack_hash_rnd) % ip_conntrack_htable_size);
+			     rnd) % size);
+}
+
+static u_int32_t
+hash_conntrack(const struct ip_conntrack_tuple *tuple)
+{
+	return __hash_conntrack(tuple, ip_conntrack_htable_size,
+				ip_conntrack_hash_rnd);
 }
 
 int
@@ -1341,14 +1345,13 @@ static int kill_all(struct ip_conntrack *i, void *data)
 	return 1;
 }
 
-static void free_conntrack_hash(void)
+static void free_conntrack_hash(struct list_head *hash, int vmalloced,int size)
 {
-	if (ip_conntrack_vmalloc)
-		vfree(ip_conntrack_hash);
+	if (vmalloced)
+		vfree(hash);
 	else
-		free_pages((unsigned long)ip_conntrack_hash,
-			   get_order(sizeof(struct list_head)
-				     * ip_conntrack_htable_size));
+		free_pages((unsigned long)hash,
+			   get_order(sizeof(struct list_head) * size));
 }
 
 void ip_conntrack_flush()
@@ -1378,12 +1381,83 @@ void ip_conntrack_cleanup(void)
 	ip_conntrack_flush();
 	kmem_cache_destroy(ip_conntrack_cachep);
 	kmem_cache_destroy(ip_conntrack_expect_cachep);
-	free_conntrack_hash();
+	free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc,
+			    ip_conntrack_htable_size);
 	nf_unregister_sockopt(&so_getorigdst);
 }
 
-static int hashsize;
-module_param(hashsize, int, 0400);
+static struct list_head *alloc_hashtable(int size, int *vmalloced)
+{
+	struct list_head *hash;
+	unsigned int i;
+
+	*vmalloced = 0;
+	hash = (void*)__get_free_pages(GFP_KERNEL,
+				       get_order(sizeof(struct list_head)
+						 * size));
+	if (!hash) {
+		*vmalloced = 1;
+		printk(KERN_WARNING"ip_conntrack: falling back to vmalloc.\n");
+		hash = vmalloc(sizeof(struct list_head) * size);
+	}
+
+	if (hash)
+		for (i = 0; i < size; i++)
+			INIT_LIST_HEAD(&hash[i]);
+
+	return hash;
+}
+
+int set_hashsize(const char *val, struct kernel_param *kp)
+{
+	int i, bucket, hashsize, vmalloced;
+	int old_vmalloced, old_size;
+	int rnd;
+	struct list_head *hash, *old_hash;
+	struct ip_conntrack_tuple_hash *h;
+
+	/* On boot, we can set this without any fancy locking. */
+	if (!ip_conntrack_htable_size)
+		return param_set_int(val, kp);
+
+	hashsize = simple_strtol(val, NULL, 0);
+	if (!hashsize)
+		return -EINVAL;
+
+	hash = alloc_hashtable(hashsize, &vmalloced);
+	if (!hash)
+		return -ENOMEM;
+
+	/* We have to rehash for the new table anyway, so we also can
+	 * use a new random seed */
+	get_random_bytes(&rnd, 4);
+
+	write_lock_bh(&ip_conntrack_lock);
+	for (i = 0; i < ip_conntrack_htable_size; i++) {
+		while (!list_empty(&ip_conntrack_hash[i])) {
+			h = list_entry(ip_conntrack_hash[i].next,
+				       struct ip_conntrack_tuple_hash, list);
+			list_del(&h->list);
+			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
+			list_add_tail(&h->list, &hash[bucket]);
+		}
+	}
+	old_size = ip_conntrack_htable_size;
+	old_vmalloced = ip_conntrack_vmalloc;
+	old_hash = ip_conntrack_hash;
+
+	ip_conntrack_htable_size = hashsize;
+	ip_conntrack_vmalloc = vmalloced;
+	ip_conntrack_hash = hash;
+	ip_conntrack_hash_rnd = rnd;
+	write_unlock_bh(&ip_conntrack_lock);
+
+	free_conntrack_hash(old_hash, old_vmalloced, old_size);
+	return 0;
+}
+
+module_param_call(hashsize, set_hashsize, param_get_uint,
+		  &ip_conntrack_htable_size, 0600);
 
 int __init ip_conntrack_init(void)
 {
@@ -1392,9 +1466,7 @@ int __init ip_conntrack_init(void)
 
 	/* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB
	 * machine has 256 buckets. >= 1GB machines have 8192 buckets. */
-	if (hashsize) {
-		ip_conntrack_htable_size = hashsize;
-	} else {
+	if (!ip_conntrack_htable_size) {
 		ip_conntrack_htable_size
 			= (((num_physpages << PAGE_SHIFT) / 16384)
 			   / sizeof(struct list_head));
@@ -1416,20 +1488,8 @@ int __init ip_conntrack_init(void)
 		return ret;
 	}
 
-	/* AK: the hash table is twice as big than needed because it
-	   uses list_head. it would be much nicer to caches to use a
-	   single pointer list head here. */
-	ip_conntrack_vmalloc = 0;
-	ip_conntrack_hash
-		=(void*)__get_free_pages(GFP_KERNEL,
-					 get_order(sizeof(struct list_head)
-						   *ip_conntrack_htable_size));
-	if (!ip_conntrack_hash) {
-		ip_conntrack_vmalloc = 1;
-		printk(KERN_WARNING "ip_conntrack: falling back to vmalloc.\n");
-		ip_conntrack_hash = vmalloc(sizeof(struct list_head)
-					    * ip_conntrack_htable_size);
-	}
+	ip_conntrack_hash = alloc_hashtable(ip_conntrack_htable_size,
+					    &ip_conntrack_vmalloc);
 	if (!ip_conntrack_hash) {
 		printk(KERN_ERR "Unable to create ip_conntrack_hash\n");
 		goto err_unreg_sockopt;
@@ -1461,9 +1521,6 @@ int __init ip_conntrack_init(void)
 	ip_ct_protos[IPPROTO_ICMP] = &ip_conntrack_protocol_icmp;
 	write_unlock_bh(&ip_conntrack_lock);
 
-	for (i = 0; i < ip_conntrack_htable_size; i++)
-		INIT_LIST_HEAD(&ip_conntrack_hash[i]);
-
 	/* For use by ipt_REJECT */
 	ip_ct_attach = ip_conntrack_attach;
 
@@ -1478,7 +1535,8 @@ int __init ip_conntrack_init(void)
 err_free_conntrack_slab:
 	kmem_cache_destroy(ip_conntrack_cachep);
 err_free_hash:
-	free_conntrack_hash();
+	free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc,
+			    ip_conntrack_htable_size);
 err_unreg_sockopt:
 	nf_unregister_sockopt(&so_getorigdst);
 
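
[Editor's note] The heart of the change is the resize-and-rehash in set_hashsize(): allocate a new bucket array, move every tuple hash out of the old buckets into the bucket computed from the new size and a fresh random seed, publish the new table under the conntrack write lock, and only then free the old one. A rough, userspace-only sketch of that pattern follows; the types, names, and hash function are toy stand-ins, not the kernel code:

	/* Toy sketch of the resize-and-rehash pattern used by set_hashsize().
	 * All names and the hash function here are made up for illustration. */
	#include <stdio.h>
	#include <stdlib.h>

	struct entry {
		unsigned int key;
		struct entry *next;
	};

	static struct entry **table;
	static unsigned int table_size;
	static unsigned int table_seed;

	/* stand-in for __hash_conntrack(tuple, size, rnd) */
	static unsigned int bucket_of(unsigned int key, unsigned int size, unsigned int rnd)
	{
		return (key ^ rnd) % size;
	}

	static int resize(unsigned int new_size, unsigned int new_seed)
	{
		struct entry **new_table = calloc(new_size, sizeof(*new_table));
		unsigned int i;

		if (!new_table)
			return -1;

		/* In the kernel this move runs under write_lock_bh(&ip_conntrack_lock). */
		for (i = 0; i < table_size; i++) {
			while (table[i]) {
				struct entry *e = table[i];
				unsigned int b = bucket_of(e->key, new_size, new_seed);

				table[i] = e->next;	/* unlink from the old bucket */
				e->next = new_table[b];	/* relink into the new bucket */
				new_table[b] = e;
			}
		}

		free(table);			/* counterpart of free_conntrack_hash() */
		table = new_table;
		table_size = new_size;
		table_seed = new_seed;
		return 0;
	}

	int main(void)
	{
		unsigned int i;

		table_size = 4;
		table_seed = 0;
		table = calloc(table_size, sizeof(*table));
		if (!table)
			return 1;

		for (i = 0; i < 16; i++) {	/* populate a few entries */
			struct entry *e = malloc(sizeof(*e));
			unsigned int b;

			if (!e)
				return 1;
			e->key = i;
			b = bucket_of(e->key, table_size, table_seed);
			e->next = table[b];
			table[b] = e;
		}

		if (resize(16, 0x5eed) != 0)
			return 1;
		printf("resized to %u buckets\n", table_size);
		return 0;
	}

Splitting hash_conntrack() into a thin wrapper over __hash_conntrack() is what makes this possible: the rehash loop can compute buckets for a table size and seed different from the live ip_conntrack_htable_size and ip_conntrack_hash_rnd.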