author		Arnaldo Carvalho de Melo <acme@ghostprotocols.net>	2005-08-09 23:07:35 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-08-29 18:41:44 -0400
commit		6e04e02165a7209a71db553b7bc48d68421e5ebf
tree		004157924013e6c099cacac59f39d3dd61f3e0e5 /net/ipv4/tcp.c
parent		2d8c4ce51903636ce0f60addc8134aa50ab8fa76
[INET]: Move tcp_port_rover to inet_hashinfo
Also expose all of the tcp_hashinfo members, i.e. killing those tcp_ehash, etc macros. This will more clearly expose already generic functions and some that need just a bit of work to become generic, as we'll see in the upcoming changesets.

Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--	net/ipv4/tcp.c	42
1 file changed, 21 insertions(+), 21 deletions(-)
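For illustration, here is a minimal userspace C sketch of the pattern this patch applies: protocol lookup state that used to live in separate globals reached through macros (tcp_ehash, tcp_bhash, tcp_port_rover) is grouped into one structure and accessed through its members, as the diff below does with tcp_hashinfo.ehash, tcp_hashinfo.bhash_size and tcp_hashinfo.port_rover. The struct hashinfo, struct ehash_bucket, struct bind_bucket and my_hashinfo names are simplified stand-ins, not the kernel's struct inet_hashinfo; only the member names are taken from the diff.

/*
 * Simplified stand-in for the kernel's struct inet_hashinfo: just enough
 * members (ehash, bhash, ehash_size, bhash_size, port_rover) to show the
 * "one shared struct instead of per-protocol globals" pattern.
 */
#include <stdio.h>
#include <stdlib.h>

struct ehash_bucket { void *chain; };	/* placeholder for a hash chain head */
struct bind_bucket  { void *chain; };	/* placeholder for a hash chain head */

struct hashinfo {
	struct ehash_bucket *ehash;	/* established-connection buckets */
	struct bind_bucket  *bhash;	/* bound-port buckets */
	unsigned int         ehash_size;
	unsigned int         bhash_size;
	int                  port_rover; /* last local port handed out */
};

/* One shared instance, playing the role of tcp_hashinfo after this patch. */
static struct hashinfo my_hashinfo;

int main(void)
{
	/* Initialization mirrors the tcp_init() hunks below: every member is
	 * assigned through the structure, not through tcp_* wrapper macros. */
	my_hashinfo.ehash_size = 1U << 10;
	my_hashinfo.bhash_size = 1U << 10;
	my_hashinfo.ehash = calloc((size_t)my_hashinfo.ehash_size << 1,
				   sizeof(*my_hashinfo.ehash));
	my_hashinfo.bhash = calloc(my_hashinfo.bhash_size,
				   sizeof(*my_hashinfo.bhash));
	if (!my_hashinfo.ehash || !my_hashinfo.bhash)
		return 1;
	my_hashinfo.port_rover = 1024 - 1;	/* cf. sysctl_local_port_range[0] - 1 */

	printf("hash tables configured (established %u bind %u)\n",
	       my_hashinfo.ehash_size << 1, my_hashinfo.bhash_size);

	free(my_hashinfo.ehash);
	free(my_hashinfo.bhash);
	return 0;
}

Per the commit message, the point of exposing the members directly is that helpers taking a pointer to this structure stay generic instead of being tied to TCP-specific globals.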
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 38c04c1a754c..2f4b1a374bb7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2257,11 +2257,11 @@ void __init tcp_init(void)
 	__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
 				   sizeof(skb->cb));
 
-	tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
-					      sizeof(struct inet_bind_bucket),
-					      0, SLAB_HWCACHE_ALIGN,
-					      NULL, NULL);
-	if (!tcp_bucket_cachep)
+	tcp_hashinfo.bind_bucket_cachep =
+		kmem_cache_create("tcp_bind_bucket",
+				  sizeof(struct inet_bind_bucket), 0,
+				  SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!tcp_hashinfo.bind_bucket_cachep)
 		panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
 
 	tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
@@ -2276,7 +2276,7 @@ void __init tcp_init(void)
 	 *
 	 * The methodology is similar to that of the buffer cache.
 	 */
-	tcp_ehash =
+	tcp_hashinfo.ehash =
 		alloc_large_system_hash("TCP established",
 					sizeof(struct inet_ehash_bucket),
 					thash_entries,
@@ -2284,37 +2284,37 @@ void __init tcp_init(void)
 					(25 - PAGE_SHIFT) :
 					(27 - PAGE_SHIFT),
 					HASH_HIGHMEM,
-					&tcp_ehash_size,
+					&tcp_hashinfo.ehash_size,
 					NULL,
 					0);
-	tcp_ehash_size = (1 << tcp_ehash_size) >> 1;
-	for (i = 0; i < (tcp_ehash_size << 1); i++) {
-		rwlock_init(&tcp_ehash[i].lock);
-		INIT_HLIST_HEAD(&tcp_ehash[i].chain);
+	tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
+	for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
+		rwlock_init(&tcp_hashinfo.ehash[i].lock);
+		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
 	}
 
-	tcp_bhash =
+	tcp_hashinfo.bhash =
 		alloc_large_system_hash("TCP bind",
 					sizeof(struct inet_bind_hashbucket),
-					tcp_ehash_size,
+					tcp_hashinfo.ehash_size,
 					(num_physpages >= 128 * 1024) ?
 					(25 - PAGE_SHIFT) :
 					(27 - PAGE_SHIFT),
 					HASH_HIGHMEM,
-					&tcp_bhash_size,
+					&tcp_hashinfo.bhash_size,
 					NULL,
 					64 * 1024);
-	tcp_bhash_size = 1 << tcp_bhash_size;
-	for (i = 0; i < tcp_bhash_size; i++) {
-		spin_lock_init(&tcp_bhash[i].lock);
-		INIT_HLIST_HEAD(&tcp_bhash[i].chain);
+	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
+	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
+		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
+		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
 	}
 
 	/* Try to be a bit smarter and adjust defaults depending
 	 * on available memory.
 	 */
 	for (order = 0; ((1 << order) << PAGE_SHIFT) <
-			(tcp_bhash_size * sizeof(struct inet_bind_hashbucket));
+			(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
 			order++)
 		;
 	if (order >= 4) {
@@ -2329,7 +2329,7 @@ void __init tcp_init(void)
 		sysctl_tcp_max_orphans >>= (3 - order);
 		sysctl_max_syn_backlog = 128;
 	}
-	tcp_port_rover = sysctl_local_port_range[0] - 1;
+	tcp_hashinfo.port_rover = sysctl_local_port_range[0] - 1;
 
 	sysctl_tcp_mem[0] = 768 << order;
 	sysctl_tcp_mem[1] = 1024 << order;
@@ -2344,7 +2344,7 @@ void __init tcp_init(void)
 
 	printk(KERN_INFO "TCP: Hash tables configured "
 	       "(established %d bind %d)\n",
-	       tcp_ehash_size << 1, tcp_bhash_size);
+	       tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
 
 	tcp_register_congestion_control(&tcp_reno);
 }