aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4
diff options
context:
space:
mode:
authorArnaldo Carvalho de Melo <acme@ghostprotocols.net>2005-08-09 23:07:35 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2005-08-29 18:41:44 -0400
commit6e04e02165a7209a71db553b7bc48d68421e5ebf (patch)
tree004157924013e6c099cacac59f39d3dd61f3e0e5 /net/ipv4
parent2d8c4ce51903636ce0f60addc8134aa50ab8fa76 (diff)
[INET]: Move tcp_port_rover to inet_hashinfo
Also expose all of the tcp_hashinfo members, i.e. killing those tcp_ehash, etc macros, this will more clearly expose already generic functions and some that need just a bit of work to become generic, as we'll see in the upcoming changesets.

Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--net/ipv4/tcp.c42
-rw-r--r--net/ipv4/tcp_diag.c8
-rw-r--r--net/ipv4/tcp_ipv4.c101
-rw-r--r--net/ipv4/tcp_minisocks.c15
4 files changed, 82 insertions, 84 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 38c04c1a754c..2f4b1a374bb7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2257,11 +2257,11 @@ void __init tcp_init(void)
2257 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb), 2257 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
2258 sizeof(skb->cb)); 2258 sizeof(skb->cb));
2259 2259
2260 tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket", 2260 tcp_hashinfo.bind_bucket_cachep =
2261 sizeof(struct inet_bind_bucket), 2261 kmem_cache_create("tcp_bind_bucket",
2262 0, SLAB_HWCACHE_ALIGN, 2262 sizeof(struct inet_bind_bucket), 0,
2263 NULL, NULL); 2263 SLAB_HWCACHE_ALIGN, NULL, NULL);
2264 if (!tcp_bucket_cachep) 2264 if (!tcp_hashinfo.bind_bucket_cachep)
2265 panic("tcp_init: Cannot alloc tcp_bind_bucket cache."); 2265 panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
2266 2266
2267 tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket", 2267 tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
@@ -2276,7 +2276,7 @@ void __init tcp_init(void)
2276 * 2276 *
2277 * The methodology is similar to that of the buffer cache. 2277 * The methodology is similar to that of the buffer cache.
2278 */ 2278 */
2279 tcp_ehash = 2279 tcp_hashinfo.ehash =
2280 alloc_large_system_hash("TCP established", 2280 alloc_large_system_hash("TCP established",
2281 sizeof(struct inet_ehash_bucket), 2281 sizeof(struct inet_ehash_bucket),
2282 thash_entries, 2282 thash_entries,
@@ -2284,37 +2284,37 @@ void __init tcp_init(void)
2284 (25 - PAGE_SHIFT) : 2284 (25 - PAGE_SHIFT) :
2285 (27 - PAGE_SHIFT), 2285 (27 - PAGE_SHIFT),
2286 HASH_HIGHMEM, 2286 HASH_HIGHMEM,
2287 &tcp_ehash_size, 2287 &tcp_hashinfo.ehash_size,
2288 NULL, 2288 NULL,
2289 0); 2289 0);
2290 tcp_ehash_size = (1 << tcp_ehash_size) >> 1; 2290 tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
2291 for (i = 0; i < (tcp_ehash_size << 1); i++) { 2291 for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
2292 rwlock_init(&tcp_ehash[i].lock); 2292 rwlock_init(&tcp_hashinfo.ehash[i].lock);
2293 INIT_HLIST_HEAD(&tcp_ehash[i].chain); 2293 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
2294 } 2294 }
2295 2295
2296 tcp_bhash = 2296 tcp_hashinfo.bhash =
2297 alloc_large_system_hash("TCP bind", 2297 alloc_large_system_hash("TCP bind",
2298 sizeof(struct inet_bind_hashbucket), 2298 sizeof(struct inet_bind_hashbucket),
2299 tcp_ehash_size, 2299 tcp_hashinfo.ehash_size,
2300 (num_physpages >= 128 * 1024) ? 2300 (num_physpages >= 128 * 1024) ?
2301 (25 - PAGE_SHIFT) : 2301 (25 - PAGE_SHIFT) :
2302 (27 - PAGE_SHIFT), 2302 (27 - PAGE_SHIFT),
2303 HASH_HIGHMEM, 2303 HASH_HIGHMEM,
2304 &tcp_bhash_size, 2304 &tcp_hashinfo.bhash_size,
2305 NULL, 2305 NULL,
2306 64 * 1024); 2306 64 * 1024);
2307 tcp_bhash_size = 1 << tcp_bhash_size; 2307 tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
2308 for (i = 0; i < tcp_bhash_size; i++) { 2308 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
2309 spin_lock_init(&tcp_bhash[i].lock); 2309 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
2310 INIT_HLIST_HEAD(&tcp_bhash[i].chain); 2310 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
2311 } 2311 }
2312 2312
2313 /* Try to be a bit smarter and adjust defaults depending 2313 /* Try to be a bit smarter and adjust defaults depending
2314 * on available memory. 2314 * on available memory.
2315 */ 2315 */
2316 for (order = 0; ((1 << order) << PAGE_SHIFT) < 2316 for (order = 0; ((1 << order) << PAGE_SHIFT) <
2317 (tcp_bhash_size * sizeof(struct inet_bind_hashbucket)); 2317 (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
2318 order++) 2318 order++)
2319 ; 2319 ;
2320 if (order >= 4) { 2320 if (order >= 4) {
@@ -2329,7 +2329,7 @@ void __init tcp_init(void)
2329 sysctl_tcp_max_orphans >>= (3 - order); 2329 sysctl_tcp_max_orphans >>= (3 - order);
2330 sysctl_max_syn_backlog = 128; 2330 sysctl_max_syn_backlog = 128;
2331 } 2331 }
2332 tcp_port_rover = sysctl_local_port_range[0] - 1; 2332 tcp_hashinfo.port_rover = sysctl_local_port_range[0] - 1;
2333 2333
2334 sysctl_tcp_mem[0] = 768 << order; 2334 sysctl_tcp_mem[0] = 768 << order;
2335 sysctl_tcp_mem[1] = 1024 << order; 2335 sysctl_tcp_mem[1] = 1024 << order;
@@ -2344,7 +2344,7 @@ void __init tcp_init(void)
2344 2344
2345 printk(KERN_INFO "TCP: Hash tables configured " 2345 printk(KERN_INFO "TCP: Hash tables configured "
2346 "(established %d bind %d)\n", 2346 "(established %d bind %d)\n",
2347 tcp_ehash_size << 1, tcp_bhash_size); 2347 tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
2348 2348
2349 tcp_register_congestion_control(&tcp_reno); 2349 tcp_register_congestion_control(&tcp_reno);
2350} 2350}
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 5bb6a0f1c77b..0ae738b455f0 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -595,7 +595,7 @@ static int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb)
595 struct hlist_node *node; 595 struct hlist_node *node;
596 596
597 num = 0; 597 num = 0;
598 sk_for_each(sk, node, &tcp_listening_hash[i]) { 598 sk_for_each(sk, node, &tcp_hashinfo.listening_hash[i]) {
599 struct inet_sock *inet = inet_sk(sk); 599 struct inet_sock *inet = inet_sk(sk);
600 600
601 if (num < s_num) { 601 if (num < s_num) {
@@ -645,8 +645,8 @@ skip_listen_ht:
645 if (!(r->tcpdiag_states&~(TCPF_LISTEN|TCPF_SYN_RECV))) 645 if (!(r->tcpdiag_states&~(TCPF_LISTEN|TCPF_SYN_RECV)))
646 return skb->len; 646 return skb->len;
647 647
648 for (i = s_i; i < tcp_ehash_size; i++) { 648 for (i = s_i; i < tcp_hashinfo.ehash_size; i++) {
649 struct inet_ehash_bucket *head = &tcp_ehash[i]; 649 struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[i];
650 struct sock *sk; 650 struct sock *sk;
651 struct hlist_node *node; 651 struct hlist_node *node;
652 652
@@ -678,7 +678,7 @@ next_normal:
678 678
679 if (r->tcpdiag_states&TCPF_TIME_WAIT) { 679 if (r->tcpdiag_states&TCPF_TIME_WAIT) {
680 sk_for_each(sk, node, 680 sk_for_each(sk, node,
681 &tcp_ehash[i + tcp_ehash_size].chain) { 681 &tcp_hashinfo.ehash[i + tcp_hashinfo.ehash_size].chain) {
682 struct inet_sock *inet = inet_sk(sk); 682 struct inet_sock *inet = inet_sk(sk);
683 683
684 if (num < s_num) 684 if (num < s_num)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 40fe4f5fca1c..f5373f9f00ac 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -94,6 +94,7 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
94 .lhash_users = ATOMIC_INIT(0), 94 .lhash_users = ATOMIC_INIT(0),
95 .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait), 95 .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
96 .portalloc_lock = SPIN_LOCK_UNLOCKED, 96 .portalloc_lock = SPIN_LOCK_UNLOCKED,
97 .port_rover = 1024 - 1,
97}; 98};
98 99
99/* 100/*
@@ -102,7 +103,6 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
102 * 32768-61000 103 * 32768-61000
103 */ 104 */
104int sysctl_local_port_range[2] = { 1024, 4999 }; 105int sysctl_local_port_range[2] = { 1024, 4999 };
105int tcp_port_rover = 1024 - 1;
106 106
107static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb) 107static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
108{ 108{
@@ -146,16 +146,16 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
146 int remaining = (high - low) + 1; 146 int remaining = (high - low) + 1;
147 int rover; 147 int rover;
148 148
149 spin_lock(&tcp_portalloc_lock); 149 spin_lock(&tcp_hashinfo.portalloc_lock);
150 if (tcp_port_rover < low) 150 if (tcp_hashinfo.port_rover < low)
151 rover = low; 151 rover = low;
152 else 152 else
153 rover = tcp_port_rover; 153 rover = tcp_hashinfo.port_rover;
154 do { 154 do {
155 rover++; 155 rover++;
156 if (rover > high) 156 if (rover > high)
157 rover = low; 157 rover = low;
158 head = &tcp_bhash[inet_bhashfn(rover, tcp_bhash_size)]; 158 head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)];
159 spin_lock(&head->lock); 159 spin_lock(&head->lock);
160 inet_bind_bucket_for_each(tb, node, &head->chain) 160 inet_bind_bucket_for_each(tb, node, &head->chain)
161 if (tb->port == rover) 161 if (tb->port == rover)
@@ -164,8 +164,8 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
164 next: 164 next:
165 spin_unlock(&head->lock); 165 spin_unlock(&head->lock);
166 } while (--remaining > 0); 166 } while (--remaining > 0);
167 tcp_port_rover = rover; 167 tcp_hashinfo.port_rover = rover;
168 spin_unlock(&tcp_portalloc_lock); 168 spin_unlock(&tcp_hashinfo.portalloc_lock);
169 169
170 /* Exhausted local port range during search? It is not 170 /* Exhausted local port range during search? It is not
171 * possible for us to be holding one of the bind hash 171 * possible for us to be holding one of the bind hash
@@ -182,7 +182,7 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
182 */ 182 */
183 snum = rover; 183 snum = rover;
184 } else { 184 } else {
185 head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)]; 185 head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
186 spin_lock(&head->lock); 186 spin_lock(&head->lock);
187 inet_bind_bucket_for_each(tb, node, &head->chain) 187 inet_bind_bucket_for_each(tb, node, &head->chain)
188 if (tb->port == snum) 188 if (tb->port == snum)
@@ -205,7 +205,7 @@ tb_found:
205 } 205 }
206tb_not_found: 206tb_not_found:
207 ret = 1; 207 ret = 1;
208 if (!tb && (tb = inet_bind_bucket_create(tcp_bucket_cachep, head, snum)) == NULL) 208 if (!tb && (tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum)) == NULL)
209 goto fail_unlock; 209 goto fail_unlock;
210 if (hlist_empty(&tb->owners)) { 210 if (hlist_empty(&tb->owners)) {
211 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) 211 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
@@ -237,22 +237,22 @@ fail:
237 237
238void tcp_listen_wlock(void) 238void tcp_listen_wlock(void)
239{ 239{
240 write_lock(&tcp_lhash_lock); 240 write_lock(&tcp_hashinfo.lhash_lock);
241 241
242 if (atomic_read(&tcp_lhash_users)) { 242 if (atomic_read(&tcp_hashinfo.lhash_users)) {
243 DEFINE_WAIT(wait); 243 DEFINE_WAIT(wait);
244 244
245 for (;;) { 245 for (;;) {
246 prepare_to_wait_exclusive(&tcp_lhash_wait, 246 prepare_to_wait_exclusive(&tcp_hashinfo.lhash_wait,
247 &wait, TASK_UNINTERRUPTIBLE); 247 &wait, TASK_UNINTERRUPTIBLE);
248 if (!atomic_read(&tcp_lhash_users)) 248 if (!atomic_read(&tcp_hashinfo.lhash_users))
249 break; 249 break;
250 write_unlock_bh(&tcp_lhash_lock); 250 write_unlock_bh(&tcp_hashinfo.lhash_lock);
251 schedule(); 251 schedule();
252 write_lock_bh(&tcp_lhash_lock); 252 write_lock_bh(&tcp_hashinfo.lhash_lock);
253 } 253 }
254 254
255 finish_wait(&tcp_lhash_wait, &wait); 255 finish_wait(&tcp_hashinfo.lhash_wait, &wait);
256 } 256 }
257} 257}
258 258
@@ -263,20 +263,20 @@ static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
263 263
264 BUG_TRAP(sk_unhashed(sk)); 264 BUG_TRAP(sk_unhashed(sk));
265 if (listen_possible && sk->sk_state == TCP_LISTEN) { 265 if (listen_possible && sk->sk_state == TCP_LISTEN) {
266 list = &tcp_listening_hash[inet_sk_listen_hashfn(sk)]; 266 list = &tcp_hashinfo.listening_hash[inet_sk_listen_hashfn(sk)];
267 lock = &tcp_lhash_lock; 267 lock = &tcp_hashinfo.lhash_lock;
268 tcp_listen_wlock(); 268 tcp_listen_wlock();
269 } else { 269 } else {
270 sk->sk_hashent = inet_sk_ehashfn(sk, tcp_ehash_size); 270 sk->sk_hashent = inet_sk_ehashfn(sk, tcp_hashinfo.ehash_size);
271 list = &tcp_ehash[sk->sk_hashent].chain; 271 list = &tcp_hashinfo.ehash[sk->sk_hashent].chain;
272 lock = &tcp_ehash[sk->sk_hashent].lock; 272 lock = &tcp_hashinfo.ehash[sk->sk_hashent].lock;
273 write_lock(lock); 273 write_lock(lock);
274 } 274 }
275 __sk_add_node(sk, list); 275 __sk_add_node(sk, list);
276 sock_prot_inc_use(sk->sk_prot); 276 sock_prot_inc_use(sk->sk_prot);
277 write_unlock(lock); 277 write_unlock(lock);
278 if (listen_possible && sk->sk_state == TCP_LISTEN) 278 if (listen_possible && sk->sk_state == TCP_LISTEN)
279 wake_up(&tcp_lhash_wait); 279 wake_up(&tcp_hashinfo.lhash_wait);
280} 280}
281 281
282static void tcp_v4_hash(struct sock *sk) 282static void tcp_v4_hash(struct sock *sk)
@@ -298,9 +298,9 @@ void tcp_unhash(struct sock *sk)
298 if (sk->sk_state == TCP_LISTEN) { 298 if (sk->sk_state == TCP_LISTEN) {
299 local_bh_disable(); 299 local_bh_disable();
300 tcp_listen_wlock(); 300 tcp_listen_wlock();
301 lock = &tcp_lhash_lock; 301 lock = &tcp_hashinfo.lhash_lock;
302 } else { 302 } else {
303 struct inet_ehash_bucket *head = &tcp_ehash[sk->sk_hashent]; 303 struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[sk->sk_hashent];
304 lock = &head->lock; 304 lock = &head->lock;
305 write_lock_bh(&head->lock); 305 write_lock_bh(&head->lock);
306 } 306 }
@@ -311,7 +311,7 @@ void tcp_unhash(struct sock *sk)
311 311
312 ende: 312 ende:
313 if (sk->sk_state == TCP_LISTEN) 313 if (sk->sk_state == TCP_LISTEN)
314 wake_up(&tcp_lhash_wait); 314 wake_up(&tcp_hashinfo.lhash_wait);
315} 315}
316 316
317/* Don't inline this cruft. Here are some nice properties to 317/* Don't inline this cruft. Here are some nice properties to
@@ -366,8 +366,8 @@ static inline struct sock *tcp_v4_lookup_listener(const u32 daddr,
366 struct sock *sk = NULL; 366 struct sock *sk = NULL;
367 struct hlist_head *head; 367 struct hlist_head *head;
368 368
369 read_lock(&tcp_lhash_lock); 369 read_lock(&tcp_hashinfo.lhash_lock);
370 head = &tcp_listening_hash[inet_lhashfn(hnum)]; 370 head = &tcp_hashinfo.listening_hash[inet_lhashfn(hnum)];
371 if (!hlist_empty(head)) { 371 if (!hlist_empty(head)) {
372 struct inet_sock *inet = inet_sk((sk = __sk_head(head))); 372 struct inet_sock *inet = inet_sk((sk = __sk_head(head)));
373 373
@@ -382,7 +382,7 @@ static inline struct sock *tcp_v4_lookup_listener(const u32 daddr,
382sherry_cache: 382sherry_cache:
383 sock_hold(sk); 383 sock_hold(sk);
384 } 384 }
385 read_unlock(&tcp_lhash_lock); 385 read_unlock(&tcp_hashinfo.lhash_lock);
386 return sk; 386 return sk;
387} 387}
388 388
@@ -406,8 +406,8 @@ static inline struct sock *__tcp_v4_lookup_established(const u32 saddr,
406 /* Optimize here for direct hit, only listening connections can 406 /* Optimize here for direct hit, only listening connections can
407 * have wildcards anyways. 407 * have wildcards anyways.
408 */ 408 */
409 const int hash = inet_ehashfn(daddr, hnum, saddr, sport, tcp_ehash_size); 409 const int hash = inet_ehashfn(daddr, hnum, saddr, sport, tcp_hashinfo.ehash_size);
410 head = &tcp_ehash[hash]; 410 head = &tcp_hashinfo.ehash[hash];
411 read_lock(&head->lock); 411 read_lock(&head->lock);
412 sk_for_each(sk, node, &head->chain) { 412 sk_for_each(sk, node, &head->chain) {
413 if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif)) 413 if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
@@ -415,7 +415,7 @@ static inline struct sock *__tcp_v4_lookup_established(const u32 saddr,
415 } 415 }
416 416
417 /* Must check for a TIME_WAIT'er before going to listener hash. */ 417 /* Must check for a TIME_WAIT'er before going to listener hash. */
418 sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) { 418 sk_for_each(sk, node, &(head + tcp_hashinfo.ehash_size)->chain) {
419 if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif)) 419 if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
420 goto hit; 420 goto hit;
421 } 421 }
@@ -469,8 +469,8 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
469 int dif = sk->sk_bound_dev_if; 469 int dif = sk->sk_bound_dev_if;
470 TCP_V4_ADDR_COOKIE(acookie, saddr, daddr) 470 TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
471 __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport); 471 __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
472 const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_ehash_size); 472 const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_hashinfo.ehash_size);
473 struct inet_ehash_bucket *head = &tcp_ehash[hash]; 473 struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
474 struct sock *sk2; 474 struct sock *sk2;
475 struct hlist_node *node; 475 struct hlist_node *node;
476 struct tcp_tw_bucket *tw; 476 struct tcp_tw_bucket *tw;
@@ -478,7 +478,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
478 write_lock(&head->lock); 478 write_lock(&head->lock);
479 479
480 /* Check TIME-WAIT sockets first. */ 480 /* Check TIME-WAIT sockets first. */
481 sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) { 481 sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
482 tw = (struct tcp_tw_bucket *)sk2; 482 tw = (struct tcp_tw_bucket *)sk2;
483 483
484 if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) { 484 if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
@@ -582,7 +582,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
582 local_bh_disable(); 582 local_bh_disable();
583 for (i = 1; i <= range; i++) { 583 for (i = 1; i <= range; i++) {
584 port = low + (i + offset) % range; 584 port = low + (i + offset) % range;
585 head = &tcp_bhash[inet_bhashfn(port, tcp_bhash_size)]; 585 head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)];
586 spin_lock(&head->lock); 586 spin_lock(&head->lock);
587 587
588 /* Does not bother with rcv_saddr checks, 588 /* Does not bother with rcv_saddr checks,
@@ -602,7 +602,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
602 } 602 }
603 } 603 }
604 604
605 tb = inet_bind_bucket_create(tcp_bucket_cachep, head, port); 605 tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port);
606 if (!tb) { 606 if (!tb) {
607 spin_unlock(&head->lock); 607 spin_unlock(&head->lock);
608 break; 608 break;
@@ -637,7 +637,7 @@ ok:
637 goto out; 637 goto out;
638 } 638 }
639 639
640 head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)]; 640 head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
641 tb = inet_sk(sk)->bind_hash; 641 tb = inet_sk(sk)->bind_hash;
642 spin_lock_bh(&head->lock); 642 spin_lock_bh(&head->lock);
643 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { 643 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
@@ -1926,7 +1926,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
1926 1926
1927 if (!sk) { 1927 if (!sk) {
1928 st->bucket = 0; 1928 st->bucket = 0;
1929 sk = sk_head(&tcp_listening_hash[0]); 1929 sk = sk_head(&tcp_hashinfo.listening_hash[0]);
1930 goto get_sk; 1930 goto get_sk;
1931 } 1931 }
1932 1932
@@ -1980,7 +1980,7 @@ start_req:
1980 read_unlock_bh(&tp->accept_queue.syn_wait_lock); 1980 read_unlock_bh(&tp->accept_queue.syn_wait_lock);
1981 } 1981 }
1982 if (++st->bucket < INET_LHTABLE_SIZE) { 1982 if (++st->bucket < INET_LHTABLE_SIZE) {
1983 sk = sk_head(&tcp_listening_hash[st->bucket]); 1983 sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
1984 goto get_sk; 1984 goto get_sk;
1985 } 1985 }
1986 cur = NULL; 1986 cur = NULL;
@@ -2004,7 +2004,7 @@ static void *established_get_first(struct seq_file *seq)
2004 struct tcp_iter_state* st = seq->private; 2004 struct tcp_iter_state* st = seq->private;
2005 void *rc = NULL; 2005 void *rc = NULL;
2006 2006
2007 for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) { 2007 for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
2008 struct sock *sk; 2008 struct sock *sk;
2009 struct hlist_node *node; 2009 struct hlist_node *node;
2010 struct tcp_tw_bucket *tw; 2010 struct tcp_tw_bucket *tw;
@@ -2012,8 +2012,8 @@ static void *established_get_first(struct seq_file *seq)
2012 /* We can reschedule _before_ having picked the target: */ 2012 /* We can reschedule _before_ having picked the target: */
2013 cond_resched_softirq(); 2013 cond_resched_softirq();
2014 2014
2015 read_lock(&tcp_ehash[st->bucket].lock); 2015 read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
2016 sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) { 2016 sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2017 if (sk->sk_family != st->family) { 2017 if (sk->sk_family != st->family) {
2018 continue; 2018 continue;
2019 } 2019 }
@@ -2022,14 +2022,14 @@ static void *established_get_first(struct seq_file *seq)
2022 } 2022 }
2023 st->state = TCP_SEQ_STATE_TIME_WAIT; 2023 st->state = TCP_SEQ_STATE_TIME_WAIT;
2024 tw_for_each(tw, node, 2024 tw_for_each(tw, node,
2025 &tcp_ehash[st->bucket + tcp_ehash_size].chain) { 2025 &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
2026 if (tw->tw_family != st->family) { 2026 if (tw->tw_family != st->family) {
2027 continue; 2027 continue;
2028 } 2028 }
2029 rc = tw; 2029 rc = tw;
2030 goto out; 2030 goto out;
2031 } 2031 }
2032 read_unlock(&tcp_ehash[st->bucket].lock); 2032 read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
2033 st->state = TCP_SEQ_STATE_ESTABLISHED; 2033 st->state = TCP_SEQ_STATE_ESTABLISHED;
2034 } 2034 }
2035out: 2035out:
@@ -2056,15 +2056,15 @@ get_tw:
2056 cur = tw; 2056 cur = tw;
2057 goto out; 2057 goto out;
2058 } 2058 }
2059 read_unlock(&tcp_ehash[st->bucket].lock); 2059 read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
2060 st->state = TCP_SEQ_STATE_ESTABLISHED; 2060 st->state = TCP_SEQ_STATE_ESTABLISHED;
2061 2061
2062 /* We can reschedule between buckets: */ 2062 /* We can reschedule between buckets: */
2063 cond_resched_softirq(); 2063 cond_resched_softirq();
2064 2064
2065 if (++st->bucket < tcp_ehash_size) { 2065 if (++st->bucket < tcp_hashinfo.ehash_size) {
2066 read_lock(&tcp_ehash[st->bucket].lock); 2066 read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
2067 sk = sk_head(&tcp_ehash[st->bucket].chain); 2067 sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
2068 } else { 2068 } else {
2069 cur = NULL; 2069 cur = NULL;
2070 goto out; 2070 goto out;
@@ -2078,7 +2078,7 @@ get_tw:
2078 } 2078 }
2079 2079
2080 st->state = TCP_SEQ_STATE_TIME_WAIT; 2080 st->state = TCP_SEQ_STATE_TIME_WAIT;
2081 tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain); 2081 tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain);
2082 goto get_tw; 2082 goto get_tw;
2083found: 2083found:
2084 cur = sk; 2084 cur = sk;
@@ -2173,7 +2173,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
2173 case TCP_SEQ_STATE_TIME_WAIT: 2173 case TCP_SEQ_STATE_TIME_WAIT:
2174 case TCP_SEQ_STATE_ESTABLISHED: 2174 case TCP_SEQ_STATE_ESTABLISHED:
2175 if (v) 2175 if (v)
2176 read_unlock(&tcp_ehash[st->bucket].lock); 2176 read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
2177 local_bh_enable(); 2177 local_bh_enable();
2178 break; 2178 break;
2179 } 2179 }
@@ -2432,7 +2432,6 @@ EXPORT_SYMBOL(ipv4_specific);
2432EXPORT_SYMBOL(inet_bind_bucket_create); 2432EXPORT_SYMBOL(inet_bind_bucket_create);
2433EXPORT_SYMBOL(tcp_hashinfo); 2433EXPORT_SYMBOL(tcp_hashinfo);
2434EXPORT_SYMBOL(tcp_listen_wlock); 2434EXPORT_SYMBOL(tcp_listen_wlock);
2435EXPORT_SYMBOL(tcp_port_rover);
2436EXPORT_SYMBOL(tcp_prot); 2435EXPORT_SYMBOL(tcp_prot);
2437EXPORT_SYMBOL(tcp_unhash); 2436EXPORT_SYMBOL(tcp_unhash);
2438EXPORT_SYMBOL(tcp_v4_conn_request); 2437EXPORT_SYMBOL(tcp_v4_conn_request);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 267cea1087e5..f29e2f6ebe1b 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -60,12 +60,11 @@ int tcp_tw_count;
60/* Must be called with locally disabled BHs. */ 60/* Must be called with locally disabled BHs. */
61static void tcp_timewait_kill(struct tcp_tw_bucket *tw) 61static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
62{ 62{
63 struct inet_ehash_bucket *ehead;
64 struct inet_bind_hashbucket *bhead; 63 struct inet_bind_hashbucket *bhead;
65 struct inet_bind_bucket *tb; 64 struct inet_bind_bucket *tb;
66
67 /* Unlink from established hashes. */ 65 /* Unlink from established hashes. */
68 ehead = &tcp_ehash[tw->tw_hashent]; 66 struct inet_ehash_bucket *ehead = &tcp_hashinfo.ehash[tw->tw_hashent];
67
69 write_lock(&ehead->lock); 68 write_lock(&ehead->lock);
70 if (hlist_unhashed(&tw->tw_node)) { 69 if (hlist_unhashed(&tw->tw_node)) {
71 write_unlock(&ehead->lock); 70 write_unlock(&ehead->lock);
@@ -76,12 +75,12 @@ static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
76 write_unlock(&ehead->lock); 75 write_unlock(&ehead->lock);
77 76
78 /* Disassociate with bind bucket. */ 77 /* Disassociate with bind bucket. */
79 bhead = &tcp_bhash[inet_bhashfn(tw->tw_num, tcp_bhash_size)]; 78 bhead = &tcp_hashinfo.bhash[inet_bhashfn(tw->tw_num, tcp_hashinfo.bhash_size)];
80 spin_lock(&bhead->lock); 79 spin_lock(&bhead->lock);
81 tb = tw->tw_tb; 80 tb = tw->tw_tb;
82 __hlist_del(&tw->tw_bind_node); 81 __hlist_del(&tw->tw_bind_node);
83 tw->tw_tb = NULL; 82 tw->tw_tb = NULL;
84 inet_bind_bucket_destroy(tcp_bucket_cachep, tb); 83 inet_bind_bucket_destroy(tcp_hashinfo.bind_bucket_cachep, tb);
85 spin_unlock(&bhead->lock); 84 spin_unlock(&bhead->lock);
86 85
87#ifdef SOCK_REFCNT_DEBUG 86#ifdef SOCK_REFCNT_DEBUG
@@ -297,13 +296,13 @@ kill:
297static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw) 296static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
298{ 297{
299 const struct inet_sock *inet = inet_sk(sk); 298 const struct inet_sock *inet = inet_sk(sk);
300 struct inet_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent]; 299 struct inet_ehash_bucket *ehead = &tcp_hashinfo.ehash[sk->sk_hashent];
301 struct inet_bind_hashbucket *bhead; 300 struct inet_bind_hashbucket *bhead;
302 /* Step 1: Put TW into bind hash. Original socket stays there too. 301 /* Step 1: Put TW into bind hash. Original socket stays there too.
303 Note, that any socket with inet->num != 0 MUST be bound in 302 Note, that any socket with inet->num != 0 MUST be bound in
304 binding cache, even if it is closed. 303 binding cache, even if it is closed.
305 */ 304 */
306 bhead = &tcp_bhash[inet_bhashfn(inet->num, tcp_bhash_size)]; 305 bhead = &tcp_hashinfo.bhash[inet_bhashfn(inet->num, tcp_hashinfo.bhash_size)];
307 spin_lock(&bhead->lock); 306 spin_lock(&bhead->lock);
308 tw->tw_tb = inet->bind_hash; 307 tw->tw_tb = inet->bind_hash;
309 BUG_TRAP(inet->bind_hash); 308 BUG_TRAP(inet->bind_hash);
@@ -317,7 +316,7 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
317 sock_prot_dec_use(sk->sk_prot); 316 sock_prot_dec_use(sk->sk_prot);
318 317
319 /* Step 3: Hash TW into TIMEWAIT half of established hash table. */ 318 /* Step 3: Hash TW into TIMEWAIT half of established hash table. */
320 tw_add_node(tw, &(ehead + tcp_ehash_size)->chain); 319 tw_add_node(tw, &(ehead + tcp_hashinfo.ehash_size)->chain);
321 atomic_inc(&tw->tw_refcnt); 320 atomic_inc(&tw->tw_refcnt);
322 321
323 write_unlock(&ehead->lock); 322 write_unlock(&ehead->lock);