 include/net/inet_hashtables.h |   1 
 include/net/sock.h            |   2 
 include/net/tcp.h             |  26 
 net/ipv4/tcp.c                |  42 
 net/ipv4/tcp_diag.c           |   8 
 net/ipv4/tcp_ipv4.c           | 101 
 net/ipv4/tcp_minisocks.c      |  15 
 net/ipv6/tcp_ipv6.c           |  51 
 8 files changed, 118 insertions(+), 128 deletions(-)
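At a glance, this change completes the move of the old TCP hash-table globals into the shared tcp_hashinfo instance of struct inet_hashinfo: the compatibility macros (tcp_ehash, tcp_bhash, tcp_ehash_size, tcp_bhash_size, tcp_listening_hash, tcp_lhash_*, tcp_portalloc_lock, tcp_bucket_cachep) are deleted from include/net/tcp.h, every user is converted to reference the structure members directly, and the global tcp_port_rover becomes a new port_rover member. As a reader's aid, the sketch below lists only the inet_hashinfo members this diff touches; it is illustrative and is not the full declaration from include/net/inet_hashtables.h.

/* Reader's sketch, not the complete header: the inet_hashinfo members
 * referenced by this patch.  Field order and padding are not meant to
 * match the real declaration. */
struct inet_hashinfo {
	struct inet_ehash_bucket    *ehash;           /* established half + TIME_WAIT half */
	struct inet_bind_hashbucket *bhash;           /* bound-port buckets */
	int                         ehash_size;
	int                         bhash_size;
	struct hlist_head           listening_hash[INET_LHTABLE_SIZE];
	rwlock_t                    lhash_lock;       /* protects listening_hash */
	atomic_t                    lhash_users;
	wait_queue_head_t           lhash_wait;
	spinlock_t                  portalloc_lock;   /* guards port_rover */
	kmem_cache_t                *bind_bucket_cachep;
	int                         port_rover;       /* added by this patch */
};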
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index da9705525f15..da07411b36d2 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -117,6 +117,7 @@ struct inet_hashinfo {
 	wait_queue_head_t lhash_wait;
 	spinlock_t portalloc_lock;
 	kmem_cache_t *bind_bucket_cachep;
+	int port_rover;
 };
 
 static inline int inet_ehashfn(const __u32 laddr, const __u16 lport,
diff --git a/include/net/sock.h b/include/net/sock.h
index 69d869e41c35..391d00b5b7b4 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -136,7 +136,7 @@ struct sock_common {
  * @sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets
  * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
  * @sk_lingertime: %SO_LINGER l_linger setting
- * @sk_hashent: hash entry in several tables (e.g. tcp_ehash)
+ * @sk_hashent: hash entry in several tables (e.g. inet_hashinfo.ehash)
  * @sk_backlog: always used with the per-socket spinlock held
  * @sk_callback_lock: used with the callbacks in the end of this struct
  * @sk_error_queue: rarely used
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 9eb8ff7c911e..99e47695d4b6 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -41,19 +41,7 @@
 #endif
 #include <linux/seq_file.h>
 
 extern struct inet_hashinfo tcp_hashinfo;
-#define tcp_ehash (tcp_hashinfo.ehash)
-#define tcp_bhash (tcp_hashinfo.bhash)
-#define tcp_ehash_size (tcp_hashinfo.ehash_size)
-#define tcp_bhash_size (tcp_hashinfo.bhash_size)
-#define tcp_listening_hash (tcp_hashinfo.listening_hash)
-#define tcp_lhash_lock (tcp_hashinfo.lhash_lock)
-#define tcp_lhash_users (tcp_hashinfo.lhash_users)
-#define tcp_lhash_wait (tcp_hashinfo.lhash_wait)
-#define tcp_portalloc_lock (tcp_hashinfo.portalloc_lock)
-#define tcp_bucket_cachep (tcp_hashinfo.bind_bucket_cachep)
-
-extern int tcp_port_rover;
 
 #if (BITS_PER_LONG == 64)
 #define TCP_ADDRCMP_ALIGN_BYTES 8
@@ -1463,21 +1451,21 @@ extern void tcp_listen_wlock(void);
 
 /* - We may sleep inside this lock.
  * - If sleeping is not required (or called from BH),
- *   use plain read_(un)lock(&tcp_lhash_lock).
+ *   use plain read_(un)lock(&inet_hashinfo.lhash_lock).
  */
 
 static inline void tcp_listen_lock(void)
 {
 	/* read_lock synchronizes to candidates to writers */
-	read_lock(&tcp_lhash_lock);
-	atomic_inc(&tcp_lhash_users);
-	read_unlock(&tcp_lhash_lock);
+	read_lock(&tcp_hashinfo.lhash_lock);
+	atomic_inc(&tcp_hashinfo.lhash_users);
+	read_unlock(&tcp_hashinfo.lhash_lock);
 }
 
 static inline void tcp_listen_unlock(void)
 {
-	if (atomic_dec_and_test(&tcp_lhash_users))
-		wake_up(&tcp_lhash_wait);
+	if (atomic_dec_and_test(&tcp_hashinfo.lhash_users))
+		wake_up(&tcp_hashinfo.lhash_wait);
 }
 
 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 38c04c1a754c..2f4b1a374bb7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2257,11 +2257,11 @@ void __init tcp_init(void)
 	__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
 				   sizeof(skb->cb));
 
-	tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
-					      sizeof(struct inet_bind_bucket),
-					      0, SLAB_HWCACHE_ALIGN,
-					      NULL, NULL);
-	if (!tcp_bucket_cachep)
+	tcp_hashinfo.bind_bucket_cachep =
+		kmem_cache_create("tcp_bind_bucket",
+				  sizeof(struct inet_bind_bucket), 0,
+				  SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!tcp_hashinfo.bind_bucket_cachep)
 		panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
 
 	tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
@@ -2276,7 +2276,7 @@ void __init tcp_init(void)
 	 *
 	 * The methodology is similar to that of the buffer cache.
 	 */
-	tcp_ehash =
+	tcp_hashinfo.ehash =
 		alloc_large_system_hash("TCP established",
 					sizeof(struct inet_ehash_bucket),
 					thash_entries,
@@ -2284,37 +2284,37 @@ void __init tcp_init(void)
 					(25 - PAGE_SHIFT) :
 					(27 - PAGE_SHIFT),
 					HASH_HIGHMEM,
-					&tcp_ehash_size,
+					&tcp_hashinfo.ehash_size,
 					NULL,
 					0);
-	tcp_ehash_size = (1 << tcp_ehash_size) >> 1;
-	for (i = 0; i < (tcp_ehash_size << 1); i++) {
-		rwlock_init(&tcp_ehash[i].lock);
-		INIT_HLIST_HEAD(&tcp_ehash[i].chain);
+	tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
+	for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
+		rwlock_init(&tcp_hashinfo.ehash[i].lock);
+		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
 	}
 
-	tcp_bhash =
+	tcp_hashinfo.bhash =
 		alloc_large_system_hash("TCP bind",
 					sizeof(struct inet_bind_hashbucket),
-					tcp_ehash_size,
+					tcp_hashinfo.ehash_size,
 					(num_physpages >= 128 * 1024) ?
 					(25 - PAGE_SHIFT) :
 					(27 - PAGE_SHIFT),
 					HASH_HIGHMEM,
-					&tcp_bhash_size,
+					&tcp_hashinfo.bhash_size,
 					NULL,
 					64 * 1024);
-	tcp_bhash_size = 1 << tcp_bhash_size;
-	for (i = 0; i < tcp_bhash_size; i++) {
-		spin_lock_init(&tcp_bhash[i].lock);
-		INIT_HLIST_HEAD(&tcp_bhash[i].chain);
+	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
+	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
+		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
+		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
 	}
 
 	/* Try to be a bit smarter and adjust defaults depending
 	 * on available memory.
 	 */
 	for (order = 0; ((1 << order) << PAGE_SHIFT) <
-			(tcp_bhash_size * sizeof(struct inet_bind_hashbucket));
+			(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
 			order++)
 		;
 	if (order >= 4) {
@@ -2329,7 +2329,7 @@ void __init tcp_init(void)
 		sysctl_tcp_max_orphans >>= (3 - order);
 		sysctl_max_syn_backlog = 128;
 	}
-	tcp_port_rover = sysctl_local_port_range[0] - 1;
+	tcp_hashinfo.port_rover = sysctl_local_port_range[0] - 1;
 
 	sysctl_tcp_mem[0] = 768 << order;
 	sysctl_tcp_mem[1] = 1024 << order;
@@ -2344,7 +2344,7 @@ void __init tcp_init(void)
 
 	printk(KERN_INFO "TCP: Hash tables configured "
 	       "(established %d bind %d)\n",
-	       tcp_ehash_size << 1, tcp_bhash_size);
+	       tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
 
 	tcp_register_congestion_control(&tcp_reno);
 }
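A note on the sizing arithmetic in tcp_init() above, for readers skimming the hunk: alloc_large_system_hash() reports log2 of the allocated bucket count through the ehash_size / bhash_size pointers, so the values are converted before use. The worked example below is illustrative only; the real numbers depend on available memory and the thash_entries boot parameter.

/* Illustrative arithmetic, assuming alloc_large_system_hash() reported
 * log2(buckets) == 20 for the established table and 16 for the bind table:
 *
 *   ehash_size = (1 << 20) >> 1 = 524288    halved, because the table is
 *                                           split into an established half
 *                                           and a TIME_WAIT half (see the
 *                                           "head + ehash_size" users below)
 *   init loop covers ehash_size << 1 = 1048576 buckets, i.e. both halves
 *
 *   bhash_size = 1 << 16 = 65536            the bind table is not split
 */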
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 5bb6a0f1c77b..0ae738b455f0 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -595,7 +595,7 @@ static int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		struct hlist_node *node;
 
 		num = 0;
-		sk_for_each(sk, node, &tcp_listening_hash[i]) {
+		sk_for_each(sk, node, &tcp_hashinfo.listening_hash[i]) {
 			struct inet_sock *inet = inet_sk(sk);
 
 			if (num < s_num) {
@@ -645,8 +645,8 @@ skip_listen_ht:
 	if (!(r->tcpdiag_states&~(TCPF_LISTEN|TCPF_SYN_RECV)))
 		return skb->len;
 
-	for (i = s_i; i < tcp_ehash_size; i++) {
-		struct inet_ehash_bucket *head = &tcp_ehash[i];
+	for (i = s_i; i < tcp_hashinfo.ehash_size; i++) {
+		struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[i];
 		struct sock *sk;
 		struct hlist_node *node;
 
@@ -678,7 +678,7 @@ next_normal:
 
 		if (r->tcpdiag_states&TCPF_TIME_WAIT) {
 			sk_for_each(sk, node,
-				    &tcp_ehash[i + tcp_ehash_size].chain) {
+				    &tcp_hashinfo.ehash[i + tcp_hashinfo.ehash_size].chain) {
 				struct inet_sock *inet = inet_sk(sk);
 
 				if (num < s_num)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 40fe4f5fca1c..f5373f9f00ac 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -94,6 +94,7 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
 	.lhash_users = ATOMIC_INIT(0),
 	.lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
 	.portalloc_lock = SPIN_LOCK_UNLOCKED,
+	.port_rover = 1024 - 1,
 };
 
 /*
@@ -102,7 +103,6 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
  * 32768-61000
  */
 int sysctl_local_port_range[2] = { 1024, 4999 };
-int tcp_port_rover = 1024 - 1;
 
 static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
 {
@@ -146,16 +146,16 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 		int remaining = (high - low) + 1;
 		int rover;
 
-		spin_lock(&tcp_portalloc_lock);
-		if (tcp_port_rover < low)
+		spin_lock(&tcp_hashinfo.portalloc_lock);
+		if (tcp_hashinfo.port_rover < low)
 			rover = low;
 		else
-			rover = tcp_port_rover;
+			rover = tcp_hashinfo.port_rover;
 		do {
 			rover++;
 			if (rover > high)
 				rover = low;
-			head = &tcp_bhash[inet_bhashfn(rover, tcp_bhash_size)];
+			head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)];
 			spin_lock(&head->lock);
 			inet_bind_bucket_for_each(tb, node, &head->chain)
 				if (tb->port == rover)
@@ -164,8 +164,8 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 		next:
 			spin_unlock(&head->lock);
 		} while (--remaining > 0);
-		tcp_port_rover = rover;
-		spin_unlock(&tcp_portalloc_lock);
+		tcp_hashinfo.port_rover = rover;
+		spin_unlock(&tcp_hashinfo.portalloc_lock);
 
 		/* Exhausted local port range during search? It is not
 		 * possible for us to be holding one of the bind hash
@@ -182,7 +182,7 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 		 */
 		snum = rover;
 	} else {
-		head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
+		head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
 		spin_lock(&head->lock);
 		inet_bind_bucket_for_each(tb, node, &head->chain)
 			if (tb->port == snum)
@@ -205,7 +205,7 @@ tb_found:
 	}
 tb_not_found:
 	ret = 1;
-	if (!tb && (tb = inet_bind_bucket_create(tcp_bucket_cachep, head, snum)) == NULL)
+	if (!tb && (tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum)) == NULL)
 		goto fail_unlock;
 	if (hlist_empty(&tb->owners)) {
 		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
@@ -237,22 +237,22 @@ fail:
 
 void tcp_listen_wlock(void)
 {
-	write_lock(&tcp_lhash_lock);
+	write_lock(&tcp_hashinfo.lhash_lock);
 
-	if (atomic_read(&tcp_lhash_users)) {
+	if (atomic_read(&tcp_hashinfo.lhash_users)) {
 		DEFINE_WAIT(wait);
 
 		for (;;) {
-			prepare_to_wait_exclusive(&tcp_lhash_wait,
+			prepare_to_wait_exclusive(&tcp_hashinfo.lhash_wait,
 						  &wait, TASK_UNINTERRUPTIBLE);
-			if (!atomic_read(&tcp_lhash_users))
+			if (!atomic_read(&tcp_hashinfo.lhash_users))
 				break;
-			write_unlock_bh(&tcp_lhash_lock);
+			write_unlock_bh(&tcp_hashinfo.lhash_lock);
 			schedule();
-			write_lock_bh(&tcp_lhash_lock);
+			write_lock_bh(&tcp_hashinfo.lhash_lock);
 		}
 
-		finish_wait(&tcp_lhash_wait, &wait);
+		finish_wait(&tcp_hashinfo.lhash_wait, &wait);
 	}
 }
 
@@ -263,20 +263,20 @@ static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
 
 	BUG_TRAP(sk_unhashed(sk));
 	if (listen_possible && sk->sk_state == TCP_LISTEN) {
-		list = &tcp_listening_hash[inet_sk_listen_hashfn(sk)];
-		lock = &tcp_lhash_lock;
+		list = &tcp_hashinfo.listening_hash[inet_sk_listen_hashfn(sk)];
+		lock = &tcp_hashinfo.lhash_lock;
 		tcp_listen_wlock();
 	} else {
-		sk->sk_hashent = inet_sk_ehashfn(sk, tcp_ehash_size);
-		list = &tcp_ehash[sk->sk_hashent].chain;
-		lock = &tcp_ehash[sk->sk_hashent].lock;
+		sk->sk_hashent = inet_sk_ehashfn(sk, tcp_hashinfo.ehash_size);
+		list = &tcp_hashinfo.ehash[sk->sk_hashent].chain;
+		lock = &tcp_hashinfo.ehash[sk->sk_hashent].lock;
 		write_lock(lock);
 	}
 	__sk_add_node(sk, list);
 	sock_prot_inc_use(sk->sk_prot);
 	write_unlock(lock);
 	if (listen_possible && sk->sk_state == TCP_LISTEN)
-		wake_up(&tcp_lhash_wait);
+		wake_up(&tcp_hashinfo.lhash_wait);
 }
 
 static void tcp_v4_hash(struct sock *sk)
@@ -298,9 +298,9 @@ void tcp_unhash(struct sock *sk)
 	if (sk->sk_state == TCP_LISTEN) {
 		local_bh_disable();
 		tcp_listen_wlock();
-		lock = &tcp_lhash_lock;
+		lock = &tcp_hashinfo.lhash_lock;
 	} else {
-		struct inet_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
+		struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[sk->sk_hashent];
 		lock = &head->lock;
 		write_lock_bh(&head->lock);
 	}
@@ -311,7 +311,7 @@ void tcp_unhash(struct sock *sk)
 
 ende:
 	if (sk->sk_state == TCP_LISTEN)
-		wake_up(&tcp_lhash_wait);
+		wake_up(&tcp_hashinfo.lhash_wait);
 }
 
 /* Don't inline this cruft. Here are some nice properties to
@@ -366,8 +366,8 @@ static inline struct sock *tcp_v4_lookup_listener(const u32 daddr,
 	struct sock *sk = NULL;
 	struct hlist_head *head;
 
-	read_lock(&tcp_lhash_lock);
-	head = &tcp_listening_hash[inet_lhashfn(hnum)];
+	read_lock(&tcp_hashinfo.lhash_lock);
+	head = &tcp_hashinfo.listening_hash[inet_lhashfn(hnum)];
 	if (!hlist_empty(head)) {
 		struct inet_sock *inet = inet_sk((sk = __sk_head(head)));
 
@@ -382,7 +382,7 @@ static inline struct sock *tcp_v4_lookup_listener(const u32 daddr,
sherry_cache:
 		sock_hold(sk);
 	}
-	read_unlock(&tcp_lhash_lock);
+	read_unlock(&tcp_hashinfo.lhash_lock);
 	return sk;
 }
 
@@ -406,8 +406,8 @@ static inline struct sock *__tcp_v4_lookup_established(const u32 saddr,
 	/* Optimize here for direct hit, only listening connections can
 	 * have wildcards anyways.
 	 */
-	const int hash = inet_ehashfn(daddr, hnum, saddr, sport, tcp_ehash_size);
-	head = &tcp_ehash[hash];
+	const int hash = inet_ehashfn(daddr, hnum, saddr, sport, tcp_hashinfo.ehash_size);
+	head = &tcp_hashinfo.ehash[hash];
 	read_lock(&head->lock);
 	sk_for_each(sk, node, &head->chain) {
 		if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
@@ -415,7 +415,7 @@ static inline struct sock *__tcp_v4_lookup_established(const u32 saddr,
 	}
 
 	/* Must check for a TIME_WAIT'er before going to listener hash. */
-	sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
+	sk_for_each(sk, node, &(head + tcp_hashinfo.ehash_size)->chain) {
 		if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
 			goto hit;
 	}
@@ -469,8 +469,8 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
 	int dif = sk->sk_bound_dev_if;
 	TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
 	__u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
-	const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_ehash_size);
-	struct inet_ehash_bucket *head = &tcp_ehash[hash];
+	const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_hashinfo.ehash_size);
+	struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
 	struct sock *sk2;
 	struct hlist_node *node;
 	struct tcp_tw_bucket *tw;
@@ -478,7 +478,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
 	write_lock(&head->lock);
 
 	/* Check TIME-WAIT sockets first. */
-	sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
+	sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
 		tw = (struct tcp_tw_bucket *)sk2;
 
 		if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
@@ -582,7 +582,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
 		local_bh_disable();
 		for (i = 1; i <= range; i++) {
 			port = low + (i + offset) % range;
-			head = &tcp_bhash[inet_bhashfn(port, tcp_bhash_size)];
+			head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)];
 			spin_lock(&head->lock);
 
 			/* Does not bother with rcv_saddr checks,
@@ -602,7 +602,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
 				}
 			}
 
-			tb = inet_bind_bucket_create(tcp_bucket_cachep, head, port);
+			tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port);
 			if (!tb) {
 				spin_unlock(&head->lock);
 				break;
@@ -637,7 +637,7 @@ ok:
 		goto out;
 	}
 
-	head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
+	head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
 	tb = inet_sk(sk)->bind_hash;
 	spin_lock_bh(&head->lock);
 	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
@@ -1926,7 +1926,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
 
 	if (!sk) {
 		st->bucket = 0;
-		sk = sk_head(&tcp_listening_hash[0]);
+		sk = sk_head(&tcp_hashinfo.listening_hash[0]);
 		goto get_sk;
 	}
 
@@ -1980,7 +1980,7 @@ start_req:
 		read_unlock_bh(&tp->accept_queue.syn_wait_lock);
 	}
 	if (++st->bucket < INET_LHTABLE_SIZE) {
-		sk = sk_head(&tcp_listening_hash[st->bucket]);
+		sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
 		goto get_sk;
 	}
 	cur = NULL;
@@ -2004,7 +2004,7 @@ static void *established_get_first(struct seq_file *seq)
 	struct tcp_iter_state* st = seq->private;
 	void *rc = NULL;
 
-	for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) {
+	for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
 		struct sock *sk;
 		struct hlist_node *node;
 		struct tcp_tw_bucket *tw;
@@ -2012,8 +2012,8 @@ static void *established_get_first(struct seq_file *seq)
 		/* We can reschedule _before_ having picked the target: */
 		cond_resched_softirq();
 
-		read_lock(&tcp_ehash[st->bucket].lock);
-		sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
+		read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
+		sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
 			if (sk->sk_family != st->family) {
 				continue;
 			}
@@ -2022,14 +2022,14 @@ static void *established_get_first(struct seq_file *seq)
 		}
 		st->state = TCP_SEQ_STATE_TIME_WAIT;
 		tw_for_each(tw, node,
-			    &tcp_ehash[st->bucket + tcp_ehash_size].chain) {
+			    &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
 			if (tw->tw_family != st->family) {
 				continue;
 			}
 			rc = tw;
 			goto out;
 		}
-		read_unlock(&tcp_ehash[st->bucket].lock);
+		read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
 		st->state = TCP_SEQ_STATE_ESTABLISHED;
 	}
out:
@@ -2056,15 +2056,15 @@ get_tw:
 			cur = tw;
 			goto out;
 		}
-		read_unlock(&tcp_ehash[st->bucket].lock);
+		read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
 		st->state = TCP_SEQ_STATE_ESTABLISHED;
 
 		/* We can reschedule between buckets: */
 		cond_resched_softirq();
 
-		if (++st->bucket < tcp_ehash_size) {
-			read_lock(&tcp_ehash[st->bucket].lock);
-			sk = sk_head(&tcp_ehash[st->bucket].chain);
+		if (++st->bucket < tcp_hashinfo.ehash_size) {
+			read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
+			sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
 		} else {
 			cur = NULL;
 			goto out;
@@ -2078,7 +2078,7 @@ get_tw:
 	}
 
 	st->state = TCP_SEQ_STATE_TIME_WAIT;
-	tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain);
+	tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain);
 	goto get_tw;
found:
 	cur = sk;
@@ -2173,7 +2173,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
 	case TCP_SEQ_STATE_TIME_WAIT:
 	case TCP_SEQ_STATE_ESTABLISHED:
 		if (v)
-			read_unlock(&tcp_ehash[st->bucket].lock);
+			read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
 		local_bh_enable();
 		break;
 	}
@@ -2432,7 +2432,6 @@ EXPORT_SYMBOL(ipv4_specific);
 EXPORT_SYMBOL(inet_bind_bucket_create);
 EXPORT_SYMBOL(tcp_hashinfo);
 EXPORT_SYMBOL(tcp_listen_wlock);
-EXPORT_SYMBOL(tcp_port_rover);
 EXPORT_SYMBOL(tcp_prot);
 EXPORT_SYMBOL(tcp_unhash);
 EXPORT_SYMBOL(tcp_v4_conn_request);
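Both tcp_v4_get_port() above and tcp_v6_get_port() further below share the same rover-based search for a free local port; this patch only re-spells it in terms of tcp_hashinfo. The fragment below is a simplified, non-authoritative sketch of that search: port_is_usable() is a hypothetical stand-in for the real bind-bucket walk and conflict checks, and per-bucket locking and error paths are omitted.

/* Simplified sketch of the ephemeral-port search, using the names from the
 * patch.  port_is_usable() is hypothetical; the real code walks the bhash
 * chain for the candidate port and applies the bind-conflict rules. */
static int pick_local_port_sketch(void)
{
	int low = sysctl_local_port_range[0];
	int high = sysctl_local_port_range[1];
	int remaining = (high - low) + 1;
	int rover;

	spin_lock(&tcp_hashinfo.portalloc_lock);
	rover = (tcp_hashinfo.port_rover < low) ? low : tcp_hashinfo.port_rover;
	do {
		if (++rover > high)		/* wrap around within the range */
			rover = low;
		if (port_is_usable(rover))	/* hypothetical predicate */
			break;
	} while (--remaining > 0);
	tcp_hashinfo.port_rover = rover;	/* the next search resumes here */
	spin_unlock(&tcp_hashinfo.portalloc_lock);

	return remaining > 0 ? rover : -1;	/* -1: local port range exhausted */
}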
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 267cea1087e5..f29e2f6ebe1b 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -60,12 +60,11 @@ int tcp_tw_count;
 /* Must be called with locally disabled BHs. */
 static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
 {
-	struct inet_ehash_bucket *ehead;
 	struct inet_bind_hashbucket *bhead;
 	struct inet_bind_bucket *tb;
-
 	/* Unlink from established hashes. */
-	ehead = &tcp_ehash[tw->tw_hashent];
+	struct inet_ehash_bucket *ehead = &tcp_hashinfo.ehash[tw->tw_hashent];
+
 	write_lock(&ehead->lock);
 	if (hlist_unhashed(&tw->tw_node)) {
 		write_unlock(&ehead->lock);
@@ -76,12 +75,12 @@ static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
 	write_unlock(&ehead->lock);
 
 	/* Disassociate with bind bucket. */
-	bhead = &tcp_bhash[inet_bhashfn(tw->tw_num, tcp_bhash_size)];
+	bhead = &tcp_hashinfo.bhash[inet_bhashfn(tw->tw_num, tcp_hashinfo.bhash_size)];
 	spin_lock(&bhead->lock);
 	tb = tw->tw_tb;
 	__hlist_del(&tw->tw_bind_node);
 	tw->tw_tb = NULL;
-	inet_bind_bucket_destroy(tcp_bucket_cachep, tb);
+	inet_bind_bucket_destroy(tcp_hashinfo.bind_bucket_cachep, tb);
 	spin_unlock(&bhead->lock);
 
 #ifdef SOCK_REFCNT_DEBUG
@@ -297,13 +296,13 @@ kill:
 static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
 {
 	const struct inet_sock *inet = inet_sk(sk);
-	struct inet_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent];
+	struct inet_ehash_bucket *ehead = &tcp_hashinfo.ehash[sk->sk_hashent];
 	struct inet_bind_hashbucket *bhead;
 	/* Step 1: Put TW into bind hash. Original socket stays there too.
 	   Note, that any socket with inet->num != 0 MUST be bound in
 	   binding cache, even if it is closed.
 	 */
-	bhead = &tcp_bhash[inet_bhashfn(inet->num, tcp_bhash_size)];
+	bhead = &tcp_hashinfo.bhash[inet_bhashfn(inet->num, tcp_hashinfo.bhash_size)];
 	spin_lock(&bhead->lock);
 	tw->tw_tb = inet->bind_hash;
 	BUG_TRAP(inet->bind_hash);
@@ -317,7 +316,7 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
 	sock_prot_dec_use(sk->sk_prot);
 
 	/* Step 3: Hash TW into TIMEWAIT half of established hash table. */
-	tw_add_node(tw, &(ehead + tcp_ehash_size)->chain);
+	tw_add_node(tw, &(ehead + tcp_hashinfo.ehash_size)->chain);
 	atomic_inc(&tw->tw_refcnt);
 
 	write_unlock(&ehead->lock);
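One convention worth spelling out, since it recurs in the lookup routines above and in the hashdance in tcp_minisocks.c: the established table is addressed as two halves of a single allocation, so "head + tcp_hashinfo.ehash_size" (or "ehead + tcp_hashinfo.ehash_size") simply means "the TIME_WAIT bucket with the same hash index". The helper below is a hypothetical illustration of that addressing, not a function that exists in the tree.

/* Hypothetical helper, for illustration only: given the established-half
 * bucket index, return the TIME_WAIT bucket at the same index in the
 * second half of tcp_hashinfo.ehash. */
static inline struct inet_ehash_bucket *tw_half(struct inet_hashinfo *hinfo,
						unsigned int hash)
{
	return &hinfo->ehash[hash + hinfo->ehash_size];
}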
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index bfbedb56bce2..362ef5a64062 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -84,7 +84,7 @@ static __inline__ int tcp_v6_hashfn(struct in6_addr *laddr, u16 lport,
 	hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
 	hashent ^= hashent>>16;
 	hashent ^= hashent>>8;
-	return (hashent & (tcp_ehash_size - 1));
+	return (hashent & (tcp_hashinfo.ehash_size - 1));
 }
 
 static __inline__ int tcp_v6_sk_hashfn(struct sock *sk)
@@ -138,15 +138,15 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
 		int remaining = (high - low) + 1;
 		int rover;
 
-		spin_lock(&tcp_portalloc_lock);
-		if (tcp_port_rover < low)
+		spin_lock(&tcp_hashinfo.portalloc_lock);
+		if (tcp_hashinfo.port_rover < low)
 			rover = low;
 		else
-			rover = tcp_port_rover;
+			rover = tcp_hashinfo.port_rover;
 		do { rover++;
 			if (rover > high)
 				rover = low;
-			head = &tcp_bhash[inet_bhashfn(rover, tcp_bhash_size)];
+			head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)];
 			spin_lock(&head->lock);
 			inet_bind_bucket_for_each(tb, node, &head->chain)
 				if (tb->port == rover)
@@ -155,8 +155,8 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
 		next:
 			spin_unlock(&head->lock);
 		} while (--remaining > 0);
-		tcp_port_rover = rover;
-		spin_unlock(&tcp_portalloc_lock);
+		tcp_hashinfo.port_rover = rover;
+		spin_unlock(&tcp_hashinfo.portalloc_lock);
 
 		/* Exhausted local port range during search? It is not
 		 * possible for us to be holding one of the bind hash
@@ -171,7 +171,7 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
 		/* OK, here is the one we will use. */
 		snum = rover;
 	} else {
-		head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
+		head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
 		spin_lock(&head->lock);
 		inet_bind_bucket_for_each(tb, node, &head->chain)
 			if (tb->port == snum)
@@ -192,8 +192,11 @@ tb_found:
 	}
tb_not_found:
 	ret = 1;
-	if (!tb && (tb = inet_bind_bucket_create(tcp_bucket_cachep, head, snum)) == NULL)
-		goto fail_unlock;
+	if (tb == NULL) {
+		tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum);
+		if (tb == NULL)
+			goto fail_unlock;
+	}
 	if (hlist_empty(&tb->owners)) {
 		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
 			tb->fastreuse = 1;
@@ -224,13 +227,13 @@ static __inline__ void __tcp_v6_hash(struct sock *sk)
 	BUG_TRAP(sk_unhashed(sk));
 
 	if (sk->sk_state == TCP_LISTEN) {
-		list = &tcp_listening_hash[inet_sk_listen_hashfn(sk)];
-		lock = &tcp_lhash_lock;
+		list = &tcp_hashinfo.listening_hash[inet_sk_listen_hashfn(sk)];
+		lock = &tcp_hashinfo.lhash_lock;
 		tcp_listen_wlock();
 	} else {
 		sk->sk_hashent = tcp_v6_sk_hashfn(sk);
-		list = &tcp_ehash[sk->sk_hashent].chain;
-		lock = &tcp_ehash[sk->sk_hashent].lock;
+		list = &tcp_hashinfo.ehash[sk->sk_hashent].chain;
+		lock = &tcp_hashinfo.ehash[sk->sk_hashent].lock;
 		write_lock(lock);
 	}
 
@@ -263,8 +266,8 @@ static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned shor
 	int score, hiscore;
 
 	hiscore=0;
-	read_lock(&tcp_lhash_lock);
-	sk_for_each(sk, node, &tcp_listening_hash[inet_lhashfn(hnum)]) {
+	read_lock(&tcp_hashinfo.lhash_lock);
+	sk_for_each(sk, node, &tcp_hashinfo.listening_hash[inet_lhashfn(hnum)]) {
 		if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) {
 			struct ipv6_pinfo *np = inet6_sk(sk);
 
@@ -291,7 +294,7 @@ static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned shor
 	}
 	if (result)
 		sock_hold(result);
-	read_unlock(&tcp_lhash_lock);
+	read_unlock(&tcp_hashinfo.lhash_lock);
 	return result;
 }
 
@@ -315,7 +318,7 @@ static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u
 	 * have wildcards anyways.
 	 */
 	hash = tcp_v6_hashfn(daddr, hnum, saddr, sport);
-	head = &tcp_ehash[hash];
+	head = &tcp_hashinfo.ehash[hash];
 	read_lock(&head->lock);
 	sk_for_each(sk, node, &head->chain) {
 		/* For IPV6 do the cheaper port and family tests first. */
@@ -323,7 +326,7 @@ static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u
 			goto hit; /* You sunk my battleship! */
 	}
 	/* Must check for a TIME_WAIT'er before going to listener hash. */
-	sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
+	sk_for_each(sk, node, &(head + tcp_hashinfo.ehash_size)->chain) {
 		/* FIXME: acme: check this... */
 		struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
 
@@ -461,7 +464,7 @@ static int __tcp_v6_check_established(struct sock *sk, __u16 lport,
 	int dif = sk->sk_bound_dev_if;
 	u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
 	int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport);
-	struct inet_ehash_bucket *head = &tcp_ehash[hash];
+	struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
 	struct sock *sk2;
 	struct hlist_node *node;
 	struct tcp_tw_bucket *tw;
@@ -469,7 +472,7 @@ static int __tcp_v6_check_established(struct sock *sk, __u16 lport,
 	write_lock(&head->lock);
 
 	/* Check TIME-WAIT sockets first. */
-	sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
+	sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
 		tw = (struct tcp_tw_bucket*)sk2;
 
 		if(*((__u32 *)&(tw->tw_dport)) == ports &&
@@ -558,7 +561,7 @@ static int tcp_v6_hash_connect(struct sock *sk)
 	local_bh_disable();
 	for (i = 1; i <= range; i++) {
 		port = low + (i + offset) % range;
-		head = &tcp_bhash[inet_bhashfn(port, tcp_bhash_size)];
+		head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)];
 		spin_lock(&head->lock);
 
 		/* Does not bother with rcv_saddr checks,
@@ -578,7 +581,7 @@ static int tcp_v6_hash_connect(struct sock *sk)
 		}
 	}
 
-	tb = inet_bind_bucket_create(tcp_bucket_cachep, head, port);
+	tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port);
 	if (!tb) {
 		spin_unlock(&head->lock);
 		break;
@@ -613,7 +616,7 @@ ok:
 		goto out;
 	}
 
-	head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
+	head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
 	tb = inet_sk(sk)->bind_hash;
 	spin_lock_bh(&head->lock);
 
