diff options
author | Arnaldo Carvalho de Melo <acme@ghostprotocols.net> | 2005-08-09 22:59:44 -0400 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2005-08-29 18:38:32 -0400 |
commit | 0f7ff9274e72fd254fbd1ab117bbc1db6e7cdb34 (patch) | |
tree | 95736729a2f5302666604c4287a2af97ececd734 /net | |
parent | 304a16180fb6d2b153b45f6fbbcec1fa814496e5 (diff) |
[INET]: Just rename the TCP hashtable functions/structs to inet_
This is to break down the complexity of the series of patches,
making it very clear that this one just does:
1. renames tcp_ prefixed hashtable functions and data structures that
were already mostly generic to inet_ to share them with DCCP and
other INET transport protocols.
2. Removes unused functions (__tb_head & tb_head)
3. Removes some leftover prototypes in the headers (tcp_bucket_unlock &
tcp_v4_build_header)
Next changesets will move tcp_sk(sk)->bind_hash to inet_sock so that we can
make functions such as tcp_inherit_port, __tcp_inherit_port, tcp_v4_get_port,
__tcp_put_port, generic and get others like tcp_destroy_sock closer to generic
(tcp_orphan_count will go to sk->sk_prot to allow this).
Eventually most of these functions will be used passing the transport protocol
inet_hashinfo structure.
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r-- | net/ipv4/tcp.c | 15 | ||||
-rw-r--r-- | net/ipv4/tcp_diag.c | 4 | ||||
-rw-r--r-- | net/ipv4/tcp_ipv4.c | 106 | ||||
-rw-r--r-- | net/ipv4/tcp_minisocks.c | 16 | ||||
-rw-r--r-- | net/ipv6/tcp_ipv6.c | 42 |
5 files changed, 96 insertions, 87 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 20159a3dafb3..1ec03db7dcd9 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -272,6 +272,9 @@ int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT; | |||
272 | DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics); | 272 | DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics); |
273 | 273 | ||
274 | kmem_cache_t *tcp_bucket_cachep; | 274 | kmem_cache_t *tcp_bucket_cachep; |
275 | |||
276 | EXPORT_SYMBOL_GPL(tcp_bucket_cachep); | ||
277 | |||
275 | kmem_cache_t *tcp_timewait_cachep; | 278 | kmem_cache_t *tcp_timewait_cachep; |
276 | 279 | ||
277 | atomic_t tcp_orphan_count = ATOMIC_INIT(0); | 280 | atomic_t tcp_orphan_count = ATOMIC_INIT(0); |
@@ -2259,7 +2262,7 @@ void __init tcp_init(void) | |||
2259 | sizeof(skb->cb)); | 2262 | sizeof(skb->cb)); |
2260 | 2263 | ||
2261 | tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket", | 2264 | tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket", |
2262 | sizeof(struct tcp_bind_bucket), | 2265 | sizeof(struct inet_bind_bucket), |
2263 | 0, SLAB_HWCACHE_ALIGN, | 2266 | 0, SLAB_HWCACHE_ALIGN, |
2264 | NULL, NULL); | 2267 | NULL, NULL); |
2265 | if (!tcp_bucket_cachep) | 2268 | if (!tcp_bucket_cachep) |
@@ -2277,9 +2280,9 @@ void __init tcp_init(void) | |||
2277 | * | 2280 | * |
2278 | * The methodology is similar to that of the buffer cache. | 2281 | * The methodology is similar to that of the buffer cache. |
2279 | */ | 2282 | */ |
2280 | tcp_ehash = (struct tcp_ehash_bucket *) | 2283 | tcp_ehash = |
2281 | alloc_large_system_hash("TCP established", | 2284 | alloc_large_system_hash("TCP established", |
2282 | sizeof(struct tcp_ehash_bucket), | 2285 | sizeof(struct inet_ehash_bucket), |
2283 | thash_entries, | 2286 | thash_entries, |
2284 | (num_physpages >= 128 * 1024) ? | 2287 | (num_physpages >= 128 * 1024) ? |
2285 | (25 - PAGE_SHIFT) : | 2288 | (25 - PAGE_SHIFT) : |
@@ -2294,9 +2297,9 @@ void __init tcp_init(void) | |||
2294 | INIT_HLIST_HEAD(&tcp_ehash[i].chain); | 2297 | INIT_HLIST_HEAD(&tcp_ehash[i].chain); |
2295 | } | 2298 | } |
2296 | 2299 | ||
2297 | tcp_bhash = (struct tcp_bind_hashbucket *) | 2300 | tcp_bhash = |
2298 | alloc_large_system_hash("TCP bind", | 2301 | alloc_large_system_hash("TCP bind", |
2299 | sizeof(struct tcp_bind_hashbucket), | 2302 | sizeof(struct inet_bind_hashbucket), |
2300 | tcp_ehash_size, | 2303 | tcp_ehash_size, |
2301 | (num_physpages >= 128 * 1024) ? | 2304 | (num_physpages >= 128 * 1024) ? |
2302 | (25 - PAGE_SHIFT) : | 2305 | (25 - PAGE_SHIFT) : |
@@ -2315,7 +2318,7 @@ void __init tcp_init(void) | |||
2315 | * on available memory. | 2318 | * on available memory. |
2316 | */ | 2319 | */ |
2317 | for (order = 0; ((1 << order) << PAGE_SHIFT) < | 2320 | for (order = 0; ((1 << order) << PAGE_SHIFT) < |
2318 | (tcp_bhash_size * sizeof(struct tcp_bind_hashbucket)); | 2321 | (tcp_bhash_size * sizeof(struct inet_bind_hashbucket)); |
2319 | order++) | 2322 | order++) |
2320 | ; | 2323 | ; |
2321 | if (order >= 4) { | 2324 | if (order >= 4) { |
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c index f79bd11a4701..5bb6a0f1c77b 100644 --- a/net/ipv4/tcp_diag.c +++ b/net/ipv4/tcp_diag.c | |||
@@ -590,7 +590,7 @@ static int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
590 | if (!(r->tcpdiag_states&(TCPF_LISTEN|TCPF_SYN_RECV))) | 590 | if (!(r->tcpdiag_states&(TCPF_LISTEN|TCPF_SYN_RECV))) |
591 | goto skip_listen_ht; | 591 | goto skip_listen_ht; |
592 | tcp_listen_lock(); | 592 | tcp_listen_lock(); |
593 | for (i = s_i; i < TCP_LHTABLE_SIZE; i++) { | 593 | for (i = s_i; i < INET_LHTABLE_SIZE; i++) { |
594 | struct sock *sk; | 594 | struct sock *sk; |
595 | struct hlist_node *node; | 595 | struct hlist_node *node; |
596 | 596 | ||
@@ -646,7 +646,7 @@ skip_listen_ht: | |||
646 | return skb->len; | 646 | return skb->len; |
647 | 647 | ||
648 | for (i = s_i; i < tcp_ehash_size; i++) { | 648 | for (i = s_i; i < tcp_ehash_size; i++) { |
649 | struct tcp_ehash_bucket *head = &tcp_ehash[i]; | 649 | struct inet_ehash_bucket *head = &tcp_ehash[i]; |
650 | struct sock *sk; | 650 | struct sock *sk; |
651 | struct hlist_node *node; | 651 | struct hlist_node *node; |
652 | 652 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index c03d7e9688c8..4138630556e3 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -89,12 +89,11 @@ static struct socket *tcp_socket; | |||
89 | void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len, | 89 | void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len, |
90 | struct sk_buff *skb); | 90 | struct sk_buff *skb); |
91 | 91 | ||
92 | struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = { | 92 | struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { |
93 | .__tcp_lhash_lock = RW_LOCK_UNLOCKED, | 93 | .lhash_lock = RW_LOCK_UNLOCKED, |
94 | .__tcp_lhash_users = ATOMIC_INIT(0), | 94 | .lhash_users = ATOMIC_INIT(0), |
95 | .__tcp_lhash_wait | 95 | .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait), |
96 | = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait), | 96 | .portalloc_lock = SPIN_LOCK_UNLOCKED, |
97 | .__tcp_portalloc_lock = SPIN_LOCK_UNLOCKED | ||
98 | }; | 97 | }; |
99 | 98 | ||
100 | /* | 99 | /* |
@@ -105,14 +104,14 @@ struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = { | |||
105 | int sysctl_local_port_range[2] = { 1024, 4999 }; | 104 | int sysctl_local_port_range[2] = { 1024, 4999 }; |
106 | int tcp_port_rover = 1024 - 1; | 105 | int tcp_port_rover = 1024 - 1; |
107 | 106 | ||
108 | /* Allocate and initialize a new TCP local port bind bucket. | 107 | /* Allocate and initialize a new local port bind bucket. |
109 | * The bindhash mutex for snum's hash chain must be held here. | 108 | * The bindhash mutex for snum's hash chain must be held here. |
110 | */ | 109 | */ |
111 | struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head, | 110 | struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep, |
112 | unsigned short snum) | 111 | struct inet_bind_hashbucket *head, |
112 | const unsigned short snum) | ||
113 | { | 113 | { |
114 | struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep, | 114 | struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, SLAB_ATOMIC); |
115 | SLAB_ATOMIC); | ||
116 | if (tb) { | 115 | if (tb) { |
117 | tb->port = snum; | 116 | tb->port = snum; |
118 | tb->fastreuse = 0; | 117 | tb->fastreuse = 0; |
@@ -123,20 +122,21 @@ struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head, | |||
123 | } | 122 | } |
124 | 123 | ||
125 | /* Caller must hold hashbucket lock for this tb with local BH disabled */ | 124 | /* Caller must hold hashbucket lock for this tb with local BH disabled */ |
126 | void tcp_bucket_destroy(struct tcp_bind_bucket *tb) | 125 | void inet_bind_bucket_destroy(kmem_cache_t *cachep, struct inet_bind_bucket *tb) |
127 | { | 126 | { |
128 | if (hlist_empty(&tb->owners)) { | 127 | if (hlist_empty(&tb->owners)) { |
129 | __hlist_del(&tb->node); | 128 | __hlist_del(&tb->node); |
130 | kmem_cache_free(tcp_bucket_cachep, tb); | 129 | kmem_cache_free(cachep, tb); |
131 | } | 130 | } |
132 | } | 131 | } |
133 | 132 | ||
134 | /* Caller must disable local BH processing. */ | 133 | /* Caller must disable local BH processing. */ |
135 | static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child) | 134 | static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child) |
136 | { | 135 | { |
137 | struct tcp_bind_hashbucket *head = | 136 | struct inet_bind_hashbucket *head = |
138 | &tcp_bhash[tcp_bhashfn(inet_sk(child)->num)]; | 137 | &tcp_bhash[inet_bhashfn(inet_sk(child)->num, |
139 | struct tcp_bind_bucket *tb; | 138 | tcp_bhash_size)]; |
139 | struct inet_bind_bucket *tb; | ||
140 | 140 | ||
141 | spin_lock(&head->lock); | 141 | spin_lock(&head->lock); |
142 | tb = tcp_sk(sk)->bind_hash; | 142 | tb = tcp_sk(sk)->bind_hash; |
@@ -152,15 +152,15 @@ inline void tcp_inherit_port(struct sock *sk, struct sock *child) | |||
152 | local_bh_enable(); | 152 | local_bh_enable(); |
153 | } | 153 | } |
154 | 154 | ||
155 | void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb, | 155 | void tcp_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, |
156 | unsigned short snum) | 156 | const unsigned short snum) |
157 | { | 157 | { |
158 | inet_sk(sk)->num = snum; | 158 | inet_sk(sk)->num = snum; |
159 | sk_add_bind_node(sk, &tb->owners); | 159 | sk_add_bind_node(sk, &tb->owners); |
160 | tcp_sk(sk)->bind_hash = tb; | 160 | tcp_sk(sk)->bind_hash = tb; |
161 | } | 161 | } |
162 | 162 | ||
163 | static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb) | 163 | static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb) |
164 | { | 164 | { |
165 | const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk); | 165 | const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk); |
166 | struct sock *sk2; | 166 | struct sock *sk2; |
@@ -190,9 +190,9 @@ static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb) | |||
190 | */ | 190 | */ |
191 | static int tcp_v4_get_port(struct sock *sk, unsigned short snum) | 191 | static int tcp_v4_get_port(struct sock *sk, unsigned short snum) |
192 | { | 192 | { |
193 | struct tcp_bind_hashbucket *head; | 193 | struct inet_bind_hashbucket *head; |
194 | struct hlist_node *node; | 194 | struct hlist_node *node; |
195 | struct tcp_bind_bucket *tb; | 195 | struct inet_bind_bucket *tb; |
196 | int ret; | 196 | int ret; |
197 | 197 | ||
198 | local_bh_disable(); | 198 | local_bh_disable(); |
@@ -211,9 +211,9 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum) | |||
211 | rover++; | 211 | rover++; |
212 | if (rover > high) | 212 | if (rover > high) |
213 | rover = low; | 213 | rover = low; |
214 | head = &tcp_bhash[tcp_bhashfn(rover)]; | 214 | head = &tcp_bhash[inet_bhashfn(rover, tcp_bhash_size)]; |
215 | spin_lock(&head->lock); | 215 | spin_lock(&head->lock); |
216 | tb_for_each(tb, node, &head->chain) | 216 | inet_bind_bucket_for_each(tb, node, &head->chain) |
217 | if (tb->port == rover) | 217 | if (tb->port == rover) |
218 | goto next; | 218 | goto next; |
219 | break; | 219 | break; |
@@ -238,9 +238,9 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum) | |||
238 | */ | 238 | */ |
239 | snum = rover; | 239 | snum = rover; |
240 | } else { | 240 | } else { |
241 | head = &tcp_bhash[tcp_bhashfn(snum)]; | 241 | head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)]; |
242 | spin_lock(&head->lock); | 242 | spin_lock(&head->lock); |
243 | tb_for_each(tb, node, &head->chain) | 243 | inet_bind_bucket_for_each(tb, node, &head->chain) |
244 | if (tb->port == snum) | 244 | if (tb->port == snum) |
245 | goto tb_found; | 245 | goto tb_found; |
246 | } | 246 | } |
@@ -261,7 +261,7 @@ tb_found: | |||
261 | } | 261 | } |
262 | tb_not_found: | 262 | tb_not_found: |
263 | ret = 1; | 263 | ret = 1; |
264 | if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL) | 264 | if (!tb && (tb = inet_bind_bucket_create(tcp_bucket_cachep, head, snum)) == NULL) |
265 | goto fail_unlock; | 265 | goto fail_unlock; |
266 | if (hlist_empty(&tb->owners)) { | 266 | if (hlist_empty(&tb->owners)) { |
267 | if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) | 267 | if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) |
@@ -290,15 +290,16 @@ fail: | |||
290 | static void __tcp_put_port(struct sock *sk) | 290 | static void __tcp_put_port(struct sock *sk) |
291 | { | 291 | { |
292 | struct inet_sock *inet = inet_sk(sk); | 292 | struct inet_sock *inet = inet_sk(sk); |
293 | struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)]; | 293 | struct inet_bind_hashbucket *head = &tcp_bhash[inet_bhashfn(inet->num, |
294 | struct tcp_bind_bucket *tb; | 294 | tcp_bhash_size)]; |
295 | struct inet_bind_bucket *tb; | ||
295 | 296 | ||
296 | spin_lock(&head->lock); | 297 | spin_lock(&head->lock); |
297 | tb = tcp_sk(sk)->bind_hash; | 298 | tb = tcp_sk(sk)->bind_hash; |
298 | __sk_del_bind_node(sk); | 299 | __sk_del_bind_node(sk); |
299 | tcp_sk(sk)->bind_hash = NULL; | 300 | tcp_sk(sk)->bind_hash = NULL; |
300 | inet->num = 0; | 301 | inet->num = 0; |
301 | tcp_bucket_destroy(tb); | 302 | inet_bind_bucket_destroy(tcp_bucket_cachep, tb); |
302 | spin_unlock(&head->lock); | 303 | spin_unlock(&head->lock); |
303 | } | 304 | } |
304 | 305 | ||
@@ -344,7 +345,7 @@ static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible) | |||
344 | 345 | ||
345 | BUG_TRAP(sk_unhashed(sk)); | 346 | BUG_TRAP(sk_unhashed(sk)); |
346 | if (listen_possible && sk->sk_state == TCP_LISTEN) { | 347 | if (listen_possible && sk->sk_state == TCP_LISTEN) { |
347 | list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)]; | 348 | list = &tcp_listening_hash[inet_sk_listen_hashfn(sk)]; |
348 | lock = &tcp_lhash_lock; | 349 | lock = &tcp_lhash_lock; |
349 | tcp_listen_wlock(); | 350 | tcp_listen_wlock(); |
350 | } else { | 351 | } else { |
@@ -381,7 +382,7 @@ void tcp_unhash(struct sock *sk) | |||
381 | tcp_listen_wlock(); | 382 | tcp_listen_wlock(); |
382 | lock = &tcp_lhash_lock; | 383 | lock = &tcp_lhash_lock; |
383 | } else { | 384 | } else { |
384 | struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent]; | 385 | struct inet_ehash_bucket *head = &tcp_ehash[sk->sk_hashent]; |
385 | lock = &head->lock; | 386 | lock = &head->lock; |
386 | write_lock_bh(&head->lock); | 387 | write_lock_bh(&head->lock); |
387 | } | 388 | } |
@@ -401,8 +402,10 @@ void tcp_unhash(struct sock *sk) | |||
401 | * connection. So always assume those are both wildcarded | 402 | * connection. So always assume those are both wildcarded |
402 | * during the search since they can never be otherwise. | 403 | * during the search since they can never be otherwise. |
403 | */ | 404 | */ |
404 | static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr, | 405 | static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, |
405 | unsigned short hnum, int dif) | 406 | const u32 daddr, |
407 | const unsigned short hnum, | ||
408 | const int dif) | ||
406 | { | 409 | { |
407 | struct sock *result = NULL, *sk; | 410 | struct sock *result = NULL, *sk; |
408 | struct hlist_node *node; | 411 | struct hlist_node *node; |
@@ -438,14 +441,15 @@ static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr, | |||
438 | } | 441 | } |
439 | 442 | ||
440 | /* Optimize the common listener case. */ | 443 | /* Optimize the common listener case. */ |
441 | static inline struct sock *tcp_v4_lookup_listener(u32 daddr, | 444 | static inline struct sock *tcp_v4_lookup_listener(const u32 daddr, |
442 | unsigned short hnum, int dif) | 445 | const unsigned short hnum, |
446 | const int dif) | ||
443 | { | 447 | { |
444 | struct sock *sk = NULL; | 448 | struct sock *sk = NULL; |
445 | struct hlist_head *head; | 449 | struct hlist_head *head; |
446 | 450 | ||
447 | read_lock(&tcp_lhash_lock); | 451 | read_lock(&tcp_lhash_lock); |
448 | head = &tcp_listening_hash[tcp_lhashfn(hnum)]; | 452 | head = &tcp_listening_hash[inet_lhashfn(hnum)]; |
449 | if (!hlist_empty(head)) { | 453 | if (!hlist_empty(head)) { |
450 | struct inet_sock *inet = inet_sk((sk = __sk_head(head))); | 454 | struct inet_sock *inet = inet_sk((sk = __sk_head(head))); |
451 | 455 | ||
@@ -470,11 +474,13 @@ sherry_cache: | |||
470 | * Local BH must be disabled here. | 474 | * Local BH must be disabled here. |
471 | */ | 475 | */ |
472 | 476 | ||
473 | static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport, | 477 | static inline struct sock *__tcp_v4_lookup_established(const u32 saddr, |
474 | u32 daddr, u16 hnum, | 478 | const u16 sport, |
475 | int dif) | 479 | const u32 daddr, |
480 | const u16 hnum, | ||
481 | const int dif) | ||
476 | { | 482 | { |
477 | struct tcp_ehash_bucket *head; | 483 | struct inet_ehash_bucket *head; |
478 | TCP_V4_ADDR_COOKIE(acookie, saddr, daddr) | 484 | TCP_V4_ADDR_COOKIE(acookie, saddr, daddr) |
479 | __u32 ports = TCP_COMBINED_PORTS(sport, hnum); | 485 | __u32 ports = TCP_COMBINED_PORTS(sport, hnum); |
480 | struct sock *sk; | 486 | struct sock *sk; |
@@ -546,7 +552,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport, | |||
546 | TCP_V4_ADDR_COOKIE(acookie, saddr, daddr) | 552 | TCP_V4_ADDR_COOKIE(acookie, saddr, daddr) |
547 | __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport); | 553 | __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport); |
548 | const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_ehash_size); | 554 | const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_ehash_size); |
549 | struct tcp_ehash_bucket *head = &tcp_ehash[hash]; | 555 | struct inet_ehash_bucket *head = &tcp_ehash[hash]; |
550 | struct sock *sk2; | 556 | struct sock *sk2; |
551 | struct hlist_node *node; | 557 | struct hlist_node *node; |
552 | struct tcp_tw_bucket *tw; | 558 | struct tcp_tw_bucket *tw; |
@@ -639,9 +645,9 @@ static inline u32 connect_port_offset(const struct sock *sk) | |||
639 | */ | 645 | */ |
640 | static inline int tcp_v4_hash_connect(struct sock *sk) | 646 | static inline int tcp_v4_hash_connect(struct sock *sk) |
641 | { | 647 | { |
642 | unsigned short snum = inet_sk(sk)->num; | 648 | const unsigned short snum = inet_sk(sk)->num; |
643 | struct tcp_bind_hashbucket *head; | 649 | struct inet_bind_hashbucket *head; |
644 | struct tcp_bind_bucket *tb; | 650 | struct inet_bind_bucket *tb; |
645 | int ret; | 651 | int ret; |
646 | 652 | ||
647 | if (!snum) { | 653 | if (!snum) { |
@@ -658,14 +664,14 @@ static inline int tcp_v4_hash_connect(struct sock *sk) | |||
658 | local_bh_disable(); | 664 | local_bh_disable(); |
659 | for (i = 1; i <= range; i++) { | 665 | for (i = 1; i <= range; i++) { |
660 | port = low + (i + offset) % range; | 666 | port = low + (i + offset) % range; |
661 | head = &tcp_bhash[tcp_bhashfn(port)]; | 667 | head = &tcp_bhash[inet_bhashfn(port, tcp_bhash_size)]; |
662 | spin_lock(&head->lock); | 668 | spin_lock(&head->lock); |
663 | 669 | ||
664 | /* Does not bother with rcv_saddr checks, | 670 | /* Does not bother with rcv_saddr checks, |
665 | * because the established check is already | 671 | * because the established check is already |
666 | * unique enough. | 672 | * unique enough. |
667 | */ | 673 | */ |
668 | tb_for_each(tb, node, &head->chain) { | 674 | inet_bind_bucket_for_each(tb, node, &head->chain) { |
669 | if (tb->port == port) { | 675 | if (tb->port == port) { |
670 | BUG_TRAP(!hlist_empty(&tb->owners)); | 676 | BUG_TRAP(!hlist_empty(&tb->owners)); |
671 | if (tb->fastreuse >= 0) | 677 | if (tb->fastreuse >= 0) |
@@ -678,7 +684,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk) | |||
678 | } | 684 | } |
679 | } | 685 | } |
680 | 686 | ||
681 | tb = tcp_bucket_create(head, port); | 687 | tb = inet_bind_bucket_create(tcp_bucket_cachep, head, port); |
682 | if (!tb) { | 688 | if (!tb) { |
683 | spin_unlock(&head->lock); | 689 | spin_unlock(&head->lock); |
684 | break; | 690 | break; |
@@ -713,7 +719,7 @@ ok: | |||
713 | goto out; | 719 | goto out; |
714 | } | 720 | } |
715 | 721 | ||
716 | head = &tcp_bhash[tcp_bhashfn(snum)]; | 722 | head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)]; |
717 | tb = tcp_sk(sk)->bind_hash; | 723 | tb = tcp_sk(sk)->bind_hash; |
718 | spin_lock_bh(&head->lock); | 724 | spin_lock_bh(&head->lock); |
719 | if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { | 725 | if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { |
@@ -2055,7 +2061,7 @@ start_req: | |||
2055 | } | 2061 | } |
2056 | read_unlock_bh(&tp->accept_queue.syn_wait_lock); | 2062 | read_unlock_bh(&tp->accept_queue.syn_wait_lock); |
2057 | } | 2063 | } |
2058 | if (++st->bucket < TCP_LHTABLE_SIZE) { | 2064 | if (++st->bucket < INET_LHTABLE_SIZE) { |
2059 | sk = sk_head(&tcp_listening_hash[st->bucket]); | 2065 | sk = sk_head(&tcp_listening_hash[st->bucket]); |
2060 | goto get_sk; | 2066 | goto get_sk; |
2061 | } | 2067 | } |
@@ -2506,7 +2512,7 @@ void __init tcp_v4_init(struct net_proto_family *ops) | |||
2506 | 2512 | ||
2507 | EXPORT_SYMBOL(ipv4_specific); | 2513 | EXPORT_SYMBOL(ipv4_specific); |
2508 | EXPORT_SYMBOL(tcp_bind_hash); | 2514 | EXPORT_SYMBOL(tcp_bind_hash); |
2509 | EXPORT_SYMBOL(tcp_bucket_create); | 2515 | EXPORT_SYMBOL(inet_bind_bucket_create); |
2510 | EXPORT_SYMBOL(tcp_hashinfo); | 2516 | EXPORT_SYMBOL(tcp_hashinfo); |
2511 | EXPORT_SYMBOL(tcp_inherit_port); | 2517 | EXPORT_SYMBOL(tcp_inherit_port); |
2512 | EXPORT_SYMBOL(tcp_listen_wlock); | 2518 | EXPORT_SYMBOL(tcp_listen_wlock); |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 7c46a553c4af..1df6cd46066b 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -60,9 +60,9 @@ int tcp_tw_count; | |||
60 | /* Must be called with locally disabled BHs. */ | 60 | /* Must be called with locally disabled BHs. */ |
61 | static void tcp_timewait_kill(struct tcp_tw_bucket *tw) | 61 | static void tcp_timewait_kill(struct tcp_tw_bucket *tw) |
62 | { | 62 | { |
63 | struct tcp_ehash_bucket *ehead; | 63 | struct inet_ehash_bucket *ehead; |
64 | struct tcp_bind_hashbucket *bhead; | 64 | struct inet_bind_hashbucket *bhead; |
65 | struct tcp_bind_bucket *tb; | 65 | struct inet_bind_bucket *tb; |
66 | 66 | ||
67 | /* Unlink from established hashes. */ | 67 | /* Unlink from established hashes. */ |
68 | ehead = &tcp_ehash[tw->tw_hashent]; | 68 | ehead = &tcp_ehash[tw->tw_hashent]; |
@@ -76,12 +76,12 @@ static void tcp_timewait_kill(struct tcp_tw_bucket *tw) | |||
76 | write_unlock(&ehead->lock); | 76 | write_unlock(&ehead->lock); |
77 | 77 | ||
78 | /* Disassociate with bind bucket. */ | 78 | /* Disassociate with bind bucket. */ |
79 | bhead = &tcp_bhash[tcp_bhashfn(tw->tw_num)]; | 79 | bhead = &tcp_bhash[inet_bhashfn(tw->tw_num, tcp_bhash_size)]; |
80 | spin_lock(&bhead->lock); | 80 | spin_lock(&bhead->lock); |
81 | tb = tw->tw_tb; | 81 | tb = tw->tw_tb; |
82 | __hlist_del(&tw->tw_bind_node); | 82 | __hlist_del(&tw->tw_bind_node); |
83 | tw->tw_tb = NULL; | 83 | tw->tw_tb = NULL; |
84 | tcp_bucket_destroy(tb); | 84 | inet_bind_bucket_destroy(tcp_bucket_cachep, tb); |
85 | spin_unlock(&bhead->lock); | 85 | spin_unlock(&bhead->lock); |
86 | 86 | ||
87 | #ifdef SOCK_REFCNT_DEBUG | 87 | #ifdef SOCK_REFCNT_DEBUG |
@@ -296,14 +296,14 @@ kill: | |||
296 | */ | 296 | */ |
297 | static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw) | 297 | static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw) |
298 | { | 298 | { |
299 | struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent]; | 299 | struct inet_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent]; |
300 | struct tcp_bind_hashbucket *bhead; | 300 | struct inet_bind_hashbucket *bhead; |
301 | 301 | ||
302 | /* Step 1: Put TW into bind hash. Original socket stays there too. | 302 | /* Step 1: Put TW into bind hash. Original socket stays there too. |
303 | Note, that any socket with inet_sk(sk)->num != 0 MUST be bound in | 303 | Note, that any socket with inet_sk(sk)->num != 0 MUST be bound in |
304 | binding cache, even if it is closed. | 304 | binding cache, even if it is closed. |
305 | */ | 305 | */ |
306 | bhead = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)]; | 306 | bhead = &tcp_bhash[inet_bhashfn(inet_sk(sk)->num, tcp_bhash_size)]; |
307 | spin_lock(&bhead->lock); | 307 | spin_lock(&bhead->lock); |
308 | tw->tw_tb = tcp_sk(sk)->bind_hash; | 308 | tw->tw_tb = tcp_sk(sk)->bind_hash; |
309 | BUG_TRAP(tcp_sk(sk)->bind_hash); | 309 | BUG_TRAP(tcp_sk(sk)->bind_hash); |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 4e32a8496be3..31f50fb29ffb 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -98,11 +98,11 @@ static __inline__ int tcp_v6_sk_hashfn(struct sock *sk) | |||
98 | return tcp_v6_hashfn(laddr, lport, faddr, fport); | 98 | return tcp_v6_hashfn(laddr, lport, faddr, fport); |
99 | } | 99 | } |
100 | 100 | ||
101 | static inline int tcp_v6_bind_conflict(struct sock *sk, | 101 | static inline int tcp_v6_bind_conflict(const struct sock *sk, |
102 | struct tcp_bind_bucket *tb) | 102 | const struct inet_bind_bucket *tb) |
103 | { | 103 | { |
104 | struct sock *sk2; | 104 | const struct sock *sk2; |
105 | struct hlist_node *node; | 105 | const struct hlist_node *node; |
106 | 106 | ||
107 | /* We must walk the whole port owner list in this case. -DaveM */ | 107 | /* We must walk the whole port owner list in this case. -DaveM */ |
108 | sk_for_each_bound(sk2, node, &tb->owners) { | 108 | sk_for_each_bound(sk2, node, &tb->owners) { |
@@ -126,8 +126,8 @@ static inline int tcp_v6_bind_conflict(struct sock *sk, | |||
126 | */ | 126 | */ |
127 | static int tcp_v6_get_port(struct sock *sk, unsigned short snum) | 127 | static int tcp_v6_get_port(struct sock *sk, unsigned short snum) |
128 | { | 128 | { |
129 | struct tcp_bind_hashbucket *head; | 129 | struct inet_bind_hashbucket *head; |
130 | struct tcp_bind_bucket *tb; | 130 | struct inet_bind_bucket *tb; |
131 | struct hlist_node *node; | 131 | struct hlist_node *node; |
132 | int ret; | 132 | int ret; |
133 | 133 | ||
@@ -146,9 +146,9 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum) | |||
146 | do { rover++; | 146 | do { rover++; |
147 | if (rover > high) | 147 | if (rover > high) |
148 | rover = low; | 148 | rover = low; |
149 | head = &tcp_bhash[tcp_bhashfn(rover)]; | 149 | head = &tcp_bhash[inet_bhashfn(rover, tcp_bhash_size)]; |
150 | spin_lock(&head->lock); | 150 | spin_lock(&head->lock); |
151 | tb_for_each(tb, node, &head->chain) | 151 | inet_bind_bucket_for_each(tb, node, &head->chain) |
152 | if (tb->port == rover) | 152 | if (tb->port == rover) |
153 | goto next; | 153 | goto next; |
154 | break; | 154 | break; |
@@ -171,9 +171,9 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum) | |||
171 | /* OK, here is the one we will use. */ | 171 | /* OK, here is the one we will use. */ |
172 | snum = rover; | 172 | snum = rover; |
173 | } else { | 173 | } else { |
174 | head = &tcp_bhash[tcp_bhashfn(snum)]; | 174 | head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)]; |
175 | spin_lock(&head->lock); | 175 | spin_lock(&head->lock); |
176 | tb_for_each(tb, node, &head->chain) | 176 | inet_bind_bucket_for_each(tb, node, &head->chain) |
177 | if (tb->port == snum) | 177 | if (tb->port == snum) |
178 | goto tb_found; | 178 | goto tb_found; |
179 | } | 179 | } |
@@ -192,7 +192,7 @@ tb_found: | |||
192 | } | 192 | } |
193 | tb_not_found: | 193 | tb_not_found: |
194 | ret = 1; | 194 | ret = 1; |
195 | if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL) | 195 | if (!tb && (tb = inet_bind_bucket_create(tcp_bucket_cachep, head, snum)) == NULL) |
196 | goto fail_unlock; | 196 | goto fail_unlock; |
197 | if (hlist_empty(&tb->owners)) { | 197 | if (hlist_empty(&tb->owners)) { |
198 | if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) | 198 | if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) |
@@ -224,7 +224,7 @@ static __inline__ void __tcp_v6_hash(struct sock *sk) | |||
224 | BUG_TRAP(sk_unhashed(sk)); | 224 | BUG_TRAP(sk_unhashed(sk)); |
225 | 225 | ||
226 | if (sk->sk_state == TCP_LISTEN) { | 226 | if (sk->sk_state == TCP_LISTEN) { |
227 | list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)]; | 227 | list = &tcp_listening_hash[inet_sk_listen_hashfn(sk)]; |
228 | lock = &tcp_lhash_lock; | 228 | lock = &tcp_lhash_lock; |
229 | tcp_listen_wlock(); | 229 | tcp_listen_wlock(); |
230 | } else { | 230 | } else { |
@@ -264,7 +264,7 @@ static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned shor | |||
264 | 264 | ||
265 | hiscore=0; | 265 | hiscore=0; |
266 | read_lock(&tcp_lhash_lock); | 266 | read_lock(&tcp_lhash_lock); |
267 | sk_for_each(sk, node, &tcp_listening_hash[tcp_lhashfn(hnum)]) { | 267 | sk_for_each(sk, node, &tcp_listening_hash[inet_lhashfn(hnum)]) { |
268 | if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) { | 268 | if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) { |
269 | struct ipv6_pinfo *np = inet6_sk(sk); | 269 | struct ipv6_pinfo *np = inet6_sk(sk); |
270 | 270 | ||
@@ -305,7 +305,7 @@ static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u | |||
305 | struct in6_addr *daddr, u16 hnum, | 305 | struct in6_addr *daddr, u16 hnum, |
306 | int dif) | 306 | int dif) |
307 | { | 307 | { |
308 | struct tcp_ehash_bucket *head; | 308 | struct inet_ehash_bucket *head; |
309 | struct sock *sk; | 309 | struct sock *sk; |
310 | struct hlist_node *node; | 310 | struct hlist_node *node; |
311 | __u32 ports = TCP_COMBINED_PORTS(sport, hnum); | 311 | __u32 ports = TCP_COMBINED_PORTS(sport, hnum); |
@@ -461,7 +461,7 @@ static int __tcp_v6_check_established(struct sock *sk, __u16 lport, | |||
461 | int dif = sk->sk_bound_dev_if; | 461 | int dif = sk->sk_bound_dev_if; |
462 | u32 ports = TCP_COMBINED_PORTS(inet->dport, lport); | 462 | u32 ports = TCP_COMBINED_PORTS(inet->dport, lport); |
463 | int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport); | 463 | int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport); |
464 | struct tcp_ehash_bucket *head = &tcp_ehash[hash]; | 464 | struct inet_ehash_bucket *head = &tcp_ehash[hash]; |
465 | struct sock *sk2; | 465 | struct sock *sk2; |
466 | struct hlist_node *node; | 466 | struct hlist_node *node; |
467 | struct tcp_tw_bucket *tw; | 467 | struct tcp_tw_bucket *tw; |
@@ -540,8 +540,8 @@ static inline u32 tcpv6_port_offset(const struct sock *sk) | |||
540 | static int tcp_v6_hash_connect(struct sock *sk) | 540 | static int tcp_v6_hash_connect(struct sock *sk) |
541 | { | 541 | { |
542 | unsigned short snum = inet_sk(sk)->num; | 542 | unsigned short snum = inet_sk(sk)->num; |
543 | struct tcp_bind_hashbucket *head; | 543 | struct inet_bind_hashbucket *head; |
544 | struct tcp_bind_bucket *tb; | 544 | struct inet_bind_bucket *tb; |
545 | int ret; | 545 | int ret; |
546 | 546 | ||
547 | if (!snum) { | 547 | if (!snum) { |
@@ -558,14 +558,14 @@ static int tcp_v6_hash_connect(struct sock *sk) | |||
558 | local_bh_disable(); | 558 | local_bh_disable(); |
559 | for (i = 1; i <= range; i++) { | 559 | for (i = 1; i <= range; i++) { |
560 | port = low + (i + offset) % range; | 560 | port = low + (i + offset) % range; |
561 | head = &tcp_bhash[tcp_bhashfn(port)]; | 561 | head = &tcp_bhash[inet_bhashfn(port, tcp_bhash_size)]; |
562 | spin_lock(&head->lock); | 562 | spin_lock(&head->lock); |
563 | 563 | ||
564 | /* Does not bother with rcv_saddr checks, | 564 | /* Does not bother with rcv_saddr checks, |
565 | * because the established check is already | 565 | * because the established check is already |
566 | * unique enough. | 566 | * unique enough. |
567 | */ | 567 | */ |
568 | tb_for_each(tb, node, &head->chain) { | 568 | inet_bind_bucket_for_each(tb, node, &head->chain) { |
569 | if (tb->port == port) { | 569 | if (tb->port == port) { |
570 | BUG_TRAP(!hlist_empty(&tb->owners)); | 570 | BUG_TRAP(!hlist_empty(&tb->owners)); |
571 | if (tb->fastreuse >= 0) | 571 | if (tb->fastreuse >= 0) |
@@ -578,7 +578,7 @@ static int tcp_v6_hash_connect(struct sock *sk) | |||
578 | } | 578 | } |
579 | } | 579 | } |
580 | 580 | ||
581 | tb = tcp_bucket_create(head, port); | 581 | tb = inet_bind_bucket_create(tcp_bucket_cachep, head, port); |
582 | if (!tb) { | 582 | if (!tb) { |
583 | spin_unlock(&head->lock); | 583 | spin_unlock(&head->lock); |
584 | break; | 584 | break; |
@@ -613,7 +613,7 @@ ok: | |||
613 | goto out; | 613 | goto out; |
614 | } | 614 | } |
615 | 615 | ||
616 | head = &tcp_bhash[tcp_bhashfn(snum)]; | 616 | head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)]; |
617 | tb = tcp_sk(sk)->bind_hash; | 617 | tb = tcp_sk(sk)->bind_hash; |
618 | spin_lock_bh(&head->lock); | 618 | spin_lock_bh(&head->lock); |
619 | 619 | ||