diff options
author | Arnaldo Carvalho de Melo <acme@ghostprotocols.net> | 2005-08-09 23:07:35 -0400 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2005-08-29 18:41:44 -0400 |
commit | 6e04e02165a7209a71db553b7bc48d68421e5ebf (patch) | |
tree | 004157924013e6c099cacac59f39d3dd61f3e0e5 /net/ipv6/tcp_ipv6.c | |
parent | 2d8c4ce51903636ce0f60addc8134aa50ab8fa76 (diff) |
[INET]: Move tcp_port_rover to inet_hashinfo
Also expose all of the tcp_hashinfo members, i.e. kill the tcp_ehash
etc. macros. This will more clearly expose already-generic functions,
as well as some that need just a bit of work to become generic, as
we'll see in the upcoming changesets.
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6/tcp_ipv6.c')
-rw-r--r-- | net/ipv6/tcp_ipv6.c | 51 |
1 files changed, 27 insertions, 24 deletions
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index bfbedb56bce2..362ef5a64062 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -84,7 +84,7 @@ static __inline__ int tcp_v6_hashfn(struct in6_addr *laddr, u16 lport, | |||
84 | hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]); | 84 | hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]); |
85 | hashent ^= hashent>>16; | 85 | hashent ^= hashent>>16; |
86 | hashent ^= hashent>>8; | 86 | hashent ^= hashent>>8; |
87 | return (hashent & (tcp_ehash_size - 1)); | 87 | return (hashent & (tcp_hashinfo.ehash_size - 1)); |
88 | } | 88 | } |
89 | 89 | ||
90 | static __inline__ int tcp_v6_sk_hashfn(struct sock *sk) | 90 | static __inline__ int tcp_v6_sk_hashfn(struct sock *sk) |
@@ -138,15 +138,15 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum) | |||
138 | int remaining = (high - low) + 1; | 138 | int remaining = (high - low) + 1; |
139 | int rover; | 139 | int rover; |
140 | 140 | ||
141 | spin_lock(&tcp_portalloc_lock); | 141 | spin_lock(&tcp_hashinfo.portalloc_lock); |
142 | if (tcp_port_rover < low) | 142 | if (tcp_hashinfo.port_rover < low) |
143 | rover = low; | 143 | rover = low; |
144 | else | 144 | else |
145 | rover = tcp_port_rover; | 145 | rover = tcp_hashinfo.port_rover; |
146 | do { rover++; | 146 | do { rover++; |
147 | if (rover > high) | 147 | if (rover > high) |
148 | rover = low; | 148 | rover = low; |
149 | head = &tcp_bhash[inet_bhashfn(rover, tcp_bhash_size)]; | 149 | head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)]; |
150 | spin_lock(&head->lock); | 150 | spin_lock(&head->lock); |
151 | inet_bind_bucket_for_each(tb, node, &head->chain) | 151 | inet_bind_bucket_for_each(tb, node, &head->chain) |
152 | if (tb->port == rover) | 152 | if (tb->port == rover) |
@@ -155,8 +155,8 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum) | |||
155 | next: | 155 | next: |
156 | spin_unlock(&head->lock); | 156 | spin_unlock(&head->lock); |
157 | } while (--remaining > 0); | 157 | } while (--remaining > 0); |
158 | tcp_port_rover = rover; | 158 | tcp_hashinfo.port_rover = rover; |
159 | spin_unlock(&tcp_portalloc_lock); | 159 | spin_unlock(&tcp_hashinfo.portalloc_lock); |
160 | 160 | ||
161 | /* Exhausted local port range during search? It is not | 161 | /* Exhausted local port range during search? It is not |
162 | * possible for us to be holding one of the bind hash | 162 | * possible for us to be holding one of the bind hash |
@@ -171,7 +171,7 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum) | |||
171 | /* OK, here is the one we will use. */ | 171 | /* OK, here is the one we will use. */ |
172 | snum = rover; | 172 | snum = rover; |
173 | } else { | 173 | } else { |
174 | head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)]; | 174 | head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)]; |
175 | spin_lock(&head->lock); | 175 | spin_lock(&head->lock); |
176 | inet_bind_bucket_for_each(tb, node, &head->chain) | 176 | inet_bind_bucket_for_each(tb, node, &head->chain) |
177 | if (tb->port == snum) | 177 | if (tb->port == snum) |
@@ -192,8 +192,11 @@ tb_found: | |||
192 | } | 192 | } |
193 | tb_not_found: | 193 | tb_not_found: |
194 | ret = 1; | 194 | ret = 1; |
195 | if (!tb && (tb = inet_bind_bucket_create(tcp_bucket_cachep, head, snum)) == NULL) | 195 | if (tb == NULL) { |
196 | goto fail_unlock; | 196 | tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum); |
197 | if (tb == NULL) | ||
198 | goto fail_unlock; | ||
199 | } | ||
197 | if (hlist_empty(&tb->owners)) { | 200 | if (hlist_empty(&tb->owners)) { |
198 | if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) | 201 | if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) |
199 | tb->fastreuse = 1; | 202 | tb->fastreuse = 1; |
@@ -224,13 +227,13 @@ static __inline__ void __tcp_v6_hash(struct sock *sk) | |||
224 | BUG_TRAP(sk_unhashed(sk)); | 227 | BUG_TRAP(sk_unhashed(sk)); |
225 | 228 | ||
226 | if (sk->sk_state == TCP_LISTEN) { | 229 | if (sk->sk_state == TCP_LISTEN) { |
227 | list = &tcp_listening_hash[inet_sk_listen_hashfn(sk)]; | 230 | list = &tcp_hashinfo.listening_hash[inet_sk_listen_hashfn(sk)]; |
228 | lock = &tcp_lhash_lock; | 231 | lock = &tcp_hashinfo.lhash_lock; |
229 | tcp_listen_wlock(); | 232 | tcp_listen_wlock(); |
230 | } else { | 233 | } else { |
231 | sk->sk_hashent = tcp_v6_sk_hashfn(sk); | 234 | sk->sk_hashent = tcp_v6_sk_hashfn(sk); |
232 | list = &tcp_ehash[sk->sk_hashent].chain; | 235 | list = &tcp_hashinfo.ehash[sk->sk_hashent].chain; |
233 | lock = &tcp_ehash[sk->sk_hashent].lock; | 236 | lock = &tcp_hashinfo.ehash[sk->sk_hashent].lock; |
234 | write_lock(lock); | 237 | write_lock(lock); |
235 | } | 238 | } |
236 | 239 | ||
@@ -263,8 +266,8 @@ static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned shor | |||
263 | int score, hiscore; | 266 | int score, hiscore; |
264 | 267 | ||
265 | hiscore=0; | 268 | hiscore=0; |
266 | read_lock(&tcp_lhash_lock); | 269 | read_lock(&tcp_hashinfo.lhash_lock); |
267 | sk_for_each(sk, node, &tcp_listening_hash[inet_lhashfn(hnum)]) { | 270 | sk_for_each(sk, node, &tcp_hashinfo.listening_hash[inet_lhashfn(hnum)]) { |
268 | if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) { | 271 | if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) { |
269 | struct ipv6_pinfo *np = inet6_sk(sk); | 272 | struct ipv6_pinfo *np = inet6_sk(sk); |
270 | 273 | ||
@@ -291,7 +294,7 @@ static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned shor | |||
291 | } | 294 | } |
292 | if (result) | 295 | if (result) |
293 | sock_hold(result); | 296 | sock_hold(result); |
294 | read_unlock(&tcp_lhash_lock); | 297 | read_unlock(&tcp_hashinfo.lhash_lock); |
295 | return result; | 298 | return result; |
296 | } | 299 | } |
297 | 300 | ||
@@ -315,7 +318,7 @@ static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u | |||
315 | * have wildcards anyways. | 318 | * have wildcards anyways. |
316 | */ | 319 | */ |
317 | hash = tcp_v6_hashfn(daddr, hnum, saddr, sport); | 320 | hash = tcp_v6_hashfn(daddr, hnum, saddr, sport); |
318 | head = &tcp_ehash[hash]; | 321 | head = &tcp_hashinfo.ehash[hash]; |
319 | read_lock(&head->lock); | 322 | read_lock(&head->lock); |
320 | sk_for_each(sk, node, &head->chain) { | 323 | sk_for_each(sk, node, &head->chain) { |
321 | /* For IPV6 do the cheaper port and family tests first. */ | 324 | /* For IPV6 do the cheaper port and family tests first. */ |
@@ -323,7 +326,7 @@ static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u | |||
323 | goto hit; /* You sunk my battleship! */ | 326 | goto hit; /* You sunk my battleship! */ |
324 | } | 327 | } |
325 | /* Must check for a TIME_WAIT'er before going to listener hash. */ | 328 | /* Must check for a TIME_WAIT'er before going to listener hash. */ |
326 | sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) { | 329 | sk_for_each(sk, node, &(head + tcp_hashinfo.ehash_size)->chain) { |
327 | /* FIXME: acme: check this... */ | 330 | /* FIXME: acme: check this... */ |
328 | struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk; | 331 | struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk; |
329 | 332 | ||
@@ -461,7 +464,7 @@ static int __tcp_v6_check_established(struct sock *sk, __u16 lport, | |||
461 | int dif = sk->sk_bound_dev_if; | 464 | int dif = sk->sk_bound_dev_if; |
462 | u32 ports = TCP_COMBINED_PORTS(inet->dport, lport); | 465 | u32 ports = TCP_COMBINED_PORTS(inet->dport, lport); |
463 | int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport); | 466 | int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport); |
464 | struct inet_ehash_bucket *head = &tcp_ehash[hash]; | 467 | struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash]; |
465 | struct sock *sk2; | 468 | struct sock *sk2; |
466 | struct hlist_node *node; | 469 | struct hlist_node *node; |
467 | struct tcp_tw_bucket *tw; | 470 | struct tcp_tw_bucket *tw; |
@@ -469,7 +472,7 @@ static int __tcp_v6_check_established(struct sock *sk, __u16 lport, | |||
469 | write_lock(&head->lock); | 472 | write_lock(&head->lock); |
470 | 473 | ||
471 | /* Check TIME-WAIT sockets first. */ | 474 | /* Check TIME-WAIT sockets first. */ |
472 | sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) { | 475 | sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) { |
473 | tw = (struct tcp_tw_bucket*)sk2; | 476 | tw = (struct tcp_tw_bucket*)sk2; |
474 | 477 | ||
475 | if(*((__u32 *)&(tw->tw_dport)) == ports && | 478 | if(*((__u32 *)&(tw->tw_dport)) == ports && |
@@ -558,7 +561,7 @@ static int tcp_v6_hash_connect(struct sock *sk) | |||
558 | local_bh_disable(); | 561 | local_bh_disable(); |
559 | for (i = 1; i <= range; i++) { | 562 | for (i = 1; i <= range; i++) { |
560 | port = low + (i + offset) % range; | 563 | port = low + (i + offset) % range; |
561 | head = &tcp_bhash[inet_bhashfn(port, tcp_bhash_size)]; | 564 | head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)]; |
562 | spin_lock(&head->lock); | 565 | spin_lock(&head->lock); |
563 | 566 | ||
564 | /* Does not bother with rcv_saddr checks, | 567 | /* Does not bother with rcv_saddr checks, |
@@ -578,7 +581,7 @@ static int tcp_v6_hash_connect(struct sock *sk) | |||
578 | } | 581 | } |
579 | } | 582 | } |
580 | 583 | ||
581 | tb = inet_bind_bucket_create(tcp_bucket_cachep, head, port); | 584 | tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port); |
582 | if (!tb) { | 585 | if (!tb) { |
583 | spin_unlock(&head->lock); | 586 | spin_unlock(&head->lock); |
584 | break; | 587 | break; |
@@ -613,7 +616,7 @@ ok: | |||
613 | goto out; | 616 | goto out; |
614 | } | 617 | } |
615 | 618 | ||
616 | head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)]; | 619 | head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)]; |
617 | tb = inet_sk(sk)->bind_hash; | 620 | tb = inet_sk(sk)->bind_hash; |
618 | spin_lock_bh(&head->lock); | 621 | spin_lock_bh(&head->lock); |
619 | 622 | ||