diff options
author | Arnaldo Carvalho de Melo <acme@ghostprotocols.net> | 2005-08-09 22:59:44 -0400 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2005-08-29 18:38:32 -0400 |
commit | 0f7ff9274e72fd254fbd1ab117bbc1db6e7cdb34 (patch) | |
tree | 95736729a2f5302666604c4287a2af97ececd734 /net/ipv6/tcp_ipv6.c | |
parent | 304a16180fb6d2b153b45f6fbbcec1fa814496e5 (diff) |
[INET]: Just rename the TCP hashtable functions/structs to inet_
This is to break down the complexity of the series of patches,
making it very clear that this one just does:
1. renames tcp_ prefixed hashtable functions and data structures that
were already mostly generic to inet_ to share it with DCCP and
other INET transport protocols.
2. Removes unused functions (__tb_head & tb_head)
3. Removes some leftover prototypes in the headers (tcp_bucket_unlock &
tcp_v4_build_header)
Next changesets will move tcp_sk(sk)->bind_hash to inet_sock so that we can
make functions such as tcp_inherit_port, __tcp_inherit_port, tcp_v4_get_port,
__tcp_put_port, generic and get others like tcp_destroy_sock closer to generic
(tcp_orphan_count will go to sk->sk_prot to allow this).
Eventually most of these functions will be used passing the transport protocol
inet_hashinfo structure.
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6/tcp_ipv6.c')
-rw-r--r-- | net/ipv6/tcp_ipv6.c | 42 |
1 file changed, 21 insertions, 21 deletions
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 4e32a8496be3..31f50fb29ffb 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -98,11 +98,11 @@ static __inline__ int tcp_v6_sk_hashfn(struct sock *sk) | |||
98 | return tcp_v6_hashfn(laddr, lport, faddr, fport); | 98 | return tcp_v6_hashfn(laddr, lport, faddr, fport); |
99 | } | 99 | } |
100 | 100 | ||
101 | static inline int tcp_v6_bind_conflict(struct sock *sk, | 101 | static inline int tcp_v6_bind_conflict(const struct sock *sk, |
102 | struct tcp_bind_bucket *tb) | 102 | const struct inet_bind_bucket *tb) |
103 | { | 103 | { |
104 | struct sock *sk2; | 104 | const struct sock *sk2; |
105 | struct hlist_node *node; | 105 | const struct hlist_node *node; |
106 | 106 | ||
107 | /* We must walk the whole port owner list in this case. -DaveM */ | 107 | /* We must walk the whole port owner list in this case. -DaveM */ |
108 | sk_for_each_bound(sk2, node, &tb->owners) { | 108 | sk_for_each_bound(sk2, node, &tb->owners) { |
@@ -126,8 +126,8 @@ static inline int tcp_v6_bind_conflict(struct sock *sk, | |||
126 | */ | 126 | */ |
127 | static int tcp_v6_get_port(struct sock *sk, unsigned short snum) | 127 | static int tcp_v6_get_port(struct sock *sk, unsigned short snum) |
128 | { | 128 | { |
129 | struct tcp_bind_hashbucket *head; | 129 | struct inet_bind_hashbucket *head; |
130 | struct tcp_bind_bucket *tb; | 130 | struct inet_bind_bucket *tb; |
131 | struct hlist_node *node; | 131 | struct hlist_node *node; |
132 | int ret; | 132 | int ret; |
133 | 133 | ||
@@ -146,9 +146,9 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum) | |||
146 | do { rover++; | 146 | do { rover++; |
147 | if (rover > high) | 147 | if (rover > high) |
148 | rover = low; | 148 | rover = low; |
149 | head = &tcp_bhash[tcp_bhashfn(rover)]; | 149 | head = &tcp_bhash[inet_bhashfn(rover, tcp_bhash_size)]; |
150 | spin_lock(&head->lock); | 150 | spin_lock(&head->lock); |
151 | tb_for_each(tb, node, &head->chain) | 151 | inet_bind_bucket_for_each(tb, node, &head->chain) |
152 | if (tb->port == rover) | 152 | if (tb->port == rover) |
153 | goto next; | 153 | goto next; |
154 | break; | 154 | break; |
@@ -171,9 +171,9 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum) | |||
171 | /* OK, here is the one we will use. */ | 171 | /* OK, here is the one we will use. */ |
172 | snum = rover; | 172 | snum = rover; |
173 | } else { | 173 | } else { |
174 | head = &tcp_bhash[tcp_bhashfn(snum)]; | 174 | head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)]; |
175 | spin_lock(&head->lock); | 175 | spin_lock(&head->lock); |
176 | tb_for_each(tb, node, &head->chain) | 176 | inet_bind_bucket_for_each(tb, node, &head->chain) |
177 | if (tb->port == snum) | 177 | if (tb->port == snum) |
178 | goto tb_found; | 178 | goto tb_found; |
179 | } | 179 | } |
@@ -192,7 +192,7 @@ tb_found: | |||
192 | } | 192 | } |
193 | tb_not_found: | 193 | tb_not_found: |
194 | ret = 1; | 194 | ret = 1; |
195 | if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL) | 195 | if (!tb && (tb = inet_bind_bucket_create(tcp_bucket_cachep, head, snum)) == NULL) |
196 | goto fail_unlock; | 196 | goto fail_unlock; |
197 | if (hlist_empty(&tb->owners)) { | 197 | if (hlist_empty(&tb->owners)) { |
198 | if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) | 198 | if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) |
@@ -224,7 +224,7 @@ static __inline__ void __tcp_v6_hash(struct sock *sk) | |||
224 | BUG_TRAP(sk_unhashed(sk)); | 224 | BUG_TRAP(sk_unhashed(sk)); |
225 | 225 | ||
226 | if (sk->sk_state == TCP_LISTEN) { | 226 | if (sk->sk_state == TCP_LISTEN) { |
227 | list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)]; | 227 | list = &tcp_listening_hash[inet_sk_listen_hashfn(sk)]; |
228 | lock = &tcp_lhash_lock; | 228 | lock = &tcp_lhash_lock; |
229 | tcp_listen_wlock(); | 229 | tcp_listen_wlock(); |
230 | } else { | 230 | } else { |
@@ -264,7 +264,7 @@ static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned shor | |||
264 | 264 | ||
265 | hiscore=0; | 265 | hiscore=0; |
266 | read_lock(&tcp_lhash_lock); | 266 | read_lock(&tcp_lhash_lock); |
267 | sk_for_each(sk, node, &tcp_listening_hash[tcp_lhashfn(hnum)]) { | 267 | sk_for_each(sk, node, &tcp_listening_hash[inet_lhashfn(hnum)]) { |
268 | if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) { | 268 | if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) { |
269 | struct ipv6_pinfo *np = inet6_sk(sk); | 269 | struct ipv6_pinfo *np = inet6_sk(sk); |
270 | 270 | ||
@@ -305,7 +305,7 @@ static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u | |||
305 | struct in6_addr *daddr, u16 hnum, | 305 | struct in6_addr *daddr, u16 hnum, |
306 | int dif) | 306 | int dif) |
307 | { | 307 | { |
308 | struct tcp_ehash_bucket *head; | 308 | struct inet_ehash_bucket *head; |
309 | struct sock *sk; | 309 | struct sock *sk; |
310 | struct hlist_node *node; | 310 | struct hlist_node *node; |
311 | __u32 ports = TCP_COMBINED_PORTS(sport, hnum); | 311 | __u32 ports = TCP_COMBINED_PORTS(sport, hnum); |
@@ -461,7 +461,7 @@ static int __tcp_v6_check_established(struct sock *sk, __u16 lport, | |||
461 | int dif = sk->sk_bound_dev_if; | 461 | int dif = sk->sk_bound_dev_if; |
462 | u32 ports = TCP_COMBINED_PORTS(inet->dport, lport); | 462 | u32 ports = TCP_COMBINED_PORTS(inet->dport, lport); |
463 | int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport); | 463 | int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport); |
464 | struct tcp_ehash_bucket *head = &tcp_ehash[hash]; | 464 | struct inet_ehash_bucket *head = &tcp_ehash[hash]; |
465 | struct sock *sk2; | 465 | struct sock *sk2; |
466 | struct hlist_node *node; | 466 | struct hlist_node *node; |
467 | struct tcp_tw_bucket *tw; | 467 | struct tcp_tw_bucket *tw; |
@@ -540,8 +540,8 @@ static inline u32 tcpv6_port_offset(const struct sock *sk) | |||
540 | static int tcp_v6_hash_connect(struct sock *sk) | 540 | static int tcp_v6_hash_connect(struct sock *sk) |
541 | { | 541 | { |
542 | unsigned short snum = inet_sk(sk)->num; | 542 | unsigned short snum = inet_sk(sk)->num; |
543 | struct tcp_bind_hashbucket *head; | 543 | struct inet_bind_hashbucket *head; |
544 | struct tcp_bind_bucket *tb; | 544 | struct inet_bind_bucket *tb; |
545 | int ret; | 545 | int ret; |
546 | 546 | ||
547 | if (!snum) { | 547 | if (!snum) { |
@@ -558,14 +558,14 @@ static int tcp_v6_hash_connect(struct sock *sk) | |||
558 | local_bh_disable(); | 558 | local_bh_disable(); |
559 | for (i = 1; i <= range; i++) { | 559 | for (i = 1; i <= range; i++) { |
560 | port = low + (i + offset) % range; | 560 | port = low + (i + offset) % range; |
561 | head = &tcp_bhash[tcp_bhashfn(port)]; | 561 | head = &tcp_bhash[inet_bhashfn(port, tcp_bhash_size)]; |
562 | spin_lock(&head->lock); | 562 | spin_lock(&head->lock); |
563 | 563 | ||
564 | /* Does not bother with rcv_saddr checks, | 564 | /* Does not bother with rcv_saddr checks, |
565 | * because the established check is already | 565 | * because the established check is already |
566 | * unique enough. | 566 | * unique enough. |
567 | */ | 567 | */ |
568 | tb_for_each(tb, node, &head->chain) { | 568 | inet_bind_bucket_for_each(tb, node, &head->chain) { |
569 | if (tb->port == port) { | 569 | if (tb->port == port) { |
570 | BUG_TRAP(!hlist_empty(&tb->owners)); | 570 | BUG_TRAP(!hlist_empty(&tb->owners)); |
571 | if (tb->fastreuse >= 0) | 571 | if (tb->fastreuse >= 0) |
@@ -578,7 +578,7 @@ static int tcp_v6_hash_connect(struct sock *sk) | |||
578 | } | 578 | } |
579 | } | 579 | } |
580 | 580 | ||
581 | tb = tcp_bucket_create(head, port); | 581 | tb = inet_bind_bucket_create(tcp_bucket_cachep, head, port); |
582 | if (!tb) { | 582 | if (!tb) { |
583 | spin_unlock(&head->lock); | 583 | spin_unlock(&head->lock); |
584 | break; | 584 | break; |
@@ -613,7 +613,7 @@ ok: | |||
613 | goto out; | 613 | goto out; |
614 | } | 614 | } |
615 | 615 | ||
616 | head = &tcp_bhash[tcp_bhashfn(snum)]; | 616 | head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)]; |
617 | tb = tcp_sk(sk)->bind_hash; | 617 | tb = tcp_sk(sk)->bind_hash; |
618 | spin_lock_bh(&head->lock); | 618 | spin_lock_bh(&head->lock); |
619 | 619 | ||