diff options
author | Arnaldo Carvalho de Melo <acme@ghostprotocols.net> | 2005-08-09 22:59:44 -0400 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2005-08-29 18:38:32 -0400 |
commit | 0f7ff9274e72fd254fbd1ab117bbc1db6e7cdb34 (patch) | |
tree | 95736729a2f5302666604c4287a2af97ececd734 /net/ipv4/tcp_ipv4.c | |
parent | 304a16180fb6d2b153b45f6fbbcec1fa814496e5 (diff) |
[INET]: Just rename the TCP hashtable functions/structs to inet_
This is to break down the complexity of the series of patches,
making it very clear that this one just does:
1. renames tcp_-prefixed hashtable functions and data structures that
   were already mostly generic to inet_, to share them with DCCP and
   other INET transport protocols.
2. Removes unused functions (__tb_head & tb_head)
3. Removes some leftover prototypes in the headers (tcp_bucket_unlock &
tcp_v4_build_header)
Next changesets will move tcp_sk(sk)->bind_hash to inet_sock so that we can
make functions such as tcp_inherit_port, __tcp_inherit_port, tcp_v4_get_port,
__tcp_put_port, generic and get others like tcp_destroy_sock closer to generic
(tcp_orphan_count will go to sk->sk_prot to allow this).
Eventually most of these functions will be used passing the transport protocol
inet_hashinfo structure.
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r-- | net/ipv4/tcp_ipv4.c | 106 |
1 file changed, 56 insertions, 50 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index c03d7e9688c8..4138630556e3 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -89,12 +89,11 @@ static struct socket *tcp_socket; | |||
89 | void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len, | 89 | void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len, |
90 | struct sk_buff *skb); | 90 | struct sk_buff *skb); |
91 | 91 | ||
92 | struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = { | 92 | struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { |
93 | .__tcp_lhash_lock = RW_LOCK_UNLOCKED, | 93 | .lhash_lock = RW_LOCK_UNLOCKED, |
94 | .__tcp_lhash_users = ATOMIC_INIT(0), | 94 | .lhash_users = ATOMIC_INIT(0), |
95 | .__tcp_lhash_wait | 95 | .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait), |
96 | = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait), | 96 | .portalloc_lock = SPIN_LOCK_UNLOCKED, |
97 | .__tcp_portalloc_lock = SPIN_LOCK_UNLOCKED | ||
98 | }; | 97 | }; |
99 | 98 | ||
100 | /* | 99 | /* |
@@ -105,14 +104,14 @@ struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = { | |||
105 | int sysctl_local_port_range[2] = { 1024, 4999 }; | 104 | int sysctl_local_port_range[2] = { 1024, 4999 }; |
106 | int tcp_port_rover = 1024 - 1; | 105 | int tcp_port_rover = 1024 - 1; |
107 | 106 | ||
108 | /* Allocate and initialize a new TCP local port bind bucket. | 107 | /* Allocate and initialize a new local port bind bucket. |
109 | * The bindhash mutex for snum's hash chain must be held here. | 108 | * The bindhash mutex for snum's hash chain must be held here. |
110 | */ | 109 | */ |
111 | struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head, | 110 | struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep, |
112 | unsigned short snum) | 111 | struct inet_bind_hashbucket *head, |
112 | const unsigned short snum) | ||
113 | { | 113 | { |
114 | struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep, | 114 | struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, SLAB_ATOMIC); |
115 | SLAB_ATOMIC); | ||
116 | if (tb) { | 115 | if (tb) { |
117 | tb->port = snum; | 116 | tb->port = snum; |
118 | tb->fastreuse = 0; | 117 | tb->fastreuse = 0; |
@@ -123,20 +122,21 @@ struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head, | |||
123 | } | 122 | } |
124 | 123 | ||
125 | /* Caller must hold hashbucket lock for this tb with local BH disabled */ | 124 | /* Caller must hold hashbucket lock for this tb with local BH disabled */ |
126 | void tcp_bucket_destroy(struct tcp_bind_bucket *tb) | 125 | void inet_bind_bucket_destroy(kmem_cache_t *cachep, struct inet_bind_bucket *tb) |
127 | { | 126 | { |
128 | if (hlist_empty(&tb->owners)) { | 127 | if (hlist_empty(&tb->owners)) { |
129 | __hlist_del(&tb->node); | 128 | __hlist_del(&tb->node); |
130 | kmem_cache_free(tcp_bucket_cachep, tb); | 129 | kmem_cache_free(cachep, tb); |
131 | } | 130 | } |
132 | } | 131 | } |
133 | 132 | ||
134 | /* Caller must disable local BH processing. */ | 133 | /* Caller must disable local BH processing. */ |
135 | static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child) | 134 | static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child) |
136 | { | 135 | { |
137 | struct tcp_bind_hashbucket *head = | 136 | struct inet_bind_hashbucket *head = |
138 | &tcp_bhash[tcp_bhashfn(inet_sk(child)->num)]; | 137 | &tcp_bhash[inet_bhashfn(inet_sk(child)->num, |
139 | struct tcp_bind_bucket *tb; | 138 | tcp_bhash_size)]; |
139 | struct inet_bind_bucket *tb; | ||
140 | 140 | ||
141 | spin_lock(&head->lock); | 141 | spin_lock(&head->lock); |
142 | tb = tcp_sk(sk)->bind_hash; | 142 | tb = tcp_sk(sk)->bind_hash; |
@@ -152,15 +152,15 @@ inline void tcp_inherit_port(struct sock *sk, struct sock *child) | |||
152 | local_bh_enable(); | 152 | local_bh_enable(); |
153 | } | 153 | } |
154 | 154 | ||
155 | void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb, | 155 | void tcp_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, |
156 | unsigned short snum) | 156 | const unsigned short snum) |
157 | { | 157 | { |
158 | inet_sk(sk)->num = snum; | 158 | inet_sk(sk)->num = snum; |
159 | sk_add_bind_node(sk, &tb->owners); | 159 | sk_add_bind_node(sk, &tb->owners); |
160 | tcp_sk(sk)->bind_hash = tb; | 160 | tcp_sk(sk)->bind_hash = tb; |
161 | } | 161 | } |
162 | 162 | ||
163 | static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb) | 163 | static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb) |
164 | { | 164 | { |
165 | const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk); | 165 | const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk); |
166 | struct sock *sk2; | 166 | struct sock *sk2; |
@@ -190,9 +190,9 @@ static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb) | |||
190 | */ | 190 | */ |
191 | static int tcp_v4_get_port(struct sock *sk, unsigned short snum) | 191 | static int tcp_v4_get_port(struct sock *sk, unsigned short snum) |
192 | { | 192 | { |
193 | struct tcp_bind_hashbucket *head; | 193 | struct inet_bind_hashbucket *head; |
194 | struct hlist_node *node; | 194 | struct hlist_node *node; |
195 | struct tcp_bind_bucket *tb; | 195 | struct inet_bind_bucket *tb; |
196 | int ret; | 196 | int ret; |
197 | 197 | ||
198 | local_bh_disable(); | 198 | local_bh_disable(); |
@@ -211,9 +211,9 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum) | |||
211 | rover++; | 211 | rover++; |
212 | if (rover > high) | 212 | if (rover > high) |
213 | rover = low; | 213 | rover = low; |
214 | head = &tcp_bhash[tcp_bhashfn(rover)]; | 214 | head = &tcp_bhash[inet_bhashfn(rover, tcp_bhash_size)]; |
215 | spin_lock(&head->lock); | 215 | spin_lock(&head->lock); |
216 | tb_for_each(tb, node, &head->chain) | 216 | inet_bind_bucket_for_each(tb, node, &head->chain) |
217 | if (tb->port == rover) | 217 | if (tb->port == rover) |
218 | goto next; | 218 | goto next; |
219 | break; | 219 | break; |
@@ -238,9 +238,9 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum) | |||
238 | */ | 238 | */ |
239 | snum = rover; | 239 | snum = rover; |
240 | } else { | 240 | } else { |
241 | head = &tcp_bhash[tcp_bhashfn(snum)]; | 241 | head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)]; |
242 | spin_lock(&head->lock); | 242 | spin_lock(&head->lock); |
243 | tb_for_each(tb, node, &head->chain) | 243 | inet_bind_bucket_for_each(tb, node, &head->chain) |
244 | if (tb->port == snum) | 244 | if (tb->port == snum) |
245 | goto tb_found; | 245 | goto tb_found; |
246 | } | 246 | } |
@@ -261,7 +261,7 @@ tb_found: | |||
261 | } | 261 | } |
262 | tb_not_found: | 262 | tb_not_found: |
263 | ret = 1; | 263 | ret = 1; |
264 | if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL) | 264 | if (!tb && (tb = inet_bind_bucket_create(tcp_bucket_cachep, head, snum)) == NULL) |
265 | goto fail_unlock; | 265 | goto fail_unlock; |
266 | if (hlist_empty(&tb->owners)) { | 266 | if (hlist_empty(&tb->owners)) { |
267 | if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) | 267 | if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) |
@@ -290,15 +290,16 @@ fail: | |||
290 | static void __tcp_put_port(struct sock *sk) | 290 | static void __tcp_put_port(struct sock *sk) |
291 | { | 291 | { |
292 | struct inet_sock *inet = inet_sk(sk); | 292 | struct inet_sock *inet = inet_sk(sk); |
293 | struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)]; | 293 | struct inet_bind_hashbucket *head = &tcp_bhash[inet_bhashfn(inet->num, |
294 | struct tcp_bind_bucket *tb; | 294 | tcp_bhash_size)]; |
295 | struct inet_bind_bucket *tb; | ||
295 | 296 | ||
296 | spin_lock(&head->lock); | 297 | spin_lock(&head->lock); |
297 | tb = tcp_sk(sk)->bind_hash; | 298 | tb = tcp_sk(sk)->bind_hash; |
298 | __sk_del_bind_node(sk); | 299 | __sk_del_bind_node(sk); |
299 | tcp_sk(sk)->bind_hash = NULL; | 300 | tcp_sk(sk)->bind_hash = NULL; |
300 | inet->num = 0; | 301 | inet->num = 0; |
301 | tcp_bucket_destroy(tb); | 302 | inet_bind_bucket_destroy(tcp_bucket_cachep, tb); |
302 | spin_unlock(&head->lock); | 303 | spin_unlock(&head->lock); |
303 | } | 304 | } |
304 | 305 | ||
@@ -344,7 +345,7 @@ static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible) | |||
344 | 345 | ||
345 | BUG_TRAP(sk_unhashed(sk)); | 346 | BUG_TRAP(sk_unhashed(sk)); |
346 | if (listen_possible && sk->sk_state == TCP_LISTEN) { | 347 | if (listen_possible && sk->sk_state == TCP_LISTEN) { |
347 | list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)]; | 348 | list = &tcp_listening_hash[inet_sk_listen_hashfn(sk)]; |
348 | lock = &tcp_lhash_lock; | 349 | lock = &tcp_lhash_lock; |
349 | tcp_listen_wlock(); | 350 | tcp_listen_wlock(); |
350 | } else { | 351 | } else { |
@@ -381,7 +382,7 @@ void tcp_unhash(struct sock *sk) | |||
381 | tcp_listen_wlock(); | 382 | tcp_listen_wlock(); |
382 | lock = &tcp_lhash_lock; | 383 | lock = &tcp_lhash_lock; |
383 | } else { | 384 | } else { |
384 | struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent]; | 385 | struct inet_ehash_bucket *head = &tcp_ehash[sk->sk_hashent]; |
385 | lock = &head->lock; | 386 | lock = &head->lock; |
386 | write_lock_bh(&head->lock); | 387 | write_lock_bh(&head->lock); |
387 | } | 388 | } |
@@ -401,8 +402,10 @@ void tcp_unhash(struct sock *sk) | |||
401 | * connection. So always assume those are both wildcarded | 402 | * connection. So always assume those are both wildcarded |
402 | * during the search since they can never be otherwise. | 403 | * during the search since they can never be otherwise. |
403 | */ | 404 | */ |
404 | static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr, | 405 | static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, |
405 | unsigned short hnum, int dif) | 406 | const u32 daddr, |
407 | const unsigned short hnum, | ||
408 | const int dif) | ||
406 | { | 409 | { |
407 | struct sock *result = NULL, *sk; | 410 | struct sock *result = NULL, *sk; |
408 | struct hlist_node *node; | 411 | struct hlist_node *node; |
@@ -438,14 +441,15 @@ static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr, | |||
438 | } | 441 | } |
439 | 442 | ||
440 | /* Optimize the common listener case. */ | 443 | /* Optimize the common listener case. */ |
441 | static inline struct sock *tcp_v4_lookup_listener(u32 daddr, | 444 | static inline struct sock *tcp_v4_lookup_listener(const u32 daddr, |
442 | unsigned short hnum, int dif) | 445 | const unsigned short hnum, |
446 | const int dif) | ||
443 | { | 447 | { |
444 | struct sock *sk = NULL; | 448 | struct sock *sk = NULL; |
445 | struct hlist_head *head; | 449 | struct hlist_head *head; |
446 | 450 | ||
447 | read_lock(&tcp_lhash_lock); | 451 | read_lock(&tcp_lhash_lock); |
448 | head = &tcp_listening_hash[tcp_lhashfn(hnum)]; | 452 | head = &tcp_listening_hash[inet_lhashfn(hnum)]; |
449 | if (!hlist_empty(head)) { | 453 | if (!hlist_empty(head)) { |
450 | struct inet_sock *inet = inet_sk((sk = __sk_head(head))); | 454 | struct inet_sock *inet = inet_sk((sk = __sk_head(head))); |
451 | 455 | ||
@@ -470,11 +474,13 @@ sherry_cache: | |||
470 | * Local BH must be disabled here. | 474 | * Local BH must be disabled here. |
471 | */ | 475 | */ |
472 | 476 | ||
473 | static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport, | 477 | static inline struct sock *__tcp_v4_lookup_established(const u32 saddr, |
474 | u32 daddr, u16 hnum, | 478 | const u16 sport, |
475 | int dif) | 479 | const u32 daddr, |
480 | const u16 hnum, | ||
481 | const int dif) | ||
476 | { | 482 | { |
477 | struct tcp_ehash_bucket *head; | 483 | struct inet_ehash_bucket *head; |
478 | TCP_V4_ADDR_COOKIE(acookie, saddr, daddr) | 484 | TCP_V4_ADDR_COOKIE(acookie, saddr, daddr) |
479 | __u32 ports = TCP_COMBINED_PORTS(sport, hnum); | 485 | __u32 ports = TCP_COMBINED_PORTS(sport, hnum); |
480 | struct sock *sk; | 486 | struct sock *sk; |
@@ -546,7 +552,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport, | |||
546 | TCP_V4_ADDR_COOKIE(acookie, saddr, daddr) | 552 | TCP_V4_ADDR_COOKIE(acookie, saddr, daddr) |
547 | __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport); | 553 | __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport); |
548 | const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_ehash_size); | 554 | const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_ehash_size); |
549 | struct tcp_ehash_bucket *head = &tcp_ehash[hash]; | 555 | struct inet_ehash_bucket *head = &tcp_ehash[hash]; |
550 | struct sock *sk2; | 556 | struct sock *sk2; |
551 | struct hlist_node *node; | 557 | struct hlist_node *node; |
552 | struct tcp_tw_bucket *tw; | 558 | struct tcp_tw_bucket *tw; |
@@ -639,9 +645,9 @@ static inline u32 connect_port_offset(const struct sock *sk) | |||
639 | */ | 645 | */ |
640 | static inline int tcp_v4_hash_connect(struct sock *sk) | 646 | static inline int tcp_v4_hash_connect(struct sock *sk) |
641 | { | 647 | { |
642 | unsigned short snum = inet_sk(sk)->num; | 648 | const unsigned short snum = inet_sk(sk)->num; |
643 | struct tcp_bind_hashbucket *head; | 649 | struct inet_bind_hashbucket *head; |
644 | struct tcp_bind_bucket *tb; | 650 | struct inet_bind_bucket *tb; |
645 | int ret; | 651 | int ret; |
646 | 652 | ||
647 | if (!snum) { | 653 | if (!snum) { |
@@ -658,14 +664,14 @@ static inline int tcp_v4_hash_connect(struct sock *sk) | |||
658 | local_bh_disable(); | 664 | local_bh_disable(); |
659 | for (i = 1; i <= range; i++) { | 665 | for (i = 1; i <= range; i++) { |
660 | port = low + (i + offset) % range; | 666 | port = low + (i + offset) % range; |
661 | head = &tcp_bhash[tcp_bhashfn(port)]; | 667 | head = &tcp_bhash[inet_bhashfn(port, tcp_bhash_size)]; |
662 | spin_lock(&head->lock); | 668 | spin_lock(&head->lock); |
663 | 669 | ||
664 | /* Does not bother with rcv_saddr checks, | 670 | /* Does not bother with rcv_saddr checks, |
665 | * because the established check is already | 671 | * because the established check is already |
666 | * unique enough. | 672 | * unique enough. |
667 | */ | 673 | */ |
668 | tb_for_each(tb, node, &head->chain) { | 674 | inet_bind_bucket_for_each(tb, node, &head->chain) { |
669 | if (tb->port == port) { | 675 | if (tb->port == port) { |
670 | BUG_TRAP(!hlist_empty(&tb->owners)); | 676 | BUG_TRAP(!hlist_empty(&tb->owners)); |
671 | if (tb->fastreuse >= 0) | 677 | if (tb->fastreuse >= 0) |
@@ -678,7 +684,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk) | |||
678 | } | 684 | } |
679 | } | 685 | } |
680 | 686 | ||
681 | tb = tcp_bucket_create(head, port); | 687 | tb = inet_bind_bucket_create(tcp_bucket_cachep, head, port); |
682 | if (!tb) { | 688 | if (!tb) { |
683 | spin_unlock(&head->lock); | 689 | spin_unlock(&head->lock); |
684 | break; | 690 | break; |
@@ -713,7 +719,7 @@ ok: | |||
713 | goto out; | 719 | goto out; |
714 | } | 720 | } |
715 | 721 | ||
716 | head = &tcp_bhash[tcp_bhashfn(snum)]; | 722 | head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)]; |
717 | tb = tcp_sk(sk)->bind_hash; | 723 | tb = tcp_sk(sk)->bind_hash; |
718 | spin_lock_bh(&head->lock); | 724 | spin_lock_bh(&head->lock); |
719 | if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { | 725 | if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { |
@@ -2055,7 +2061,7 @@ start_req: | |||
2055 | } | 2061 | } |
2056 | read_unlock_bh(&tp->accept_queue.syn_wait_lock); | 2062 | read_unlock_bh(&tp->accept_queue.syn_wait_lock); |
2057 | } | 2063 | } |
2058 | if (++st->bucket < TCP_LHTABLE_SIZE) { | 2064 | if (++st->bucket < INET_LHTABLE_SIZE) { |
2059 | sk = sk_head(&tcp_listening_hash[st->bucket]); | 2065 | sk = sk_head(&tcp_listening_hash[st->bucket]); |
2060 | goto get_sk; | 2066 | goto get_sk; |
2061 | } | 2067 | } |
@@ -2506,7 +2512,7 @@ void __init tcp_v4_init(struct net_proto_family *ops) | |||
2506 | 2512 | ||
2507 | EXPORT_SYMBOL(ipv4_specific); | 2513 | EXPORT_SYMBOL(ipv4_specific); |
2508 | EXPORT_SYMBOL(tcp_bind_hash); | 2514 | EXPORT_SYMBOL(tcp_bind_hash); |
2509 | EXPORT_SYMBOL(tcp_bucket_create); | 2515 | EXPORT_SYMBOL(inet_bind_bucket_create); |
2510 | EXPORT_SYMBOL(tcp_hashinfo); | 2516 | EXPORT_SYMBOL(tcp_hashinfo); |
2511 | EXPORT_SYMBOL(tcp_inherit_port); | 2517 | EXPORT_SYMBOL(tcp_inherit_port); |
2512 | EXPORT_SYMBOL(tcp_listen_wlock); | 2518 | EXPORT_SYMBOL(tcp_listen_wlock); |