author		Arnaldo Carvalho de Melo <acme@ghostprotocols.net>	2005-08-09 23:07:35 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-08-29 18:41:44 -0400
commit		6e04e02165a7209a71db553b7bc48d68421e5ebf
tree		004157924013e6c099cacac59f39d3dd61f3e0e5 /net/ipv4/tcp_ipv4.c
parent		2d8c4ce51903636ce0f60addc8134aa50ab8fa76
[INET]: Move tcp_port_rover to inet_hashinfo

Also expose all of the tcp_hashinfo members, i.e. kill the tcp_ehash
etc. macros; this more clearly exposes functions that are already
generic, and some that need just a bit of work to become generic, as
we'll see in the upcoming changesets.

Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
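For reference, the diff reads more easily with the inet_hashinfo layout in
mind. Below is a sketch reconstructed only from the members this patch
touches; field order and exact types are assumptions, and the authoritative
definition lives in include/net/inet_hashtables.h:

	struct inet_hashinfo {
		/* Established sockets; TIME_WAIT sockets are chained in a
		 * second half of the table, at [hash + ehash_size]. */
		struct inet_ehash_bucket	*ehash;
		int				ehash_size;

		/* Bound ports, indexed via inet_bhashfn(port, bhash_size). */
		struct inet_bind_hashbucket	*bhash;
		int				bhash_size;
		kmem_cache_t			*bind_bucket_cachep;

		/* Listening sockets, plus the reader/writer machinery used
		 * by tcp_listen_wlock() below. */
		struct hlist_head		listening_hash[INET_LHTABLE_SIZE];
		rwlock_t			lhash_lock;
		atomic_t			lhash_users;
		wait_queue_head_t		lhash_wait;

		/* Ephemeral port allocation state, moved here by this patch. */
		spinlock_t			portalloc_lock;
		int				port_rover;
	};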
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')

 net/ipv4/tcp_ipv4.c | 101 +++++++++++++++++++++++------------------------
 1 file changed, 50 insertions(+), 51 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 40fe4f5fca1c..f5373f9f00ac 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -94,6 +94,7 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
 	.lhash_users	= ATOMIC_INIT(0),
 	.lhash_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
 	.portalloc_lock	= SPIN_LOCK_UNLOCKED,
+	.port_rover	= 1024 - 1,
 };
 
 /*
@@ -102,7 +103,6 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
  * 32768-61000
  */
 int sysctl_local_port_range[2] = { 1024, 4999 };
-int tcp_port_rover = 1024 - 1;
 
 static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
 {
@@ -146,16 +146,16 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 	int remaining = (high - low) + 1;
 	int rover;
 
-	spin_lock(&tcp_portalloc_lock);
-	if (tcp_port_rover < low)
+	spin_lock(&tcp_hashinfo.portalloc_lock);
+	if (tcp_hashinfo.port_rover < low)
 		rover = low;
 	else
-		rover = tcp_port_rover;
+		rover = tcp_hashinfo.port_rover;
 	do {
 		rover++;
 		if (rover > high)
 			rover = low;
-		head = &tcp_bhash[inet_bhashfn(rover, tcp_bhash_size)];
+		head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)];
 		spin_lock(&head->lock);
 		inet_bind_bucket_for_each(tb, node, &head->chain)
 			if (tb->port == rover)
@@ -164,8 +164,8 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 next:
 		spin_unlock(&head->lock);
 	} while (--remaining > 0);
-	tcp_port_rover = rover;
-	spin_unlock(&tcp_portalloc_lock);
+	tcp_hashinfo.port_rover = rover;
+	spin_unlock(&tcp_hashinfo.portalloc_lock);
 
 	/* Exhausted local port range during search? It is not
 	 * possible for us to be holding one of the bind hash
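The two hunks above are easier to follow as plain logic. A minimal
userspace sketch of the rover scan, under stated assumptions: the
hypothetical port_in_use() stands in for the bind-bucket walk, and all
locking is elided:

	#include <stdio.h>

	static int port_in_use(int port)
	{
		return 0;	/* stub for the inet_bind_bucket chain lookup */
	}

	/* Start one past the last port handed out, wrap at the top of the
	 * range, and give up after scanning the whole range once. */
	static int pick_local_port(int low, int high, int *port_rover)
	{
		int remaining = (high - low) + 1;
		int rover = (*port_rover < low) ? low : *port_rover;

		do {
			if (++rover > high)
				rover = low;
		} while (port_in_use(rover) && --remaining > 0);

		*port_rover = rover;	/* persists across calls; now lives in tcp_hashinfo */
		return remaining > 0 ? rover : -1;
	}

	int main(void)
	{
		int rover = 1024 - 1;	/* matches the .port_rover initializer */

		printf("%d\n", pick_local_port(1024, 4999, &rover));	/* prints 1025 */
		return 0;
	}

The point of the patch is only that *port_rover moves from a file-local
global into tcp_hashinfo, under the same portalloc_lock; the scan itself
is unchanged.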
@@ -182,7 +182,7 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 		 */
 		snum = rover;
 	} else {
-		head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
+		head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
 		spin_lock(&head->lock);
 		inet_bind_bucket_for_each(tb, node, &head->chain)
 			if (tb->port == snum)
@@ -205,7 +205,7 @@ tb_found:
 	}
 tb_not_found:
 	ret = 1;
-	if (!tb && (tb = inet_bind_bucket_create(tcp_bucket_cachep, head, snum)) == NULL)
+	if (!tb && (tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum)) == NULL)
 		goto fail_unlock;
 	if (hlist_empty(&tb->owners)) {
 		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
@@ -237,22 +237,22 @@ fail:
 
 void tcp_listen_wlock(void)
 {
-	write_lock(&tcp_lhash_lock);
+	write_lock(&tcp_hashinfo.lhash_lock);
 
-	if (atomic_read(&tcp_lhash_users)) {
+	if (atomic_read(&tcp_hashinfo.lhash_users)) {
 		DEFINE_WAIT(wait);
 
 		for (;;) {
-			prepare_to_wait_exclusive(&tcp_lhash_wait,
+			prepare_to_wait_exclusive(&tcp_hashinfo.lhash_wait,
 						  &wait, TASK_UNINTERRUPTIBLE);
-			if (!atomic_read(&tcp_lhash_users))
+			if (!atomic_read(&tcp_hashinfo.lhash_users))
 				break;
-			write_unlock_bh(&tcp_lhash_lock);
+			write_unlock_bh(&tcp_hashinfo.lhash_lock);
 			schedule();
-			write_lock_bh(&tcp_lhash_lock);
+			write_lock_bh(&tcp_hashinfo.lhash_lock);
 		}
 
-		finish_wait(&tcp_lhash_wait, &wait);
+		finish_wait(&tcp_hashinfo.lhash_wait, &wait);
 	}
 }
 
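tcp_listen_wlock() is the writer half of a homegrown reader/writer scheme:
lhash_users counts readers that may sleep while walking the listening hash,
and the writer spins on lhash_wait until they drain. For contrast, the
reader half looked roughly like this in the era's include/net/tcp.h,
adjusted here to the new member names; treat the exact form as an
assumption, not a quote:

	static inline void tcp_listen_lock(void)
	{
		/* read_lock synchronizes with a writer in tcp_listen_wlock() */
		read_lock(&tcp_hashinfo.lhash_lock);
		atomic_inc(&tcp_hashinfo.lhash_users);
		read_unlock(&tcp_hashinfo.lhash_lock);
	}

	static inline void tcp_listen_unlock(void)
	{
		/* last reader out wakes the waiting writer */
		if (atomic_dec_and_test(&tcp_hashinfo.lhash_users))
			wake_up(&tcp_hashinfo.lhash_wait);
	}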
@@ -263,20 +263,20 @@ static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
 
 	BUG_TRAP(sk_unhashed(sk));
 	if (listen_possible && sk->sk_state == TCP_LISTEN) {
-		list = &tcp_listening_hash[inet_sk_listen_hashfn(sk)];
-		lock = &tcp_lhash_lock;
+		list = &tcp_hashinfo.listening_hash[inet_sk_listen_hashfn(sk)];
+		lock = &tcp_hashinfo.lhash_lock;
 		tcp_listen_wlock();
 	} else {
-		sk->sk_hashent = inet_sk_ehashfn(sk, tcp_ehash_size);
-		list = &tcp_ehash[sk->sk_hashent].chain;
-		lock = &tcp_ehash[sk->sk_hashent].lock;
+		sk->sk_hashent = inet_sk_ehashfn(sk, tcp_hashinfo.ehash_size);
+		list = &tcp_hashinfo.ehash[sk->sk_hashent].chain;
+		lock = &tcp_hashinfo.ehash[sk->sk_hashent].lock;
 		write_lock(lock);
 	}
 	__sk_add_node(sk, list);
 	sock_prot_inc_use(sk->sk_prot);
 	write_unlock(lock);
 	if (listen_possible && sk->sk_state == TCP_LISTEN)
-		wake_up(&tcp_lhash_wait);
+		wake_up(&tcp_hashinfo.lhash_wait);
 }
 
 static void tcp_v4_hash(struct sock *sk)
@@ -298,9 +298,9 @@ void tcp_unhash(struct sock *sk)
 	if (sk->sk_state == TCP_LISTEN) {
 		local_bh_disable();
 		tcp_listen_wlock();
-		lock = &tcp_lhash_lock;
+		lock = &tcp_hashinfo.lhash_lock;
 	} else {
-		struct inet_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
+		struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[sk->sk_hashent];
 		lock = &head->lock;
 		write_lock_bh(&head->lock);
 	}
@@ -311,7 +311,7 @@ void tcp_unhash(struct sock *sk)
 
  ende:
 	if (sk->sk_state == TCP_LISTEN)
-		wake_up(&tcp_lhash_wait);
+		wake_up(&tcp_hashinfo.lhash_wait);
 }
 
 /* Don't inline this cruft. Here are some nice properties to
@@ -366,8 +366,8 @@ static inline struct sock *tcp_v4_lookup_listener(const u32 daddr,
 	struct sock *sk = NULL;
 	struct hlist_head *head;
 
-	read_lock(&tcp_lhash_lock);
-	head = &tcp_listening_hash[inet_lhashfn(hnum)];
+	read_lock(&tcp_hashinfo.lhash_lock);
+	head = &tcp_hashinfo.listening_hash[inet_lhashfn(hnum)];
 	if (!hlist_empty(head)) {
 		struct inet_sock *inet = inet_sk((sk = __sk_head(head)));
 
@@ -382,7 +382,7 @@ static inline struct sock *tcp_v4_lookup_listener(const u32 daddr,
 sherry_cache:
 			sock_hold(sk);
 	}
-	read_unlock(&tcp_lhash_lock);
+	read_unlock(&tcp_hashinfo.lhash_lock);
 	return sk;
 }
 
@@ -406,8 +406,8 @@ static inline struct sock *__tcp_v4_lookup_established(const u32 saddr,
 	/* Optimize here for direct hit, only listening connections can
 	 * have wildcards anyways.
 	 */
-	const int hash = inet_ehashfn(daddr, hnum, saddr, sport, tcp_ehash_size);
-	head = &tcp_ehash[hash];
+	const int hash = inet_ehashfn(daddr, hnum, saddr, sport, tcp_hashinfo.ehash_size);
+	head = &tcp_hashinfo.ehash[hash];
 	read_lock(&head->lock);
 	sk_for_each(sk, node, &head->chain) {
 		if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
@@ -415,7 +415,7 @@ static inline struct sock *__tcp_v4_lookup_established(const u32 saddr,
 	}
 
 	/* Must check for a TIME_WAIT'er before going to listener hash. */
-	sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
+	sk_for_each(sk, node, &(head + tcp_hashinfo.ehash_size)->chain) {
 		if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
 			goto hit;
 	}
@@ -469,8 +469,8 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
 	int dif = sk->sk_bound_dev_if;
 	TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
 	__u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
-	const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_ehash_size);
-	struct inet_ehash_bucket *head = &tcp_ehash[hash];
+	const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_hashinfo.ehash_size);
+	struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
 	struct sock *sk2;
 	struct hlist_node *node;
 	struct tcp_tw_bucket *tw;
@@ -478,7 +478,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
 	write_lock(&head->lock);
 
 	/* Check TIME-WAIT sockets first. */
-	sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
+	sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
 		tw = (struct tcp_tw_bucket *)sk2;
 
 		if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
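Note the recurring head + ehash_size pattern here and in the lookup above:
the ehash table is allocated at twice ehash_size, with TIME_WAIT sockets
chained in the upper half. A hypothetical helper, not part of this patch,
states the invariant explicitly:

	/* Hypothetical helper: the TIME_WAIT chain twinned with an
	 * established-hash bucket lives ehash_size slots further on. */
	static inline struct inet_ehash_bucket *
	inet_twin_bucket(struct inet_hashinfo *hashinfo, int hash)
	{
		return &hashinfo->ehash[hash + hashinfo->ehash_size];
	}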
@@ -582,7 +582,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
 	local_bh_disable();
 	for (i = 1; i <= range; i++) {
 		port = low + (i + offset) % range;
-		head = &tcp_bhash[inet_bhashfn(port, tcp_bhash_size)];
+		head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)];
 		spin_lock(&head->lock);
 
 		/* Does not bother with rcv_saddr checks,
@@ -602,7 +602,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
 			}
 		}
 
-		tb = inet_bind_bucket_create(tcp_bucket_cachep, head, port);
+		tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port);
 		if (!tb) {
 			spin_unlock(&head->lock);
 			break;
@@ -637,7 +637,7 @@ ok:
 		goto out;
 	}
 
-	head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
+	head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
 	tb = inet_sk(sk)->bind_hash;
 	spin_lock_bh(&head->lock);
 	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
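All the bind-side hunks funnel through inet_bhashfn(port,
tcp_hashinfo.bhash_size). If memory serves (an assumption; verify against
include/net/inet_hashtables.h of this era), it is a simple mask, which is
why bhash_size must be a power of two:

	static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
	{
		return lport & (bhash_size - 1);	/* assumes bhash_size == 2^n */
	}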
@@ -1926,7 +1926,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
 
 	if (!sk) {
 		st->bucket = 0;
-		sk = sk_head(&tcp_listening_hash[0]);
+		sk = sk_head(&tcp_hashinfo.listening_hash[0]);
 		goto get_sk;
 	}
 
@@ -1980,7 +1980,7 @@ start_req:
 		read_unlock_bh(&tp->accept_queue.syn_wait_lock);
 	}
 	if (++st->bucket < INET_LHTABLE_SIZE) {
-		sk = sk_head(&tcp_listening_hash[st->bucket]);
+		sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
 		goto get_sk;
 	}
 	cur = NULL;
@@ -2004,7 +2004,7 @@ static void *established_get_first(struct seq_file *seq)
 	struct tcp_iter_state* st = seq->private;
 	void *rc = NULL;
 
-	for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) {
+	for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
 		struct sock *sk;
 		struct hlist_node *node;
 		struct tcp_tw_bucket *tw;
@@ -2012,8 +2012,8 @@ static void *established_get_first(struct seq_file *seq)
 		/* We can reschedule _before_ having picked the target: */
 		cond_resched_softirq();
 
-		read_lock(&tcp_ehash[st->bucket].lock);
-		sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
+		read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
+		sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
 			if (sk->sk_family != st->family) {
 				continue;
 			}
@@ -2022,14 +2022,14 @@ static void *established_get_first(struct seq_file *seq)
 		}
 		st->state = TCP_SEQ_STATE_TIME_WAIT;
 		tw_for_each(tw, node,
-			    &tcp_ehash[st->bucket + tcp_ehash_size].chain) {
+			    &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
 			if (tw->tw_family != st->family) {
 				continue;
 			}
 			rc = tw;
 			goto out;
 		}
-		read_unlock(&tcp_ehash[st->bucket].lock);
+		read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
 		st->state = TCP_SEQ_STATE_ESTABLISHED;
 	}
 out:
@@ -2056,15 +2056,15 @@ get_tw:
 		cur = tw;
 		goto out;
 	}
-	read_unlock(&tcp_ehash[st->bucket].lock);
+	read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
 	st->state = TCP_SEQ_STATE_ESTABLISHED;
 
 	/* We can reschedule between buckets: */
 	cond_resched_softirq();
 
-	if (++st->bucket < tcp_ehash_size) {
-		read_lock(&tcp_ehash[st->bucket].lock);
-		sk = sk_head(&tcp_ehash[st->bucket].chain);
+	if (++st->bucket < tcp_hashinfo.ehash_size) {
+		read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
+		sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
 	} else {
 		cur = NULL;
 		goto out;
@@ -2078,7 +2078,7 @@ get_tw:
 	}
 
 	st->state = TCP_SEQ_STATE_TIME_WAIT;
-	tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain);
+	tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain);
 	goto get_tw;
 found:
 	cur = sk;
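The seq_file hunks above all implement one walk order for /proc/net/tcp. A
condensed sketch under stated assumptions: visit() is a hypothetical
callback, and the per-bucket locking and cond_resched_softirq() calls are
elided:

	/* Per bucket: established sockets first, then the TIME_WAIT twins
	 * in the upper half of the table. */
	static void walk_established_and_tw(struct inet_hashinfo *hashinfo,
					    void (*visit)(struct sock *sk))
	{
		int bucket;

		for (bucket = 0; bucket < hashinfo->ehash_size; bucket++) {
			struct sock *sk;
			struct hlist_node *node;

			sk_for_each(sk, node, &hashinfo->ehash[bucket].chain)
				visit(sk);
			sk_for_each(sk, node,
				    &hashinfo->ehash[bucket + hashinfo->ehash_size].chain)
				visit(sk);
		}
	}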
@@ -2173,7 +2173,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
 	case TCP_SEQ_STATE_TIME_WAIT:
 	case TCP_SEQ_STATE_ESTABLISHED:
 		if (v)
-			read_unlock(&tcp_ehash[st->bucket].lock);
+			read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
 		local_bh_enable();
 		break;
 	}
@@ -2432,7 +2432,6 @@ EXPORT_SYMBOL(ipv4_specific);
 EXPORT_SYMBOL(inet_bind_bucket_create);
 EXPORT_SYMBOL(tcp_hashinfo);
 EXPORT_SYMBOL(tcp_listen_wlock);
-EXPORT_SYMBOL(tcp_port_rover);
 EXPORT_SYMBOL(tcp_prot);
 EXPORT_SYMBOL(tcp_unhash);
 EXPORT_SYMBOL(tcp_v4_conn_request);