author	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>	2005-08-09 23:07:13 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-08-29 18:40:29 -0400
commit	2d8c4ce51903636ce0f60addc8134aa50ab8fa76
tree	1ea4d4faf831b832489b30b13d8910777020feed
parent	ff21d5774b4a186c98be6398eacde75d896db804
[INET]: Generalise tcp_bind_hash & tcp_inherit_port
This required moving tcp_bucket_cachep to inet_hashinfo.

Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/net/inet_hashtables.h  32
-rw-r--r--  include/net/tcp.h              11
-rw-r--r--  net/ipv4/inet_hashtables.c     40
-rw-r--r--  net/ipv4/tcp.c                  4
-rw-r--r--  net/ipv4/tcp_ipv4.c            68
-rw-r--r--  net/ipv6/tcp_ipv6.c             6
6 files changed, 81 insertions(+), 80 deletions(-)
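For orientation, here is a minimal caller-side sketch (not part of this patch) of how a protocol that owns its own struct inet_hashinfo can use the generalised helpers introduced below, instead of the old TCP-private tcp_bind_hash()/tcp_inherit_port()/tcp_put_port(). The names my_hashinfo and the my_* functions are hypothetical, and the bind-bucket lookup that produces tb is assumed to have already happened, as in tcp_v4_get_port():

/* Hypothetical caller of the generalised helpers; a sketch, not kernel code from this patch. */
#include <net/inet_hashtables.h>

static struct inet_hashinfo my_hashinfo;	/* assumed to be initialised elsewhere */

/* Record a successful bind, as tcp_v4_get_port() does once a bucket is found. */
static void my_bind_success(struct sock *sk, struct inet_bind_bucket *tb,
			    const unsigned short snum)
{
	if (!inet_sk(sk)->bind_hash)
		inet_bind_hash(sk, tb, snum);
}

/* Hand the listener's bound port to a freshly accepted child socket. */
static void my_accept_child(struct sock *parent, struct sock *child)
{
	inet_inherit_port(&my_hashinfo, parent, child);
}

/* Drop the local port reference when the socket is destroyed. */
static void my_destroy_sock(struct sock *sk)
{
	if (inet_sk(sk)->bind_hash)
		inet_put_port(&my_hashinfo, sk);
}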
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 3a6c11ca421d..da9705525f15 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -14,12 +14,15 @@
 #ifndef _INET_HASHTABLES_H
 #define _INET_HASHTABLES_H
 
+#include <linux/interrupt.h>
 #include <linux/ip.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
 
+#include <net/sock.h>
+
 /* This is for all connections with a full identity, no wildcards.
  * New scheme, half the table is for TIME_WAIT, the other half is
  * for the rest. I'll experiment with dynamic table growth later.
@@ -113,6 +116,7 @@ struct inet_hashinfo {
 	atomic_t			lhash_users;
 	wait_queue_head_t		lhash_wait;
 	spinlock_t			portalloc_lock;
+	kmem_cache_t			*bind_bucket_cachep;
 };
 
 static inline int inet_ehashfn(const __u32 laddr, const __u16 lport,
@@ -148,6 +152,9 @@ static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
 	return lport & (bhash_size - 1);
 }
 
+extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
+			   const unsigned short snum);
+
 /* These can have wildcards, don't try too hard. */
 static inline int inet_lhashfn(const unsigned short num)
 {
@@ -159,4 +166,29 @@ static inline int inet_sk_listen_hashfn(const struct sock *sk)
 	return inet_lhashfn(inet_sk(sk)->num);
 }
 
+/* Caller must disable local BH processing. */
+static inline void __inet_inherit_port(struct inet_hashinfo *table,
+				       struct sock *sk, struct sock *child)
+{
+	const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size);
+	struct inet_bind_hashbucket *head = &table->bhash[bhash];
+	struct inet_bind_bucket *tb;
+
+	spin_lock(&head->lock);
+	tb = inet_sk(sk)->bind_hash;
+	sk_add_bind_node(child, &tb->owners);
+	inet_sk(child)->bind_hash = tb;
+	spin_unlock(&head->lock);
+}
+
+static inline void inet_inherit_port(struct inet_hashinfo *table,
+				     struct sock *sk, struct sock *child)
+{
+	local_bh_disable();
+	__inet_inherit_port(table, sk, child);
+	local_bh_enable();
+}
+
+extern void inet_put_port(struct inet_hashinfo *table, struct sock *sk);
+
 #endif /* _INET_HASHTABLES_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6c6c879e7e87..9eb8ff7c911e 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -51,14 +51,10 @@ extern struct inet_hashinfo tcp_hashinfo;
 #define tcp_lhash_users		(tcp_hashinfo.lhash_users)
 #define tcp_lhash_wait		(tcp_hashinfo.lhash_wait)
 #define tcp_portalloc_lock	(tcp_hashinfo.portalloc_lock)
-
-extern kmem_cache_t *tcp_bucket_cachep;
+#define tcp_bucket_cachep	(tcp_hashinfo.bind_bucket_cachep)
 
 extern int tcp_port_rover;
 
-extern void tcp_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
-			  unsigned short snum);
-
 #if (BITS_PER_LONG == 64)
 #define TCP_ADDRCMP_ALIGN_BYTES 8
 #else
@@ -549,9 +545,6 @@ DECLARE_SNMP_STAT(struct tcp_mib, tcp_statistics);
 #define TCP_ADD_STATS_BH(field, val)	SNMP_ADD_STATS_BH(tcp_statistics, field, val)
 #define TCP_ADD_STATS_USER(field, val)	SNMP_ADD_STATS_USER(tcp_statistics, field, val)
 
-extern void			tcp_put_port(struct sock *sk);
-extern void			tcp_inherit_port(struct sock *sk, struct sock *child);
-
 extern void			tcp_v4_err(struct sk_buff *skb, u32);
 
 extern void			tcp_shutdown (struct sock *sk, int how);
@@ -1268,7 +1261,7 @@ static __inline__ void tcp_set_state(struct sock *sk, int state)
 			sk->sk_prot->unhash(sk);
 			if (inet_sk(sk)->bind_hash &&
 			    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
-				tcp_put_port(sk);
+				inet_put_port(&tcp_hashinfo, sk);
 		/* fall through */
 	default:
 		if (oldstate==TCP_ESTABLISHED)
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 343a890bd617..33d6cbe32cdc 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -14,6 +14,7 @@
  */
 
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 
 #include <net/inet_hashtables.h>
@@ -49,3 +50,42 @@ void inet_bind_bucket_destroy(kmem_cache_t *cachep, struct inet_bind_bucket *tb)
 		kmem_cache_free(cachep, tb);
 	}
 }
+
+void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
+		    const unsigned short snum)
+{
+	struct inet_sock *inet = inet_sk(sk);
+	inet->num = snum;
+	sk_add_bind_node(sk, &tb->owners);
+	inet->bind_hash = tb;
+}
+
+EXPORT_SYMBOL(inet_bind_hash);
+
+/*
+ * Get rid of any references to a local port held by the given sock.
+ */
+static void __inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk)
+{
+	struct inet_sock *inet = inet_sk(sk);
+	const int bhash = inet_bhashfn(inet->num, hashinfo->bhash_size);
+	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
+	struct inet_bind_bucket *tb;
+
+	spin_lock(&head->lock);
+	tb = inet->bind_hash;
+	__sk_del_bind_node(sk);
+	inet->bind_hash = NULL;
+	inet->num = 0;
+	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
+	spin_unlock(&head->lock);
+}
+
+void inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk)
+{
+	local_bh_disable();
+	__inet_put_port(hashinfo, sk);
+	local_bh_enable();
+}
+
+EXPORT_SYMBOL(inet_put_port);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e54a410ca701..38c04c1a754c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -271,10 +271,6 @@ int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
 
 DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
 
-kmem_cache_t *tcp_bucket_cachep;
-
-EXPORT_SYMBOL_GPL(tcp_bucket_cachep);
-
 kmem_cache_t *tcp_timewait_cachep;
 
 atomic_t tcp_orphan_count = ATOMIC_INIT(0);
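Not visible in the hunk above: with the extern gone from tcp.c, the bind bucket slab now lives in tcp_hashinfo.bind_bucket_cachep (the tcp_bucket_cachep macro in tcp.h expands to it), so it is expected to be created by the TCP initialisation code. A hedged sketch of what that allocation could look like, assuming the 2.6-era kmem_cache_create() signature; the exact tcp_init() call site is not shown in this patch excerpt:

	/* Sketch: slab setup now that the cache is a member of tcp_hashinfo. */
	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!tcp_hashinfo.bind_bucket_cachep)
		panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");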
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 10a9b3ae3442..40fe4f5fca1c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -104,37 +104,6 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
 int sysctl_local_port_range[2] = { 1024, 4999 };
 int tcp_port_rover = 1024 - 1;
 
-/* Caller must disable local BH processing. */
-static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
-{
-	struct inet_bind_hashbucket *head =
-				&tcp_bhash[inet_bhashfn(inet_sk(child)->num,
-							tcp_bhash_size)];
-	struct inet_bind_bucket *tb;
-
-	spin_lock(&head->lock);
-	tb = inet_sk(sk)->bind_hash;
-	sk_add_bind_node(child, &tb->owners);
-	inet_sk(child)->bind_hash = tb;
-	spin_unlock(&head->lock);
-}
-
-inline void tcp_inherit_port(struct sock *sk, struct sock *child)
-{
-	local_bh_disable();
-	__tcp_inherit_port(sk, child);
-	local_bh_enable();
-}
-
-void tcp_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
-		   const unsigned short snum)
-{
-	struct inet_sock *inet = inet_sk(sk);
-	inet->num = snum;
-	sk_add_bind_node(sk, &tb->owners);
-	inet->bind_hash = tb;
-}
-
 static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
 {
 	const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk);
@@ -248,7 +217,7 @@ tb_not_found:
 		tb->fastreuse = 0;
 success:
 	if (!inet_sk(sk)->bind_hash)
-		tcp_bind_hash(sk, tb, snum);
+		inet_bind_hash(sk, tb, snum);
 	BUG_TRAP(inet_sk(sk)->bind_hash == tb);
 	ret = 0;
 
@@ -259,32 +228,6 @@ fail:
 	return ret;
 }
 
-/* Get rid of any references to a local port held by the
- * given sock.
- */
-static void __tcp_put_port(struct sock *sk)
-{
-	struct inet_sock *inet = inet_sk(sk);
-	struct inet_bind_hashbucket *head = &tcp_bhash[inet_bhashfn(inet->num,
-								    tcp_bhash_size)];
-	struct inet_bind_bucket *tb;
-
-	spin_lock(&head->lock);
-	tb = inet->bind_hash;
-	__sk_del_bind_node(sk);
-	inet->bind_hash = NULL;
-	inet->num = 0;
-	inet_bind_bucket_destroy(tcp_bucket_cachep, tb);
-	spin_unlock(&head->lock);
-}
-
-void tcp_put_port(struct sock *sk)
-{
-	local_bh_disable();
-	__tcp_put_port(sk);
-	local_bh_enable();
-}
-
 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
  * Look, when several writers sleep and reader wakes them up, all but one
  * immediately hit write lock and grab all the cpus. Exclusive sleep solves
@@ -678,7 +621,7 @@ ok:
 		hint += i;
 
 		/* Head lock still held and bh's disabled */
-		tcp_bind_hash(sk, tb, port);
+		inet_bind_hash(sk, tb, port);
 		if (sk_unhashed(sk)) {
 			inet_sk(sk)->sport = htons(port);
 			__tcp_v4_hash(sk, 0);
@@ -1537,7 +1480,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	tcp_initialize_rcv_mss(newsk);
 
 	__tcp_v4_hash(newsk, 0);
-	__tcp_inherit_port(sk, newsk);
+	__inet_inherit_port(&tcp_hashinfo, sk, newsk);
 
 	return newsk;
 
@@ -1942,7 +1885,7 @@ int tcp_v4_destroy_sock(struct sock *sk)
 
 	/* Clean up a referenced TCP bind bucket. */
 	if (inet_sk(sk)->bind_hash)
-		tcp_put_port(sk);
+		inet_put_port(&tcp_hashinfo, sk);
 
 	/*
 	 * If sendmsg cached page exists, toss it.
@@ -2486,14 +2429,11 @@ void __init tcp_v4_init(struct net_proto_family *ops)
 }
 
 EXPORT_SYMBOL(ipv4_specific);
-EXPORT_SYMBOL(tcp_bind_hash);
 EXPORT_SYMBOL(inet_bind_bucket_create);
 EXPORT_SYMBOL(tcp_hashinfo);
-EXPORT_SYMBOL(tcp_inherit_port);
 EXPORT_SYMBOL(tcp_listen_wlock);
 EXPORT_SYMBOL(tcp_port_rover);
 EXPORT_SYMBOL(tcp_prot);
-EXPORT_SYMBOL(tcp_put_port);
 EXPORT_SYMBOL(tcp_unhash);
 EXPORT_SYMBOL(tcp_v4_conn_request);
 EXPORT_SYMBOL(tcp_v4_connect);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index a8ca7ba06c1c..bfbedb56bce2 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -205,7 +205,7 @@ tb_not_found:
 
 success:
 	if (!inet_sk(sk)->bind_hash)
-		tcp_bind_hash(sk, tb, snum);
+		inet_bind_hash(sk, tb, snum);
 	BUG_TRAP(inet_sk(sk)->bind_hash == tb);
 	ret = 0;
 
@@ -597,7 +597,7 @@ ok:
 		hint += i;
 
 		/* Head lock still held and bh's disabled */
-		tcp_bind_hash(sk, tb, port);
+		inet_bind_hash(sk, tb, port);
 		if (sk_unhashed(sk)) {
 			inet_sk(sk)->sport = htons(port);
 			__tcp_v6_hash(sk);
@@ -1536,7 +1536,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
 
 	__tcp_v6_hash(newsk);
-	tcp_inherit_port(sk, newsk);
+	inet_inherit_port(&tcp_hashinfo, sk, newsk);
 
 	return newsk;
 