author		Jeff Garzik <jgarzik@pobox.com>	2005-09-01 18:02:01 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-09-01 18:02:01 -0400
commit		e3ee3b78f83688a0ae4315e8be71b2eac559904a (patch)
tree		deb03bcdd020262af450ed23382d7c921263f5cf /include/net/inet_hashtables.h
parent		91cb70c1769d9b72dd1efe40c31f01005820b09e (diff)
parent		6b39374a27eb4be7e9d82145ae270ba02ea90dc8 (diff)
Merge /spare/repo/netdev-2.6 branch 'master'
Diffstat (limited to 'include/net/inet_hashtables.h')
-rw-r--r--	include/net/inet_hashtables.h	427
1 files changed, 427 insertions, 0 deletions
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
new file mode 100644
index 000000000000..646b6ea7fe26
--- /dev/null
+++ b/include/net/inet_hashtables.h
@@ -0,0 +1,427 @@
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H

#include <linux/config.h>

#include <linux/interrupt.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/tcp_states.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * New scheme, half the table is for TIME_WAIT, the other half is
 * for the rest.  I'll experiment with dynamic table growth later.
 */
struct inet_ehash_bucket {
	rwlock_t	  lock;
	struct hlist_head chain;
} __attribute__((__aligned__(8)));
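
/*
 * Illustrative helper (hypothetical, not part of this patch): with the
 * split described above, the TIME_WAIT twin of an established chain
 * sits exactly ehash_size buckets further along, which is how
 * __inet_lookup_established() below reaches the second half.
 */
static inline struct inet_ehash_bucket *
example_ehash_tw_bucket(struct inet_ehash_bucket *ehash, const int hash,
			const int ehash_size)
{
	/* First half: &ehash[hash]; TIME_WAIT half: */
	return &ehash[hash + ehash_size];
}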

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit, if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (ie. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-) -DaveM
 */
struct inet_bind_bucket {
	unsigned short		port;
	signed short		fastreuse;
	struct hlist_node	node;
	struct hlist_head	owners;
};
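
/*
 * Sketch of the fast path described above (hypothetical helper, not
 * part of this patch): while tb->fastreuse is still positive, a
 * reusing, non-listening socket may bind to the port without walking
 * tb->owners at all.
 */
static inline int example_bind_fastpath_ok(const struct inet_bind_bucket *tb,
					   const struct sock *newsk)
{
	return tb->fastreuse > 0 &&
	       newsk->sk_reuse && newsk->sk_state != TCP_LISTEN;
}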

#define inet_bind_bucket_for_each(tb, node, head) \
	hlist_for_each_entry(tb, node, head, node)

struct inet_bind_hashbucket {
	spinlock_t		lock;
	struct hlist_head	chain;
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */

struct inet_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 * First half of the table is for sockets not in TIME_WAIT, second half
	 * is for TIME_WAIT sockets only.
	 */
	struct inet_ehash_bucket	*ehash;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct inet_bind_hashbucket	*bhash;

	int				bhash_size;
	int				ehash_size;

	/* All sockets in TCP_LISTEN state will be in here.  This is the only
	 * table where wildcard'd TCP sockets can exist.  Hash function here
	 * is just local port number.
	 */
	struct hlist_head		listening_hash[INET_LHTABLE_SIZE];

	/* All the above members are written once at bootup and
	 * never written again _or_ are predominantly read-access.
	 *
	 * Now align to a new cache line as all the following members
	 * are often dirty.
	 */
	rwlock_t			lhash_lock ____cacheline_aligned;
	atomic_t			lhash_users;
	wait_queue_head_t		lhash_wait;
	spinlock_t			portalloc_lock;
	kmem_cache_t			*bind_bucket_cachep;
	int				port_rover;
};

static inline int inet_ehashfn(const __u32 laddr, const __u16 lport,
			       const __u32 faddr, const __u16 fport,
			       const int ehash_size)
{
	int h = (laddr ^ lport) ^ (faddr ^ fport);
	h ^= h >> 16;
	h ^= h >> 8;
	return h & (ehash_size - 1);
}
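
/*
 * Worked reading of inet_ehashfn() above (illustrative, not part of
 * this patch): the xor of the four identity fields is folded twice so
 * the high-order bits influence the low-order ones before masking.
 * The mask only selects a valid bucket if ehash_size is a power of two:
 *
 *	h  = (laddr ^ lport) ^ (faddr ^ fport);
 *	h ^= h >> 16;		fold bits 16..31 into bits 0..15
 *	h ^= h >> 8;		fold bits 8..15 into bits 0..7
 *	h &= ehash_size - 1;	e.g. 0xff for a 256-bucket table
 */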

static inline int inet_sk_ehashfn(const struct sock *sk, const int ehash_size)
{
	const struct inet_sock *inet = inet_sk(sk);
	const __u32 laddr = inet->rcv_saddr;
	const __u16 lport = inet->num;
	const __u32 faddr = inet->daddr;
	const __u16 fport = inet->dport;

	return inet_ehashfn(laddr, lport, faddr, fport, ehash_size);
}

extern struct inet_bind_bucket *
	inet_bind_bucket_create(kmem_cache_t *cachep,
				struct inet_bind_hashbucket *head,
				const unsigned short snum);
extern void inet_bind_bucket_destroy(kmem_cache_t *cachep,
				     struct inet_bind_bucket *tb);

static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
{
	return lport & (bhash_size - 1);
}

extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
			   const unsigned short snum);

/* These can have wildcards, don't try too hard. */
static inline int inet_lhashfn(const unsigned short num)
{
	return num & (INET_LHTABLE_SIZE - 1);
}

static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
	return inet_lhashfn(inet_sk(sk)->num);
}

/* Caller must disable local BH processing. */
static inline void __inet_inherit_port(struct inet_hashinfo *table,
				       struct sock *sk, struct sock *child)
{
	const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	sk_add_bind_node(child, &tb->owners);
	inet_csk(child)->icsk_bind_hash = tb;
	spin_unlock(&head->lock);
}

static inline void inet_inherit_port(struct inet_hashinfo *table,
				     struct sock *sk, struct sock *child)
{
	local_bh_disable();
	__inet_inherit_port(table, sk, child);
	local_bh_enable();
}

extern void inet_put_port(struct inet_hashinfo *table, struct sock *sk);

extern void inet_listen_wlock(struct inet_hashinfo *hashinfo);

/*
 * - We may sleep inside this lock.
 * - If sleeping is not required (or called from BH),
 *   use plain read_(un)lock(&inet_hashinfo.lhash_lock).
 */
static inline void inet_listen_lock(struct inet_hashinfo *hashinfo)
{
	/* read_lock synchronizes with candidate writers */
	read_lock(&hashinfo->lhash_lock);
	atomic_inc(&hashinfo->lhash_users);
	read_unlock(&hashinfo->lhash_lock);
}

static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo)
{
	if (atomic_dec_and_test(&hashinfo->lhash_users))
		wake_up(&hashinfo->lhash_wait);
}
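
/*
 * For orientation, a sketch of the writer side that the reader/waker
 * pair above is balanced against.  The real inet_listen_wlock() lives
 * in net/ipv4/inet_hashtables.c; this hypothetical version only shows
 * the idea: take the write lock, then let any readers pinned via
 * lhash_users drain, sleeping on lhash_wait until they wake us.
 */
static inline void example_listen_wlock(struct inet_hashinfo *hashinfo)
{
	write_lock(&hashinfo->lhash_lock);
	while (atomic_read(&hashinfo->lhash_users)) {
		/* Drop the lock so readers can finish, then re-check. */
		write_unlock(&hashinfo->lhash_lock);
		wait_event(hashinfo->lhash_wait,
			   !atomic_read(&hashinfo->lhash_users));
		write_lock(&hashinfo->lhash_lock);
	}
}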

static inline void __inet_hash(struct inet_hashinfo *hashinfo,
			       struct sock *sk, const int listen_possible)
{
	struct hlist_head *list;
	rwlock_t *lock;

	BUG_TRAP(sk_unhashed(sk));
	if (listen_possible && sk->sk_state == TCP_LISTEN) {
		list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
		lock = &hashinfo->lhash_lock;
		inet_listen_wlock(hashinfo);
	} else {
		sk->sk_hashent = inet_sk_ehashfn(sk, hashinfo->ehash_size);
		list = &hashinfo->ehash[sk->sk_hashent].chain;
		lock = &hashinfo->ehash[sk->sk_hashent].lock;
		write_lock(lock);
	}
	__sk_add_node(sk, list);
	sock_prot_inc_use(sk->sk_prot);
	write_unlock(lock);
	if (listen_possible && sk->sk_state == TCP_LISTEN)
		wake_up(&hashinfo->lhash_wait);
}

static inline void inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		__inet_hash(hashinfo, sk, 1);
		local_bh_enable();
	}
}

static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk)
{
	rwlock_t *lock;

	if (sk_unhashed(sk))
		goto out;

	if (sk->sk_state == TCP_LISTEN) {
		local_bh_disable();
		inet_listen_wlock(hashinfo);
		lock = &hashinfo->lhash_lock;
	} else {
		struct inet_ehash_bucket *head = &hashinfo->ehash[sk->sk_hashent];
		lock = &head->lock;
		write_lock_bh(&head->lock);
	}

	if (__sk_del_node_init(sk))
		sock_prot_dec_use(sk->sk_prot);
	write_unlock_bh(lock);
out:
	if (sk->sk_state == TCP_LISTEN)
		wake_up(&hashinfo->lhash_wait);
}

static inline int inet_iif(const struct sk_buff *skb)
{
	return ((struct rtable *)skb->dst)->rt_iif;
}

extern struct sock *__inet_lookup_listener(const struct hlist_head *head,
					   const u32 daddr,
					   const unsigned short hnum,
					   const int dif);

/* Optimize the common listener case. */
static inline struct sock *
inet_lookup_listener(struct inet_hashinfo *hashinfo,
		     const u32 daddr,
		     const unsigned short hnum, const int dif)
{
	struct sock *sk = NULL;
	const struct hlist_head *head;

	read_lock(&hashinfo->lhash_lock);
	head = &hashinfo->listening_hash[inet_lhashfn(hnum)];
	if (!hlist_empty(head)) {
		const struct inet_sock *inet = inet_sk((sk = __sk_head(head)));

		if (inet->num == hnum && !sk->sk_node.next &&
		    (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
		    (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
		    !sk->sk_bound_dev_if)
			goto sherry_cache;
		sk = __inet_lookup_listener(head, daddr, hnum, dif);
	}
	if (sk) {
sherry_cache:
		sock_hold(sk);
	}
	read_unlock(&hashinfo->lhash_lock);
	return sk;
}

/* Socket demux engine toys. */
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
	(((__u32)(__sport) << 16) | (__u32)(__dport))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	(((__u32)(__dport) << 16) | (__u32)(__sport))
#endif

#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __u64 __name = (((__u64)(__saddr)) << 32) | ((__u64)(__daddr));
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __u64 __name = (((__u64)(__daddr)) << 32) | ((__u64)(__saddr));
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)	\
	(((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie))	&&	\
	 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports))	&&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)	\
	(((*((__u64 *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) &&	\
	 ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) &&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
#define INET_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)	\
	((inet_sk(__sk)->daddr		== (__saddr))		&&	\
	 (inet_sk(__sk)->rcv_saddr	== (__daddr))		&&	\
	 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports))	&&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)	\
	((inet_twsk(__sk)->tw_daddr	== (__saddr))		&&	\
	 (inet_twsk(__sk)->tw_rcv_saddr	== (__daddr))		&&	\
	 ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) &&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#endif /* 64-bit arch */
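
/*
 * Note on the 64-bit fast path above (illustrative, not part of this
 * patch): the single __u64/__u32 loads are only valid because struct
 * inet_sock lays out rcv_saddr immediately after daddr and num
 * immediately after dport (likewise for the timewait fields).
 * Compile-time assertions along these lines would pin the assumption
 * down:
 */
static inline void example_match_layout_checks(void)
{
	BUILD_BUG_ON(offsetof(struct inet_sock, rcv_saddr) !=
		     offsetof(struct inet_sock, daddr) + sizeof(__u32));
	BUILD_BUG_ON(offsetof(struct inet_sock, num) !=
		     offsetof(struct inet_sock, dport) + sizeof(__u16));
}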

/*
 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 *
 * Local BH must be disabled here.
 */
static inline struct sock *
__inet_lookup_established(struct inet_hashinfo *hashinfo,
			  const u32 saddr, const u16 sport,
			  const u32 daddr, const u16 hnum,
			  const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __u32 ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	const int hash = inet_ehashfn(daddr, hnum, saddr, sport, hashinfo->ehash_size);
	struct inet_ehash_bucket *head = &hashinfo->ehash[hash];

	read_lock(&head->lock);
	sk_for_each(sk, node, &head->chain) {
		if (INET_MATCH(sk, acookie, saddr, daddr, ports, dif))
			goto hit; /* You sunk my battleship! */
	}

	/* Must check for a TIME_WAIT'er before going to listener hash. */
	sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) {
		if (INET_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
			goto hit;
	}
	sk = NULL;
out:
	read_unlock(&head->lock);
	return sk;
hit:
	sock_hold(sk);
	goto out;
}

static inline struct sock *__inet_lookup(struct inet_hashinfo *hashinfo,
					 const u32 saddr, const u16 sport,
					 const u32 daddr, const u16 hnum,
					 const int dif)
{
	struct sock *sk = __inet_lookup_established(hashinfo, saddr, sport, daddr,
						    hnum, dif);
	return sk ? : inet_lookup_listener(hashinfo, daddr, hnum, dif);
}

static inline struct sock *inet_lookup(struct inet_hashinfo *hashinfo,
				       const u32 saddr, const u16 sport,
				       const u32 daddr, const u16 dport,
				       const int dif)
{
	struct sock *sk;

	local_bh_disable();
	sk = __inet_lookup(hashinfo, saddr, sport, daddr, ntohs(dport), dif);
	local_bh_enable();

	return sk;
}
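
/*
 * Illustrative caller (hypothetical, not part of this patch): the shape
 * of the demux a receive handler such as tcp_v4_rcv() performs with the
 * helpers above.  inet_lookup() disables BH itself and returns the
 * socket with a reference held (sock_hold()); the caller drops it with
 * sock_put() when finished.
 */
static inline struct sock *example_demux(struct inet_hashinfo *hashinfo,
					 struct sk_buff *skb,
					 const struct iphdr *iph,
					 const struct tcphdr *th)
{
	return inet_lookup(hashinfo, iph->saddr, th->source,
			   iph->daddr, th->dest, inet_iif(skb));
}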
#endif /* _INET_HASHTABLES_H */