diff options
Diffstat (limited to 'include')
-rw-r--r-- | include/net/inet_hashtables.h | 48
-rw-r--r-- | include/net/tcp.h | 21
2 files changed, 48 insertions, 21 deletions
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index da07411b36d..f5d65121f7b 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h
@@ -19,10 +19,14 @@
19 | #include <linux/list.h> | 19 | #include <linux/list.h> |
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include <linux/spinlock.h> | 21 | #include <linux/spinlock.h> |
22 | #include <linux/tcp.h> /* only for TCP_LISTEN, damn :-( */ | ||
22 | #include <linux/types.h> | 23 | #include <linux/types.h> |
24 | #include <linux/wait.h> | ||
23 | 25 | ||
24 | #include <net/sock.h> | 26 | #include <net/sock.h> |
25 | 27 | ||
28 | #include <asm/atomic.h> | ||
29 | |||
26 | /* This is for all connections with a full identity, no wildcards. | 30 | /* This is for all connections with a full identity, no wildcards. |
27 | * New scheme, half the table is for TIME_WAIT, the other half is | 31 | * New scheme, half the table is for TIME_WAIT, the other half is |
28 | * for the rest. I'll experiment with dynamic table growth later. | 32 | * for the rest. I'll experiment with dynamic table growth later. |
@@ -192,4 +196,48 @@ static inline void inet_inherit_port(struct inet_hashinfo *table,
192 | 196 | ||
193 | extern void inet_put_port(struct inet_hashinfo *table, struct sock *sk); | 197 | extern void inet_put_port(struct inet_hashinfo *table, struct sock *sk); |
194 | 198 | ||
199 | extern void inet_listen_wlock(struct inet_hashinfo *hashinfo); | ||
200 | |||
201 | /* | ||
202 | * - We may sleep inside this lock. | ||
203 | * - If sleeping is not required (or called from BH), | ||
204 | * use plain read_(un)lock(&inet_hashinfo.lhash_lock). | ||
205 | */ | ||
206 | static inline void inet_listen_lock(struct inet_hashinfo *hashinfo) | ||
207 | { | ||
208 | /* read_lock synchronizes to candidates to writers */ | ||
209 | read_lock(&hashinfo->lhash_lock); | ||
210 | atomic_inc(&hashinfo->lhash_users); | ||
211 | read_unlock(&hashinfo->lhash_lock); | ||
212 | } | ||
213 | |||
214 | static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo) | ||
215 | { | ||
216 | if (atomic_dec_and_test(&hashinfo->lhash_users)) | ||
217 | wake_up(&hashinfo->lhash_wait); | ||
218 | } | ||
219 | |||
220 | static inline void __inet_hash(struct inet_hashinfo *hashinfo, | ||
221 | struct sock *sk, const int listen_possible) | ||
222 | { | ||
223 | struct hlist_head *list; | ||
224 | rwlock_t *lock; | ||
225 | |||
226 | BUG_TRAP(sk_unhashed(sk)); | ||
227 | if (listen_possible && sk->sk_state == TCP_LISTEN) { | ||
228 | list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)]; | ||
229 | lock = &hashinfo->lhash_lock; | ||
230 | inet_listen_wlock(hashinfo); | ||
231 | } else { | ||
232 | sk->sk_hashent = inet_sk_ehashfn(sk, hashinfo->ehash_size); | ||
233 | list = &hashinfo->ehash[sk->sk_hashent].chain; | ||
234 | lock = &hashinfo->ehash[sk->sk_hashent].lock; | ||
235 | write_lock(lock); | ||
236 | } | ||
237 | __sk_add_node(sk, list); | ||
238 | sock_prot_inc_use(sk->sk_prot); | ||
239 | write_unlock(lock); | ||
240 | if (listen_possible && sk->sk_state == TCP_LISTEN) | ||
241 | wake_up(&hashinfo->lhash_wait); | ||
242 | } | ||
195 | #endif /* _INET_HASHTABLES_H */ | 243 | #endif /* _INET_HASHTABLES_H */ |
diff --git a/include/net/tcp.h b/include/net/tcp.h index 99e47695d4b..bc110cc7022 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h
@@ -1447,27 +1447,6 @@ static __inline__ void tcp_openreq_init(struct request_sock *req,
1447 | 1447 | ||
1448 | extern void tcp_enter_memory_pressure(void); | 1448 | extern void tcp_enter_memory_pressure(void); |
1449 | 1449 | ||
1450 | extern void tcp_listen_wlock(void); | ||
1451 | |||
1452 | /* - We may sleep inside this lock. | ||
1453 | * - If sleeping is not required (or called from BH), | ||
1454 | * use plain read_(un)lock(&inet_hashinfo.lhash_lock). | ||
1455 | */ | ||
1456 | |||
1457 | static inline void tcp_listen_lock(void) | ||
1458 | { | ||
1459 | /* read_lock synchronizes to candidates to writers */ | ||
1460 | read_lock(&tcp_hashinfo.lhash_lock); | ||
1461 | atomic_inc(&tcp_hashinfo.lhash_users); | ||
1462 | read_unlock(&tcp_hashinfo.lhash_lock); | ||
1463 | } | ||
1464 | |||
1465 | static inline void tcp_listen_unlock(void) | ||
1466 | { | ||
1467 | if (atomic_dec_and_test(&tcp_hashinfo.lhash_users)) | ||
1468 | wake_up(&tcp_hashinfo.lhash_wait); | ||
1469 | } | ||
1470 | |||
1471 | static inline int keepalive_intvl_when(const struct tcp_sock *tp) | 1450 | static inline int keepalive_intvl_when(const struct tcp_sock *tp) |
1472 | { | 1451 | { |
1473 | return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl; | 1452 | return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl; |