-rw-r--r--  include/net/inet6_hashtables.h  26
-rw-r--r--  net/ipv6/tcp_ipv6.c             34
2 files changed, 30 insertions, 30 deletions
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 5a2beed5a770..a4a204f99ea6 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -48,6 +48,32 @@ static inline int inet6_sk_ehashfn(const struct sock *sk)
 	return inet6_ehashfn(laddr, lport, faddr, fport);
 }
 
+static inline void __inet6_hash(struct inet_hashinfo *hashinfo,
+				struct sock *sk)
+{
+	struct hlist_head *list;
+	rwlock_t *lock;
+
+	BUG_TRAP(sk_unhashed(sk));
+
+	if (sk->sk_state == TCP_LISTEN) {
+		list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
+		lock = &hashinfo->lhash_lock;
+		inet_listen_wlock(hashinfo);
+	} else {
+		unsigned int hash;
+		sk->sk_hash = hash = inet6_sk_ehashfn(sk);
+		hash &= (hashinfo->ehash_size - 1);
+		list = &hashinfo->ehash[hash].chain;
+		lock = &hashinfo->ehash[hash].lock;
+		write_lock(lock);
+	}
+
+	__sk_add_node(sk, list);
+	sock_prot_inc_use(sk->sk_prot);
+	write_unlock(lock);
+}
+
 /*
  * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
  * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 76c8f5a2f7f3..bf41f84d6692 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -103,32 +103,6 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
 				   inet6_csk_bind_conflict);
 }
 
-static __inline__ void __tcp_v6_hash(struct sock *sk)
-{
-	struct hlist_head *list;
-	rwlock_t *lock;
-
-	BUG_TRAP(sk_unhashed(sk));
-
-	if (sk->sk_state == TCP_LISTEN) {
-		list = &tcp_hashinfo.listening_hash[inet_sk_listen_hashfn(sk)];
-		lock = &tcp_hashinfo.lhash_lock;
-		inet_listen_wlock(&tcp_hashinfo);
-	} else {
-		unsigned int hash;
-		sk->sk_hash = hash = inet6_sk_ehashfn(sk);
-		hash &= (tcp_hashinfo.ehash_size - 1);
-		list = &tcp_hashinfo.ehash[hash].chain;
-		lock = &tcp_hashinfo.ehash[hash].lock;
-		write_lock(lock);
-	}
-
-	__sk_add_node(sk, list);
-	sock_prot_inc_use(sk->sk_prot);
-	write_unlock(lock);
-}
-
-
 static void tcp_v6_hash(struct sock *sk)
 {
 	if (sk->sk_state != TCP_CLOSE) {
@@ -139,7 +113,7 @@ static void tcp_v6_hash(struct sock *sk)
 			return;
 		}
 		local_bh_disable();
-		__tcp_v6_hash(sk);
+		__inet6_hash(&tcp_hashinfo, sk);
 		local_bh_enable();
 	}
 }
@@ -374,7 +348,7 @@ ok:
 		inet_bind_hash(sk, tb, port);
 		if (sk_unhashed(sk)) {
 			inet_sk(sk)->sport = htons(port);
-			__tcp_v6_hash(sk);
+			__inet6_hash(&tcp_hashinfo, sk);
 		}
 		spin_unlock(&head->lock);
 
@@ -392,7 +366,7 @@ ok:
 	spin_lock_bh(&head->lock);
 
 	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
-		__tcp_v6_hash(sk);
+		__inet6_hash(&tcp_hashinfo, sk);
 		spin_unlock_bh(&head->lock);
 		return 0;
 	} else {
@@ -1295,7 +1269,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
 
-	__tcp_v6_hash(newsk);
+	__inet6_hash(&tcp_hashinfo, newsk);
 	inet_inherit_port(&tcp_hashinfo, sk, newsk);
 
 	return newsk;
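For orientation, here is a minimal sketch of how a caller uses the helper once it lives in inet6_hashtables.h. It simply mirrors the tcp_v6_hash() pattern from the hunk above; my_hashinfo and my_v6_hash are placeholder names for illustration, not part of the patch.

#include <net/tcp.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>

/* Placeholder table; a real protocol passes its own, e.g. &tcp_hashinfo. */
extern struct inet_hashinfo my_hashinfo;

static void my_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		/* __inet6_hash() takes the chain lock itself, but the
		 * caller runs with softirqs disabled, exactly as
		 * tcp_v6_hash() does in the hunk above. */
		local_bh_disable();
		__inet6_hash(&my_hashinfo, sk);
		local_bh_enable();
	}
}

Nothing in the helper is TCP-specific any more: the hash table is a parameter, so any AF_INET6 transport that keeps its own struct inet_hashinfo can reuse it instead of carrying a private copy of this code.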