diff options
author | Arnaldo Carvalho de Melo <acme@mandriva.com> | 2005-12-14 02:25:44 -0500 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2006-01-03 16:10:56 -0500 |
commit | d8313f5ca2b1f86b7df6c99fc4b3fffa1f84e92b (patch) | |
tree | 1ee41d265c7790e4389bf4d123b2b60975ad2967 /net/ipv6/tcp_ipv6.c | |
parent | a7f5e7f164788a22eb5d3de8e2d3cee1bf58fdca (diff) |
[INET6]: Generalise tcp_v6_hash_connect
Renaming it to inet6_hash_connect, making it possible to ditch
dccp_v6_hash_connect and share the same code with TCP instead.
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6/tcp_ipv6.c')
-rw-r--r-- | net/ipv6/tcp_ipv6.c | 173 |
1 file changed, 1 insertion(+), 172 deletions(-)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 514b57bb80b7..a682eb9093e1 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -119,177 +119,6 @@ static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb) | |||
119 | } | 119 | } |
120 | } | 120 | } |
121 | 121 | ||
122 | static int __tcp_v6_check_established(struct sock *sk, const __u16 lport, | ||
123 | struct inet_timewait_sock **twp) | ||
124 | { | ||
125 | struct inet_sock *inet = inet_sk(sk); | ||
126 | const struct ipv6_pinfo *np = inet6_sk(sk); | ||
127 | const struct in6_addr *daddr = &np->rcv_saddr; | ||
128 | const struct in6_addr *saddr = &np->daddr; | ||
129 | const int dif = sk->sk_bound_dev_if; | ||
130 | const u32 ports = INET_COMBINED_PORTS(inet->dport, lport); | ||
131 | unsigned int hash = inet6_ehashfn(daddr, inet->num, saddr, inet->dport); | ||
132 | struct inet_ehash_bucket *head = inet_ehash_bucket(&tcp_hashinfo, hash); | ||
133 | struct sock *sk2; | ||
134 | const struct hlist_node *node; | ||
135 | struct inet_timewait_sock *tw; | ||
136 | |||
137 | prefetch(head->chain.first); | ||
138 | write_lock(&head->lock); | ||
139 | |||
140 | /* Check TIME-WAIT sockets first. */ | ||
141 | sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) { | ||
142 | const struct inet6_timewait_sock *tw6 = inet6_twsk(sk2); | ||
143 | |||
144 | tw = inet_twsk(sk2); | ||
145 | |||
146 | if(*((__u32 *)&(tw->tw_dport)) == ports && | ||
147 | sk2->sk_family == PF_INET6 && | ||
148 | ipv6_addr_equal(&tw6->tw_v6_daddr, saddr) && | ||
149 | ipv6_addr_equal(&tw6->tw_v6_rcv_saddr, daddr) && | ||
150 | sk2->sk_bound_dev_if == sk->sk_bound_dev_if) { | ||
151 | if (twsk_unique(sk, sk2, twp)) | ||
152 | goto unique; | ||
153 | else | ||
154 | goto not_unique; | ||
155 | } | ||
156 | } | ||
157 | tw = NULL; | ||
158 | |||
159 | /* And established part... */ | ||
160 | sk_for_each(sk2, node, &head->chain) { | ||
161 | if (INET6_MATCH(sk2, hash, saddr, daddr, ports, dif)) | ||
162 | goto not_unique; | ||
163 | } | ||
164 | |||
165 | unique: | ||
166 | BUG_TRAP(sk_unhashed(sk)); | ||
167 | __sk_add_node(sk, &head->chain); | ||
168 | sk->sk_hash = hash; | ||
169 | sock_prot_inc_use(sk->sk_prot); | ||
170 | write_unlock(&head->lock); | ||
171 | |||
172 | if (twp) { | ||
173 | *twp = tw; | ||
174 | NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); | ||
175 | } else if (tw) { | ||
176 | /* Silly. Should hash-dance instead... */ | ||
177 | inet_twsk_deschedule(tw, &tcp_death_row); | ||
178 | NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); | ||
179 | |||
180 | inet_twsk_put(tw); | ||
181 | } | ||
182 | return 0; | ||
183 | |||
184 | not_unique: | ||
185 | write_unlock(&head->lock); | ||
186 | return -EADDRNOTAVAIL; | ||
187 | } | ||
188 | |||
189 | static inline u32 tcpv6_port_offset(const struct sock *sk) | ||
190 | { | ||
191 | const struct inet_sock *inet = inet_sk(sk); | ||
192 | const struct ipv6_pinfo *np = inet6_sk(sk); | ||
193 | |||
194 | return secure_tcpv6_port_ephemeral(np->rcv_saddr.s6_addr32, | ||
195 | np->daddr.s6_addr32, | ||
196 | inet->dport); | ||
197 | } | ||
198 | |||
199 | static int tcp_v6_hash_connect(struct sock *sk) | ||
200 | { | ||
201 | unsigned short snum = inet_sk(sk)->num; | ||
202 | struct inet_bind_hashbucket *head; | ||
203 | struct inet_bind_bucket *tb; | ||
204 | int ret; | ||
205 | |||
206 | if (!snum) { | ||
207 | int low = sysctl_local_port_range[0]; | ||
208 | int high = sysctl_local_port_range[1]; | ||
209 | int range = high - low; | ||
210 | int i; | ||
211 | int port; | ||
212 | static u32 hint; | ||
213 | u32 offset = hint + tcpv6_port_offset(sk); | ||
214 | struct hlist_node *node; | ||
215 | struct inet_timewait_sock *tw = NULL; | ||
216 | |||
217 | local_bh_disable(); | ||
218 | for (i = 1; i <= range; i++) { | ||
219 | port = low + (i + offset) % range; | ||
220 | head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)]; | ||
221 | spin_lock(&head->lock); | ||
222 | |||
223 | /* Does not bother with rcv_saddr checks, | ||
224 | * because the established check is already | ||
225 | * unique enough. | ||
226 | */ | ||
227 | inet_bind_bucket_for_each(tb, node, &head->chain) { | ||
228 | if (tb->port == port) { | ||
229 | BUG_TRAP(!hlist_empty(&tb->owners)); | ||
230 | if (tb->fastreuse >= 0) | ||
231 | goto next_port; | ||
232 | if (!__tcp_v6_check_established(sk, | ||
233 | port, | ||
234 | &tw)) | ||
235 | goto ok; | ||
236 | goto next_port; | ||
237 | } | ||
238 | } | ||
239 | |||
240 | tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port); | ||
241 | if (!tb) { | ||
242 | spin_unlock(&head->lock); | ||
243 | break; | ||
244 | } | ||
245 | tb->fastreuse = -1; | ||
246 | goto ok; | ||
247 | |||
248 | next_port: | ||
249 | spin_unlock(&head->lock); | ||
250 | } | ||
251 | local_bh_enable(); | ||
252 | |||
253 | return -EADDRNOTAVAIL; | ||
254 | |||
255 | ok: | ||
256 | hint += i; | ||
257 | |||
258 | /* Head lock still held and bh's disabled */ | ||
259 | inet_bind_hash(sk, tb, port); | ||
260 | if (sk_unhashed(sk)) { | ||
261 | inet_sk(sk)->sport = htons(port); | ||
262 | __inet6_hash(&tcp_hashinfo, sk); | ||
263 | } | ||
264 | spin_unlock(&head->lock); | ||
265 | |||
266 | if (tw) { | ||
267 | inet_twsk_deschedule(tw, &tcp_death_row); | ||
268 | inet_twsk_put(tw); | ||
269 | } | ||
270 | |||
271 | ret = 0; | ||
272 | goto out; | ||
273 | } | ||
274 | |||
275 | head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)]; | ||
276 | tb = inet_csk(sk)->icsk_bind_hash; | ||
277 | spin_lock_bh(&head->lock); | ||
278 | |||
279 | if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { | ||
280 | __inet6_hash(&tcp_hashinfo, sk); | ||
281 | spin_unlock_bh(&head->lock); | ||
282 | return 0; | ||
283 | } else { | ||
284 | spin_unlock(&head->lock); | ||
285 | /* No definite answer... Walk to established hash table */ | ||
286 | ret = __tcp_v6_check_established(sk, snum, NULL); | ||
287 | out: | ||
288 | local_bh_enable(); | ||
289 | return ret; | ||
290 | } | ||
291 | } | ||
292 | |||
293 | static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | 122 | static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, |
294 | int addr_len) | 123 | int addr_len) |
295 | { | 124 | { |
@@ -450,7 +279,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
450 | inet->dport = usin->sin6_port; | 279 | inet->dport = usin->sin6_port; |
451 | 280 | ||
452 | tcp_set_state(sk, TCP_SYN_SENT); | 281 | tcp_set_state(sk, TCP_SYN_SENT); |
453 | err = tcp_v6_hash_connect(sk); | 282 | err = inet6_hash_connect(&tcp_death_row, sk); |
454 | if (err) | 283 | if (err) |
455 | goto late_failure; | 284 | goto late_failure; |
456 | 285 | ||