author		Arnaldo Carvalho de Melo <acme@mandriva.com>	2005-12-14 02:25:31 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-01-03 16:10:55 -0500
commit		a7f5e7f164788a22eb5d3de8e2d3cee1bf58fdca (patch)
tree		809ed01d61aa9548124b9958a5a500068b1db670 /net/ipv4/tcp_ipv4.c
parent		6d6ee43e0b8b8d4847627fd43739b98ec2b9404f (diff)
[INET]: Generalise tcp_v4_hash_connect
Renaming it to inet_hash_connect, making it possible to ditch
dccp_v4_hash_connect and share the same code with TCP instead.
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
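
The caller-visible change is that the generalised helper takes the protocol's timewait death row as an argument instead of hardcoding tcp_death_row. A rough sketch of the intended sharing follows; only the TCP call appears in this patch, while the inet_hash_connect prototype and the DCCP side with dccp_death_row are assumptions based on the description above:

	/* Assumed prototype of the generalised helper (not part of this diff): */
	extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
				     struct sock *sk);

	/* TCP caller, as changed by this patch in tcp_v4_connect(): */
	err = inet_hash_connect(&tcp_death_row, sk);

	/* DCCP caller (assumed): dccp_v4_connect() can now use the same helper
	 * with its own death row, so dccp_v4_hash_connect can be dropped. */
	err = inet_hash_connect(&dccp_death_row, sk);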
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--	net/ipv4/tcp_ipv4.c	173
1 files changed, 1 insertions, 172 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6728772a943a..c2fe61becd61 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -152,177 +152,6 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 
 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
 
-/* called with local bh disabled */
-static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
-				      struct inet_timewait_sock **twp)
-{
-	struct inet_sock *inet = inet_sk(sk);
-	u32 daddr = inet->rcv_saddr;
-	u32 saddr = inet->daddr;
-	int dif = sk->sk_bound_dev_if;
-	INET_ADDR_COOKIE(acookie, saddr, daddr)
-	const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
-	unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
-	struct inet_ehash_bucket *head = inet_ehash_bucket(&tcp_hashinfo, hash);
-	struct sock *sk2;
-	const struct hlist_node *node;
-	struct inet_timewait_sock *tw;
-
-	prefetch(head->chain.first);
-	write_lock(&head->lock);
-
-	/* Check TIME-WAIT sockets first. */
-	sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
-		tw = inet_twsk(sk2);
-
-		if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) {
-			if (twsk_unique(sk, sk2, twp))
-				goto unique;
-			else
-				goto not_unique;
-		}
-	}
-	tw = NULL;
-
-	/* And established part... */
-	sk_for_each(sk2, node, &head->chain) {
-		if (INET_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif))
-			goto not_unique;
-	}
-
-unique:
-	/* Must record num and sport now. Otherwise we will see
-	 * in hash table socket with a funny identity. */
-	inet->num = lport;
-	inet->sport = htons(lport);
-	sk->sk_hash = hash;
-	BUG_TRAP(sk_unhashed(sk));
-	__sk_add_node(sk, &head->chain);
-	sock_prot_inc_use(sk->sk_prot);
-	write_unlock(&head->lock);
-
-	if (twp) {
-		*twp = tw;
-		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
-	} else if (tw) {
-		/* Silly. Should hash-dance instead... */
-		inet_twsk_deschedule(tw, &tcp_death_row);
-		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
-
-		inet_twsk_put(tw);
-	}
-
-	return 0;
-
-not_unique:
-	write_unlock(&head->lock);
-	return -EADDRNOTAVAIL;
-}
-
-static inline u32 connect_port_offset(const struct sock *sk)
-{
-	const struct inet_sock *inet = inet_sk(sk);
-
-	return secure_tcp_port_ephemeral(inet->rcv_saddr, inet->daddr,
-					 inet->dport);
-}
-
-/*
- * Bind a port for a connect operation and hash it.
- */
-static inline int tcp_v4_hash_connect(struct sock *sk)
-{
-	const unsigned short snum = inet_sk(sk)->num;
-	struct inet_bind_hashbucket *head;
-	struct inet_bind_bucket *tb;
-	int ret;
-
-	if (!snum) {
-		int low = sysctl_local_port_range[0];
-		int high = sysctl_local_port_range[1];
-		int range = high - low;
-		int i;
-		int port;
-		static u32 hint;
-		u32 offset = hint + connect_port_offset(sk);
-		struct hlist_node *node;
-		struct inet_timewait_sock *tw = NULL;
-
-		local_bh_disable();
-		for (i = 1; i <= range; i++) {
-			port = low + (i + offset) % range;
-			head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)];
-			spin_lock(&head->lock);
-
-			/* Does not bother with rcv_saddr checks,
-			 * because the established check is already
-			 * unique enough.
-			 */
-			inet_bind_bucket_for_each(tb, node, &head->chain) {
-				if (tb->port == port) {
-					BUG_TRAP(!hlist_empty(&tb->owners));
-					if (tb->fastreuse >= 0)
-						goto next_port;
-					if (!__tcp_v4_check_established(sk,
-									port,
-									&tw))
-						goto ok;
-					goto next_port;
-				}
-			}
-
-			tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port);
-			if (!tb) {
-				spin_unlock(&head->lock);
-				break;
-			}
-			tb->fastreuse = -1;
-			goto ok;
-
-		next_port:
-			spin_unlock(&head->lock);
-		}
-		local_bh_enable();
-
-		return -EADDRNOTAVAIL;
-
-ok:
-		hint += i;
-
-		/* Head lock still held and bh's disabled */
-		inet_bind_hash(sk, tb, port);
-		if (sk_unhashed(sk)) {
-			inet_sk(sk)->sport = htons(port);
-			__inet_hash(&tcp_hashinfo, sk, 0);
-		}
-		spin_unlock(&head->lock);
-
-		if (tw) {
-			inet_twsk_deschedule(tw, &tcp_death_row);;
-			inet_twsk_put(tw);
-		}
-
-		ret = 0;
-		goto out;
-	}
-
-	head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
-	tb = inet_csk(sk)->icsk_bind_hash;
-	spin_lock_bh(&head->lock);
-	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
-		__inet_hash(&tcp_hashinfo, sk, 0);
-		spin_unlock_bh(&head->lock);
-		return 0;
-	} else {
-		spin_unlock(&head->lock);
-		/* No definite answer... Walk to established hash table */
-		ret = __tcp_v4_check_established(sk, snum, NULL);
-out:
-		local_bh_enable();
-		return ret;
-	}
-}
-
 /* This will initiate an outgoing connection. */
 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
@@ -403,7 +232,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	 * complete initialization after this.
 	 */
 	tcp_set_state(sk, TCP_SYN_SENT);
-	err = tcp_v4_hash_connect(sk);
+	err = inet_hash_connect(&tcp_death_row, sk);
 	if (err)
 		goto failure;
 
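
For reference, the ephemeral-port search that the removed tcp_v4_hash_connect performs (and that the generalised inet_hash_connect presumably keeps) walks candidates as low + (i + offset) % range, where offset mixes a static hint with a per-destination value from secure_tcp_port_ephemeral(), so different destinations start the walk at different ports. Below is a stand-alone userspace sketch of just that candidate sequence; the port-range values and toy_port_offset() are illustrative stand-ins, not the kernel's implementation:

	#include <stdio.h>

	/* Illustrative stand-in for secure_tcp_port_ephemeral(); the real helper
	 * hashes (rcv_saddr, daddr, dport) with a secret key. */
	static unsigned int toy_port_offset(unsigned int saddr, unsigned int daddr,
					    unsigned short dport)
	{
		return (saddr * 2654435761u) ^ (daddr * 40503u) ^ dport;
	}

	int main(void)
	{
		const int low = 32768, high = 61000;	/* typical local port range */
		const int range = high - low;
		static unsigned int hint;		/* shared hint, advances per connect */
		unsigned int offset = hint + toy_port_offset(0x0a000001, 0x0a000002, 80);
		int i;

		/* Same candidate order as the removed loop: for each i the bind
		 * bucket for 'port' is inspected, and the walk stops at the first
		 * port that also passes the established-table uniqueness check. */
		for (i = 1; i <= 5; i++) {
			unsigned int port = low + (i + offset) % range;
			printf("candidate %d: port %u\n", i, port);
		}

		hint += i;	/* on success the kernel remembers how far it walked */
		return 0;
	}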