about summary refs log tree commit diff stats
path: root/net/ipv4/tcp_ipv4.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--  net/ipv4/tcp_ipv4.c  81
1 file changed, 5 insertions, 76 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index dca1be67164b..a678709b36f6 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -238,78 +238,6 @@ void tcp_unhash(struct sock *sk)
238 inet_unhash(&tcp_hashinfo, sk); 238 inet_unhash(&tcp_hashinfo, sk);
239} 239}
240 240
241/* Don't inline this cruft. Here are some nice properties to
242 * exploit here. The BSD API does not allow a listening TCP
243 * to specify the remote port nor the remote address for the
244 * connection. So always assume those are both wildcarded
245 * during the search since they can never be otherwise.
246 */
247static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head,
248 const u32 daddr,
249 const unsigned short hnum,
250 const int dif)
251{
252 struct sock *result = NULL, *sk;
253 struct hlist_node *node;
254 int score, hiscore;
255
256 hiscore=-1;
257 sk_for_each(sk, node, head) {
258 struct inet_sock *inet = inet_sk(sk);
259
260 if (inet->num == hnum && !ipv6_only_sock(sk)) {
261 __u32 rcv_saddr = inet->rcv_saddr;
262
263 score = (sk->sk_family == PF_INET ? 1 : 0);
264 if (rcv_saddr) {
265 if (rcv_saddr != daddr)
266 continue;
267 score+=2;
268 }
269 if (sk->sk_bound_dev_if) {
270 if (sk->sk_bound_dev_if != dif)
271 continue;
272 score+=2;
273 }
274 if (score == 5)
275 return sk;
276 if (score > hiscore) {
277 hiscore = score;
278 result = sk;
279 }
280 }
281 }
282 return result;
283}
284
285/* Optimize the common listener case. */
286static inline struct sock *tcp_v4_lookup_listener(const u32 daddr,
287 const unsigned short hnum,
288 const int dif)
289{
290 struct sock *sk = NULL;
291 struct hlist_head *head;
292
293 read_lock(&tcp_hashinfo.lhash_lock);
294 head = &tcp_hashinfo.listening_hash[inet_lhashfn(hnum)];
295 if (!hlist_empty(head)) {
296 struct inet_sock *inet = inet_sk((sk = __sk_head(head)));
297
298 if (inet->num == hnum && !sk->sk_node.next &&
299 (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
300 (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
301 !sk->sk_bound_dev_if)
302 goto sherry_cache;
303 sk = __tcp_v4_lookup_listener(head, daddr, hnum, dif);
304 }
305 if (sk) {
306sherry_cache:
307 sock_hold(sk);
308 }
309 read_unlock(&tcp_hashinfo.lhash_lock);
310 return sk;
311}
312
313/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so 241/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
314 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM 242 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
315 * 243 *
@@ -358,7 +286,7 @@ static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
358 struct sock *sk = __tcp_v4_lookup_established(saddr, sport, 286 struct sock *sk = __tcp_v4_lookup_established(saddr, sport,
359 daddr, hnum, dif); 287 daddr, hnum, dif);
360 288
361 return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif); 289 return sk ? : inet_lookup_listener(&tcp_hashinfo, daddr, hnum, dif);
362} 290}
363 291
364inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr, 292inline struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
@@ -1641,9 +1569,10 @@ do_time_wait:
1641 switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk, 1569 switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
1642 skb, th, skb->len)) { 1570 skb, th, skb->len)) {
1643 case TCP_TW_SYN: { 1571 case TCP_TW_SYN: {
1644 struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr, 1572 struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
1645 ntohs(th->dest), 1573 skb->nh.iph->daddr,
1646 tcp_v4_iif(skb)); 1574 ntohs(th->dest),
1575 tcp_v4_iif(skb));
1647 if (sk2) { 1576 if (sk2) {
1648 tcp_tw_deschedule((struct tcp_tw_bucket *)sk); 1577 tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
1649 tcp_tw_put((struct tcp_tw_bucket *)sk); 1578 tcp_tw_put((struct tcp_tw_bucket *)sk);