diff options
author | Eric Dumazet <edumazet@google.com> | 2015-03-19 22:04:19 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-03-20 12:40:25 -0400 |
commit | 52452c542559ac980b48dbf22a30ee7fa0af507c (patch) | |
tree | a212dbe95694ea11e86b9d9aca8a2eba0b06a2d1 /net/ipv4 | |
parent | a998f712f77ea4892d3fcf24e0a67603e63da128 (diff) |
inet: drop prev pointer handling in request sock
When request socks are put in the ehash table, the whole notion
of having a previous request pointer to update dl_next is pointless.
Also, a following patch will get rid of the big purge timer,
so we want to be able to delete a request sock without holding the listener lock.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r-- | net/ipv4/inet_connection_sock.c | 22 | ||||
-rw-r--r-- | net/ipv4/tcp_input.c | 2 | ||||
-rw-r--r-- | net/ipv4/tcp_ipv4.c | 17 | ||||
-rw-r--r-- | net/ipv4/tcp_minisocks.c | 5 |
4 files changed, 23 insertions, 23 deletions
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index f0f91858aecf..4f57a017928c 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -480,18 +480,17 @@ static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport, | |||
480 | #endif | 480 | #endif |
481 | 481 | ||
482 | struct request_sock *inet_csk_search_req(const struct sock *sk, | 482 | struct request_sock *inet_csk_search_req(const struct sock *sk, |
483 | struct request_sock ***prevp, | ||
484 | const __be16 rport, const __be32 raddr, | 483 | const __be16 rport, const __be32 raddr, |
485 | const __be32 laddr) | 484 | const __be32 laddr) |
486 | { | 485 | { |
487 | const struct inet_connection_sock *icsk = inet_csk(sk); | 486 | const struct inet_connection_sock *icsk = inet_csk(sk); |
488 | struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; | 487 | struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; |
489 | struct request_sock *req, **prev; | 488 | struct request_sock *req; |
490 | 489 | ||
491 | for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd, | 490 | for (req = lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd, |
492 | lopt->nr_table_entries)]; | 491 | lopt->nr_table_entries)]; |
493 | (req = *prev) != NULL; | 492 | req != NULL; |
494 | prev = &req->dl_next) { | 493 | req = req->dl_next) { |
495 | const struct inet_request_sock *ireq = inet_rsk(req); | 494 | const struct inet_request_sock *ireq = inet_rsk(req); |
496 | 495 | ||
497 | if (ireq->ir_rmt_port == rport && | 496 | if (ireq->ir_rmt_port == rport && |
@@ -499,7 +498,6 @@ struct request_sock *inet_csk_search_req(const struct sock *sk, | |||
499 | ireq->ir_loc_addr == laddr && | 498 | ireq->ir_loc_addr == laddr && |
500 | AF_INET_FAMILY(req->rsk_ops->family)) { | 499 | AF_INET_FAMILY(req->rsk_ops->family)) { |
501 | WARN_ON(req->sk); | 500 | WARN_ON(req->sk); |
502 | *prevp = prev; | ||
503 | break; | 501 | break; |
504 | } | 502 | } |
505 | } | 503 | } |
@@ -610,7 +608,10 @@ void inet_csk_reqsk_queue_prune(struct sock *parent, | |||
610 | i = lopt->clock_hand; | 608 | i = lopt->clock_hand; |
611 | 609 | ||
612 | do { | 610 | do { |
613 | reqp=&lopt->syn_table[i]; | 611 | reqp = &lopt->syn_table[i]; |
612 | if (!*reqp) | ||
613 | goto next_bucket; | ||
614 | write_lock(&queue->syn_wait_lock); | ||
614 | while ((req = *reqp) != NULL) { | 615 | while ((req = *reqp) != NULL) { |
615 | if (time_after_eq(now, req->expires)) { | 616 | if (time_after_eq(now, req->expires)) { |
616 | int expire = 0, resend = 0; | 617 | int expire = 0, resend = 0; |
@@ -635,14 +636,15 @@ void inet_csk_reqsk_queue_prune(struct sock *parent, | |||
635 | } | 636 | } |
636 | 637 | ||
637 | /* Drop this request */ | 638 | /* Drop this request */ |
638 | inet_csk_reqsk_queue_unlink(parent, req, reqp); | 639 | *reqp = req->dl_next; |
639 | reqsk_queue_removed(queue, req); | 640 | reqsk_queue_removed(queue, req); |
640 | reqsk_put(req); | 641 | reqsk_put(req); |
641 | continue; | 642 | continue; |
642 | } | 643 | } |
643 | reqp = &req->dl_next; | 644 | reqp = &req->dl_next; |
644 | } | 645 | } |
645 | 646 | write_unlock(&queue->syn_wait_lock); | |
647 | next_bucket: | ||
646 | i = (i + 1) & (lopt->nr_table_entries - 1); | 648 | i = (i + 1) & (lopt->nr_table_entries - 1); |
647 | 649 | ||
648 | } while (--budget > 0); | 650 | } while (--budget > 0); |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 1dfbaee3554e..95caea707f54 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -5694,7 +5694,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
5694 | WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && | 5694 | WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && |
5695 | sk->sk_state != TCP_FIN_WAIT1); | 5695 | sk->sk_state != TCP_FIN_WAIT1); |
5696 | 5696 | ||
5697 | if (tcp_check_req(sk, skb, req, NULL, true) == NULL) | 5697 | if (tcp_check_req(sk, skb, req, true) == NULL) |
5698 | goto discard; | 5698 | goto discard; |
5699 | } | 5699 | } |
5700 | 5700 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index ddd0b1f25b96..19c3770f1e97 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -458,12 +458,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) | |||
458 | } | 458 | } |
459 | 459 | ||
460 | switch (sk->sk_state) { | 460 | switch (sk->sk_state) { |
461 | struct request_sock *req, **prev; | 461 | struct request_sock *req; |
462 | case TCP_LISTEN: | 462 | case TCP_LISTEN: |
463 | if (sock_owned_by_user(sk)) | 463 | if (sock_owned_by_user(sk)) |
464 | goto out; | 464 | goto out; |
465 | 465 | ||
466 | req = inet_csk_search_req(sk, &prev, th->dest, | 466 | req = inet_csk_search_req(sk, th->dest, |
467 | iph->daddr, iph->saddr); | 467 | iph->daddr, iph->saddr); |
468 | if (!req) | 468 | if (!req) |
469 | goto out; | 469 | goto out; |
@@ -484,7 +484,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) | |||
484 | * created socket, and POSIX does not want network | 484 | * created socket, and POSIX does not want network |
485 | * errors returned from accept(). | 485 | * errors returned from accept(). |
486 | */ | 486 | */ |
487 | inet_csk_reqsk_queue_drop(sk, req, prev); | 487 | inet_csk_reqsk_queue_drop(sk, req); |
488 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); | 488 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); |
489 | goto out; | 489 | goto out; |
490 | 490 | ||
@@ -1392,15 +1392,14 @@ EXPORT_SYMBOL(tcp_v4_syn_recv_sock); | |||
1392 | 1392 | ||
1393 | static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) | 1393 | static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) |
1394 | { | 1394 | { |
1395 | struct tcphdr *th = tcp_hdr(skb); | 1395 | const struct tcphdr *th = tcp_hdr(skb); |
1396 | const struct iphdr *iph = ip_hdr(skb); | 1396 | const struct iphdr *iph = ip_hdr(skb); |
1397 | struct request_sock *req; | ||
1397 | struct sock *nsk; | 1398 | struct sock *nsk; |
1398 | struct request_sock **prev; | 1399 | |
1399 | /* Find possible connection requests. */ | 1400 | req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr); |
1400 | struct request_sock *req = inet_csk_search_req(sk, &prev, th->source, | ||
1401 | iph->saddr, iph->daddr); | ||
1402 | if (req) | 1401 | if (req) |
1403 | return tcp_check_req(sk, skb, req, prev, false); | 1402 | return tcp_check_req(sk, skb, req, false); |
1404 | 1403 | ||
1405 | nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr, | 1404 | nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr, |
1406 | th->source, iph->daddr, th->dest, inet_iif(skb)); | 1405 | th->source, iph->daddr, th->dest, inet_iif(skb)); |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index dd11ac7798c6..848bcab358e4 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -572,7 +572,6 @@ EXPORT_SYMBOL(tcp_create_openreq_child); | |||
572 | 572 | ||
573 | struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, | 573 | struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, |
574 | struct request_sock *req, | 574 | struct request_sock *req, |
575 | struct request_sock **prev, | ||
576 | bool fastopen) | 575 | bool fastopen) |
577 | { | 576 | { |
578 | struct tcp_options_received tmp_opt; | 577 | struct tcp_options_received tmp_opt; |
@@ -766,7 +765,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, | |||
766 | if (child == NULL) | 765 | if (child == NULL) |
767 | goto listen_overflow; | 766 | goto listen_overflow; |
768 | 767 | ||
769 | inet_csk_reqsk_queue_unlink(sk, req, prev); | 768 | inet_csk_reqsk_queue_unlink(sk, req); |
770 | inet_csk_reqsk_queue_removed(sk, req); | 769 | inet_csk_reqsk_queue_removed(sk, req); |
771 | 770 | ||
772 | inet_csk_reqsk_queue_add(sk, req, child); | 771 | inet_csk_reqsk_queue_add(sk, req, child); |
@@ -791,7 +790,7 @@ embryonic_reset: | |||
791 | tcp_reset(sk); | 790 | tcp_reset(sk); |
792 | } | 791 | } |
793 | if (!fastopen) { | 792 | if (!fastopen) { |
794 | inet_csk_reqsk_queue_drop(sk, req, prev); | 793 | inet_csk_reqsk_queue_drop(sk, req); |
795 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); | 794 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); |
796 | } | 795 | } |
797 | return NULL; | 796 | return NULL; |