author	Eric Dumazet <edumazet@google.com>	2015-03-19 22:04:19 -0400
committer	David S. Miller <davem@davemloft.net>	2015-03-20 12:40:25 -0400
commit	52452c542559ac980b48dbf22a30ee7fa0af507c (patch)
tree	a212dbe95694ea11e86b9d9aca8a2eba0b06a2d1 /net/ipv4/tcp_minisocks.c
parent	a998f712f77ea4892d3fcf24e0a67603e63da128 (diff)
inet: drop prev pointer handling in request sock
When request socks are put in the ehash table, the whole notion of having a previous request to update dl_next is pointless.

Also, a following patch will get rid of the big purge timer, so we want to be able to delete a request sock without holding the listener lock.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
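The prev cursor only exists because a singly linked SYN queue forces the caller to remember the previous entry in order to splice a request out of the chain. Once request socks sit on hash chains whose nodes carry a back-pointer to the link that points at them (the hlist/pprev pattern), a node can be unlinked given only the node itself, with no caller-side bookkeeping. Below is a minimal standalone C sketch of that pattern; the struct and function names (hnode, hnode_add_head, hnode_unlink) are invented for illustration and are not the kernel's own.

#include <stdio.h>

/* hlist-style node: "pprev" stores the address of the link that points
 * at this node (&previous->next, or &head for the first node). */
struct hnode {
	struct hnode *next;
	struct hnode **pprev;
	int val;
};

static void hnode_add_head(struct hnode **head, struct hnode *n)
{
	n->next = *head;
	if (*head)
		(*head)->pprev = &n->next;
	n->pprev = head;
	*head = n;
}

/* Unlink using only the node itself: no caller-supplied "prev" needed. */
static void hnode_unlink(struct hnode *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
}

int main(void)
{
	struct hnode *head = NULL;
	struct hnode a = { .val = 1 }, b = { .val = 2 };

	hnode_add_head(&head, &a);
	hnode_add_head(&head, &b);

	hnode_unlink(&a);	/* works without knowing a's predecessor */

	for (struct hnode *n = head; n; n = n->next)
		printf("%d\n", n->val);	/* prints: 2 */
	return 0;
}

Plain singly linked removal, by contrast, must rewrite prev->next and therefore forces every caller to carry a prev cursor; that is exactly the **prev argument this commit deletes from tcp_check_req() and its queue helpers.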
Diffstat (limited to 'net/ipv4/tcp_minisocks.c')
-rw-r--r--	net/ipv4/tcp_minisocks.c	5
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index dd11ac7798c6..848bcab358e4 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -572,7 +572,6 @@ EXPORT_SYMBOL(tcp_create_openreq_child);
 
 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 			   struct request_sock *req,
-			   struct request_sock **prev,
 			   bool fastopen)
 {
 	struct tcp_options_received tmp_opt;
@@ -766,7 +765,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 	if (child == NULL)
 		goto listen_overflow;
 
-	inet_csk_reqsk_queue_unlink(sk, req, prev);
+	inet_csk_reqsk_queue_unlink(sk, req);
 	inet_csk_reqsk_queue_removed(sk, req);
 
 	inet_csk_reqsk_queue_add(sk, req, child);
@@ -791,7 +790,7 @@ embryonic_reset:
 		tcp_reset(sk);
 	}
 	if (!fastopen) {
-		inet_csk_reqsk_queue_drop(sk, req, prev);
+		inet_csk_reqsk_queue_drop(sk, req);
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
 	}
 	return NULL;