aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/tcp_minisocks.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/ipv4/tcp_minisocks.c')
-rw-r--r--net/ipv4/tcp_minisocks.c32
1 files changed, 12 insertions, 20 deletions
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 1276cab85e3e..ea68a478fad6 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -569,8 +569,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
569 does sequence test, SYN is truncated, and thus we consider 569 does sequence test, SYN is truncated, and thus we consider
570 it a bare ACK. 570 it a bare ACK.
571 571
572 Both ends (listening sockets) accept the new incoming 572 If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
573 connection and try to talk to each other. 8-) 573 bare ACK. Otherwise, we create an established connection. Both
574 ends (listening sockets) accept the new incoming connection and try
575 to talk to each other. 8-)
574 576
575 Note: This case is both harmless, and rare. Possibility is about the 577 Note: This case is both harmless, and rare. Possibility is about the
576 same as us discovering intelligent life on another planet tomorrow. 578 same as us discovering intelligent life on another planet tomorrow.
@@ -638,6 +640,13 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
638 if (!(flg & TCP_FLAG_ACK)) 640 if (!(flg & TCP_FLAG_ACK))
639 return NULL; 641 return NULL;
640 642
643 /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
644 if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
645 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
646 inet_rsk(req)->acked = 1;
647 return NULL;
648 }
649
641 /* OK, ACK is valid, create big socket and 650 /* OK, ACK is valid, create big socket and
642 * feed this segment to it. It will repeat all 651 * feed this segment to it. It will repeat all
643 * the tests. THIS SEGMENT MUST MOVE SOCKET TO 652 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
@@ -676,24 +685,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
676 inet_csk_reqsk_queue_unlink(sk, req, prev); 685 inet_csk_reqsk_queue_unlink(sk, req, prev);
677 inet_csk_reqsk_queue_removed(sk, req); 686 inet_csk_reqsk_queue_removed(sk, req);
678 687
679 if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && 688 inet_csk_reqsk_queue_add(sk, req, child);
680 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
681
682 /* the accept queue handling is done in est recv slow
683 * path so lets make sure to start there
684 */
685 tcp_sk(child)->pred_flags = 0;
686 sock_hold(sk);
687 sock_hold(child);
688 tcp_sk(child)->defer_tcp_accept.listen_sk = sk;
689 tcp_sk(child)->defer_tcp_accept.request = req;
690
691 inet_csk_reset_keepalive_timer(child,
692 inet_csk(sk)->icsk_accept_queue.rskq_defer_accept * HZ);
693 } else {
694 inet_csk_reqsk_queue_add(sk, req, child);
695 }
696
697 return child; 689 return child;
698 690
699 listen_overflow: 691 listen_overflow: