author     Patrick McManus <mcmanus@ducksong.com>   2008-03-21 19:33:01 -0400
committer  David S. Miller <davem@davemloft.net>    2008-03-21 19:33:01 -0400
commit     ec3c0982a2dd1e671bad8e9d26c28dcba0039d87
tree       11a3cd7c530e4225a4c3d4c3f3cc54eb7d2e0e4f /net/ipv4/tcp_minisocks.c
parent     e4c78840284f3f51b1896cf3936d60a6033c4d2c
[TCP]: TCP_DEFER_ACCEPT updates - process as established
Change the TCP_DEFER_ACCEPT implementation so that it transitions a
connection to ESTABLISHED after the handshake is complete, instead of
leaving it in SYN-RECV until some data arrives. The connection is placed
in the accept queue when the first data packet arrives, from the slow path.
Benefits:
- an established connection is now reset if it never makes it
  to the accept queue
- the diagnostic state ESTABLISHED matches the packet traces
  showing a completed handshake
- TCP_DEFER_ACCEPT timeouts are expressed in seconds and can now be
  enforced with reasonable accuracy, instead of being rounded up to the
  next exponential back-off of the SYN-ACK retry
Signed-off-by: Patrick McManus <mcmanus@ducksong.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
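
For context on how the option is used from user space (not part of this patch): a
minimal sketch of a listener that sets TCP_DEFER_ACCEPT, assuming a Linux host where
<netinet/tcp.h> provides the option; the port number and timeout are arbitrary and
error handling for bind()/listen() is trimmed for brevity.

/* Illustrative only: set TCP_DEFER_ACCEPT (in seconds) on a listening socket. */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        struct sockaddr_in addr;
        int secs = 5;   /* with this patch, enforced with roughly second accuracy */

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(8080);

        if (setsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, sizeof(secs)) < 0)
                perror("setsockopt(TCP_DEFER_ACCEPT)");

        bind(fd, (struct sockaddr *)&addr, sizeof(addr));
        listen(fd, 128);

        /* accept() still only returns a connection once the peer has sent data;
         * a peer that completes the handshake but stays silent now shows up as
         * ESTABLISHED in diagnostics and is reset after 'secs' seconds, rather
         * than lingering in SYN-RECV. */
        int c = accept(fd, NULL, NULL);
        if (c >= 0)
                close(c);
        close(fd);
        return 0;
}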
Diffstat (limited to 'net/ipv4/tcp_minisocks.c')
-rw-r--r--  net/ipv4/tcp_minisocks.c  |  32
1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 8245247a6ceb..019c8c16e5cc 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -571,10 +571,8 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
    does sequence test, SYN is truncated, and thus we consider
    it a bare ACK.
 
-   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
-   bare ACK.  Otherwise, we create an established connection.  Both
-   ends (listening sockets) accept the new incoming connection and try
-   to talk to each other. 8-)
+   Both ends (listening sockets) accept the new incoming
+   connection and try to talk to each other. 8-)
 
    Note: This case is both harmless, and rare.  Possibility is about the
    same as us discovering intelligent life on another plant tomorrow.
@@ -642,13 +640,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
         if (!(flg & TCP_FLAG_ACK))
                 return NULL;
 
-        /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
-        if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
-            TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
-                inet_rsk(req)->acked = 1;
-                return NULL;
-        }
-
         /* OK, ACK is valid, create big socket and
          * feed this segment to it. It will repeat all
          * the tests. THIS SEGMENT MUST MOVE SOCKET TO
@@ -687,7 +678,24 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
         inet_csk_reqsk_queue_unlink(sk, req, prev);
         inet_csk_reqsk_queue_removed(sk, req);
 
-        inet_csk_reqsk_queue_add(sk, req, child);
+        if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
+            TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
+
+                /* the accept queue handling is done is est recv slow
+                 * path so lets make sure to start there
+                 */
+                tcp_sk(child)->pred_flags = 0;
+                sock_hold(sk);
+                sock_hold(child);
+                tcp_sk(child)->defer_tcp_accept.listen_sk = sk;
+                tcp_sk(child)->defer_tcp_accept.request = req;
+
+                inet_csk_reset_keepalive_timer(child,
+                        inet_csk(sk)->icsk_accept_queue.rskq_defer_accept * HZ);
+        } else {
+                inet_csk_reqsk_queue_add(sk, req, child);
+        }
+
         return child;
 
 listen_overflow:
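
A note on the new branch above: end_seq == rcv_isn + 1 identifies the bare
third-handshake ACK (the only sequence number consumed so far is the peer's SYN),
pred_flags = 0 pushes the child into the established-state slow receive path where
the deferred accept-queue insertion is handled, and the child's keepalive timer is
reused to enforce the defer-accept timeout in seconds. Below is a standalone model
of that decision, using hypothetical names (model_req, model_skb, bare_ack) rather
than kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct model_req { uint32_t rcv_isn; };   /* stand-in for tcp_rsk(req) */
struct model_skb { uint32_t end_seq; };   /* stand-in for TCP_SKB_CB(skb) */

/* A "bare ACK" consumes only the peer's SYN: end_seq == rcv_isn + 1. */
static bool bare_ack(const struct model_skb *skb, const struct model_req *req)
{
        return skb->end_seq == req->rcv_isn + 1;
}

int main(void)
{
        struct model_req req = { .rcv_isn = 1000 };
        struct model_skb ack_only = { .end_seq = 1001 };  /* no payload */
        struct model_skb ack_data = { .end_seq = 1101 };  /* 100 bytes of data */
        unsigned int defer_secs = 5;                      /* rskq_defer_accept */

        if (bare_ack(&ack_only, &req))
                printf("bare ACK: child goes ESTABLISHED but stays off the accept "
                       "queue; defer timer armed for %u s\n", defer_secs);

        if (!bare_ack(&ack_data, &req))
                printf("ACK carrying data: child is added to the accept queue "
                       "immediately\n");
        return 0;
}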