author    Patrick McManus <mcmanus@ducksong.com>    2008-03-21 19:33:01 -0400
committer David S. Miller <davem@davemloft.net>     2008-03-21 19:33:01 -0400
commit    ec3c0982a2dd1e671bad8e9d26c28dcba0039d87 (patch)
tree      11a3cd7c530e4225a4c3d4c3f3cc54eb7d2e0e4f /net/ipv4/tcp_input.c
parent    e4c78840284f3f51b1896cf3936d60a6033c4d2c (diff)
[TCP]: TCP_DEFER_ACCEPT updates - process as established
Change the TCP_DEFER_ACCEPT implementation so that it transitions a connection to ESTABLISHED after the handshake is complete instead of leaving it in SYN-RECV until some data arrives. Place the connection in the accept queue when the first data packet arrives from the slow path.

Benefits:
- an established connection is now reset if it never makes it to the accept queue
- the diagnostic state of ESTABLISHED matches the packet traces showing a completed handshake
- TCP_DEFER_ACCEPT timeouts are expressed in seconds and can now be enforced with reasonable accuracy instead of rounding up to the next exponential back-off of the syn-ack retry

Signed-off-by: Patrick McManus <mcmanus@ducksong.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
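For context, a minimal userspace sketch of how a server would enable this option on its listening socket; this is not part of the patch, and the listen_deferred() helper, the port, and the timeout value are illustrative only:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <unistd.h>

/* Create a listening socket that defers accept() until the client
 * actually sends data, or until defer_secs seconds have elapsed. */
static int listen_deferred(unsigned short port, int defer_secs)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_addr   = { .s_addr = htonl(INADDR_ANY) },
		.sin_port   = htons(port),
	};
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;

	/* The option value is a timeout in seconds, per the commit message. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
		       &defer_secs, sizeof(defer_secs)) < 0 ||
	    bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 128) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* e.g. listen_deferred(8080, 5) */
}

With the option set, accept() only returns the connection once the client has sent data (or the deferral period runs out); with this patch the kernel tracks that wait from the ESTABLISHED state rather than SYN-RECV.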
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c | 46
1 file changed, 46 insertions, 0 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9cf446427cc2..6e46b4c0f28c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4451,6 +4451,49 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
 	}
 }
 
+static int tcp_defer_accept_check(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (tp->defer_tcp_accept.request) {
+		int queued_data = tp->rcv_nxt - tp->copied_seq;
+		int hasfin = !skb_queue_empty(&sk->sk_receive_queue) ?
+			tcp_hdr((struct sk_buff *)
+				sk->sk_receive_queue.prev)->fin : 0;
+
+		if (queued_data && hasfin)
+			queued_data--;
+
+		if (queued_data &&
+		    tp->defer_tcp_accept.listen_sk->sk_state == TCP_LISTEN) {
+			if (sock_flag(sk, SOCK_KEEPOPEN)) {
+				inet_csk_reset_keepalive_timer(sk,
+						keepalive_time_when(tp));
+			} else {
+				inet_csk_delete_keepalive_timer(sk);
+			}
+
+			inet_csk_reqsk_queue_add(
+					tp->defer_tcp_accept.listen_sk,
+					tp->defer_tcp_accept.request,
+					sk);
+
+			tp->defer_tcp_accept.listen_sk->sk_data_ready(
+					tp->defer_tcp_accept.listen_sk, 0);
+
+			sock_put(tp->defer_tcp_accept.listen_sk);
+			sock_put(sk);
+			tp->defer_tcp_accept.listen_sk = NULL;
+			tp->defer_tcp_accept.request = NULL;
+		} else if (hasfin ||
+			   tp->defer_tcp_accept.listen_sk->sk_state != TCP_LISTEN) {
+			tcp_reset(sk);
+			return -1;
+		}
+	}
+	return 0;
+}
+
 static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -4811,6 +4854,9 @@ step5:
 
 	tcp_data_snd_check(sk);
 	tcp_ack_snd_check(sk);
+
+	if (tcp_defer_accept_check(sk))
+		return -1;
 	return 0;
 
 csum_error: