author	Eric Dumazet <edumazet@google.com>	2012-10-27 19:16:46 -0400
committer	David S. Miller <davem@davemloft.net>	2012-11-03 14:45:00 -0400
commit	e6c022a4fa2d2d9ca9d0a7ac3b05ad988f39fc30 (patch)
tree	6dcd4e2dfc7895f33329fb16653a93f7d52f4bd7 /net/ipv4/tcp_minisocks.c
parent	25b1e67921f448cdddf70042ba233ffe43d33a9c (diff)
tcp: better retrans tracking for defer-accept
For passive TCP connections using the TCP_DEFER_ACCEPT facility,
we incorrectly increment req->retrans each time the timeout
triggers, even though no SYNACK is sent.
SYNACKs are not sent for TCP_DEFER_ACCEPT requests that were already
established (for which we received the ACK from the client). Only the
last SYNACK is sent, so that we can receive an ACK from the client
again and move the req into the accept queue. We plan to change this
later to avoid the useless retransmit (and the potential problem of
this SYNACK being lost).
TCP_INFO therefore gives wrong information to the user, claiming
imaginary retransmits.
Decouple the req->retrans field into two independent fields:

num_retrans : number of retransmits
num_timeout : number of timeouts

num_timeout is the counter that is incremented at each timeout,
regardless of whether a SYNACK is actually sent, and is used to
compute the exponential timeout.
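As a rough sketch of the split (the request_sock change itself is
outside this file's diff, so field widths and the backoff cap name
below are assumptions):

	struct request_sock {
		...
		u8	num_retrans;	/* SYNACKs actually retransmitted */
		u8	num_timeout:7;	/* timer expirations, sent or not */
		...
	};

	/* At each SYNACK timer expiration, whether or not we transmit: */
	req->num_timeout++;
	timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);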
Introduce an inet_rtx_syn_ack() helper that increments num_retrans
only if ->rtx_syn_ack() succeeded.
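Presumably the helper amounts to something like this sketch (its body
is not part of this file's diff; the signature is inferred from the
->rtx_syn_ack(sk, req, NULL) call replaced below):

	int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
	{
		int err = req->rsk_ops->rtx_syn_ack(parent, req, NULL);

		if (!err)
			req->num_retrans++;	/* count only SYNACKs actually sent */
		return err;
	}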
Use inet_rtx_syn_ack() from tcp_check_req() so that num_retrans is
incremented when we re-send a SYNACK in answer to a (retransmitted)
SYN; prior to this patch, these retransmits were not counted.
Change tcp_v[46]_rtx_synack() to increment TCP_MIB_RETRANSSEGS only
if a SYNACK packet was successfully queued.
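For the IPv4 side this plausibly looks like the sketch below (tcp_v6
is analogous; the send helper and its argument list are assumptions,
as neither file is in this diffstat):

	static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
				     struct request_values *rvp)
	{
		int res = tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);

		if (!res)	/* bump the MIB only if the SYNACK was queued */
			TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
		return res;
	}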
Reported-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Julian Anastasov <ja@ssi.bg>
Cc: Vijay Subramanian <subramanian.vijay@gmail.com>
Cc: Elliott Hughes <enh@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_minisocks.c')
-rw-r--r--	net/ipv4/tcp_minisocks.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 27536ba16c9d..0404b3f4c959 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -552,7 +552,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 			 * it can be estimated (approximately)
 			 * from another data.
 			 */
-			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
+			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
 			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
 		}
 	}
@@ -581,7 +581,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 		 * Note that even if there is new data in the SYN packet
 		 * they will be thrown away too.
 		 */
-		req->rsk_ops->rtx_syn_ack(sk, req, NULL);
+		inet_rtx_syn_ack(sk, req);
 		return NULL;
 	}
 
@@ -695,7 +695,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 	/* Got ACK for our SYNACK, so update baseline for SYNACK RTT sample. */
 	if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
 		tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
-	else if (req->retrans) /* don't take RTT sample if retrans && ~TS */
+	else if (req->num_retrans) /* don't take RTT sample if retrans && ~TS */
 		tcp_rsk(req)->snt_synack = 0;
 
 	/* For Fast Open no more processing is needed (sk is the
@@ -705,7 +705,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 		return sk;
 
 	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
-	if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
+	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
 	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
 		inet_rsk(req)->acked = 1;
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);