author      Yuchung Cheng <ycheng@google.com>      2012-12-06 03:45:32 -0500
committer   David S. Miller <davem@davemloft.net>  2012-12-07 14:39:28 -0500
commit      93b174ad71b08e504c2cf6e8a58ecce778b77a40
tree        f9e7fd386880ae14d4625de9801a57c5bf5fa105 /net/ipv4
parent      1afa471706963643ceeda7cbbe9c605a1e883d53
tcp: bug fix Fast Open client retransmission
If SYN-ACK partially acks SYN-data, the client retransmits the
remaining data by tcp_retransmit_skb(). This increments loss recovery
state variables like tp->retrans_out while still in the Open state. If loss
recovery happens before the retransmission is acked, it triggers the WARN_ON
check in tcp_fastretrans_alert(). For example: the client sends
SYN-data, gets a SYN-ACK acking only the ISN, retransmits the data, sends
another 4 data packets, and gets 3 dupacks (a toy model of this failure
follows the sign-offs below).
Since the retransmission is not caused by a network drop, it should not
update the recovery state variables. Further, the server may return a
smaller MSS than the cached MSS used for the SYN-data, so the retransmission
needs a loop; otherwise some data will not be retransmitted until a timeout
or another loss recovery event. (A compact sketch of the resulting code
shape follows the diff below.)
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
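
A toy userspace model of the failure described above (illustrative only; the
struct and function names are stand-ins, not kernel code): before this patch
the Fast Open path reused the full retransmit routine, whose bookkeeping bumps
retrans_out even though the connection is still in the Open state, so the
Open-state sanity check performed by tcp_fastretrans_alert() on a later
dupack-driven pass trips, much like the kernel WARN_ON.

#include <assert.h>

/* Toy stand-ins for the relevant fields of struct tcp_sock. */
struct toy_tp {
	int in_open_state;  /* 1 while no loss recovery is in progress    */
	int retrans_out;    /* segments accounted as recovery retransmits */
};

/* Pre-patch behaviour: the generic retransmit path also updates the
 * loss-recovery bookkeeping, regardless of why it was called. */
static void old_style_retransmit(struct toy_tp *tp)
{
	tp->retrans_out++;
}

/* Rough analogue of the sanity check in tcp_fastretrans_alert(): while
 * the connection is in Open state, nothing should be counted as a
 * recovery retransmit. */
static void fastretrans_alert_check(struct toy_tp *tp)
{
	if (tp->in_open_state)
		assert(tp->retrans_out == 0);  /* trips like the WARN_ON */
}

int main(void)
{
	struct toy_tp tp = { .in_open_state = 1, .retrans_out = 0 };

	old_style_retransmit(&tp);     /* SYN-ACK acked only the ISN; resend */
	fastretrans_alert_check(&tp);  /* dupacks arrive first: check fires  */
	return 0;
}

Running this aborts at the assert, the userspace analogue of the kernel
warning; the patch below avoids that by keeping retrans_out untouched on the
Fast Open path.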
Diffstat (limited to 'net/ipv4')
-rw-r--r--   net/ipv4/tcp_input.c   |  6
-rw-r--r--   net/ipv4/tcp_output.c  | 15
2 files changed, 15 insertions, 6 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 609ff98aeb47..181fc8234a52 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5645,7 +5645,11 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
 	tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
 
 	if (data) { /* Retransmit unacked data in SYN */
-		tcp_retransmit_skb(sk, data);
+		tcp_for_write_queue_from(data, sk) {
+			if (data == tcp_send_head(sk) ||
+			    __tcp_retransmit_skb(sk, data))
+				break;
+		}
 		tcp_rearm_rto(sk);
 		return true;
 	}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 2798706cb063..948ac275b9b5 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2309,12 +2309,11 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
  * state updates are done by the caller. Returns non-zero if an
  * error occurred which prevented the send.
  */
-int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	unsigned int cur_mss;
-	int err;
 
 	/* Inconslusive MTU probe */
 	if (icsk->icsk_mtup.probe_size) {
@@ -2387,11 +2386,17 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
 		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
 						   GFP_ATOMIC);
-		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
-			     -ENOBUFS;
+		return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+			      -ENOBUFS;
 	} else {
-		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+		return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 	}
+}
+
+int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int err = __tcp_retransmit_skb(sk, skb);
 
 	if (err == 0) {
 		/* Update global TCP statistics. */
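
Pulling the two hunks together, here is a compact, self-contained sketch of
the resulting code shape, with stub types standing in for the kernel's (only
the function names referenced in the comments come from the patch; every
other name is hypothetical): __tcp_retransmit_skb() only transmits and
reports an error, tcp_retransmit_skb() wraps it with the statistics and
recovery-state bookkeeping, and the Fast Open SYN-ACK handler walks the
still-queued SYN-data with the inner helper, stopping at the send head or on
the first error.

#include <stdio.h>
#include <stddef.h>

/* Stub stand-ins for struct sock / struct sk_buff and the write queue. */
struct stub_skb {
	int seq;                     /* first byte covered by the segment */
	struct stub_skb *next;       /* next segment on the write queue   */
};

struct stub_sock {
	struct stub_skb *send_head;  /* first never-sent segment          */
	int retrans_out;             /* loss-recovery bookkeeping         */
};

/* ~ __tcp_retransmit_skb(): transmit only, return 0 or an error code,
 * with no recovery-state side effects. */
static int retransmit_skb_only(struct stub_sock *sk, struct stub_skb *skb)
{
	(void)sk;
	printf("resending segment at seq %d\n", skb->seq);
	return 0;
}

/* ~ tcp_retransmit_skb(): wrapper that keeps the statistics and
 * recovery-state updates for the regular loss-recovery callers. */
static int retransmit_skb(struct stub_sock *sk, struct stub_skb *skb)
{
	int err = retransmit_skb_only(sk, skb);

	if (err == 0)
		sk->retrans_out++;
	return err;
}

/* ~ the patched hunk in tcp_rcv_fastopen_synack(): loop over every
 * still-queued SYN-data segment with the inner helper, stopping at the
 * send head or on the first error, so a smaller server MSS still gets
 * every piece resent while retrans_out stays at zero in Open state. */
static void fastopen_retransmit_syn_data(struct stub_sock *sk,
					 struct stub_skb *data)
{
	struct stub_skb *skb;

	for (skb = data; skb != NULL; skb = skb->next) {
		if (skb == sk->send_head || retransmit_skb_only(sk, skb))
			break;
	}
}

int main(void)
{
	/* Two segments left unacked because the server's MSS was smaller
	 * than the cached MSS the SYN-data was built with. */
	struct stub_skb b = { .seq = 1001, .next = NULL };
	struct stub_skb a = { .seq = 1, .next = &b };
	struct stub_sock sk = { .send_head = NULL, .retrans_out = 0 };

	fastopen_retransmit_syn_data(&sk, &a);
	printf("retrans_out after Fast Open resend: %d\n", sk.retrans_out);

	retransmit_skb(&sk, &a);     /* normal loss-recovery path, for contrast */
	printf("retrans_out after recovery resend: %d\n", sk.retrans_out);
	return 0;
}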