diff options
| -rw-r--r-- | net/ipv4/tcp_input.c | 60 |
1 file changed, 31 insertions(+), 29 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a12b455928e5..88fa2d160685 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
| @@ -2315,6 +2315,35 @@ static inline bool tcp_packet_delayed(const struct tcp_sock *tp) | |||
| 2315 | 2315 | ||
| 2316 | /* Undo procedures. */ | 2316 | /* Undo procedures. */ |
| 2317 | 2317 | ||
| 2318 | /* We can clear retrans_stamp when there are no retransmissions in the | ||
| 2319 | * window. It would seem that it is trivially available for us in | ||
| 2320 | * tp->retrans_out, however, that kind of assumptions doesn't consider | ||
| 2321 | * what will happen if errors occur when sending retransmission for the | ||
| 2322 | * second time. ...It could the that such segment has only | ||
| 2323 | * TCPCB_EVER_RETRANS set at the present time. It seems that checking | ||
| 2324 | * the head skb is enough except for some reneging corner cases that | ||
| 2325 | * are not worth the effort. | ||
| 2326 | * | ||
| 2327 | * Main reason for all this complexity is the fact that connection dying | ||
| 2328 | * time now depends on the validity of the retrans_stamp, in particular, | ||
| 2329 | * that successive retransmissions of a segment must not advance | ||
| 2330 | * retrans_stamp under any conditions. | ||
| 2331 | */ | ||
| 2332 | static bool tcp_any_retrans_done(const struct sock *sk) | ||
| 2333 | { | ||
| 2334 | const struct tcp_sock *tp = tcp_sk(sk); | ||
| 2335 | struct sk_buff *skb; | ||
| 2336 | |||
| 2337 | if (tp->retrans_out) | ||
| 2338 | return true; | ||
| 2339 | |||
| 2340 | skb = tcp_write_queue_head(sk); | ||
| 2341 | if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) | ||
| 2342 | return true; | ||
| 2343 | |||
| 2344 | return false; | ||
| 2345 | } | ||
| 2346 | |||
| 2318 | #if FASTRETRANS_DEBUG > 1 | 2347 | #if FASTRETRANS_DEBUG > 1 |
| 2319 | static void DBGUNDO(struct sock *sk, const char *msg) | 2348 | static void DBGUNDO(struct sock *sk, const char *msg) |
| 2320 | { | 2349 | { |
| @@ -2410,6 +2439,8 @@ static bool tcp_try_undo_recovery(struct sock *sk) | |||
| 2410 | * is ACKed. For Reno it is MUST to prevent false | 2439 | * is ACKed. For Reno it is MUST to prevent false |
| 2411 | * fast retransmits (RFC2582). SACK TCP is safe. */ | 2440 | * fast retransmits (RFC2582). SACK TCP is safe. */ |
| 2412 | tcp_moderate_cwnd(tp); | 2441 | tcp_moderate_cwnd(tp); |
| 2442 | if (!tcp_any_retrans_done(sk)) | ||
| 2443 | tp->retrans_stamp = 0; | ||
| 2413 | return true; | 2444 | return true; |
| 2414 | } | 2445 | } |
| 2415 | tcp_set_ca_state(sk, TCP_CA_Open); | 2446 | tcp_set_ca_state(sk, TCP_CA_Open); |
| @@ -2430,35 +2461,6 @@ static bool tcp_try_undo_dsack(struct sock *sk) | |||
| 2430 | return false; | 2461 | return false; |
| 2431 | } | 2462 | } |
| 2432 | 2463 | ||
| 2433 | /* We can clear retrans_stamp when there are no retransmissions in the | ||
| 2434 | * window. It would seem that it is trivially available for us in | ||
| 2435 | * tp->retrans_out, however, that kind of assumptions doesn't consider | ||
| 2436 | * what will happen if errors occur when sending retransmission for the | ||
| 2437 | * second time. ...It could the that such segment has only | ||
| 2438 | * TCPCB_EVER_RETRANS set at the present time. It seems that checking | ||
| 2439 | * the head skb is enough except for some reneging corner cases that | ||
| 2440 | * are not worth the effort. | ||
| 2441 | * | ||
| 2442 | * Main reason for all this complexity is the fact that connection dying | ||
| 2443 | * time now depends on the validity of the retrans_stamp, in particular, | ||
| 2444 | * that successive retransmissions of a segment must not advance | ||
| 2445 | * retrans_stamp under any conditions. | ||
| 2446 | */ | ||
| 2447 | static bool tcp_any_retrans_done(const struct sock *sk) | ||
| 2448 | { | ||
| 2449 | const struct tcp_sock *tp = tcp_sk(sk); | ||
| 2450 | struct sk_buff *skb; | ||
| 2451 | |||
| 2452 | if (tp->retrans_out) | ||
| 2453 | return true; | ||
| 2454 | |||
| 2455 | skb = tcp_write_queue_head(sk); | ||
| 2456 | if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) | ||
| 2457 | return true; | ||
| 2458 | |||
| 2459 | return false; | ||
| 2460 | } | ||
| 2461 | |||
| 2462 | /* Undo during loss recovery after partial ACK or using F-RTO. */ | 2464 | /* Undo during loss recovery after partial ACK or using F-RTO. */ |
| 2463 | static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) | 2465 | static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) |
| 2464 | { | 2466 | { |
