diff options
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r-- | net/ipv4/tcp_input.c | 25 |
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b9ba8d55d8b8..53de1424c13c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -107,6 +107,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
107 | #define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */ | 107 | #define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */ |
108 | #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ | 108 | #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ |
109 | #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ | 109 | #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ |
110 | #define FLAG_SET_XMIT_TIMER 0x1000 /* Set TLP or RTO timer */ | ||
110 | #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ | 111 | #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ |
111 | #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ | 112 | #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ |
112 | #define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */ | 113 | #define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */ |
@@ -3016,6 +3017,13 @@ void tcp_rearm_rto(struct sock *sk)
3016 | } | 3017 | } |
3017 | } | 3018 | } |
3018 | 3019 | ||
3020 | /* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */ | ||
3021 | static void tcp_set_xmit_timer(struct sock *sk) | ||
3022 | { | ||
3023 | if (!tcp_schedule_loss_probe(sk)) | ||
3024 | tcp_rearm_rto(sk); | ||
3025 | } | ||
3026 | |||
3019 | /* If we get here, the whole TSO packet has not been acked. */ | 3027 | /* If we get here, the whole TSO packet has not been acked. */ |
3020 | static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) | 3028 | static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) |
3021 | { | 3029 | { |
@@ -3177,7 +3185,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3177 | ca_rtt_us, sack->rate); | 3185 | ca_rtt_us, sack->rate); |
3178 | 3186 | ||
3179 | if (flag & FLAG_ACKED) { | 3187 | if (flag & FLAG_ACKED) { |
3180 | tcp_rearm_rto(sk); | 3188 | flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */ |
3181 | if (unlikely(icsk->icsk_mtup.probe_size && | 3189 | if (unlikely(icsk->icsk_mtup.probe_size && |
3182 | !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { | 3190 | !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { |
3183 | tcp_mtup_probe_success(sk); | 3191 | tcp_mtup_probe_success(sk); |
@@ -3205,7 +3213,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3205 | * after when the head was last (re)transmitted. Otherwise the | 3213 | * after when the head was last (re)transmitted. Otherwise the |
3206 | * timeout may continue to extend in loss recovery. | 3214 | * timeout may continue to extend in loss recovery. |
3207 | */ | 3215 | */ |
3208 | tcp_rearm_rto(sk); | 3216 | flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */ |
3209 | } | 3217 | } |
3210 | 3218 | ||
3211 | if (icsk->icsk_ca_ops->pkts_acked) { | 3219 | if (icsk->icsk_ca_ops->pkts_acked) { |
@@ -3577,9 +3585,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3577 | if (after(ack, tp->snd_nxt)) | 3585 | if (after(ack, tp->snd_nxt)) |
3578 | goto invalid_ack; | 3586 | goto invalid_ack; |
3579 | 3587 | ||
3580 | if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) | ||
3581 | tcp_rearm_rto(sk); | ||
3582 | |||
3583 | if (after(ack, prior_snd_una)) { | 3588 | if (after(ack, prior_snd_una)) { |
3584 | flag |= FLAG_SND_UNA_ADVANCED; | 3589 | flag |= FLAG_SND_UNA_ADVANCED; |
3585 | icsk->icsk_retransmits = 0; | 3590 | icsk->icsk_retransmits = 0; |
@@ -3644,18 +3649,20 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3644 | flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked, | 3649 | flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked, |
3645 | &sack_state); | 3650 | &sack_state); |
3646 | 3651 | ||
3652 | if (tp->tlp_high_seq) | ||
3653 | tcp_process_tlp_ack(sk, ack, flag); | ||
3654 | /* If needed, reset TLP/RTO timer; RACK may later override this. */ | ||
3655 | if (flag & FLAG_SET_XMIT_TIMER) | ||
3656 | tcp_set_xmit_timer(sk); | ||
3657 | |||
3647 | if (tcp_ack_is_dubious(sk, flag)) { | 3658 | if (tcp_ack_is_dubious(sk, flag)) { |
3648 | is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); | 3659 | is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); |
3649 | tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit); | 3660 | tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit); |
3650 | } | 3661 | } |
3651 | if (tp->tlp_high_seq) | ||
3652 | tcp_process_tlp_ack(sk, ack, flag); | ||
3653 | 3662 | ||
3654 | if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) | 3663 | if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) |
3655 | sk_dst_confirm(sk); | 3664 | sk_dst_confirm(sk); |
3656 | 3665 | ||
3657 | if (icsk->icsk_pending == ICSK_TIME_RETRANS) | ||
3658 | tcp_schedule_loss_probe(sk); | ||
3659 | delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */ | 3666 | delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */ |
3660 | lost = tp->lost - lost; /* freshly marked lost */ | 3667 | lost = tp->lost - lost; /* freshly marked lost */ |
3661 | tcp_rate_gen(sk, delivered, lost, sack_state.rate); | 3668 | tcp_rate_gen(sk, delivered, lost, sack_state.rate); |