 net/ipv4/tcp_input.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 561e5d404988..194e880af51e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1252,6 +1252,10 @@ int tcp_use_frto(const struct sock *sk)
 /* RTO occurred, but do not yet enter Loss state. Instead, defer RTO
  * recovery a bit and use heuristics in tcp_process_frto() to detect if
  * the RTO was spurious.
+ *
+ * Do like tcp_enter_loss() would; when RTO expires the second time it
+ * does:
+ *  "Reduce ssthresh if it has not yet been made inside this window."
  */
 void tcp_enter_frto(struct sock *sk)
 {
@@ -1259,11 +1263,10 @@ void tcp_enter_frto(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 
-	tp->frto_counter = 1;
-
-	if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
+	if ((!tp->frto_counter && icsk->icsk_ca_state <= TCP_CA_Disorder) ||
 	    tp->snd_una == tp->high_seq ||
-	    (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
+	    ((icsk->icsk_ca_state == TCP_CA_Loss || tp->frto_counter) &&
+	     !icsk->icsk_retransmits)) {
 		tp->prior_ssthresh = tcp_current_ssthresh(sk);
 		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
 		tcp_ca_event(sk, CA_EVENT_FRTO);
@@ -1285,6 +1288,7 @@ void tcp_enter_frto(struct sock *sk)
 
 	tcp_set_ca_state(sk, TCP_CA_Open);
 	tp->frto_highmark = tp->snd_nxt;
+	tp->frto_counter = 1;
 }
 
 /* Enter Loss state after F-RTO was applied. Dupack arrived after RTO,
@@ -2513,12 +2517,16 @@ static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
  * to prove that the RTO is indeed spurious. It transfers the control
  * from F-RTO to the conventional RTO recovery
  */
-static void tcp_process_frto(struct sock *sk, u32 prior_snd_una)
+static void tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	tcp_sync_left_out(tp);
 
+	/* Duplicate the behavior from Loss state (fastretrans_alert) */
+	if (flag&FLAG_DATA_ACKED)
+		inet_csk(sk)->icsk_retransmits = 0;
+
 	if (tp->snd_una == prior_snd_una ||
 	    !before(tp->snd_una, tp->frto_highmark)) {
 		tcp_enter_frto_loss(sk);
@@ -2607,7 +2615,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	flag |= tcp_clean_rtx_queue(sk, &seq_rtt);
 
 	if (tp->frto_counter)
-		tcp_process_frto(sk, prior_snd_una, flag);
+		tcp_process_frto(sk, prior_snd_una, flag);
 
 	if (tcp_ack_is_dubious(sk, flag)) {
 		/* Advance CWND, if state allows this. */
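
For readers skimming the diff: the key behavioral change is that tp->frto_counter is now set only after the ssthresh-reduction check, so an RTO that fires while F-RTO is already in progress no longer lowers ssthresh a second time within the same window. Below is a minimal standalone sketch of that decision, not kernel code; struct conn, the ca_state enum, and the reduce_ssthresh_* helpers are simplified stand-ins introduced here only for illustration.

#include <stdbool.h>
#include <stdio.h>

enum ca_state { CA_OPEN, CA_DISORDER, CA_CWR, CA_RECOVERY, CA_LOSS };

struct conn {
	enum ca_state ca_state;
	unsigned int frto_counter;	/* nonzero while F-RTO is in progress */
	unsigned int retransmits;	/* RTO retransmissions not yet ACKed */
	bool una_at_high_seq;		/* models tp->snd_una == tp->high_seq */
};

static bool reduce_ssthresh_old(const struct conn *c)
{
	/* Old order: frto_counter was set to 1 before this check ran,
	 * so the first clause fires whenever the state is Open/Disorder. */
	return c->ca_state <= CA_DISORDER ||
	       c->una_at_high_seq ||
	       (c->ca_state == CA_LOSS && !c->retransmits);
}

static bool reduce_ssthresh_new(const struct conn *c)
{
	/* New order: frto_counter still reflects the previous F-RTO round,
	 * and an ongoing F-RTO is treated like the Loss-state branch. */
	return (!c->frto_counter && c->ca_state <= CA_DISORDER) ||
	       c->una_at_high_seq ||
	       ((c->ca_state == CA_LOSS || c->frto_counter) &&
		!c->retransmits);
}

int main(void)
{
	/* Second RTO: F-RTO already active, retransmission still unacked. */
	struct conn c = { CA_OPEN, 1, 1, false };

	printf("old predicate: %d, new predicate: %d\n",
	       reduce_ssthresh_old(&c), reduce_ssthresh_new(&c));
	return 0;
}

With frto_counter set and an un-ACKed retransmission outstanding, the old predicate returns true and would reduce ssthresh again, while the new one returns false until FLAG_DATA_ACKED resets icsk_retransmits in tcp_process_frto(), matching the "Reduce ssthresh if it has not yet been made inside this window" rule quoted in the new comment.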