aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--net/ipv4/tcp_input.c26
1 files changed, 25 insertions, 1 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3ef7e9e07964..055721d8495e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1289,7 +1289,31 @@ void tcp_enter_frto(struct sock *sk)
1289 	    ((icsk->icsk_ca_state == TCP_CA_Loss || tp->frto_counter) &&
1290 	     !icsk->icsk_retransmits)) {
1291 		tp->prior_ssthresh = tcp_current_ssthresh(sk);
-1292 		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
+1292 		/* Our state is too optimistic in ssthresh() call because cwnd
1293 * is not reduced until tcp_enter_frto_loss() when previous FRTO
1294 * recovery has not yet completed. Pattern would be this: RTO,
1295 * Cumulative ACK, RTO (2xRTO for the same segment does not end
1296 * up here twice).
1297 * RFC4138 should be more specific on what to do, even though
1298 * RTO is quite unlikely to occur after the first Cumulative ACK
1299 * due to back-off and complexity of triggering events ...
1300 */
1301 if (tp->frto_counter) {
1302 u32 stored_cwnd;
1303 stored_cwnd = tp->snd_cwnd;
1304 tp->snd_cwnd = 2;
1305 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
1306 tp->snd_cwnd = stored_cwnd;
1307 } else {
1308 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
1309 }
1310 /* ... in theory, cong.control module could do "any tricks" in
1311 * ssthresh(), which means that ca_state, lost bits and lost_out
1312 * counter would have to be faked before the call occurs. We
1313 * consider that too expensive, unlikely and hacky, so modules
1314 * using these in ssthresh() must deal these incompatibility
1315 * issues if they receives CA_EVENT_FRTO and frto_counter != 0
1316 */
1317 		tcp_ca_event(sk, CA_EVENT_FRTO);
1318 	}
1319 