-rw-r--r--	net/dccp/ccids/ccid2.c	26
1 files changed, 23 insertions, 3 deletions
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index d9dbd9ffe8c..67164bb6ae4 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -101,6 +101,24 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
 				   min_t(u32, val, DCCPF_ACK_RATIO_MAX));
 }
 
+static void ccid2_check_l_ack_ratio(struct sock *sk)
+{
+	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+
+	/*
+	 * After a loss, idle period, application limited period, or RTO we
+	 * need to check that the ack ratio is still less than the congestion
+	 * window. Otherwise, we will send an entire congestion window of
+	 * packets and get no response because we haven't sent ack ratio
+	 * packets yet.
+	 * If the ack ratio does need to be reduced, we reduce it to half of
+	 * the congestion window (or 1 if that's zero) instead of to the
+	 * congestion window. This prevents problems if one ack is lost.
+	 */
+	if (dccp_feat_nn_get(sk, DCCPF_ACK_RATIO) > hc->tx_cwnd)
+		ccid2_change_l_ack_ratio(sk, hc->tx_cwnd/2 ? : 1U);
+}
+
 static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
 {
 	dccp_feat_signal_nn_change(sk, DCCPF_SEQUENCE_WINDOW,
@@ -187,6 +205,8 @@ static void ccid2_cwnd_application_limited(struct sock *sk, const u32 now)
 	}
 	hc->tx_cwnd_used  = 0;
 	hc->tx_cwnd_stamp = now;
+
+	ccid2_check_l_ack_ratio(sk);
 }
 
 /* This borrows the code of tcp_cwnd_restart() */
@@ -205,6 +225,8 @@ static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
 
 	hc->tx_cwnd_stamp = now;
 	hc->tx_cwnd_used  = 0;
+
+	ccid2_check_l_ack_ratio(sk);
 }
 
 static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
@@ -461,9 +483,7 @@ static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
 	hc->tx_cwnd      = hc->tx_cwnd / 2 ? : 1U;
 	hc->tx_ssthresh  = max(hc->tx_cwnd, 2U);
 
-	/* Avoid spurious timeouts resulting from Ack Ratio > cwnd */
-	if (dccp_sk(sk)->dccps_l_ack_ratio > hc->tx_cwnd)
-		ccid2_change_l_ack_ratio(sk, hc->tx_cwnd);
+	ccid2_check_l_ack_ratio(sk);
 }
 
 static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type,
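
Side note (not part of the patch): the clamp "hc->tx_cwnd/2 ? : 1U" uses the GNU C conditional with an omitted middle operand, where "x ? : y" evaluates to x when x is non-zero and to y otherwise. The sketch below is a minimal userspace illustration of that clamping behaviour only; the helper name and values are hypothetical, not part of the CCID-2 code.

#include <stdio.h>

/*
 * Hypothetical helper mirroring the patch's clamp: halve the congestion
 * window to get the new Ack Ratio target, but never return zero.
 * "x ? : y" (GNU C extension) is shorthand for "x ? x : y".
 */
static unsigned int ack_ratio_target(unsigned int cwnd)
{
	return cwnd / 2 ? : 1U;
}

int main(void)
{
	/* cwnd 8 -> 4; cwnd 1 -> 1/2 == 0, so the fallback 1 is used */
	printf("%u %u %u\n",
	       ack_ratio_target(8), ack_ratio_target(1), ack_ratio_target(0));
	return 0;
}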